diff --git a/plugins/action/k8s_info.py b/plugins/action/k8s_info.py
index bb4cf8c27d..9fa71d6795 100644
--- a/plugins/action/k8s_info.py
+++ b/plugins/action/k8s_info.py
@@ -3,7 +3,8 @@
# Copyright (c) 2020, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
import copy
@@ -13,7 +14,12 @@
from ansible.config.manager import ensure_type
-from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.errors import (
+ AnsibleError,
+ AnsibleFileNotFound,
+ AnsibleAction,
+ AnsibleActionFail,
+)
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import string_types, iteritems
from ansible.module_utils._text import to_text, to_bytes, to_native
@@ -28,19 +34,19 @@ class ActionModule(ActionBase):
def _ensure_invocation(self, result):
# NOTE: adding invocation arguments here needs to be kept in sync with
# any no_log specified in the argument_spec in the module.
- if 'invocation' not in result:
+ if "invocation" not in result:
if self._play_context.no_log:
- result['invocation'] = "CENSORED: no_log is set"
+ result["invocation"] = "CENSORED: no_log is set"
else:
- result['invocation'] = self._task.args.copy()
- result['invocation']['module_args'] = self._task.args.copy()
+ result["invocation"] = self._task.args.copy()
+ result["invocation"]["module_args"] = self._task.args.copy()
return result
@contextmanager
def get_template_data(self, template_path):
try:
- source = self._find_needle('templates', template_path)
+ source = self._find_needle("templates", template_path)
except AnsibleError as e:
raise AnsibleActionFail(to_text(e))
@@ -48,15 +54,19 @@ def get_template_data(self, template_path):
try:
tmp_source = self._loader.get_real_file(source)
except AnsibleFileNotFound as e:
- raise AnsibleActionFail("could not find template=%s, %s" % (source, to_text(e)))
- b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')
+ raise AnsibleActionFail(
+ "could not find template=%s, %s" % (source, to_text(e))
+ )
+ b_tmp_source = to_bytes(tmp_source, errors="surrogate_or_strict")
try:
- with open(b_tmp_source, 'rb') as f:
+ with open(b_tmp_source, "rb") as f:
try:
- template_data = to_text(f.read(), errors='surrogate_or_strict')
+ template_data = to_text(f.read(), errors="surrogate_or_strict")
except UnicodeError:
- raise AnsibleActionFail("Template source files must be utf-8 encoded")
+ raise AnsibleActionFail(
+ "Template source files must be utf-8 encoded"
+ )
yield template_data
except AnsibleAction:
raise
@@ -73,62 +83,99 @@ def get_template_args(self, template):
"block_start_string": None,
"block_end_string": None,
"trim_blocks": True,
- "lstrip_blocks": False
+ "lstrip_blocks": False,
}
if isinstance(template, string_types):
# treat this as raw_params
- template_param['path'] = template
+ template_param["path"] = template
elif isinstance(template, dict):
template_args = template
- template_path = template_args.get('path', None)
+ template_path = template_args.get("path", None)
if not template_path:
raise AnsibleActionFail("Please specify path for template.")
- template_param['path'] = template_path
+ template_param["path"] = template_path
# Options type validation strings
- for s_type in ('newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
- 'block_end_string'):
+ for s_type in (
+ "newline_sequence",
+ "variable_start_string",
+ "variable_end_string",
+ "block_start_string",
+ "block_end_string",
+ ):
if s_type in template_args:
- value = ensure_type(template_args[s_type], 'string')
+ value = ensure_type(template_args[s_type], "string")
if value is not None and not isinstance(value, string_types):
- raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
+ raise AnsibleActionFail(
+ "%s is expected to be a string, but got %s instead"
+ % (s_type, type(value))
+ )
try:
- template_param.update({
- "trim_blocks": boolean(template_args.get('trim_blocks', True), strict=False),
- "lstrip_blocks": boolean(template_args.get('lstrip_blocks', False), strict=False)
- })
+ template_param.update(
+ {
+ "trim_blocks": boolean(
+ template_args.get("trim_blocks", True), strict=False
+ ),
+ "lstrip_blocks": boolean(
+ template_args.get("lstrip_blocks", False), strict=False
+ ),
+ }
+ )
except TypeError as e:
raise AnsibleActionFail(to_native(e))
- template_param.update({
- "newline_sequence": template_args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE),
- "variable_start_string": template_args.get('variable_start_string', None),
- "variable_end_string": template_args.get('variable_end_string', None),
- "block_start_string": template_args.get('block_start_string', None),
- "block_end_string": template_args.get('block_end_string', None)
- })
+ template_param.update(
+ {
+ "newline_sequence": template_args.get(
+ "newline_sequence", self.DEFAULT_NEWLINE_SEQUENCE
+ ),
+ "variable_start_string": template_args.get(
+ "variable_start_string", None
+ ),
+ "variable_end_string": template_args.get(
+ "variable_end_string", None
+ ),
+ "block_start_string": template_args.get("block_start_string", None),
+ "block_end_string": template_args.get("block_end_string", None),
+ }
+ )
else:
- raise AnsibleActionFail("Error while reading template file - "
- "a string or dict for template expected, but got %s instead" % type(template))
+ raise AnsibleActionFail(
+ "Error while reading template file - "
+ "a string or dict for template expected, but got %s instead"
+ % type(template)
+ )
return template_param
def import_jinja2_lstrip(self, templates):
# Option `lstrip_blocks' was added in Jinja2 version 2.7.
- if any(tmp['lstrip_blocks'] for tmp in templates):
+ if any(tmp["lstrip_blocks"] for tmp in templates):
try:
import jinja2.defaults
except ImportError:
- raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.')
+ raise AnsibleError(
+ "Unable to import Jinja2 defaults for determining Jinja2 features."
+ )
try:
jinja2.defaults.LSTRIP_BLOCKS
except AttributeError:
- raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7")
+ raise AnsibleError(
+ "Option `lstrip_blocks' is only available in Jinja2 versions >=2.7"
+ )
def load_template(self, template, new_module_args, task_vars):
# template is only supported by k8s module.
- if self._task.action not in ('k8s', 'kubernetes.core.k8s', 'community.okd.k8s', 'redhat.openshift.k8s', 'community.kubernetes.k8s'):
- raise AnsibleActionFail("'template' is only a supported parameter for the 'k8s' module.")
+ if self._task.action not in (
+ "k8s",
+ "kubernetes.core.k8s",
+ "community.okd.k8s",
+ "redhat.openshift.k8s",
+ "community.kubernetes.k8s",
+ ):
+ raise AnsibleActionFail(
+ "'template' is only a supported parameter for the 'k8s' module."
+ )
template_params = []
if isinstance(template, string_types) or isinstance(template, dict):
@@ -137,8 +184,11 @@ def load_template(self, template, new_module_args, task_vars):
for element in template:
template_params.append(self.get_template_args(element))
else:
- raise AnsibleActionFail("Error while reading template file - "
- "a string or dict for template expected, but got %s instead" % type(template))
+ raise AnsibleActionFail(
+ "Error while reading template file - "
+ "a string or dict for template expected, but got %s instead"
+ % type(template)
+ )
self.import_jinja2_lstrip(template_params)
@@ -149,20 +199,31 @@ def load_template(self, template, new_module_args, task_vars):
old_vars = self._templar.available_variables
default_environment = {}
- for key in ("newline_sequence", "variable_start_string", "variable_end_string",
- "block_start_string", "block_end_string", "trim_blocks", "lstrip_blocks"):
+ for key in (
+ "newline_sequence",
+ "variable_start_string",
+ "variable_end_string",
+ "block_start_string",
+ "block_end_string",
+ "trim_blocks",
+ "lstrip_blocks",
+ ):
if hasattr(self._templar.environment, key):
default_environment[key] = getattr(self._templar.environment, key)
for template_item in template_params:
# We need to convert unescaped sequences to proper escaped sequences for Jinja2
- newline_sequence = template_item['newline_sequence']
+ newline_sequence = template_item["newline_sequence"]
if newline_sequence in wrong_sequences:
- template_item['newline_sequence'] = allowed_sequences[wrong_sequences.index(newline_sequence)]
+ template_item["newline_sequence"] = allowed_sequences[
+ wrong_sequences.index(newline_sequence)
+ ]
elif newline_sequence not in allowed_sequences:
- raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")
+ raise AnsibleActionFail(
+ "newline_sequence needs to be one of: \n, \r or \r\n"
+ )
# template the source data locally & get ready to transfer
- with self.get_template_data(template_item['path']) as template_data:
+ with self.get_template_data(template_item["path"]) as template_data:
# add ansible 'template' vars
temp_vars = copy.deepcopy(task_vars)
for key, value in iteritems(template_item):
@@ -170,29 +231,45 @@ def load_template(self, template, new_module_args, task_vars):
if value is not None:
setattr(self._templar.environment, key, value)
else:
- setattr(self._templar.environment, key, default_environment.get(key))
+ setattr(
+ self._templar.environment,
+ key,
+ default_environment.get(key),
+ )
self._templar.available_variables = temp_vars
- result = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
+ result = self._templar.do_template(
+ template_data,
+ preserve_trailing_newlines=True,
+ escape_backslashes=False,
+ )
result_template.append(result)
self._templar.available_variables = old_vars
- resource_definition = self._task.args.get('definition', None)
+ resource_definition = self._task.args.get("definition", None)
if not resource_definition:
- new_module_args.pop('template')
- new_module_args['definition'] = result_template
+ new_module_args.pop("template")
+ new_module_args["definition"] = result_template
def get_file_realpath(self, local_path):
# local_path is only supported by k8s_cp module.
- if self._task.action not in ('k8s_cp', 'kubernetes.core.k8s_cp', 'community.kubernetes.k8s_cp'):
- raise AnsibleActionFail("'local_path' is only supported parameter for 'k8s_cp' module.")
+ if self._task.action not in (
+ "k8s_cp",
+ "kubernetes.core.k8s_cp",
+ "community.kubernetes.k8s_cp",
+ ):
+ raise AnsibleActionFail(
+ "'local_path' is only supported parameter for 'k8s_cp' module."
+ )
if os.path.exists(local_path):
return local_path
try:
# find in expected paths
- return self._find_needle('files', local_path)
+ return self._find_needle("files", local_path)
except AnsibleError:
- raise AnsibleActionFail("%s does not exist in local filesystem" % local_path)
+ raise AnsibleActionFail(
+ "%s does not exist in local filesystem" % local_path
+ )
def get_kubeconfig(self, kubeconfig, remote_transport, new_module_args):
if isinstance(kubeconfig, string_types):
@@ -200,20 +277,22 @@ def get_kubeconfig(self, kubeconfig, remote_transport, new_module_args):
if not remote_transport:
# kubeconfig is local
# find in expected paths
- kubeconfig = self._find_needle('files', kubeconfig)
+ kubeconfig = self._find_needle("files", kubeconfig)
# decrypt kubeconfig found
actual_file = self._loader.get_real_file(kubeconfig, decrypt=True)
- new_module_args['kubeconfig'] = actual_file
+ new_module_args["kubeconfig"] = actual_file
elif isinstance(kubeconfig, dict):
- new_module_args['kubeconfig'] = kubeconfig
+ new_module_args["kubeconfig"] = kubeconfig
else:
- raise AnsibleActionFail("Error while reading kubeconfig parameter - "
- "a string or dict expected, but got %s instead" % type(kubeconfig))
+ raise AnsibleActionFail(
+ "Error while reading kubeconfig parameter - "
+ "a string or dict expected, but got %s instead" % type(kubeconfig)
+ )
def run(self, tmp=None, task_vars=None):
- ''' handler for k8s options '''
+ """ handler for k8s options """
if task_vars is None:
task_vars = dict()
@@ -224,53 +303,61 @@ def run(self, tmp=None, task_vars=None):
# look for kubeconfig and src
# 'local' => look files on Ansible Controller
# Transport other than 'local' => look files on remote node
- remote_transport = self._connection.transport != 'local'
+ remote_transport = self._connection.transport != "local"
new_module_args = copy.deepcopy(self._task.args)
- kubeconfig = self._task.args.get('kubeconfig', None)
+ kubeconfig = self._task.args.get("kubeconfig", None)
if kubeconfig:
try:
self.get_kubeconfig(kubeconfig, remote_transport, new_module_args)
except AnsibleError as e:
- result['failed'] = True
- result['msg'] = to_text(e)
- result['exception'] = traceback.format_exc()
+ result["failed"] = True
+ result["msg"] = to_text(e)
+ result["exception"] = traceback.format_exc()
return result
# find the file in the expected search path
- src = self._task.args.get('src', None)
+ src = self._task.args.get("src", None)
if src:
if remote_transport:
# src is on remote node
- result.update(self._execute_module(module_name=self._task.action, task_vars=task_vars))
+ result.update(
+ self._execute_module(
+ module_name=self._task.action, task_vars=task_vars
+ )
+ )
return self._ensure_invocation(result)
# src is local
try:
# find in expected paths
- src = self._find_needle('files', src)
+ src = self._find_needle("files", src)
except AnsibleError as e:
- result['failed'] = True
- result['msg'] = to_text(e)
- result['exception'] = traceback.format_exc()
+ result["failed"] = True
+ result["msg"] = to_text(e)
+ result["exception"] = traceback.format_exc()
return result
if src:
- new_module_args['src'] = src
+ new_module_args["src"] = src
- template = self._task.args.get('template', None)
+ template = self._task.args.get("template", None)
if template:
self.load_template(template, new_module_args, task_vars)
- local_path = self._task.args.get('local_path')
- state = self._task.args.get('state', None)
- if local_path and state == 'to_pod':
- new_module_args['local_path'] = self.get_file_realpath(local_path)
+ local_path = self._task.args.get("local_path")
+ state = self._task.args.get("state", None)
+ if local_path and state == "to_pod":
+ new_module_args["local_path"] = self.get_file_realpath(local_path)
# Execute the k8s_* module.
- module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars)
+ module_return = self._execute_module(
+ module_name=self._task.action,
+ module_args=new_module_args,
+ task_vars=task_vars,
+ )
# Delete tmp path
self._remove_tmp_path(self._connection._shell.tmpdir)
diff --git a/plugins/connection/kubectl.py b/plugins/connection/kubectl.py
index 5ca70f96cd..a8fcbf5c6e 100644
--- a/plugins/connection/kubectl.py
+++ b/plugins/connection/kubectl.py
@@ -17,7 +17,8 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
DOCUMENTATION = r"""
@@ -185,26 +186,26 @@
display = Display()
-CONNECTION_TRANSPORT = 'kubectl'
+CONNECTION_TRANSPORT = "kubectl"
CONNECTION_OPTIONS = {
- 'kubectl_container': '-c',
- 'kubectl_namespace': '-n',
- 'kubectl_kubeconfig': '--kubeconfig',
- 'kubectl_context': '--context',
- 'kubectl_host': '--server',
- 'kubectl_username': '--username',
- 'kubectl_password': '--password',
- 'client_cert': '--client-certificate',
- 'client_key': '--client-key',
- 'ca_cert': '--certificate-authority',
- 'validate_certs': '--insecure-skip-tls-verify',
- 'kubectl_token': '--token'
+ "kubectl_container": "-c",
+ "kubectl_namespace": "-n",
+ "kubectl_kubeconfig": "--kubeconfig",
+ "kubectl_context": "--context",
+ "kubectl_host": "--server",
+ "kubectl_username": "--username",
+ "kubectl_password": "--password",
+ "client_cert": "--client-certificate",
+ "client_key": "--client-key",
+ "ca_cert": "--certificate-authority",
+ "validate_certs": "--insecure-skip-tls-verify",
+ "kubectl_token": "--token",
}
class Connection(ConnectionBase):
- ''' Local kubectl based connections '''
+ """ Local kubectl based connections """
transport = CONNECTION_TRANSPORT
connection_options = CONNECTION_OPTIONS
@@ -217,57 +218,70 @@ def __init__(self, play_context, new_stdin, *args, **kwargs):
# Note: kubectl runs commands as the user that started the container.
# It is impossible to set the remote user for a kubectl connection.
- cmd_arg = '{0}_command'.format(self.transport)
+ cmd_arg = "{0}_command".format(self.transport)
if cmd_arg in kwargs:
self.transport_cmd = kwargs[cmd_arg]
else:
self.transport_cmd = distutils.spawn.find_executable(self.transport)
if not self.transport_cmd:
- raise AnsibleError("{0} command not found in PATH".format(self.transport))
+ raise AnsibleError(
+ "{0} command not found in PATH".format(self.transport)
+ )
def _build_exec_cmd(self, cmd):
- """ Build the local kubectl exec command to run cmd on remote_host
- """
+ """Build the local kubectl exec command to run cmd on remote_host"""
local_cmd = [self.transport_cmd]
censored_local_cmd = [self.transport_cmd]
# Build command options based on doc string
doc_yaml = AnsibleLoader(self.documentation).get_single_data()
- for key in doc_yaml.get('options'):
- if key.endswith('verify_ssl') and self.get_option(key) != '':
+ for key in doc_yaml.get("options"):
+ if key.endswith("verify_ssl") and self.get_option(key) != "":
# Translate verify_ssl to skip_verify_ssl, and output as string
skip_verify_ssl = not self.get_option(key)
- local_cmd.append(u'{0}={1}'.format(self.connection_options[key], str(skip_verify_ssl).lower()))
- censored_local_cmd.append(u'{0}={1}'.format(self.connection_options[key], str(skip_verify_ssl).lower()))
- elif not key.endswith('container') and self.get_option(key) and self.connection_options.get(key):
+ local_cmd.append(
+ u"{0}={1}".format(
+ self.connection_options[key], str(skip_verify_ssl).lower()
+ )
+ )
+ censored_local_cmd.append(
+ u"{0}={1}".format(
+ self.connection_options[key], str(skip_verify_ssl).lower()
+ )
+ )
+ elif (
+ not key.endswith("container")
+ and self.get_option(key)
+ and self.connection_options.get(key)
+ ):
cmd_arg = self.connection_options[key]
local_cmd += [cmd_arg, self.get_option(key)]
# Redact password and token from console log
- if key.endswith(('_token', '_password')):
- censored_local_cmd += [cmd_arg, '********']
+ if key.endswith(("_token", "_password")):
+ censored_local_cmd += [cmd_arg, "********"]
else:
censored_local_cmd += [cmd_arg, self.get_option(key)]
- extra_args_name = u'{0}_extra_args'.format(self.transport)
+ extra_args_name = u"{0}_extra_args".format(self.transport)
if self.get_option(extra_args_name):
- local_cmd += self.get_option(extra_args_name).split(' ')
- censored_local_cmd += self.get_option(extra_args_name).split(' ')
+ local_cmd += self.get_option(extra_args_name).split(" ")
+ censored_local_cmd += self.get_option(extra_args_name).split(" ")
- pod = self.get_option(u'{0}_pod'.format(self.transport))
+ pod = self.get_option(u"{0}_pod".format(self.transport))
if not pod:
pod = self._play_context.remote_addr
# -i is needed to keep stdin open which allows pipelining to work
- local_cmd += ['exec', '-i', pod]
- censored_local_cmd += ['exec', '-i', pod]
+ local_cmd += ["exec", "-i", pod]
+ censored_local_cmd += ["exec", "-i", pod]
# if the pod has more than one container, then container is required
- container_arg_name = u'{0}_container'.format(self.transport)
+ container_arg_name = u"{0}_container".format(self.transport)
if self.get_option(container_arg_name):
- local_cmd += ['-c', self.get_option(container_arg_name)]
- censored_local_cmd += ['-c', self.get_option(container_arg_name)]
+ local_cmd += ["-c", self.get_option(container_arg_name)]
+ censored_local_cmd += ["-c", self.get_option(container_arg_name)]
- local_cmd += ['--'] + cmd
- censored_local_cmd += ['--'] + cmd
+ local_cmd += ["--"] + cmd
+ censored_local_cmd += ["--"] + cmd
return local_cmd, censored_local_cmd
@@ -275,33 +289,45 @@ def _connect(self, port=None):
""" Connect to the container. Nothing to do """
super(Connection, self)._connect()
if not self._connected:
- display.vvv(u"ESTABLISH {0} CONNECTION".format(self.transport), host=self._play_context.remote_addr)
+ display.vvv(
+ u"ESTABLISH {0} CONNECTION".format(self.transport),
+ host=self._play_context.remote_addr,
+ )
self._connected = True
def exec_command(self, cmd, in_data=None, sudoable=False):
""" Run a command in the container """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
- local_cmd, censored_local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])
-
- display.vvv("EXEC %s" % (censored_local_cmd,), host=self._play_context.remote_addr)
- local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
- p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ local_cmd, censored_local_cmd = self._build_exec_cmd(
+ [self._play_context.executable, "-c", cmd]
+ )
+
+ display.vvv(
+ "EXEC %s" % (censored_local_cmd,), host=self._play_context.remote_addr
+ )
+ local_cmd = [to_bytes(i, errors="surrogate_or_strict") for i in local_cmd]
+ p = subprocess.Popen(
+ local_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
stdout, stderr = p.communicate(in_data)
return (p.returncode, stdout, stderr)
def _prefix_login_path(self, remote_path):
- ''' Make sure that we put files into a standard path
+ """Make sure that we put files into a standard path
- If a path is relative, then we need to choose where to put it.
- ssh chooses $HOME but we aren't guaranteed that a home dir will
- exist in any given chroot. So for now we're choosing "/" instead.
- This also happens to be the former default.
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
- Can revisit using $HOME instead if it's a problem
- '''
+ Can revisit using $HOME instead if it's a problem
+ """
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
@@ -309,61 +335,89 @@ def _prefix_login_path(self, remote_path):
def put_file(self, in_path, out_path):
""" Transfer a file from local to the container """
super(Connection, self).put_file(in_path, out_path)
- display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+ display.vvv(
+ "PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr
+ )
out_path = self._prefix_login_path(out_path)
- if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
- raise AnsibleFileNotFound(
- "file or module does not exist: %s" % in_path)
+ if not os.path.exists(to_bytes(in_path, errors="surrogate_or_strict")):
+ raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
out_path = shlex_quote(out_path)
# kubectl doesn't have native support for copying files into
# running containers, so we use kubectl exec to implement this
- with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file:
if not os.fstat(in_file.fileno()).st_size:
- count = ' count=0'
+ count = " count=0"
else:
- count = ''
- args, dummy = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
- args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+ count = ""
+ args, dummy = self._build_exec_cmd(
+ [
+ self._play_context.executable,
+ "-c",
+ "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count),
+ ]
+ )
+ args = [to_bytes(i, errors="surrogate_or_strict") for i in args]
try:
- p = subprocess.Popen(args, stdin=in_file,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p = subprocess.Popen(
+ args, stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ )
except OSError:
- raise AnsibleError("kubectl connection requires dd command in the container to put files")
+ raise AnsibleError(
+ "kubectl connection requires dd command in the container to put files"
+ )
stdout, stderr = p.communicate()
if p.returncode != 0:
- raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ raise AnsibleError(
+ "failed to transfer file %s to %s:\n%s\n%s"
+ % (in_path, out_path, stdout, stderr)
+ )
def fetch_file(self, in_path, out_path):
""" Fetch a file from container to local. """
super(Connection, self).fetch_file(in_path, out_path)
- display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+ display.vvv(
+ "FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr
+ )
in_path = self._prefix_login_path(in_path)
out_dir = os.path.dirname(out_path)
# kubectl doesn't have native support for fetching files from
# running containers, so we use kubectl exec to implement this
- args, dummy = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
- args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+ args, dummy = self._build_exec_cmd(
+ [self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)]
+ )
+ args = [to_bytes(i, errors="surrogate_or_strict") for i in args]
actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
- with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
+ with open(
+ to_bytes(actual_out_path, errors="surrogate_or_strict"), "wb"
+ ) as out_file:
try:
- p = subprocess.Popen(args, stdin=subprocess.PIPE,
- stdout=out_file, stderr=subprocess.PIPE)
+ p = subprocess.Popen(
+ args, stdin=subprocess.PIPE, stdout=out_file, stderr=subprocess.PIPE
+ )
except OSError:
raise AnsibleError(
- "{0} connection requires dd command in the container to fetch files".format(self.transport)
+ "{0} connection requires dd command in the container to fetch files".format(
+ self.transport
+ )
)
stdout, stderr = p.communicate()
if p.returncode != 0:
- raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ raise AnsibleError(
+ "failed to fetch file %s to %s:\n%s\n%s"
+ % (in_path, out_path, stdout, stderr)
+ )
if actual_out_path != out_path:
- os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
+ os.rename(
+ to_bytes(actual_out_path, errors="strict"),
+ to_bytes(out_path, errors="strict"),
+ )
def close(self):
""" Terminate the connection. Nothing to do for kubectl"""
diff --git a/plugins/doc_fragments/helm_common_options.py b/plugins/doc_fragments/helm_common_options.py
index 046e11b1ca..7085a04683 100644
--- a/plugins/doc_fragments/helm_common_options.py
+++ b/plugins/doc_fragments/helm_common_options.py
@@ -6,13 +6,14 @@
# Options for common Helm modules
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
class ModuleDocFragment(object):
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
binary_path:
description:
@@ -56,4 +57,4 @@ class ModuleDocFragment(object):
type: path
aliases: [ ssl_ca_cert ]
version_added: "1.2.0"
-'''
+"""
diff --git a/plugins/doc_fragments/k8s_auth_options.py b/plugins/doc_fragments/k8s_auth_options.py
index f42dfd2d7a..dc88155069 100644
--- a/plugins/doc_fragments/k8s_auth_options.py
+++ b/plugins/doc_fragments/k8s_auth_options.py
@@ -5,13 +5,14 @@
# Options for authenticating with the API.
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
class ModuleDocFragment(object):
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
host:
description:
@@ -114,4 +115,4 @@ class ModuleDocFragment(object):
- "To avoid SSL certificate validation errors when C(validate_certs) is I(True), the full
certificate chain for the API server must be provided via C(ca_cert) or in the
kubeconfig file."
-'''
+"""
diff --git a/plugins/doc_fragments/k8s_delete_options.py b/plugins/doc_fragments/k8s_delete_options.py
index 053a4d0fb1..a8f20cf9cf 100644
--- a/plugins/doc_fragments/k8s_delete_options.py
+++ b/plugins/doc_fragments/k8s_delete_options.py
@@ -5,13 +5,14 @@
# Options for specifying object wait
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
class ModuleDocFragment(object):
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
delete_options:
type: dict
@@ -48,4 +49,4 @@ class ModuleDocFragment(object):
type: str
description:
- Specify the UID of the target object.
-'''
+"""
diff --git a/plugins/doc_fragments/k8s_name_options.py b/plugins/doc_fragments/k8s_name_options.py
index fe4e5c4792..e14658b035 100644
--- a/plugins/doc_fragments/k8s_name_options.py
+++ b/plugins/doc_fragments/k8s_name_options.py
@@ -5,13 +5,14 @@
# Options for selecting or identifying a specific K8s object
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
class ModuleDocFragment(object):
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
api_version:
description:
@@ -49,4 +50,4 @@ class ModuleDocFragment(object):
- If I(resource definition) is provided, the I(metadata.namespace) value from the I(resource_definition)
will override this option.
type: str
-'''
+"""
diff --git a/plugins/doc_fragments/k8s_resource_options.py b/plugins/doc_fragments/k8s_resource_options.py
index b9dcfe1651..93b7378918 100644
--- a/plugins/doc_fragments/k8s_resource_options.py
+++ b/plugins/doc_fragments/k8s_resource_options.py
@@ -5,13 +5,14 @@
# Options for providing an object configuration
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
class ModuleDocFragment(object):
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
resource_definition:
description:
@@ -30,4 +31,4 @@ class ModuleDocFragment(object):
I(resource_definition). See Examples below.
- Mutually exclusive with I(template) in case of M(k8s) module.
type: path
-'''
+"""
diff --git a/plugins/doc_fragments/k8s_scale_options.py b/plugins/doc_fragments/k8s_scale_options.py
index 8b10dcefb6..ca0605fdf3 100644
--- a/plugins/doc_fragments/k8s_scale_options.py
+++ b/plugins/doc_fragments/k8s_scale_options.py
@@ -5,13 +5,14 @@
# Options used by scale modules.
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
class ModuleDocFragment(object):
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
replicas:
description:
@@ -46,4 +47,4 @@ class ModuleDocFragment(object):
default: 5
type: int
version_added: 2.0.0
-'''
+"""
diff --git a/plugins/doc_fragments/k8s_state_options.py b/plugins/doc_fragments/k8s_state_options.py
index 8f741ba8fa..0333186676 100644
--- a/plugins/doc_fragments/k8s_state_options.py
+++ b/plugins/doc_fragments/k8s_state_options.py
@@ -5,13 +5,14 @@
# Options for specifying object state
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
class ModuleDocFragment(object):
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
state:
description:
@@ -27,4 +28,4 @@ class ModuleDocFragment(object):
- If set to C(yes), and I(state) is C(present), an existing object will be replaced.
type: bool
default: no
-'''
+"""
diff --git a/plugins/doc_fragments/k8s_wait_options.py b/plugins/doc_fragments/k8s_wait_options.py
index 06600564c3..e498e3ac6f 100644
--- a/plugins/doc_fragments/k8s_wait_options.py
+++ b/plugins/doc_fragments/k8s_wait_options.py
@@ -5,13 +5,14 @@
# Options for specifying object wait
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
class ModuleDocFragment(object):
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
wait:
description:
@@ -64,4 +65,4 @@ class ModuleDocFragment(object):
- The possible reasons in a condition are specific to each resource type in Kubernetes.
- See the API documentation of the status field for a given resource to see possible choices.
type: dict
-'''
+"""
diff --git a/plugins/filter/k8s.py b/plugins/filter/k8s.py
index 3674d30d9f..f5e0170e53 100644
--- a/plugins/filter/k8s.py
+++ b/plugins/filter/k8s.py
@@ -2,12 +2,15 @@
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
from ansible.errors import AnsibleFilterError
-from ansible_collections.kubernetes.core.plugins.module_utils.hashes import generate_hash
+from ansible_collections.kubernetes.core.plugins.module_utils.hashes import (
+ generate_hash,
+)
def k8s_config_resource_name(resource):
@@ -15,15 +18,14 @@ def k8s_config_resource_name(resource):
Generate resource name for the given resource of type ConfigMap, Secret
"""
try:
- return resource['metadata']['name'] + '-' + generate_hash(resource)
+ return resource["metadata"]["name"] + "-" + generate_hash(resource)
except KeyError:
- raise AnsibleFilterError("resource must have a metadata.name key to generate a resource name")
+ raise AnsibleFilterError(
+ "resource must have a metadata.name key to generate a resource name"
+ )
# ---- Ansible filters ----
class FilterModule(object):
-
def filters(self):
- return {
- 'k8s_config_resource_name': k8s_config_resource_name
- }
+ return {"k8s_config_resource_name": k8s_config_resource_name}
diff --git a/plugins/inventory/k8s.py b/plugins/inventory/k8s.py
index 8af31cebda..f71e85825e 100644
--- a/plugins/inventory/k8s.py
+++ b/plugins/inventory/k8s.py
@@ -1,10 +1,11 @@
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = """
name: k8s
plugin_type: inventory
author:
@@ -89,9 +90,9 @@
- "python >= 3.6"
- "kubernetes >= 12.0.0"
- "PyYAML >= 3.11"
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = """
# File must be named k8s.yaml or k8s.yml
# Authenticate with token, and return all pods and services for all namespaces
@@ -112,12 +113,17 @@
connections:
- kubeconfig: /path/to/config
context: 'awx/192-168-64-4:8443/developer'
-'''
+"""
import json
from ansible.errors import AnsibleError
-from ansible_collections.kubernetes.core.plugins.module_utils.common import K8sAnsibleMixin, HAS_K8S_MODULE_HELPER, k8s_import_exception, get_api_client
+from ansible_collections.kubernetes.core.plugins.module_utils.common import (
+ K8sAnsibleMixin,
+ HAS_K8S_MODULE_HELPER,
+ k8s_import_exception,
+ get_api_client,
+)
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
try:
@@ -128,13 +134,13 @@
def format_dynamic_api_exc(exc):
if exc.body:
- if exc.headers and exc.headers.get('Content-Type') == 'application/json':
- message = json.loads(exc.body).get('message')
+ if exc.headers and exc.headers.get("Content-Type") == "application/json":
+ message = json.loads(exc.body).get("message")
if message:
return message
return exc.body
else:
- return '%s Reason: %s' % (exc.status, exc.reason)
+ return "%s Reason: %s" % (exc.status, exc.reason)
class K8sInventoryException(Exception):
@@ -142,10 +148,10 @@ class K8sInventoryException(Exception):
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable, K8sAnsibleMixin):
- NAME = 'kubernetes.core.k8s'
+ NAME = "kubernetes.core.k8s"
- connection_plugin = 'kubernetes.core.kubectl'
- transport = 'kubectl'
+ connection_plugin = "kubernetes.core.kubectl"
+ transport = "kubectl"
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
@@ -154,11 +160,13 @@ def parse(self, inventory, loader, path, cache=True):
self.setup(config_data, cache, cache_key)
def setup(self, config_data, cache, cache_key):
- connections = config_data.get('connections')
+ connections = config_data.get("connections")
if not HAS_K8S_MODULE_HELPER:
raise K8sInventoryException(
- "This module requires the Kubernetes Python client. Try `pip install kubernetes`. Detail: {0}".format(k8s_import_exception)
+ "This module requires the Kubernetes Python client. Try `pip install kubernetes`. Detail: {0}".format(
+ k8s_import_exception
+ )
)
source_data = None
@@ -179,11 +187,15 @@ def fetch_objects(self, connections):
for connection in connections:
if not isinstance(connection, dict):
- raise K8sInventoryException("Expecting connection to be a dictionary.")
+ raise K8sInventoryException(
+ "Expecting connection to be a dictionary."
+ )
client = get_api_client(**connection)
- name = connection.get('name', self.get_default_host_name(client.configuration.host))
- if connection.get('namespaces'):
- namespaces = connection['namespaces']
+ name = connection.get(
+ "name", self.get_default_host_name(client.configuration.host)
+ )
+ if connection.get("namespaces"):
+ namespaces = connection["namespaces"]
else:
namespaces = self.get_available_namespaces(client)
for namespace in namespaces:
@@ -199,27 +211,36 @@ def fetch_objects(self, connections):
@staticmethod
def get_default_host_name(host):
- return host.replace('https://', '').replace('http://', '').replace('.', '-').replace(':', '_')
+ return (
+ host.replace("https://", "")
+ .replace("http://", "")
+ .replace(".", "-")
+ .replace(":", "_")
+ )
def get_available_namespaces(self, client):
- v1_namespace = client.resources.get(api_version='v1', kind='Namespace')
+ v1_namespace = client.resources.get(api_version="v1", kind="Namespace")
try:
obj = v1_namespace.get()
except DynamicApiError as exc:
self.display.debug(exc)
- raise K8sInventoryException('Error fetching Namespace list: %s' % format_dynamic_api_exc(exc))
+ raise K8sInventoryException(
+ "Error fetching Namespace list: %s" % format_dynamic_api_exc(exc)
+ )
return [namespace.metadata.name for namespace in obj.items]
def get_pods_for_namespace(self, client, name, namespace):
- v1_pod = client.resources.get(api_version='v1', kind='Pod')
+ v1_pod = client.resources.get(api_version="v1", kind="Pod")
try:
obj = v1_pod.get(namespace=namespace)
except DynamicApiError as exc:
self.display.debug(exc)
- raise K8sInventoryException('Error fetching Pod list: %s' % format_dynamic_api_exc(exc))
+ raise K8sInventoryException(
+ "Error fetching Pod list: %s" % format_dynamic_api_exc(exc)
+ )
- namespace_group = 'namespace_{0}'.format(namespace)
- namespace_pods_group = '{0}_pods'.format(namespace_group)
+ namespace_group = "namespace_{0}".format(namespace)
+ namespace_pods_group = "{0}_pods".format(namespace_group)
self.inventory.add_group(name)
self.inventory.add_group(namespace_group)
@@ -230,12 +251,14 @@ def get_pods_for_namespace(self, client, name, namespace):
for pod in obj.items:
pod_name = pod.metadata.name
pod_groups = []
- pod_annotations = {} if not pod.metadata.annotations else dict(pod.metadata.annotations)
+ pod_annotations = (
+ {} if not pod.metadata.annotations else dict(pod.metadata.annotations)
+ )
if pod.metadata.labels:
# create a group for each label_value
for key, value in pod.metadata.labels:
- group_name = 'label_{0}_{1}'.format(key, value)
+ group_name = "label_{0}_{1}".format(key, value)
if group_name not in pod_groups:
pod_groups.append(group_name)
self.inventory.add_group(group_name)
@@ -248,7 +271,7 @@ def get_pods_for_namespace(self, client, name, namespace):
for container in pod.status.containerStatuses:
# add each pod_container to the namespace group, and to each label_value group
- container_name = '{0}_{1}'.format(pod.metadata.name, container.name)
+ container_name = "{0}_{1}".format(pod.metadata.name, container.name)
self.inventory.add_host(container_name)
self.inventory.add_child(namespace_pods_group, container_name)
if pod_groups:
@@ -256,46 +279,85 @@ def get_pods_for_namespace(self, client, name, namespace):
self.inventory.add_child(group, container_name)
# Add hostvars
- self.inventory.set_variable(container_name, 'object_type', 'pod')
- self.inventory.set_variable(container_name, 'labels', pod_labels)
- self.inventory.set_variable(container_name, 'annotations', pod_annotations)
- self.inventory.set_variable(container_name, 'cluster_name', pod.metadata.clusterName)
- self.inventory.set_variable(container_name, 'pod_node_name', pod.spec.nodeName)
- self.inventory.set_variable(container_name, 'pod_name', pod.spec.name)
- self.inventory.set_variable(container_name, 'pod_host_ip', pod.status.hostIP)
- self.inventory.set_variable(container_name, 'pod_phase', pod.status.phase)
- self.inventory.set_variable(container_name, 'pod_ip', pod.status.podIP)
- self.inventory.set_variable(container_name, 'pod_self_link', pod.metadata.selfLink)
- self.inventory.set_variable(container_name, 'pod_resource_version', pod.metadata.resourceVersion)
- self.inventory.set_variable(container_name, 'pod_uid', pod.metadata.uid)
- self.inventory.set_variable(container_name, 'container_name', container.image)
- self.inventory.set_variable(container_name, 'container_image', container.image)
+ self.inventory.set_variable(container_name, "object_type", "pod")
+ self.inventory.set_variable(container_name, "labels", pod_labels)
+ self.inventory.set_variable(
+ container_name, "annotations", pod_annotations
+ )
+ self.inventory.set_variable(
+ container_name, "cluster_name", pod.metadata.clusterName
+ )
+ self.inventory.set_variable(
+ container_name, "pod_node_name", pod.spec.nodeName
+ )
+ self.inventory.set_variable(container_name, "pod_name", pod_name)
+ self.inventory.set_variable(
+ container_name, "pod_host_ip", pod.status.hostIP
+ )
+ self.inventory.set_variable(
+ container_name, "pod_phase", pod.status.phase
+ )
+ self.inventory.set_variable(container_name, "pod_ip", pod.status.podIP)
+ self.inventory.set_variable(
+ container_name, "pod_self_link", pod.metadata.selfLink
+ )
+ self.inventory.set_variable(
+ container_name, "pod_resource_version", pod.metadata.resourceVersion
+ )
+ self.inventory.set_variable(container_name, "pod_uid", pod.metadata.uid)
+ self.inventory.set_variable(
+ container_name, "container_name", container.name
+ )
+ self.inventory.set_variable(
+ container_name, "container_image", container.image
+ )
if container.state.running:
- self.inventory.set_variable(container_name, 'container_state', 'Running')
+ self.inventory.set_variable(
+ container_name, "container_state", "Running"
+ )
if container.state.terminated:
- self.inventory.set_variable(container_name, 'container_state', 'Terminated')
+ self.inventory.set_variable(
+ container_name, "container_state", "Terminated"
+ )
if container.state.waiting:
- self.inventory.set_variable(container_name, 'container_state', 'Waiting')
- self.inventory.set_variable(container_name, 'container_ready', container.ready)
- self.inventory.set_variable(container_name, 'ansible_remote_tmp', '/tmp/')
- self.inventory.set_variable(container_name, 'ansible_connection', self.connection_plugin)
- self.inventory.set_variable(container_name, 'ansible_{0}_pod'.format(self.transport),
- pod_name)
- self.inventory.set_variable(container_name, 'ansible_{0}_container'.format(self.transport),
- container.name)
- self.inventory.set_variable(container_name, 'ansible_{0}_namespace'.format(self.transport),
- namespace)
+ self.inventory.set_variable(
+ container_name, "container_state", "Waiting"
+ )
+ self.inventory.set_variable(
+ container_name, "container_ready", container.ready
+ )
+ self.inventory.set_variable(
+ container_name, "ansible_remote_tmp", "/tmp/"
+ )
+ self.inventory.set_variable(
+ container_name, "ansible_connection", self.connection_plugin
+ )
+ self.inventory.set_variable(
+ container_name, "ansible_{0}_pod".format(self.transport), pod_name
+ )
+ self.inventory.set_variable(
+ container_name,
+ "ansible_{0}_container".format(self.transport),
+ container.name,
+ )
+ self.inventory.set_variable(
+ container_name,
+ "ansible_{0}_namespace".format(self.transport),
+ namespace,
+ )
def get_services_for_namespace(self, client, name, namespace):
- v1_service = client.resources.get(api_version='v1', kind='Service')
+ v1_service = client.resources.get(api_version="v1", kind="Service")
try:
obj = v1_service.get(namespace=namespace)
except DynamicApiError as exc:
self.display.debug(exc)
- raise K8sInventoryException('Error fetching Service list: %s' % format_dynamic_api_exc(exc))
+ raise K8sInventoryException(
+ "Error fetching Service list: %s" % format_dynamic_api_exc(exc)
+ )
- namespace_group = 'namespace_{0}'.format(namespace)
- namespace_services_group = '{0}_services'.format(namespace_group)
+ namespace_group = "namespace_{0}".format(namespace)
+ namespace_services_group = "{0}_services".format(namespace_group)
self.inventory.add_group(name)
self.inventory.add_group(namespace_group)
@@ -305,15 +367,21 @@ def get_services_for_namespace(self, client, name, namespace):
for service in obj.items:
service_name = service.metadata.name
- service_labels = {} if not service.metadata.labels else dict(service.metadata.labels)
- service_annotations = {} if not service.metadata.annotations else dict(service.metadata.annotations)
+ service_labels = (
+ {} if not service.metadata.labels else dict(service.metadata.labels)
+ )
+ service_annotations = (
+ {}
+ if not service.metadata.annotations
+ else dict(service.metadata.annotations)
+ )
self.inventory.add_host(service_name)
if service.metadata.labels:
# create a group for each label_value
for key, value in service.metadata.labels:
- group_name = 'label_{0}_{1}'.format(key, value)
+ group_name = "label_{0}_{1}".format(key, value)
self.inventory.add_group(group_name)
self.inventory.add_child(group_name, service_name)
@@ -322,42 +390,75 @@ def get_services_for_namespace(self, client, name, namespace):
except AnsibleError:
raise
- ports = [{'name': port.name,
- 'port': port.port,
- 'protocol': port.protocol,
- 'targetPort': port.targetPort,
- 'nodePort': port.nodePort} for port in service.spec.ports or []]
+ ports = [
+ {
+ "name": port.name,
+ "port": port.port,
+ "protocol": port.protocol,
+ "targetPort": port.targetPort,
+ "nodePort": port.nodePort,
+ }
+ for port in service.spec.ports or []
+ ]
# add hostvars
- self.inventory.set_variable(service_name, 'object_type', 'service')
- self.inventory.set_variable(service_name, 'labels', service_labels)
- self.inventory.set_variable(service_name, 'annotations', service_annotations)
- self.inventory.set_variable(service_name, 'cluster_name', service.metadata.clusterName)
- self.inventory.set_variable(service_name, 'ports', ports)
- self.inventory.set_variable(service_name, 'type', service.spec.type)
- self.inventory.set_variable(service_name, 'self_link', service.metadata.selfLink)
- self.inventory.set_variable(service_name, 'resource_version', service.metadata.resourceVersion)
- self.inventory.set_variable(service_name, 'uid', service.metadata.uid)
+ self.inventory.set_variable(service_name, "object_type", "service")
+ self.inventory.set_variable(service_name, "labels", service_labels)
+ self.inventory.set_variable(
+ service_name, "annotations", service_annotations
+ )
+ self.inventory.set_variable(
+ service_name, "cluster_name", service.metadata.clusterName
+ )
+ self.inventory.set_variable(service_name, "ports", ports)
+ self.inventory.set_variable(service_name, "type", service.spec.type)
+ self.inventory.set_variable(
+ service_name, "self_link", service.metadata.selfLink
+ )
+ self.inventory.set_variable(
+ service_name, "resource_version", service.metadata.resourceVersion
+ )
+ self.inventory.set_variable(service_name, "uid", service.metadata.uid)
if service.spec.externalTrafficPolicy:
- self.inventory.set_variable(service_name, 'external_traffic_policy',
- service.spec.externalTrafficPolicy)
+ self.inventory.set_variable(
+ service_name,
+ "external_traffic_policy",
+ service.spec.externalTrafficPolicy,
+ )
if service.spec.externalIPs:
- self.inventory.set_variable(service_name, 'external_ips', service.spec.externalIPs)
+ self.inventory.set_variable(
+ service_name, "external_ips", service.spec.externalIPs
+ )
if service.spec.externalName:
- self.inventory.set_variable(service_name, 'external_name', service.spec.externalName)
+ self.inventory.set_variable(
+ service_name, "external_name", service.spec.externalName
+ )
if service.spec.healthCheckNodePort:
- self.inventory.set_variable(service_name, 'health_check_node_port',
- service.spec.healthCheckNodePort)
+ self.inventory.set_variable(
+ service_name,
+ "health_check_node_port",
+ service.spec.healthCheckNodePort,
+ )
if service.spec.loadBalancerIP:
- self.inventory.set_variable(service_name, 'load_balancer_ip',
- service.spec.loadBalancerIP)
+ self.inventory.set_variable(
+ service_name, "load_balancer_ip", service.spec.loadBalancerIP
+ )
if service.spec.selector:
- self.inventory.set_variable(service_name, 'selector', dict(service.spec.selector))
-
- if hasattr(service.status.loadBalancer, 'ingress') and service.status.loadBalancer.ingress:
- load_balancer = [{'hostname': ingress.hostname,
- 'ip': ingress.ip} for ingress in service.status.loadBalancer.ingress]
- self.inventory.set_variable(service_name, 'load_balancer', load_balancer)
+ self.inventory.set_variable(
+ service_name, "selector", dict(service.spec.selector)
+ )
+
+ if (
+ hasattr(service.status.loadBalancer, "ingress")
+ and service.status.loadBalancer.ingress
+ ):
+ load_balancer = [
+ {"hostname": ingress.hostname, "ip": ingress.ip}
+ for ingress in service.status.loadBalancer.ingress
+ ]
+ self.inventory.set_variable(
+ service_name, "load_balancer", load_balancer
+ )
diff --git a/plugins/lookup/k8s.py b/plugins/lookup/k8s.py
index 7a0a819b02..b8a9047a0d 100644
--- a/plugins/lookup/k8s.py
+++ b/plugins/lookup/k8s.py
@@ -3,11 +3,11 @@
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = """
lookup: k8s
short_description: Query the K8s API
@@ -117,7 +117,7 @@
- "python >= 3.6"
- "kubernetes >= 12.0.0"
- "PyYAML >= 3.11"
-'''
+"""
EXAMPLES = """
- name: Fetch a list of namespaces
@@ -187,11 +187,15 @@
from ansible.module_utils.common._collections_compat import KeysView
from ansible.plugins.lookup import LookupBase
-from ansible_collections.kubernetes.core.plugins.module_utils.common import K8sAnsibleMixin, get_api_client
+from ansible_collections.kubernetes.core.plugins.module_utils.common import (
+ K8sAnsibleMixin,
+ get_api_client,
+)
try:
from kubernetes.dynamic.exceptions import NotFoundError
+
HAS_K8S_MODULE_HELPER = True
k8s_import_exception = None
except ImportError as e:
@@ -200,12 +204,13 @@
class KubernetesLookup(K8sAnsibleMixin):
-
def __init__(self):
if not HAS_K8S_MODULE_HELPER:
raise Exception(
- "Requires the Kubernetes Python client. Try `pip install kubernetes`. Detail: {0}".format(k8s_import_exception)
+ "Requires the Kubernetes Python client. Try `pip install kubernetes`. Detail: {0}".format(
+ k8s_import_exception
+ )
)
self.kind = None
@@ -226,31 +231,33 @@ def run(self, terms, variables=None, **kwargs):
self.params = kwargs
self.client = get_api_client(**kwargs)
- cluster_info = kwargs.get('cluster_info')
- if cluster_info == 'version':
+ cluster_info = kwargs.get("cluster_info")
+ if cluster_info == "version":
return [self.client.version]
- if cluster_info == 'api_groups':
+ if cluster_info == "api_groups":
if isinstance(self.client.resources.api_groups, KeysView):
return [list(self.client.resources.api_groups)]
return [self.client.resources.api_groups]
- self.kind = kwargs.get('kind')
- self.name = kwargs.get('resource_name')
- self.namespace = kwargs.get('namespace')
- self.api_version = kwargs.get('api_version', 'v1')
- self.label_selector = kwargs.get('label_selector')
- self.field_selector = kwargs.get('field_selector')
- self.include_uninitialized = kwargs.get('include_uninitialized', False)
+ self.kind = kwargs.get("kind")
+ self.name = kwargs.get("resource_name")
+ self.namespace = kwargs.get("namespace")
+ self.api_version = kwargs.get("api_version", "v1")
+ self.label_selector = kwargs.get("label_selector")
+ self.field_selector = kwargs.get("field_selector")
+ self.include_uninitialized = kwargs.get("include_uninitialized", False)
- resource_definition = kwargs.get('resource_definition')
- src = kwargs.get('src')
+ resource_definition = kwargs.get("resource_definition")
+ src = kwargs.get("src")
if src:
resource_definition = self.load_resource_definitions(src)[0]
if resource_definition:
- self.kind = resource_definition.get('kind', self.kind)
- self.api_version = resource_definition.get('apiVersion', self.api_version)
- self.name = resource_definition.get('metadata', {}).get('name', self.name)
- self.namespace = resource_definition.get('metadata', {}).get('namespace', self.namespace)
+ self.kind = resource_definition.get("kind", self.kind)
+ self.api_version = resource_definition.get("apiVersion", self.api_version)
+ self.name = resource_definition.get("metadata", {}).get("name", self.name)
+ self.namespace = resource_definition.get("metadata", {}).get(
+ "namespace", self.namespace
+ )
if not self.kind:
raise AnsibleError(
@@ -260,17 +267,21 @@ def run(self, terms, variables=None, **kwargs):
resource = self.find_resource(self.kind, self.api_version, fail=True)
try:
- k8s_obj = resource.get(name=self.name, namespace=self.namespace, label_selector=self.label_selector, field_selector=self.field_selector)
+ k8s_obj = resource.get(
+ name=self.name,
+ namespace=self.namespace,
+ label_selector=self.label_selector,
+ field_selector=self.field_selector,
+ )
except NotFoundError:
return []
if self.name:
return [k8s_obj.to_dict()]
- return k8s_obj.to_dict().get('items')
+ return k8s_obj.to_dict().get("items")
class LookupModule(LookupBase):
-
def run(self, terms, variables=None, **kwargs):
return KubernetesLookup().run(terms, variables=variables, **kwargs)
diff --git a/plugins/lookup/kustomize.py b/plugins/lookup/kustomize.py
index c23db145f6..2500fb245c 100644
--- a/plugins/lookup/kustomize.py
+++ b/plugins/lookup/kustomize.py
@@ -3,7 +3,7 @@
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = """
lookup: kustomize
short_description: Build a set of kubernetes resources using a 'kustomization.yaml' file.
@@ -33,7 +33,7 @@
requirements:
- "python >= 3.6"
-'''
+"""
EXAMPLES = """
- name: Run lookup using kustomize
@@ -91,7 +91,7 @@ def get_binary_from_path(name, opt_dirs=None):
if opt_dirs is not None:
if not isinstance(opt_dirs, list):
opt_dirs = [opt_dirs]
- opt_arg['opt_dirs'] = opt_dirs
+ opt_arg["opt_dirs"] = opt_dirs
bin_path = get_bin_path(name, **opt_arg)
return bin_path
except ValueError:
@@ -104,30 +104,41 @@ def run_command(command):
class LookupModule(LookupBase):
-
- def run(self, terms, variables=None, dir=".", binary_path=None, opt_dirs=None, **kwargs):
+ def run(
+ self, terms, variables=None, dir=".", binary_path=None, opt_dirs=None, **kwargs
+ ):
executable_path = binary_path
if executable_path is None:
executable_path = get_binary_from_path(name="kustomize", opt_dirs=opt_dirs)
if executable_path is None:
- executable_path = get_binary_from_path(name="kubectl", opt_dirs=opt_dirs)
+ executable_path = get_binary_from_path(
+ name="kubectl", opt_dirs=opt_dirs
+ )
# validate that at least one tool was found
if executable_path is None:
- raise AnsibleLookupError("Failed to find required executable 'kubectl' and 'kustomize' in paths")
+ raise AnsibleLookupError(
+ "Failed to find required executable 'kubectl' and 'kustomize' in paths"
+ )
# check input directory
kustomization_dir = dir
command = [executable_path]
- if executable_path.endswith('kustomize'):
- command += ['build', kustomization_dir]
- elif executable_path.endswith('kubectl'):
- command += ['kustomize', kustomization_dir]
+ if executable_path.endswith("kustomize"):
+ command += ["build", kustomization_dir]
+ elif executable_path.endswith("kubectl"):
+ command += ["kustomize", kustomization_dir]
else:
- raise AnsibleLookupError("unexpected tool provided as parameter {0}, expected one of kustomize, kubectl.".format(executable_path))
+ raise AnsibleLookupError(
+ "unexpected tool provided as parameter {0}, expected one of kustomize, kubectl.".format(
+ executable_path
+ )
+ )
(out, err) = run_command(command)
if err:
- raise AnsibleLookupError("kustomize command failed with: {0}".format(err.decode("utf-8")))
- return [out.decode('utf-8')]
+ raise AnsibleLookupError(
+ "kustomize command failed with: {0}".format(err.decode("utf-8"))
+ )
+ return [out.decode("utf-8")]
diff --git a/plugins/module_utils/ansiblemodule.py b/plugins/module_utils/ansiblemodule.py
index a983cad9f2..8b17866dcc 100644
--- a/plugins/module_utils/ansiblemodule.py
+++ b/plugins/module_utils/ansiblemodule.py
@@ -1,4 +1,4 @@
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
__metaclass__ = type
@@ -17,6 +17,7 @@
from ansible_collections.cloud.common.plugins.module_utils.turbo.module import (
AnsibleTurboModule as AnsibleModule,
) # noqa: F401
+
AnsibleModule.collection_name = "kubernetes.core"
except ImportError:
from ansible.module_utils.basic import AnsibleModule # noqa: F401
diff --git a/plugins/module_utils/apply.py b/plugins/module_utils/apply.py
index 034859cdec..a0ed6a4ec7 100644
--- a/plugins/module_utils/apply.py
+++ b/plugins/module_utils/apply.py
@@ -14,13 +14,16 @@
from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
from collections import OrderedDict
import json
from ansible.module_utils.common.dict_transformations import dict_merge
-from ansible_collections.kubernetes.core.plugins.module_utils.exceptions import ApplyException
+from ansible_collections.kubernetes.core.plugins.module_utils.exceptions import (
+ ApplyException,
+)
try:
from kubernetes.dynamic.exceptions import NotFoundError
@@ -28,50 +31,52 @@
pass
-LAST_APPLIED_CONFIG_ANNOTATION = 'kubectl.kubernetes.io/last-applied-configuration'
+LAST_APPLIED_CONFIG_ANNOTATION = "kubectl.kubernetes.io/last-applied-configuration"
POD_SPEC_SUFFIXES = {
- 'containers': 'name',
- 'initContainers': 'name',
- 'ephemeralContainers': 'name',
- 'volumes': 'name',
- 'imagePullSecrets': 'name',
- 'containers.volumeMounts': 'mountPath',
- 'containers.volumeDevices': 'devicePath',
- 'containers.env': 'name',
- 'containers.ports': 'containerPort',
- 'initContainers.volumeMounts': 'mountPath',
- 'initContainers.volumeDevices': 'devicePath',
- 'initContainers.env': 'name',
- 'initContainers.ports': 'containerPort',
- 'ephemeralContainers.volumeMounts': 'mountPath',
- 'ephemeralContainers.volumeDevices': 'devicePath',
- 'ephemeralContainers.env': 'name',
- 'ephemeralContainers.ports': 'containerPort',
+ "containers": "name",
+ "initContainers": "name",
+ "ephemeralContainers": "name",
+ "volumes": "name",
+ "imagePullSecrets": "name",
+ "containers.volumeMounts": "mountPath",
+ "containers.volumeDevices": "devicePath",
+ "containers.env": "name",
+ "containers.ports": "containerPort",
+ "initContainers.volumeMounts": "mountPath",
+ "initContainers.volumeDevices": "devicePath",
+ "initContainers.env": "name",
+ "initContainers.ports": "containerPort",
+ "ephemeralContainers.volumeMounts": "mountPath",
+ "ephemeralContainers.volumeDevices": "devicePath",
+ "ephemeralContainers.env": "name",
+ "ephemeralContainers.ports": "containerPort",
}
POD_SPEC_PREFIXES = [
- 'Pod.spec',
- 'Deployment.spec.template.spec',
- 'DaemonSet.spec.template.spec',
- 'StatefulSet.spec.template.spec',
- 'Job.spec.template.spec',
- 'Cronjob.spec.jobTemplate.spec.template.spec',
+ "Pod.spec",
+ "Deployment.spec.template.spec",
+ "DaemonSet.spec.template.spec",
+ "StatefulSet.spec.template.spec",
+ "Job.spec.template.spec",
+ "Cronjob.spec.jobTemplate.spec.template.spec",
]
# patch merge keys taken from generated.proto files under
# staging/src/k8s.io/api in kubernetes/kubernetes
STRATEGIC_MERGE_PATCH_KEYS = {
- 'Service.spec.ports': 'port',
- 'ServiceAccount.secrets': 'name',
- 'ValidatingWebhookConfiguration.webhooks': 'name',
- 'MutatingWebhookConfiguration.webhooks': 'name',
+ "Service.spec.ports": "port",
+ "ServiceAccount.secrets": "name",
+ "ValidatingWebhookConfiguration.webhooks": "name",
+ "MutatingWebhookConfiguration.webhooks": "name",
}
STRATEGIC_MERGE_PATCH_KEYS.update(
- {"%s.%s" % (prefix, key): value
- for prefix in POD_SPEC_PREFIXES
- for key, value in POD_SPEC_SUFFIXES.items()}
+ {
+ "%s.%s" % (prefix, key): value
+ for prefix in POD_SPEC_PREFIXES
+ for key, value in POD_SPEC_SUFFIXES.items()
+ }
)
@@ -79,21 +84,28 @@ def annotate(desired):
return dict(
metadata=dict(
annotations={
- LAST_APPLIED_CONFIG_ANNOTATION: json.dumps(desired, separators=(',', ':'), indent=None, sort_keys=True)
+ LAST_APPLIED_CONFIG_ANNOTATION: json.dumps(
+ desired, separators=(",", ":"), indent=None, sort_keys=True
+ )
}
)
)
def apply_patch(actual, desired):
- last_applied = actual['metadata'].get('annotations', {}).get(LAST_APPLIED_CONFIG_ANNOTATION)
+ last_applied = (
+ actual["metadata"].get("annotations", {}).get(LAST_APPLIED_CONFIG_ANNOTATION)
+ )
if last_applied:
# ensure that last_applied doesn't come back as a dict of unicode key/value pairs
# json.loads can be used if we stop supporting python 2
last_applied = json.loads(last_applied)
- patch = merge(dict_merge(last_applied, annotate(last_applied)),
- dict_merge(desired, annotate(desired)), actual)
+ patch = merge(
+ dict_merge(last_applied, annotate(last_applied)),
+ dict_merge(desired, annotate(desired)),
+ actual,
+ )
if patch:
return actual, patch
else:
@@ -104,7 +116,10 @@ def apply_patch(actual, desired):
def apply_object(resource, definition):
try:
- actual = resource.get(name=definition['metadata']['name'], namespace=definition['metadata'].get('namespace'))
+ actual = resource.get(
+ name=definition["metadata"]["name"],
+ namespace=definition["metadata"].get("namespace"),
+ )
except NotFoundError:
return None, dict_merge(definition, annotate(definition))
return apply_patch(actual.to_dict(), definition)
@@ -113,14 +128,21 @@ def apply_object(resource, definition):
def k8s_apply(resource, definition, **kwargs):
existing, desired = apply_object(resource, definition)
if not existing:
- return resource.create(body=desired, namespace=definition['metadata'].get('namespace'), **kwargs)
+ return resource.create(
+ body=desired, namespace=definition["metadata"].get("namespace"), **kwargs
+ )
if existing == desired:
- return resource.get(name=definition['metadata']['name'], namespace=definition['metadata'].get('namespace'))
- return resource.patch(body=desired,
- name=definition['metadata']['name'],
- namespace=definition['metadata'].get('namespace'),
- content_type='application/merge-patch+json',
- **kwargs)
+ return resource.get(
+ name=definition["metadata"]["name"],
+ namespace=definition["metadata"].get("namespace"),
+ )
+ return resource.patch(
+ body=desired,
+ name=definition["metadata"]["name"],
+ namespace=definition["metadata"].get("namespace"),
+ content_type="application/merge-patch+json",
+ **kwargs
+ )
# The patch is the difference from actual to desired without deletions, plus deletions
@@ -129,7 +151,7 @@ def k8s_apply(resource, definition, **kwargs):
# deletions, and then apply delta to deletions as a patch, which should be strictly additive.
def merge(last_applied, desired, actual, position=None):
deletions = get_deletions(last_applied, desired)
- delta = get_delta(last_applied, actual, desired, position or desired['kind'])
+ delta = get_delta(last_applied, actual, desired, position or desired["kind"])
return dict_merge(deletions, delta)
@@ -139,7 +161,9 @@ def list_to_dict(lst, key, position):
try:
result[item[key]] = item
except KeyError:
- raise ApplyException("Expected key '%s' not found in position %s" % (key, position))
+ raise ApplyException(
+ "Expected key '%s' not found in position %s" % (key, position)
+ )
return result
@@ -158,7 +182,12 @@ def list_merge(last_applied, actual, desired, position):
if key not in actual_dict or key not in last_applied_dict:
result.append(desired_dict[key])
else:
- patch = merge(last_applied_dict[key], desired_dict[key], actual_dict[key], position)
+ patch = merge(
+ last_applied_dict[key],
+ desired_dict[key],
+ actual_dict[key],
+ position,
+ )
result.append(dict_merge(actual_dict[key], patch))
for key in actual_dict:
if key not in desired_dict and key not in last_applied_dict:
@@ -198,11 +227,11 @@ def recursive_list_diff(list1, list2, position=None):
def recursive_diff(dict1, dict2, position=None):
if not position:
- if 'kind' in dict1 and dict1.get('kind') == dict2.get('kind'):
- position = dict1['kind']
+ if "kind" in dict1 and dict1.get("kind") == dict2.get("kind"):
+ position = dict1["kind"]
left = dict((k, v) for (k, v) in dict1.items() if k not in dict2)
right = dict((k, v) for (k, v) in dict2.items() if k not in dict1)
- for k in (set(dict1.keys()) & set(dict2.keys())):
+ for k in set(dict1.keys()) & set(dict2.keys()):
if position:
this_position = "%s.%s" % (position, k)
if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
@@ -247,11 +276,15 @@ def get_delta(last_applied, actual, desired, position=None):
if actual_value is None:
patch[k] = desired_value
elif isinstance(desired_value, dict):
- p = get_delta(last_applied.get(k, {}), actual_value, desired_value, this_position)
+ p = get_delta(
+ last_applied.get(k, {}), actual_value, desired_value, this_position
+ )
if p:
patch[k] = p
elif isinstance(desired_value, list):
- p = list_merge(last_applied.get(k, []), actual_value, desired_value, this_position)
+ p = list_merge(
+ last_applied.get(k, []), actual_value, desired_value, this_position
+ )
if p:
patch[k] = [item for item in p if item is not None]
elif actual_value != desired_value:
diff --git a/plugins/module_utils/args_common.py b/plugins/module_utils/args_common.py
index b8bc0bcc12..0ed06ef781 100644
--- a/plugins/module_utils/args_common.py
+++ b/plugins/module_utils/args_common.py
@@ -1,4 +1,4 @@
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
from ansible.module_utils.six import string_types
@@ -12,133 +12,83 @@ def list_dict_str(value):
AUTH_PROXY_HEADERS_SPEC = dict(
- proxy_basic_auth=dict(type='str', no_log=True),
- basic_auth=dict(type='str', no_log=True),
- user_agent=dict(type='str')
+ proxy_basic_auth=dict(type="str", no_log=True),
+ basic_auth=dict(type="str", no_log=True),
+ user_agent=dict(type="str"),
)
AUTH_ARG_SPEC = {
- 'kubeconfig': {
- 'type': 'raw',
- },
- 'context': {},
- 'host': {},
- 'api_key': {
- 'no_log': True,
- },
- 'username': {},
- 'password': {
- 'no_log': True,
- },
- 'validate_certs': {
- 'type': 'bool',
- 'aliases': ['verify_ssl'],
- },
- 'ca_cert': {
- 'type': 'path',
- 'aliases': ['ssl_ca_cert'],
- },
- 'client_cert': {
- 'type': 'path',
- 'aliases': ['cert_file'],
- },
- 'client_key': {
- 'type': 'path',
- 'aliases': ['key_file'],
- },
- 'proxy': {
- 'type': 'str',
- },
- 'proxy_headers': {
- 'type': 'dict',
- 'options': AUTH_PROXY_HEADERS_SPEC
- },
- 'persist_config': {
- 'type': 'bool',
- },
+ "kubeconfig": {"type": "raw"},
+ "context": {},
+ "host": {},
+ "api_key": {"no_log": True},
+ "username": {},
+ "password": {"no_log": True},
+ "validate_certs": {"type": "bool", "aliases": ["verify_ssl"]},
+ "ca_cert": {"type": "path", "aliases": ["ssl_ca_cert"]},
+ "client_cert": {"type": "path", "aliases": ["cert_file"]},
+ "client_key": {"type": "path", "aliases": ["key_file"]},
+ "proxy": {"type": "str"},
+ "proxy_headers": {"type": "dict", "options": AUTH_PROXY_HEADERS_SPEC},
+ "persist_config": {"type": "bool"},
}
WAIT_ARG_SPEC = dict(
- wait=dict(type='bool', default=False),
- wait_sleep=dict(type='int', default=5),
- wait_timeout=dict(type='int', default=120),
+ wait=dict(type="bool", default=False),
+ wait_sleep=dict(type="int", default=5),
+ wait_timeout=dict(type="int", default=120),
wait_condition=dict(
- type='dict',
+ type="dict",
default=None,
options=dict(
type=dict(),
status=dict(default=True, choices=[True, False, "Unknown"]),
- reason=dict()
- )
- )
+ reason=dict(),
+ ),
+ ),
)
# Map kubernetes-client parameters to ansible parameters
AUTH_ARG_MAP = {
- 'kubeconfig': 'kubeconfig',
- 'context': 'context',
- 'host': 'host',
- 'api_key': 'api_key',
- 'username': 'username',
- 'password': 'password',
- 'verify_ssl': 'validate_certs',
- 'ssl_ca_cert': 'ca_cert',
- 'cert_file': 'client_cert',
- 'key_file': 'client_key',
- 'proxy': 'proxy',
- 'proxy_headers': 'proxy_headers',
- 'persist_config': 'persist_config',
+ "kubeconfig": "kubeconfig",
+ "context": "context",
+ "host": "host",
+ "api_key": "api_key",
+ "username": "username",
+ "password": "password",
+ "verify_ssl": "validate_certs",
+ "ssl_ca_cert": "ca_cert",
+ "cert_file": "client_cert",
+ "key_file": "client_key",
+ "proxy": "proxy",
+ "proxy_headers": "proxy_headers",
+ "persist_config": "persist_config",
}
NAME_ARG_SPEC = {
- 'kind': {},
- 'name': {},
- 'namespace': {},
- 'api_version': {
- 'default': 'v1',
- 'aliases': ['api', 'version'],
- },
+ "kind": {},
+ "name": {},
+ "namespace": {},
+ "api_version": {"default": "v1", "aliases": ["api", "version"]},
}
COMMON_ARG_SPEC = {
- 'state': {
- 'default': 'present',
- 'choices': ['present', 'absent'],
- },
- 'force': {
- 'type': 'bool',
- 'default': False,
- },
+ "state": {"default": "present", "choices": ["present", "absent"]},
+ "force": {"type": "bool", "default": False},
}
RESOURCE_ARG_SPEC = {
- 'resource_definition': {
- 'type': list_dict_str,
- 'aliases': ['definition', 'inline']
- },
- 'src': {
- 'type': 'path',
- },
+ "resource_definition": {"type": list_dict_str, "aliases": ["definition", "inline"]},
+ "src": {"type": "path"},
}
-ARG_ATTRIBUTES_BLACKLIST = ('property_path',)
+ARG_ATTRIBUTES_BLACKLIST = ("property_path",)
DELETE_OPTS_ARG_SPEC = {
- 'propagationPolicy': {
- 'choices': ['Foreground', 'Background', 'Orphan'],
- },
- 'gracePeriodSeconds': {
- 'type': 'int',
+ "propagationPolicy": {"choices": ["Foreground", "Background", "Orphan"]},
+ "gracePeriodSeconds": {"type": "int"},
+ "preconditions": {
+ "type": "dict",
+ "options": {"resourceVersion": {"type": "str"}, "uid": {"type": "str"}},
},
- 'preconditions': {
- 'type': 'dict',
- 'options': {
- 'resourceVersion': {
- 'type': 'str',
- },
- 'uid': {
- 'type': 'str',
- }
- }
- }
}
diff --git a/plugins/module_utils/client/discovery.py b/plugins/module_utils/client/discovery.py
index ffefab4343..5d0b0f823e 100644
--- a/plugins/module_utils/client/discovery.py
+++ b/plugins/module_utils/client/discovery.py
@@ -23,17 +23,26 @@
import kubernetes.dynamic
import kubernetes.dynamic.discovery
from kubernetes import __version__
-from kubernetes.dynamic.exceptions import (ResourceNotFoundError, ResourceNotUniqueError,
- ServiceUnavailableError)
+from kubernetes.dynamic.exceptions import (
+ ResourceNotFoundError,
+ ResourceNotUniqueError,
+ ServiceUnavailableError,
+)
-from ansible_collections.kubernetes.core.plugins.module_utils.client.resource import ResourceList
+from ansible_collections.kubernetes.core.plugins.module_utils.client.resource import (
+ ResourceList,
+)
class Discoverer(kubernetes.dynamic.discovery.Discoverer):
def __init__(self, client, cache_file):
self.client = client
- default_cache_file_name = 'k8srcp-{0}.json'.format(hashlib.sha256(self.__get_default_cache_id()).hexdigest())
- self.__cache_file = cache_file or os.path.join(tempfile.gettempdir(), default_cache_file_name)
+ default_cache_file_name = "k8srcp-{0}.json".format(
+ hashlib.sha256(self.__get_default_cache_id()).hexdigest()
+ )
+ self.__cache_file = cache_file or os.path.join(
+ tempfile.gettempdir(), default_cache_file_name
+ )
self.__init_cache()
def __get_default_cache_id(self):
@@ -42,21 +51,21 @@ def __get_default_cache_id(self):
cache_id = "{0}-{1}".format(self.client.configuration.host, user)
else:
cache_id = self.client.configuration.host
- return cache_id.encode('utf-8')
+ return cache_id.encode("utf-8")
def __get_user(self):
# This is intended to provide a portable method for getting a username.
# It could, and maybe should, be replaced by getpass.getuser() but, due
# to a lack of portability testing the original code is being left in
# place.
- if hasattr(os, 'getlogin'):
+ if hasattr(os, "getlogin"):
try:
user = os.getlogin()
if user:
return str(user)
except OSError:
pass
- if hasattr(os, 'getuid'):
+ if hasattr(os, "getuid"):
try:
user = os.getuid()
if user:
@@ -70,13 +79,13 @@ def __get_user(self):
def __init_cache(self, refresh=False):
if refresh or not os.path.exists(self.__cache_file):
- self._cache = {'library_version': __version__}
+ self._cache = {"library_version": __version__}
refresh = True
else:
try:
- with open(self.__cache_file, 'r') as f:
+ with open(self.__cache_file, "r") as f:
self._cache = json.load(f, cls=partial(CacheDecoder, self.client))
- if self._cache.get('library_version') != __version__:
+ if self._cache.get("library_version") != __version__:
# Version mismatch, need to refresh cache
self.invalidate_cache()
except Exception:
@@ -92,21 +101,25 @@ def get_resources_for_api_version(self, prefix, group, version, preferred):
resources = defaultdict(list)
subresources = defaultdict(dict)
- path = '/'.join(filter(None, [prefix, group, version]))
+ path = "/".join(filter(None, [prefix, group, version]))
try:
- resources_response = self.client.request('GET', path).resources or []
+ resources_response = self.client.request("GET", path).resources or []
except ServiceUnavailableError:
resources_response = []
- resources_raw = list(filter(lambda resource: '/' not in resource['name'], resources_response))
- subresources_raw = list(filter(lambda resource: '/' in resource['name'], resources_response))
+ resources_raw = list(
+ filter(lambda resource: "/" not in resource["name"], resources_response)
+ )
+ subresources_raw = list(
+ filter(lambda resource: "/" in resource["name"], resources_response)
+ )
for subresource in subresources_raw:
- resource, name = subresource['name'].split('/')
+ resource, name = subresource["name"].split("/")
subresources[resource][name] = subresource
for resource in resources_raw:
# Prevent duplicate keys
- for key in ('prefix', 'group', 'api_version', 'client', 'preferred'):
+ for key in ("prefix", "group", "api_version", "client", "preferred"):
resource.pop(key, None)
resourceobj = kubernetes.dynamic.Resource(
@@ -115,19 +128,25 @@ def get_resources_for_api_version(self, prefix, group, version, preferred):
api_version=version,
client=self.client,
preferred=preferred,
- subresources=subresources.get(resource['name']),
+ subresources=subresources.get(resource["name"]),
**resource
)
- resources[resource['kind']].append(resourceobj)
+ resources[resource["kind"]].append(resourceobj)
resource_lookup = {
- 'prefix': prefix,
- 'group': group,
- 'api_version': version,
- 'kind': resourceobj.kind,
- 'name': resourceobj.name
+ "prefix": prefix,
+ "group": group,
+ "api_version": version,
+ "kind": resourceobj.kind,
+ "name": resourceobj.name,
}
- resource_list = ResourceList(self.client, group=group, api_version=version, base_kind=resource['kind'], base_resource_lookup=resource_lookup)
+ resource_list = ResourceList(
+ self.client,
+ group=group,
+ api_version=version,
+ base_kind=resource["kind"],
+ base_resource_lookup=resource_lookup,
+ )
resources[resource_list.kind].append(resource_list)
return resources
@@ -139,23 +158,32 @@ def get(self, **kwargs):
"""
results = self.search(**kwargs)
# If there are multiple matches, prefer exact matches on api_version
- if len(results) > 1 and kwargs.get('api_version'):
+ if len(results) > 1 and kwargs.get("api_version"):
results = [
- result for result in results if result.group_version == kwargs['api_version']
+ result
+ for result in results
+ if result.group_version == kwargs["api_version"]
]
# If there are multiple matches, prefer non-List kinds
if len(results) > 1 and not all(isinstance(x, ResourceList) for x in results):
- results = [result for result in results if not isinstance(result, ResourceList)]
+ results = [
+ result for result in results if not isinstance(result, ResourceList)
+ ]
# if multiple resources are found that share a GVK, prefer the one with the most supported verbs
- if len(results) > 1 and len(set((x.group_version, x.kind) for x in results)) == 1:
+ if (
+ len(results) > 1
+ and len(set((x.group_version, x.kind) for x in results)) == 1
+ ):
if len(set(len(x.verbs) for x in results)) != 1:
results = [max(results, key=lambda x: len(x.verbs))]
if len(results) == 1:
return results[0]
elif not results:
- raise ResourceNotFoundError('No matches found for {0}'.format(kwargs))
+ raise ResourceNotFoundError("No matches found for {0}".format(kwargs))
else:
- raise ResourceNotUniqueError('Multiple matches found for {0}: {1}'.format(kwargs, results))
+ raise ResourceNotUniqueError(
+ "Multiple matches found for {0}: {1}".format(kwargs, results)
+ )
class LazyDiscoverer(Discoverer, kubernetes.dynamic.LazyDiscoverer):
@@ -174,13 +202,15 @@ def __init__(self, client, *args, **kwargs):
json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)
def object_hook(self, obj):
- if '_type' not in obj:
+ if "_type" not in obj:
return obj
- _type = obj.pop('_type')
- if _type == 'Resource':
+ _type = obj.pop("_type")
+ if _type == "Resource":
return kubernetes.dynamic.Resource(client=self.client, **obj)
- elif _type == 'ResourceList':
+ elif _type == "ResourceList":
return ResourceList(self.client, **obj)
- elif _type == 'ResourceGroup':
- return kubernetes.dynamic.discovery.ResourceGroup(obj['preferred'], resources=self.object_hook(obj['resources']))
+ elif _type == "ResourceGroup":
+ return kubernetes.dynamic.discovery.ResourceGroup(
+ obj["preferred"], resources=self.object_hook(obj["resources"])
+ )
return obj
diff --git a/plugins/module_utils/client/resource.py b/plugins/module_utils/client/resource.py
index 39d8a1cfc4..3c0d402aea 100644
--- a/plugins/module_utils/client/resource.py
+++ b/plugins/module_utils/client/resource.py
@@ -14,6 +14,7 @@
from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
@@ -21,11 +22,19 @@
class ResourceList(kubernetes.dynamic.resource.ResourceList):
- def __init__(self, client, group='', api_version='v1', base_kind='', kind=None, base_resource_lookup=None):
+ def __init__(
+ self,
+ client,
+ group="",
+ api_version="v1",
+ base_kind="",
+ kind=None,
+ base_resource_lookup=None,
+ ):
self.client = client
self.group = group
self.api_version = api_version
- self.kind = kind or '{0}List'.format(base_kind)
+ self.kind = kind or "{0}List".format(base_kind)
self.base_kind = base_kind
self.base_resource_lookup = base_resource_lookup
self.__base_resource = None
@@ -34,16 +43,18 @@ def base_resource(self):
if self.__base_resource:
return self.__base_resource
elif self.base_resource_lookup:
- self.__base_resource = self.client.resources.get(**self.base_resource_lookup)
+ self.__base_resource = self.client.resources.get(
+ **self.base_resource_lookup
+ )
return self.__base_resource
return None
def to_dict(self):
return {
- '_type': 'ResourceList',
- 'group': self.group,
- 'api_version': self.api_version,
- 'kind': self.kind,
- 'base_kind': self.base_kind,
- 'base_resource_lookup': self.base_resource_lookup
+ "_type": "ResourceList",
+ "group": self.group,
+ "api_version": self.api_version,
+ "kind": self.kind,
+ "base_kind": self.base_kind,
+ "base_resource_lookup": self.base_resource_lookup,
}
diff --git a/plugins/module_utils/common.py b/plugins/module_utils/common.py
index c3ca521721..d0a6b0f510 100644
--- a/plugins/module_utils/common.py
+++ b/plugins/module_utils/common.py
@@ -16,6 +16,7 @@
# along with Ansible. If not, see .
from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
import base64
@@ -27,9 +28,17 @@
from datetime import datetime
from distutils.version import LooseVersion
-from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (AUTH_ARG_MAP, AUTH_ARG_SPEC, AUTH_PROXY_HEADERS_SPEC)
-from ansible_collections.kubernetes.core.plugins.module_utils.hashes import generate_hash
-from ansible_collections.kubernetes.core.plugins.module_utils.selector import LabelSelectorFilter
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
+ AUTH_ARG_MAP,
+ AUTH_ARG_SPEC,
+ AUTH_PROXY_HEADERS_SPEC,
+)
+from ansible_collections.kubernetes.core.plugins.module_utils.hashes import (
+ generate_hash,
+)
+from ansible_collections.kubernetes.core.plugins.module_utils.selector import (
+ LabelSelectorFilter,
+)
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.six import iteritems, string_types
@@ -41,10 +50,17 @@
try:
import kubernetes
from kubernetes.dynamic.exceptions import (
- NotFoundError, ResourceNotFoundError, ResourceNotUniqueError, DynamicApiError,
- ConflictError, ForbiddenError, MethodNotAllowedError, BadRequestError,
- KubernetesValidateMissing
+ NotFoundError,
+ ResourceNotFoundError,
+ ResourceNotUniqueError,
+ DynamicApiError,
+ ConflictError,
+ ForbiddenError,
+ MethodNotAllowedError,
+ BadRequestError,
+ KubernetesValidateMissing,
)
+
HAS_K8S_MODULE_HELPER = True
k8s_import_exception = None
except ImportError as e:
@@ -54,8 +70,13 @@
IMP_K8S_CLIENT = None
try:
- from ansible_collections.kubernetes.core.plugins.module_utils import k8sdynamicclient
- from ansible_collections.kubernetes.core.plugins.module_utils.client.discovery import LazyDiscoverer
+ from ansible_collections.kubernetes.core.plugins.module_utils import (
+ k8sdynamicclient,
+ )
+ from ansible_collections.kubernetes.core.plugins.module_utils.client.discovery import (
+ LazyDiscoverer,
+ )
+
IMP_K8S_CLIENT = True
except ImportError as e:
IMP_K8S_CLIENT = False
@@ -65,6 +86,7 @@
YAML_IMP_ERR = None
try:
import yaml
+
HAS_YAML = True
except ImportError:
YAML_IMP_ERR = traceback.format_exc()
@@ -72,24 +94,31 @@
HAS_K8S_APPLY = None
try:
- from ansible_collections.kubernetes.core.plugins.module_utils.apply import apply_object
+ from ansible_collections.kubernetes.core.plugins.module_utils.apply import (
+ apply_object,
+ )
+
HAS_K8S_APPLY = True
except ImportError:
HAS_K8S_APPLY = False
try:
import urllib3
+
urllib3.disable_warnings()
except ImportError:
pass
try:
- from ansible_collections.kubernetes.core.plugins.module_utils.apply import recursive_diff
+ from ansible_collections.kubernetes.core.plugins.module_utils.apply import (
+ recursive_diff,
+ )
except ImportError:
from ansible.module_utils.common.dict_transformations import recursive_diff
try:
from kubernetes.dynamic.resource import ResourceInstance
+
HAS_K8S_INSTANCE_HELPER = True
k8s_import_exception = None
except ImportError as e:
@@ -122,6 +151,7 @@ def _raise_or_fail(exc, msg):
if module:
module.fail_json(msg=msg % to_native(exc))
raise exc
+
# If authorization variables aren't defined, look for them in environment variables
for true_name, arg_name in AUTH_ARG_MAP.items():
if module and module.params.get(arg_name) is not None:
@@ -132,51 +162,64 @@ def _raise_or_fail(exc, msg):
# specific case for 'proxy_headers' which is a dictionary
proxy_headers = {}
for key in AUTH_PROXY_HEADERS_SPEC.keys():
- env_value = os.getenv('K8S_AUTH_PROXY_HEADERS_{0}'.format(key.upper()), None)
+ env_value = os.getenv(
+ "K8S_AUTH_PROXY_HEADERS_{0}".format(key.upper()), None
+ )
if env_value is not None:
- if AUTH_PROXY_HEADERS_SPEC[key].get('type') == 'bool':
- env_value = env_value.lower() not in ['0', 'false', 'no']
+ if AUTH_PROXY_HEADERS_SPEC[key].get("type") == "bool":
+ env_value = env_value.lower() not in ["0", "false", "no"]
proxy_headers[key] = env_value
if proxy_headers is not {}:
auth[true_name] = proxy_headers
else:
- env_value = os.getenv('K8S_AUTH_{0}'.format(arg_name.upper()), None) or os.getenv('K8S_AUTH_{0}'.format(true_name.upper()), None)
+ env_value = os.getenv(
+ "K8S_AUTH_{0}".format(arg_name.upper()), None
+ ) or os.getenv("K8S_AUTH_{0}".format(true_name.upper()), None)
if env_value is not None:
- if AUTH_ARG_SPEC[arg_name].get('type') == 'bool':
- env_value = env_value.lower() not in ['0', 'false', 'no']
+ if AUTH_ARG_SPEC[arg_name].get("type") == "bool":
+ env_value = env_value.lower() not in ["0", "false", "no"]
auth[true_name] = env_value
def auth_set(*names):
return all(auth.get(name) for name in names)
def _load_config():
- kubeconfig = auth.get('kubeconfig')
+ kubeconfig = auth.get("kubeconfig")
optional_arg = {
- 'context': auth.get('context'),
- 'persist_config': auth.get('persist_config'),
+ "context": auth.get("context"),
+ "persist_config": auth.get("persist_config"),
}
if kubeconfig:
if isinstance(kubeconfig, string_types):
- kubernetes.config.load_kube_config(config_file=kubeconfig, **optional_arg)
+ kubernetes.config.load_kube_config(
+ config_file=kubeconfig, **optional_arg
+ )
elif isinstance(kubeconfig, dict):
if LooseVersion(kubernetes.__version__) < LooseVersion("17.17"):
- _raise_or_fail(Exception("kubernetes >= 17.17.0 is required to use in-memory kubeconfig."), 'Failed to load kubeconfig due to: %s')
- kubernetes.config.load_kube_config_from_dict(config_dict=kubeconfig, **optional_arg)
+ _raise_or_fail(
+ Exception(
+ "kubernetes >= 17.17.0 is required to use in-memory kubeconfig."
+ ),
+ "Failed to load kubeconfig due to: %s",
+ )
+ kubernetes.config.load_kube_config_from_dict(
+ config_dict=kubeconfig, **optional_arg
+ )
else:
kubernetes.config.load_kube_config(config_file=None, **optional_arg)
- if auth_set('host'):
+ if auth_set("host"):
# Removing trailing slashes if any from hostname
- auth['host'] = auth.get('host').rstrip('/')
+ auth["host"] = auth.get("host").rstrip("/")
- if auth_set('username', 'password', 'host') or auth_set('api_key', 'host'):
+ if auth_set("username", "password", "host") or auth_set("api_key", "host"):
# We have enough in the parameters to authenticate, no need to load incluster or kubeconfig
pass
- elif auth_set('kubeconfig') or auth_set('context'):
+ elif auth_set("kubeconfig") or auth_set("context"):
try:
_load_config()
except Exception as err:
- _raise_or_fail(err, 'Failed to load kubeconfig due to %s')
+ _raise_or_fail(err, "Failed to load kubeconfig due to %s")
else:
# First try to do incluster config, then kubeconfig
@@ -186,7 +229,7 @@ def _load_config():
try:
_load_config()
except Exception as err:
- _raise_or_fail(err, 'Failed to load kubeconfig due to %s')
+ _raise_or_fail(err, "Failed to load kubeconfig due to %s")
# Override any values in the default configuration with Ansible parameters
# As of kubernetes-client v12.0.0, get_default_copy() is required here
@@ -197,9 +240,11 @@ def _load_config():
for key, value in iteritems(auth):
if key in AUTH_ARG_MAP.keys() and value is not None:
- if key == 'api_key':
- setattr(configuration, key, {'authorization': "Bearer {0}".format(value)})
- elif key == 'proxy_headers':
+ if key == "api_key":
+ setattr(
+ configuration, key, {"authorization": "Bearer {0}".format(value)}
+ )
+ elif key == "proxy_headers":
headers = urllib3.util.make_headers(**value)
setattr(configuration, key, headers)
else:
@@ -211,9 +256,11 @@ def _load_config():
return client
try:
- client = k8sdynamicclient.K8SDynamicClient(kubernetes.client.ApiClient(configuration), discoverer=LazyDiscoverer)
+ client = k8sdynamicclient.K8SDynamicClient(
+ kubernetes.client.ApiClient(configuration), discoverer=LazyDiscoverer
+ )
except Exception as err:
- _raise_or_fail(err, 'Failed to get client due to %s')
+ _raise_or_fail(err, "Failed to get client due to %s")
get_api_client._pool[digest] = client
return client
@@ -223,35 +270,65 @@ def _load_config():
class K8sAnsibleMixin(object):
-
def __init__(self, module, pyyaml_required=True, *args, **kwargs):
if not HAS_K8S_MODULE_HELPER:
- module.fail_json(msg=missing_required_lib('kubernetes'), exception=K8S_IMP_ERR,
- error=to_native(k8s_import_exception))
+ module.fail_json(
+ msg=missing_required_lib("kubernetes"),
+ exception=K8S_IMP_ERR,
+ error=to_native(k8s_import_exception),
+ )
self.kubernetes_version = kubernetes.__version__
- self.supports_dry_run = LooseVersion(self.kubernetes_version) >= LooseVersion("18.20.0")
+ self.supports_dry_run = LooseVersion(self.kubernetes_version) >= LooseVersion(
+ "18.20.0"
+ )
if pyyaml_required and not HAS_YAML:
module.fail_json(msg=missing_required_lib("PyYAML"), exception=YAML_IMP_ERR)
def find_resource(self, kind, api_version, fail=False):
- for attribute in ['kind', 'name', 'singular_name']:
+ for attribute in ["kind", "name", "singular_name"]:
try:
- return self.client.resources.get(**{'api_version': api_version, attribute: kind})
+ return self.client.resources.get(
+ **{"api_version": api_version, attribute: kind}
+ )
except (ResourceNotFoundError, ResourceNotUniqueError):
pass
try:
- return self.client.resources.get(api_version=api_version, short_names=[kind])
+ return self.client.resources.get(
+ api_version=api_version, short_names=[kind]
+ )
except (ResourceNotFoundError, ResourceNotUniqueError):
if fail:
- self.fail(msg='Failed to find exact match for {0}.{1} by [kind, name, singularName, shortNames]'.format(api_version, kind))
-
- def kubernetes_facts(self, kind, api_version, name=None, namespace=None, label_selectors=None, field_selectors=None,
- wait=False, wait_sleep=5, wait_timeout=120, state='present', condition=None):
+ self.fail(
+ msg="Failed to find exact match for {0}.{1} by [kind, name, singularName, shortNames]".format(
+ api_version, kind
+ )
+ )
+
+ def kubernetes_facts(
+ self,
+ kind,
+ api_version,
+ name=None,
+ namespace=None,
+ label_selectors=None,
+ field_selectors=None,
+ wait=False,
+ wait_sleep=5,
+ wait_timeout=120,
+ state="present",
+ condition=None,
+ ):
resource = self.find_resource(kind, api_version)
api_found = bool(resource)
if not api_found:
- return dict(resources=[], msg='Failed to find API for resource with apiVersion "{0}" and kind "{1}"'.format(api_version, kind), api_found=False)
+ return dict(
+ resources=[],
+ msg='Failed to find API for resource with apiVersion "{0}" and kind "{1}"'.format(
+ api_version, kind
+ ),
+ api_found=False,
+ )
if not label_selectors:
label_selectors = []
@@ -260,9 +337,12 @@ def kubernetes_facts(self, kind, api_version, name=None, namespace=None, label_s
result = None
try:
- result = resource.get(name=name, namespace=namespace,
- label_selector=','.join(label_selectors),
- field_selector=','.join(field_selectors))
+ result = resource.get(
+ name=name,
+ namespace=namespace,
+ label_selector=",".join(label_selectors),
+ field_selector=",".join(field_selectors),
+ )
except BadRequestError:
return dict(resources=[], api_found=True)
except NotFoundError:
@@ -271,8 +351,8 @@ def kubernetes_facts(self, kind, api_version, name=None, namespace=None, label_s
if not wait:
result = result.to_dict()
- if 'items' in result:
- return dict(resources=result['items'], api_found=True)
+ if "items" in result:
+ return dict(resources=result["items"], api_found=True)
return dict(resources=[result], api_found=True)
start = datetime.now()
@@ -283,9 +363,12 @@ def _elapsed():
if result is None:
while _elapsed() < wait_timeout:
try:
- result = resource.get(name=name, namespace=namespace,
- label_selector=','.join(label_selectors),
- field_selector=','.join(field_selectors))
+ result = resource.get(
+ name=name,
+ namespace=namespace,
+ label_selector=",".join(label_selectors),
+ field_selector=",".join(field_selectors),
+ )
break
except NotFoundError:
pass
@@ -296,23 +379,30 @@ def _elapsed():
if isinstance(result, ResourceInstance):
satisfied_by = []
# We have a list of ResourceInstance
- resource_list = result.get('items', [])
+ resource_list = result.get("items", [])
if not resource_list:
resource_list = [result]
for resource_instance in resource_list:
- success, res, duration = self.wait(resource, resource_instance,
- sleep=wait_sleep, timeout=wait_timeout,
- state=state, condition=condition)
+ success, res, duration = self.wait(
+ resource,
+ resource_instance,
+ sleep=wait_sleep,
+ timeout=wait_timeout,
+ state=state,
+ condition=condition,
+ )
if not success:
- self.fail(msg="Failed to gather information about %s(s) even"
- " after waiting for %s seconds" % (res.get('kind'), duration))
+ self.fail(
+ msg="Failed to gather information about %s(s) even"
+ " after waiting for %s seconds" % (res.get("kind"), duration)
+ )
satisfied_by.append(res)
return dict(resources=satisfied_by, api_found=True)
result = result.to_dict()
- if 'items' in result:
- return dict(resources=result['items'], api_found=True)
+ if "items" in result:
+ return dict(resources=result["items"], api_found=True)
return dict(resources=[result], api_found=True)
def remove_aliases(self):
@@ -320,8 +410,8 @@ def remove_aliases(self):
The helper doesn't know what to do with aliased keys
"""
for k, v in iteritems(self.argspec):
- if 'aliases' in v:
- for alias in v['aliases']:
+ if "aliases" in v:
+ for alias in v["aliases"]:
if alias in self.params:
self.params.pop(alias)
@@ -332,7 +422,7 @@ def load_resource_definitions(self, src):
if not os.path.exists(path):
self.fail(msg="Error accessing {0}. Does the file exist?".format(path))
try:
- with open(path, 'r') as f:
+ with open(path, "r") as f:
result = list(yaml.safe_load_all(f))
except (IOError, yaml.YAMLError) as exc:
self.fail(msg="Error loading resource_definition: {0}".format(exc))
@@ -344,29 +434,43 @@ def diff_objects(self, existing, new):
if not diff:
return True, result
- result['before'] = diff[0]
- result['after'] = diff[1]
+ result["before"] = diff[0]
+ result["after"] = diff[1]
# If only metadata.generation and metadata.resourceVersion changed, ignore it
- ignored_keys = set(['generation', 'resourceVersion'])
+ ignored_keys = set(["generation", "resourceVersion"])
- if list(result['after'].keys()) != ['metadata'] or list(result['before'].keys()) != ['metadata']:
+ if list(result["after"].keys()) != ["metadata"] or list(
+ result["before"].keys()
+ ) != ["metadata"]:
return False, result
- if not set(result['after']['metadata'].keys()).issubset(ignored_keys):
+ if not set(result["after"]["metadata"].keys()).issubset(ignored_keys):
return False, result
- if not set(result['before']['metadata'].keys()).issubset(ignored_keys):
+ if not set(result["before"]["metadata"].keys()).issubset(ignored_keys):
return False, result
- if hasattr(self, 'warn'):
- self.warn('No meaningful diff was generated, but the API may not be idempotent (only metadata.generation or metadata.resourceVersion were changed)')
+ if hasattr(self, "warn"):
+ self.warn(
+ "No meaningful diff was generated, but the API may not be idempotent (only metadata.generation or metadata.resourceVersion were changed)"
+ )
return True, result
def fail(self, msg=None):
self.fail_json(msg=msg)
- def _wait_for(self, resource, name, namespace, predicate, sleep, timeout, state, label_selectors=None):
+ def _wait_for(
+ self,
+ resource,
+ name,
+ namespace,
+ predicate,
+ sleep,
+ timeout,
+ state,
+ label_selectors=None,
+ ):
start = datetime.now()
def _wait_for_elapsed():
@@ -377,7 +481,7 @@ def _wait_for_elapsed():
try:
params = dict(name=name, namespace=namespace)
if label_selectors:
- params['label_selector'] = ','.join(label_selectors)
+ params["label_selector"] = ",".join(label_selectors)
response = resource.get(**params)
if predicate(response):
if response:
@@ -385,14 +489,22 @@ def _wait_for_elapsed():
return True, {}, _wait_for_elapsed()
time.sleep(sleep)
except NotFoundError:
- if state == 'absent':
+ if state == "absent":
return True, {}, _wait_for_elapsed()
if response:
response = response.to_dict()
return False, response, _wait_for_elapsed()
- def wait(self, resource, definition, sleep, timeout, state='present', condition=None, label_selectors=None):
-
+ def wait(
+ self,
+ resource,
+ definition,
+ sleep,
+ timeout,
+ state="present",
+ condition=None,
+ label_selectors=None,
+ ):
def _deployment_ready(deployment):
# FIXME: frustratingly bool(deployment.status) is True even if status is empty
# Furthermore deployment.status.availableReplicas == deployment.status.replicas == None if status is empty
@@ -400,73 +512,97 @@ def _deployment_ready(deployment):
# Scaling up means that we also need to check that we're not in a
# situation where status.replicas == status.availableReplicas
# but spec.replicas != status.replicas
- return (deployment.status
- and deployment.spec.replicas == (deployment.status.replicas or 0)
- and deployment.status.availableReplicas == deployment.status.replicas
- and deployment.status.observedGeneration == deployment.metadata.generation
- and not deployment.status.unavailableReplicas)
+ return (
+ deployment.status
+ and deployment.spec.replicas == (deployment.status.replicas or 0)
+ and deployment.status.availableReplicas == deployment.status.replicas
+ and deployment.status.observedGeneration
+ == deployment.metadata.generation
+ and not deployment.status.unavailableReplicas
+ )
def _pod_ready(pod):
- return (pod.status and pod.status.containerStatuses is not None
- and all(container.ready for container in pod.status.containerStatuses))
+ return (
+ pod.status
+ and pod.status.containerStatuses is not None
+ and all(container.ready for container in pod.status.containerStatuses)
+ )
def _daemonset_ready(daemonset):
- return (daemonset.status and daemonset.status.desiredNumberScheduled is not None
- and daemonset.status.updatedNumberScheduled == daemonset.status.desiredNumberScheduled
- and daemonset.status.numberReady == daemonset.status.desiredNumberScheduled
- and daemonset.status.observedGeneration == daemonset.metadata.generation
- and not daemonset.status.unavailableReplicas)
+ return (
+ daemonset.status
+ and daemonset.status.desiredNumberScheduled is not None
+ and daemonset.status.updatedNumberScheduled
+ == daemonset.status.desiredNumberScheduled
+ and daemonset.status.numberReady
+ == daemonset.status.desiredNumberScheduled
+ and daemonset.status.observedGeneration == daemonset.metadata.generation
+ and not daemonset.status.unavailableReplicas
+ )
def _statefulset_ready(statefulset):
- return (statefulset.status and statefulset.spec.updateStrategy.type == "RollingUpdate"
- and statefulset.status.observedGeneration == (statefulset.metadata.generation or 0)
- and statefulset.status.updateRevision == statefulset.status.currentRevision
- and statefulset.status.updatedReplicas == statefulset.spec.replicas
- and statefulset.status.readyReplicas == statefulset.spec.replicas
- and statefulset.status.replicas == statefulset.spec.replicas)
+ return (
+ statefulset.status
+ and statefulset.spec.updateStrategy.type == "RollingUpdate"
+ and statefulset.status.observedGeneration
+ == (statefulset.metadata.generation or 0)
+ and statefulset.status.updateRevision
+ == statefulset.status.currentRevision
+ and statefulset.status.updatedReplicas == statefulset.spec.replicas
+ and statefulset.status.readyReplicas == statefulset.spec.replicas
+ and statefulset.status.replicas == statefulset.spec.replicas
+ )
def _custom_condition(resource):
if not resource.status or not resource.status.conditions:
return False
- match = [x for x in resource.status.conditions if x.type == condition['type']]
+ match = [
+ x for x in resource.status.conditions if x.type == condition["type"]
+ ]
if not match:
return False
# There should never be more than one condition of a specific type
match = match[0]
- if match.status == 'Unknown':
- if match.status == condition['status']:
- if 'reason' not in condition:
+ if match.status == "Unknown":
+ if match.status == condition["status"]:
+ if "reason" not in condition:
return True
- if condition['reason']:
- return match.reason == condition['reason']
+ if condition["reason"]:
+ return match.reason == condition["reason"]
return False
- status = True if match.status == 'True' else False
- if status == boolean(condition['status'], strict=False):
- if condition.get('reason'):
- return match.reason == condition['reason']
+ status = True if match.status == "True" else False
+ if status == boolean(condition["status"], strict=False):
+ if condition.get("reason"):
+ return match.reason == condition["reason"]
return True
return False
def _resource_absent(resource):
- return not resource or (resource.kind.endswith('List') and resource.items == [])
+ return not resource or (
+ resource.kind.endswith("List") and resource.items == []
+ )
waiter = dict(
StatefulSet=_statefulset_ready,
Deployment=_deployment_ready,
DaemonSet=_daemonset_ready,
- Pod=_pod_ready
+ Pod=_pod_ready,
)
- kind = definition['kind']
- if state == 'present':
- predicate = waiter.get(kind, lambda x: x) if not condition else _custom_condition
+ kind = definition["kind"]
+ if state == "present":
+ predicate = (
+ waiter.get(kind, lambda x: x) if not condition else _custom_condition
+ )
else:
predicate = _resource_absent
- name = definition['metadata']['name']
- namespace = definition['metadata'].get('namespace')
- return self._wait_for(resource, name, namespace, predicate, sleep, timeout, state, label_selectors)
+ name = definition["metadata"]["name"]
+ namespace = definition["metadata"].get("namespace")
+ return self._wait_for(
+ resource, name, namespace, predicate, sleep, timeout, state, label_selectors
+ )
def set_resource_definitions(self, module):
- resource_definition = module.params.get('resource_definition')
+ resource_definition = module.params.get("resource_definition")
self.resource_definitions = []
if resource_definition:
@@ -487,22 +623,26 @@ def set_resource_definitions(self, module):
else:
self.resource_definitions = [resource_definition]
- src = module.params.get('src')
+ src = module.params.get("src")
if src:
self.resource_definitions = self.load_resource_definitions(src)
try:
- self.resource_definitions = [item for item in self.resource_definitions if item]
+ self.resource_definitions = [
+ item for item in self.resource_definitions if item
+ ]
except AttributeError:
pass
if not resource_definition and not src:
implicit_definition = dict(
- kind=module.params['kind'],
- apiVersion=module.params['api_version'],
- metadata=dict(name=module.params['name'])
+ kind=module.params["kind"],
+ apiVersion=module.params["api_version"],
+ metadata=dict(name=module.params["name"]),
)
- if module.params.get('namespace'):
- implicit_definition['metadata']['namespace'] = module.params.get('namespace')
+ if module.params.get("namespace"):
+ implicit_definition["metadata"]["namespace"] = module.params.get(
+ "namespace"
+ )
self.resource_definitions = [implicit_definition]
def check_library_version(self):
@@ -513,8 +653,12 @@ def flatten_list_kind(self, list_resource, definitions):
flattened = []
parent_api_version = list_resource.group_version if list_resource else None
parent_kind = list_resource.kind[:-4] if list_resource else None
- for definition in definitions.get('items', []):
- resource = self.find_resource(definition.get('kind', parent_kind), definition.get('apiVersion', parent_api_version), fail=True)
+ for definition in definitions.get("items", []):
+ resource = self.find_resource(
+ definition.get("kind", parent_kind),
+ definition.get("apiVersion", parent_api_version),
+ fail=True,
+ )
flattened.append((resource, self.set_defaults(resource, definition)))
return flattened
@@ -531,86 +675,95 @@ def execute_module(self):
for definition in self.resource_definitions:
if definition is None:
continue
- kind = definition.get('kind', self.kind)
- api_version = definition.get('apiVersion', self.api_version)
- if kind and kind.endswith('List'):
+ kind = definition.get("kind", self.kind)
+ api_version = definition.get("apiVersion", self.api_version)
+ if kind and kind.endswith("List"):
resource = self.find_resource(kind, api_version, fail=False)
- flattened_definitions.extend(self.flatten_list_kind(resource, definition))
+ flattened_definitions.extend(
+ self.flatten_list_kind(resource, definition)
+ )
else:
resource = self.find_resource(kind, api_version, fail=True)
flattened_definitions.append((resource, definition))
for (resource, definition) in flattened_definitions:
- kind = definition.get('kind', self.kind)
- api_version = definition.get('apiVersion', self.api_version)
+ kind = definition.get("kind", self.kind)
+ api_version = definition.get("apiVersion", self.api_version)
definition = self.set_defaults(resource, definition)
self.warnings = []
- if self.params['validate'] is not None:
+ if self.params["validate"] is not None:
self.warnings = self.validate(definition)
result = self.perform_action(resource, definition)
if self.warnings:
- result['warnings'] = self.warnings
- changed = changed or result['changed']
+ result["warnings"] = self.warnings
+ changed = changed or result["changed"]
results.append(result)
if len(results) == 1:
self.exit_json(**results[0])
- self.exit_json(**{
- 'changed': changed,
- 'result': {
- 'results': results
- }
- })
+ self.exit_json(**{"changed": changed, "result": {"results": results}})
def validate(self, resource):
def _prepend_resource_info(resource, msg):
- return "%s %s: %s" % (resource['kind'], resource['metadata']['name'], msg)
+ return "%s %s: %s" % (resource["kind"], resource["metadata"]["name"], msg)
try:
- warnings, errors = self.client.validate(resource, self.params['validate'].get('version'), self.params['validate'].get('strict'))
+ warnings, errors = self.client.validate(
+ resource,
+ self.params["validate"].get("version"),
+ self.params["validate"].get("strict"),
+ )
except KubernetesValidateMissing:
- self.fail_json(msg="kubernetes-validate python library is required to validate resources")
+ self.fail_json(
+ msg="kubernetes-validate python library is required to validate resources"
+ )
- if errors and self.params['validate']['fail_on_error']:
- self.fail_json(msg="\n".join([_prepend_resource_info(resource, error) for error in errors]))
+ if errors and self.params["validate"]["fail_on_error"]:
+ self.fail_json(
+ msg="\n".join(
+ [_prepend_resource_info(resource, error) for error in errors]
+ )
+ )
else:
return [_prepend_resource_info(resource, msg) for msg in warnings + errors]
def set_defaults(self, resource, definition):
- definition['kind'] = resource.kind
- definition['apiVersion'] = resource.group_version
- metadata = definition.get('metadata', {})
- if not metadata.get('name') and not metadata.get('generateName'):
+ definition["kind"] = resource.kind
+ definition["apiVersion"] = resource.group_version
+ metadata = definition.get("metadata", {})
+ if not metadata.get("name") and not metadata.get("generateName"):
if self.name:
- metadata['name'] = self.name
+ metadata["name"] = self.name
elif self.generate_name:
- metadata['generateName'] = self.generate_name
- if resource.namespaced and self.namespace and not metadata.get('namespace'):
- metadata['namespace'] = self.namespace
- definition['metadata'] = metadata
+ metadata["generateName"] = self.generate_name
+ if resource.namespaced and self.namespace and not metadata.get("namespace"):
+ metadata["namespace"] = self.namespace
+ definition["metadata"] = metadata
return definition
def perform_action(self, resource, definition):
- append_hash = self.params.get('append_hash', False)
- apply = self.params.get('apply', False)
- delete_options = self.params.get('delete_options')
- result = {'changed': False, 'result': {}}
- state = self.params.get('state', None)
- force = self.params.get('force', False)
- name = definition['metadata'].get('name')
- generate_name = definition['metadata'].get('generateName')
- origin_name = definition['metadata'].get('name')
- namespace = definition['metadata'].get('namespace')
+ append_hash = self.params.get("append_hash", False)
+ apply = self.params.get("apply", False)
+ delete_options = self.params.get("delete_options")
+ result = {"changed": False, "result": {}}
+ state = self.params.get("state", None)
+ force = self.params.get("force", False)
+ name = definition["metadata"].get("name")
+ generate_name = definition["metadata"].get("generateName")
+ origin_name = definition["metadata"].get("name")
+ namespace = definition["metadata"].get("namespace")
existing = None
- wait = self.params.get('wait')
- wait_sleep = self.params.get('wait_sleep')
- wait_timeout = self.params.get('wait_timeout')
+ wait = self.params.get("wait")
+ wait_sleep = self.params.get("wait_sleep")
+ wait_timeout = self.params.get("wait_timeout")
wait_condition = None
- continue_on_error = self.params.get('continue_on_error')
- label_selectors = self.params.get('label_selectors')
- if self.params.get('wait_condition') and self.params['wait_condition'].get('type'):
- wait_condition = self.params['wait_condition']
+ continue_on_error = self.params.get("continue_on_error")
+ label_selectors = self.params.get("label_selectors")
+ if self.params.get("wait_condition") and self.params["wait_condition"].get(
+ "type"
+ ):
+ wait_condition = self.params["wait_condition"]
def build_error_msg(kind, name, msg):
return "%s %s: %s" % (kind, name, msg)
@@ -619,26 +772,31 @@ def build_error_msg(kind, name, msg):
try:
# ignore append_hash for resources other than ConfigMap and Secret
- if append_hash and definition['kind'] in ['ConfigMap', 'Secret']:
+ if append_hash and definition["kind"] in ["ConfigMap", "Secret"]:
if name:
- name = '%s-%s' % (name, generate_hash(definition))
- definition['metadata']['name'] = name
+ name = "%s-%s" % (name, generate_hash(definition))
+ definition["metadata"]["name"] = name
elif generate_name:
- definition['metadata']['generateName'] = '%s-%s' % (generate_name, generate_hash(definition))
+ definition["metadata"]["generateName"] = "%s-%s" % (
+ generate_name,
+ generate_hash(definition),
+ )
params = {}
if name:
- params['name'] = name
+ params["name"] = name
if namespace:
- params['namespace'] = namespace
+ params["namespace"] = namespace
if label_selectors:
- params['label_selector'] = ','.join(label_selectors)
+ params["label_selector"] = ",".join(label_selectors)
if "name" in params or "label_selector" in params:
existing = resource.get(**params)
- elif state == 'absent':
- msg = "At least one of name|label_selectors is required to delete object."
+ elif state == "absent":
+ msg = (
+ "At least one of name|label_selectors is required to delete object."
+ )
if continue_on_error:
- result['error'] = dict(msg=msg)
+ result["error"] = dict(msg=msg)
return result
else:
self.fail_json(msg=msg)
@@ -650,34 +808,69 @@ def build_error_msg(kind, name, msg):
# no sys.exc_clear on python3
pass
except ForbiddenError as exc:
- if definition['kind'] in ['Project', 'ProjectRequest'] and state != 'absent':
+ if (
+ definition["kind"] in ["Project", "ProjectRequest"]
+ and state != "absent"
+ ):
return self.create_project_request(definition)
- msg = 'Failed to retrieve requested object: {0}'.format(exc.body)
+ msg = "Failed to retrieve requested object: {0}".format(exc.body)
if continue_on_error:
- result['error'] = dict(msg=build_error_msg(definition['kind'], origin_name, msg), error=exc.status, status=exc.status, reason=exc.reason)
+ result["error"] = dict(
+ msg=build_error_msg(definition["kind"], origin_name, msg),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
return result
else:
- self.fail_json(msg=build_error_msg(definition['kind'], origin_name, msg), error=exc.status, status=exc.status, reason=exc.reason)
+ self.fail_json(
+ msg=build_error_msg(definition["kind"], origin_name, msg),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
except DynamicApiError as exc:
- msg = 'Failed to retrieve requested object: {0}'.format(exc.body)
+ msg = "Failed to retrieve requested object: {0}".format(exc.body)
if continue_on_error:
- result['error'] = dict(msg=build_error_msg(definition['kind'], origin_name, msg), error=exc.status, status=exc.status, reason=exc.reason)
+ result["error"] = dict(
+ msg=build_error_msg(definition["kind"], origin_name, msg),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
return result
else:
- self.fail_json(msg=build_error_msg(definition['kind'], origin_name, msg), error=exc.status, status=exc.status, reason=exc.reason)
+ self.fail_json(
+ msg=build_error_msg(definition["kind"], origin_name, msg),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
except ValueError as value_exc:
- msg = 'Failed to retrieve requested object: {0}'.format(to_native(value_exc))
+ msg = "Failed to retrieve requested object: {0}".format(
+ to_native(value_exc)
+ )
if continue_on_error:
- result['error'] = dict(msg=build_error_msg(definition['kind'], origin_name, msg), error='', status='', reason='')
+ result["error"] = dict(
+ msg=build_error_msg(definition["kind"], origin_name, msg),
+ error="",
+ status="",
+ reason="",
+ )
return result
else:
- self.fail_json(msg=build_error_msg(definition['kind'], origin_name, msg), error='', status='', reason='')
+ self.fail_json(
+ msg=build_error_msg(definition["kind"], origin_name, msg),
+ error="",
+ status="",
+ reason="",
+ )
- if state == 'absent':
- result['method'] = "delete"
+ if state == "absent":
+ result["method"] = "delete"
def _empty_resource_list():
- if existing and existing.kind.endswith('List'):
+ if existing and existing.kind.endswith("List"):
return existing.items == []
return False
@@ -686,53 +879,87 @@ def _empty_resource_list():
return result
else:
# Delete the object
- result['changed'] = True
+ result["changed"] = True
if self.check_mode and not self.supports_dry_run:
return result
else:
if delete_options:
body = {
- 'apiVersion': 'v1',
- 'kind': 'DeleteOptions',
+ "apiVersion": "v1",
+ "kind": "DeleteOptions",
}
body.update(delete_options)
- params['body'] = body
+ params["body"] = body
if self.check_mode:
- params['dry_run'] = "All"
+ params["dry_run"] = "All"
try:
k8s_obj = resource.delete(**params)
- result['result'] = k8s_obj.to_dict()
+ result["result"] = k8s_obj.to_dict()
except DynamicApiError as exc:
msg = "Failed to delete object: {0}".format(exc.body)
if continue_on_error:
- result['error'] = dict(msg=build_error_msg(definition['kind'], origin_name, msg),
- error=exc.status, status=exc.status, reason=exc.reason)
+ result["error"] = dict(
+ msg=build_error_msg(
+ definition["kind"], origin_name, msg
+ ),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
return result
- else:
- self.fail_json(msg=build_error_msg(definition['kind'], origin_name, msg), error=exc.status, status=exc.status, reason=exc.reason)
+ self.fail_json(
+ msg=build_error_msg(definition["kind"], origin_name, msg),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
if wait and not self.check_mode:
- success, resource, duration = self.wait(resource, definition, wait_sleep, wait_timeout, 'absent', label_selectors=label_selectors)
- result['duration'] = duration
+ success, resource, duration = self.wait(
+ resource,
+ definition,
+ wait_sleep,
+ wait_timeout,
+ "absent",
+ label_selectors=label_selectors,
+ )
+ result["duration"] = duration
if not success:
msg = "Resource deletion timed out"
if continue_on_error:
- result['error'] = dict(msg=build_error_msg(definition['kind'], origin_name, msg), **result)
+ result["error"] = dict(
+ msg=build_error_msg(
+ definition["kind"], origin_name, msg
+ ),
+ **result
+ )
return result
- else:
- self.fail_json(msg=build_error_msg(definition['kind'], origin_name, msg), **result)
+ self.fail_json(
+ msg=build_error_msg(
+ definition["kind"], origin_name, msg
+ ),
+ **result
+ )
return result
else:
if label_selectors:
filter_selector = LabelSelectorFilter(label_selectors)
if not filter_selector.isMatching(definition):
- result['changed'] = False
- result['msg'] = "resource 'kind={kind},name={name},namespace={namespace}' filtered by label_selectors.".format(
- kind=definition['kind'], name=origin_name, namespace=namespace)
+ result["changed"] = False
+ result["msg"] = (
+ "resource 'kind={kind},name={name},namespace={namespace}' "
+ "filtered by label_selectors.".format(
+ kind=definition["kind"],
+ name=origin_name,
+ namespace=namespace,
+ )
+ )
return result
if apply:
if self.check_mode and not self.supports_dry_run:
- ignored, patch = apply_object(resource, _encode_stringdata(definition))
+ ignored, patch = apply_object(
+ resource, _encode_stringdata(definition)
+ )
if existing:
k8s_obj = dict_merge(existing.to_dict(), patch)
else:
@@ -741,93 +968,158 @@ def _empty_resource_list():
try:
params = {}
if self.check_mode:
- params['dry_run'] = 'All'
- k8s_obj = resource.apply(definition, namespace=namespace, **params).to_dict()
+ params["dry_run"] = "All"
+ k8s_obj = resource.apply(
+ definition, namespace=namespace, **params
+ ).to_dict()
except DynamicApiError as exc:
msg = "Failed to apply object: {0}".format(exc.body)
if self.warnings:
msg += "\n" + "\n ".join(self.warnings)
if continue_on_error:
- result['error'] = dict(msg=build_error_msg(definition['kind'],
- origin_name, msg), error=exc.status, status=exc.status, reason=exc.reason)
+ result["error"] = dict(
+ msg=build_error_msg(
+ definition["kind"], origin_name, msg
+ ),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
return result
else:
- self.fail_json(msg=build_error_msg(definition['kind'], origin_name, msg), error=exc.status, status=exc.status, reason=exc.reason)
+ self.fail_json(
+ msg=build_error_msg(
+ definition["kind"], origin_name, msg
+ ),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
success = True
- result['result'] = k8s_obj
+ result["result"] = k8s_obj
if wait and not self.check_mode:
- success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
+ success, result["result"], result["duration"] = self.wait(
+ resource,
+ definition,
+ wait_sleep,
+ wait_timeout,
+ condition=wait_condition,
+ )
if existing:
existing = existing.to_dict()
else:
existing = {}
- match, diffs = self.diff_objects(existing, result['result'])
- result['changed'] = not match
+ match, diffs = self.diff_objects(existing, result["result"])
+ result["changed"] = not match
if self.module._diff:
- result['diff'] = diffs
- result['method'] = 'apply'
+ result["diff"] = diffs
+ result["method"] = "apply"
if not success:
msg = "Resource apply timed out"
if continue_on_error:
- result['error'] = dict(msg=build_error_msg(definition['kind'], origin_name, msg), **result)
+ result["error"] = dict(
+ msg=build_error_msg(definition["kind"], origin_name, msg),
+ **result
+ )
return result
else:
- self.fail_json(msg=build_error_msg(definition['kind'], origin_name, msg), **result)
+ self.fail_json(
+ msg=build_error_msg(definition["kind"], origin_name, msg),
+ **result
+ )
return result
if not existing:
- if state == 'patched':
+ if state == "patched":
# Silently skip this resource (do not raise an error) as 'patch_only' is set to true
- result['changed'] = False
- result['warning'] = "resource 'kind={kind},name={name}' was not found but will not be created as 'state'\
+ result["changed"] = False
+ result[
+ "warning"
+ ] = "resource 'kind={kind},name={name}' was not found but will not be created as 'state'\
parameter has been set to '{state}'".format(
- kind=definition['kind'], name=origin_name, state=state)
+ kind=definition["kind"], name=origin_name, state=state
+ )
return result
elif self.check_mode and not self.supports_dry_run:
k8s_obj = _encode_stringdata(definition)
else:
params = {}
if self.check_mode:
- params['dry_run'] = "All"
+ params["dry_run"] = "All"
try:
- k8s_obj = resource.create(definition, namespace=namespace, **params).to_dict()
+ k8s_obj = resource.create(
+ definition, namespace=namespace, **params
+ ).to_dict()
except ConflictError:
# Some resources, like ProjectRequests, can't be created multiple times,
# because the resources that they create don't match their kind
# In this case we'll mark it as unchanged and warn the user
- self.warn("{0} was not found, but creating it returned a 409 Conflict error. This can happen \
- if the resource you are creating does not directly create a resource of the same kind.".format(name))
+ self.warn(
+ "{0} was not found, but creating it returned a 409 Conflict error. This can happen \
+ if the resource you are creating does not directly create a resource of the same kind.".format(
+ name
+ )
+ )
return result
except DynamicApiError as exc:
msg = "Failed to create object: {0}".format(exc.body)
if self.warnings:
msg += "\n" + "\n ".join(self.warnings)
if continue_on_error:
- result['error'] = dict(msg=build_error_msg(definition['kind'], origin_name, msg),
- error=exc.status, status=exc.status, reason=exc.reason)
+ result["error"] = dict(
+ msg=build_error_msg(
+ definition["kind"], origin_name, msg
+ ),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
return result
else:
- self.fail_json(msg=build_error_msg(definition['kind'], origin_name, msg), error=exc.status, status=exc.status, reason=exc.reason)
+ self.fail_json(
+ msg=build_error_msg(
+ definition["kind"], origin_name, msg
+ ),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
except Exception as exc:
msg = "Failed to create object: {0}".format(exc)
if self.warnings:
msg += "\n" + "\n ".join(self.warnings)
if continue_on_error:
- result['error'] = dict(msg=build_error_msg(definition['kind'], origin_name, msg), error='', status='', reason='')
+ result["error"] = dict(
+ msg=build_error_msg(
+ definition["kind"], origin_name, msg
+ ),
+ error="",
+ status="",
+ reason="",
+ )
return result
else:
- self.fail_json(msg=msg, error='', status='', reason='')
+ self.fail_json(msg=msg, error="", status="", reason="")
success = True
- result['result'] = k8s_obj
+ result["result"] = k8s_obj
if wait and not self.check_mode:
- definition['metadata'].update({'name': k8s_obj['metadata']['name']})
- success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
- result['changed'] = True
- result['method'] = 'create'
+ definition["metadata"].update({"name": k8s_obj["metadata"]["name"]})
+ success, result["result"], result["duration"] = self.wait(
+ resource,
+ definition,
+ wait_sleep,
+ wait_timeout,
+ condition=wait_condition,
+ )
+ result["changed"] = True
+ result["method"] = "create"
if not success:
msg = "Resource creation timed out"
if continue_on_error:
- result['error'] = dict(msg=build_error_msg(definition['kind'], origin_name, msg), **result)
+ result["error"] = dict(
+ msg=build_error_msg(definition["kind"], origin_name, msg),
+ **result
+ )
return result
else:
self.fail_json(msg=msg, **result)
@@ -836,39 +1128,65 @@ def _empty_resource_list():
match = False
diffs = []
- if state == 'present' and existing and force:
+ if state == "present" and existing and force:
if self.check_mode and not self.supports_dry_run:
k8s_obj = _encode_stringdata(definition)
else:
params = {}
if self.check_mode:
- params['dry_run'] = "All"
+ params["dry_run"] = "All"
try:
- k8s_obj = resource.replace(definition, name=name, namespace=namespace, append_hash=append_hash, **params).to_dict()
+ k8s_obj = resource.replace(
+ definition,
+ name=name,
+ namespace=namespace,
+ append_hash=append_hash,
+ **params
+ ).to_dict()
except DynamicApiError as exc:
msg = "Failed to replace object: {0}".format(exc.body)
if self.warnings:
msg += "\n" + "\n ".join(self.warnings)
if continue_on_error:
- result['error'] = dict(msg=build_error_msg(definition['kind'], origin_name, msg),
- error=exc.status, status=exc.status, reason=exc.reason)
+ result["error"] = dict(
+ msg=build_error_msg(
+ definition["kind"], origin_name, msg
+ ),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
return result
else:
- self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
+ self.fail_json(
+ msg=msg,
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
match, diffs = self.diff_objects(existing.to_dict(), k8s_obj)
success = True
- result['result'] = k8s_obj
+ result["result"] = k8s_obj
if wait and not self.check_mode:
- success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
- match, diffs = self.diff_objects(existing.to_dict(), result['result'])
- result['changed'] = not match
- result['method'] = 'replace'
+ success, result["result"], result["duration"] = self.wait(
+ resource,
+ definition,
+ wait_sleep,
+ wait_timeout,
+ condition=wait_condition,
+ )
+ match, diffs = self.diff_objects(existing.to_dict(), result["result"])
+ result["changed"] = not match
+ result["method"] = "replace"
if self.module._diff:
- result['diff'] = diffs
+ result["diff"] = diffs
if not success:
msg = "Resource replacement timed out"
if continue_on_error:
- result['error'] = dict(msg=build_error_msg(definition['kind'], origin_name, msg), **result)
+ result["error"] = dict(
+ msg=build_error_msg(definition["kind"], origin_name, msg),
+ **result
+ )
return result
else:
self.fail_json(msg=msg, **result)
@@ -878,49 +1196,73 @@ def _empty_resource_list():
if self.check_mode and not self.supports_dry_run:
k8s_obj = dict_merge(existing.to_dict(), _encode_stringdata(definition))
else:
- for merge_type in self.params['merge_type'] or ['strategic-merge', 'merge']:
- k8s_obj, error = self.patch_resource(resource, definition, existing, name,
- namespace, merge_type=merge_type)
+ for merge_type in self.params["merge_type"] or [
+ "strategic-merge",
+ "merge",
+ ]:
+ k8s_obj, error = self.patch_resource(
+ resource,
+ definition,
+ existing,
+ name,
+ namespace,
+ merge_type=merge_type,
+ )
if not error:
break
if error:
if continue_on_error:
- result['error'] = error
- result['error']['msg'] = build_error_msg(definition['kind'], origin_name, result['error'].get('msg'))
+ result["error"] = error
+ result["error"]["msg"] = build_error_msg(
+ definition["kind"], origin_name, result["error"].get("msg")
+ )
return result
else:
self.fail_json(**error)
success = True
- result['result'] = k8s_obj
+ result["result"] = k8s_obj
if wait and not self.check_mode:
- success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
- match, diffs = self.diff_objects(existing.to_dict(), result['result'])
- result['changed'] = not match
- result['method'] = 'patch'
+ success, result["result"], result["duration"] = self.wait(
+ resource,
+ definition,
+ wait_sleep,
+ wait_timeout,
+ condition=wait_condition,
+ )
+ match, diffs = self.diff_objects(existing.to_dict(), result["result"])
+ result["changed"] = not match
+ result["method"] = "patch"
if self.module._diff:
- result['diff'] = diffs
+ result["diff"] = diffs
if not success:
msg = "Resource update timed out"
if continue_on_error:
- result['error'] = dict(msg=build_error_msg(definition['kind'], origin_name, msg), **result)
+ result["error"] = dict(
+ msg=build_error_msg(definition["kind"], origin_name, msg),
+ **result
+ )
return result
else:
self.fail_json(msg=msg, **result)
return result
- def patch_resource(self, resource, definition, existing, name, namespace, merge_type=None):
+ def patch_resource(
+ self, resource, definition, existing, name, namespace, merge_type=None
+ ):
if merge_type == "json":
self.module.deprecate(
msg="json as a merge_type value is deprecated. Please use the k8s_json_patch module instead.",
- version="3.0.0", collection_name="kubernetes.core")
+ version="3.0.0",
+ collection_name="kubernetes.core",
+ )
try:
params = dict(name=name, namespace=namespace)
if self.check_mode:
- params['dry_run'] = 'All'
+ params["dry_run"] = "All"
if merge_type:
- params['content_type'] = 'application/{0}-patch+json'.format(merge_type)
+ params["content_type"] = "application/{0}-patch+json".format(merge_type)
k8s_obj = resource.patch(definition, **params).to_dict()
match, diffs = self.diff_objects(existing.to_dict(), k8s_obj)
error = {}
@@ -929,35 +1271,53 @@ def patch_resource(self, resource, definition, existing, name, namespace, merge_
msg = "Failed to patch object: {0}".format(exc.body)
if self.warnings:
msg += "\n" + "\n ".join(self.warnings)
- error = dict(msg=msg, error=exc.status, status=exc.status, reason=exc.reason, warnings=self.warnings)
+ error = dict(
+ msg=msg,
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ warnings=self.warnings,
+ )
return None, error
except Exception as exc:
msg = "Failed to patch object: {0}".format(exc)
if self.warnings:
msg += "\n" + "\n ".join(self.warnings)
- error = dict(msg=msg, error=to_native(exc), status='', reason='', warnings=self.warnings)
+ error = dict(
+ msg=msg,
+ error=to_native(exc),
+ status="",
+ reason="",
+ warnings=self.warnings,
+ )
return None, error
def create_project_request(self, definition):
- definition['kind'] = 'ProjectRequest'
- result = {'changed': False, 'result': {}}
- resource = self.find_resource('ProjectRequest', definition['apiVersion'], fail=True)
+ definition["kind"] = "ProjectRequest"
+ result = {"changed": False, "result": {}}
+ resource = self.find_resource(
+ "ProjectRequest", definition["apiVersion"], fail=True
+ )
if not self.check_mode:
try:
k8s_obj = resource.create(definition)
- result['result'] = k8s_obj.to_dict()
+ result["result"] = k8s_obj.to_dict()
except DynamicApiError as exc:
- self.fail_json(msg="Failed to create object: {0}".format(exc.body),
- error=exc.status, status=exc.status, reason=exc.reason)
- result['changed'] = True
- result['method'] = 'create'
+ self.fail_json(
+ msg="Failed to create object: {0}".format(exc.body),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
+ result["changed"] = True
+ result["method"] = "create"
return result
def _encode_stringdata(definition):
- if definition['kind'] == 'Secret' and 'stringData' in definition:
- for k, v in definition['stringData'].items():
+ if definition["kind"] == "Secret" and "stringData" in definition:
+ for k, v in definition["stringData"].items():
encoded = base64.b64encode(to_bytes(v))
- definition.setdefault('data', {})[k] = to_text(encoded)
- del definition['stringData']
+ definition.setdefault("data", {})[k] = to_text(encoded)
+ del definition["stringData"]
return definition
diff --git a/plugins/module_utils/exceptions.py b/plugins/module_utils/exceptions.py
index 35d3c2fd63..6967ec3f30 100644
--- a/plugins/module_utils/exceptions.py
+++ b/plugins/module_utils/exceptions.py
@@ -14,6 +14,7 @@
from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
diff --git a/plugins/module_utils/hashes.py b/plugins/module_utils/hashes.py
index 6af6bc1be6..3d44a7d906 100644
--- a/plugins/module_utils/hashes.py
+++ b/plugins/module_utils/hashes.py
@@ -15,7 +15,8 @@
# Implement ConfigMapHash and SecretHash equivalents
# Based on https://github.com/kubernetes/kubernetes/pull/49961
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
import json
@@ -23,6 +24,7 @@
try:
import string
+
maketrans = string.maketrans
except AttributeError:
maketrans = str.maketrans
@@ -44,21 +46,21 @@ def sorted_dict(unsorted_dict):
def generate_hash(resource):
# Get name from metadata
- metada = resource.get('metadata', {})
- key = 'name'
- resource['name'] = metada.get('name', '')
- generate_name = metada.get('generateName', '')
- if resource['name'] == '' and generate_name:
- del(resource['name'])
- key = 'generateName'
- resource['generateName'] = generate_name
- if resource['kind'] == 'ConfigMap':
- marshalled = marshal(sorted_dict(resource), ['data', 'kind', key])
- del(resource[key])
+ metada = resource.get("metadata", {})
+ key = "name"
+ resource["name"] = metada.get("name", "")
+ generate_name = metada.get("generateName", "")
+ if resource["name"] == "" and generate_name:
+ del resource["name"]
+ key = "generateName"
+ resource["generateName"] = generate_name
+ if resource["kind"] == "ConfigMap":
+ marshalled = marshal(sorted_dict(resource), ["data", "kind", key])
+ del resource[key]
return encode(marshalled)
- if resource['kind'] == 'Secret':
- marshalled = marshal(sorted_dict(resource), ['data', 'kind', key, 'type'])
- del(resource[key])
+ if resource["kind"] == "Secret":
+ marshalled = marshal(sorted_dict(resource), ["data", "kind", key, "type"])
+ del resource[key]
return encode(marshalled)
raise NotImplementedError
@@ -67,8 +69,10 @@ def marshal(data, keys):
ordered = OrderedDict()
for key in keys:
ordered[key] = data.get(key, "")
- return json.dumps(ordered, separators=(',', ':')).encode('utf-8')
+ return json.dumps(ordered, separators=(",", ":")).encode("utf-8")
def encode(resource):
- return hashlib.sha256(resource).hexdigest()[:10].translate(maketrans("013ae", "ghkmt"))
+ return (
+ hashlib.sha256(resource).hexdigest()[:10].translate(maketrans("013ae", "ghkmt"))
+ )
diff --git a/plugins/module_utils/helm.py b/plugins/module_utils/helm.py
index d0c1d84a63..685b585e77 100644
--- a/plugins/module_utils/helm.py
+++ b/plugins/module_utils/helm.py
@@ -18,6 +18,7 @@
try:
import yaml
+
HAS_YAML = True
except ImportError:
YAML_IMP_ERR = traceback.format_exc()
@@ -28,11 +29,11 @@
def prepare_helm_environ_update(module):
environ_update = {}
file_to_cleam_up = None
- kubeconfig_path = module.params.get('kubeconfig')
- if module.params.get('context') is not None:
- environ_update["HELM_KUBECONTEXT"] = module.params.get('context')
- if module.params.get('release_namespace'):
- environ_update["HELM_NAMESPACE"] = module.params.get('release_namespace')
+ kubeconfig_path = module.params.get("kubeconfig")
+ if module.params.get("context") is not None:
+ environ_update["HELM_KUBECONTEXT"] = module.params.get("context")
+ if module.params.get("release_namespace"):
+ environ_update["HELM_NAMESPACE"] = module.params.get("release_namespace")
if module.params.get("api_key"):
environ_update["HELM_KUBETOKEN"] = module.params["api_key"]
if module.params.get("host"):
@@ -41,7 +42,8 @@ def prepare_helm_environ_update(module):
kubeconfig_path = write_temp_kubeconfig(
module.params["host"],
validate_certs=module.params["validate_certs"],
- ca_cert=module.params["ca_cert"])
+ ca_cert=module.params["ca_cert"],
+ )
file_to_cleam_up = kubeconfig_path
if kubeconfig_path is not None:
environ_update["KUBECONFIG"] = kubeconfig_path
@@ -61,7 +63,9 @@ def run_helm(module, command, fails_on_error=True):
rc, out, err = module.run_command(command, environ_update=environ_update)
if fails_on_error and rc != 0:
module.fail_json(
- msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err),
+ msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(
+ rc, out, err
+ ),
stdout=out,
stderr=err,
command=command,
@@ -90,23 +94,11 @@ def write_temp_kubeconfig(server, validate_certs=True, ca_cert=None):
content = {
"apiVersion": "v1",
"kind": "Config",
- "clusters": [
- {
- "cluster": {
- "server": server,
- },
- "name": "generated-cluster"
- }
- ],
+ "clusters": [{"cluster": {"server": server}, "name": "generated-cluster"}],
"contexts": [
- {
- "context": {
- "cluster": "generated-cluster"
- },
- "name": "generated-context"
- }
+ {"context": {"cluster": "generated-cluster"}, "name": "generated-context"}
],
- "current-context": "generated-context"
+ "current-context": "generated-context",
}
if not validate_certs:
@@ -115,7 +107,7 @@ def write_temp_kubeconfig(server, validate_certs=True, ca_cert=None):
content["clusters"][0]["cluster"]["certificate-authority"] = ca_cert
_fd, file_name = tempfile.mkstemp()
- with os.fdopen(_fd, 'w') as fp:
+ with os.fdopen(_fd, "w") as fp:
yaml.dump(content, fp)
return file_name
@@ -128,7 +120,7 @@ def get_helm_plugin_list(module, helm_bin=None):
return []
helm_plugin_list = helm_bin + " list"
rc, out, err = run_helm(module, helm_plugin_list)
- if rc != 0 or (out == '' and err == ''):
+ if rc != 0 or (out == "" and err == ""):
module.fail_json(
msg="Failed to get Helm plugin info",
command=helm_plugin_list,
@@ -150,11 +142,11 @@ def parse_helm_plugin_list(module, output=None):
for line in output:
if line.startswith("NAME"):
continue
- name, version, description = line.split('\t', 3)
+ name, version, description = line.split("\t", 3)
name = name.strip()
version = version.strip()
description = description.strip()
- if name == '':
+ if name == "":
continue
ret.append((name, version, description))
diff --git a/plugins/module_utils/k8sdynamicclient.py b/plugins/module_utils/k8sdynamicclient.py
index 73fe61971d..b1beca4cd7 100644
--- a/plugins/module_utils/k8sdynamicclient.py
+++ b/plugins/module_utils/k8sdynamicclient.py
@@ -14,26 +14,37 @@
from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
from kubernetes.dynamic import DynamicClient
from ansible_collections.kubernetes.core.plugins.module_utils.apply import k8s_apply
-from ansible_collections.kubernetes.core.plugins.module_utils.exceptions import ApplyException
+from ansible_collections.kubernetes.core.plugins.module_utils.exceptions import (
+ ApplyException,
+)
class K8SDynamicClient(DynamicClient):
def apply(self, resource, body=None, name=None, namespace=None, **kwargs):
body = super().serialize_body(body)
- body['metadata'] = body.get('metadata', dict())
- name = name or body['metadata'].get('name')
+ body["metadata"] = body.get("metadata", dict())
+ name = name or body["metadata"].get("name")
if not name:
- raise ValueError("name is required to apply {0}.{1}".format(resource.group_version, resource.kind))
+ raise ValueError(
+ "name is required to apply {0}.{1}".format(
+ resource.group_version, resource.kind
+ )
+ )
if resource.namespaced:
- body['metadata']['namespace'] = super().ensure_namespace(resource, namespace, body)
+ body["metadata"]["namespace"] = super().ensure_namespace(
+ resource, namespace, body
+ )
try:
return k8s_apply(resource, body, **kwargs)
except ApplyException as e:
- raise ValueError("Could not apply strategic merge to %s/%s: %s" %
- (body['kind'], body['metadata']['name'], e))
+ raise ValueError(
+ "Could not apply strategic merge to %s/%s: %s"
+ % (body["kind"], body["metadata"]["name"], e)
+ )
diff --git a/plugins/module_utils/selector.py b/plugins/module_utils/selector.py
index 2d21bbf4cf..2a85d0bfde 100644
--- a/plugins/module_utils/selector.py
+++ b/plugins/module_utils/selector.py
@@ -17,7 +17,7 @@
class Selector(object):
- equality_based_operators = ('==', '!=', '=')
+ equality_based_operators = ("==", "!=", "=")
def __init__(self, data):
self._operator = None
@@ -27,18 +27,23 @@ def __init__(self, data):
for op in self.equality_based_operators:
idx = no_whitespace_data.find(op)
if idx != -1:
- self._operator = "in" if op == '==' or op == '=' else "notin"
+ self._operator = "in" if op == "==" or op == "=" else "notin"
self._key = no_whitespace_data[0:idx]
+ # fmt: off
self._data = [no_whitespace_data[idx + len(op):]]
+ # fmt: on
break
def parse_set_based_requirement(self, data):
- m = re.match(r'( *)([a-z0-9A-Z][a-z0-9A-Z\._-]*[a-z0-9A-Z])( +)(notin|in)( +)\((.*)\)( *)', data)
+ m = re.match(
+ r"( *)([a-z0-9A-Z][a-z0-9A-Z\._-]*[a-z0-9A-Z])( +)(notin|in)( +)\((.*)\)( *)",
+ data,
+ )
if m:
self._set_based_requirement = True
self._key = m.group(2)
self._operator = m.group(4)
- self._data = [x.replace(' ', '') for x in m.group(6).split(',') if x != '']
+ self._data = [x.replace(" ", "") for x in m.group(6).split(",") if x != ""]
return True
elif all(x not in data for x in self.equality_based_operators):
self._key = data.rstrip(" ").lstrip(" ")
@@ -54,18 +59,21 @@ def isMatch(self, labels):
elif self._operator == "notin":
return self._key not in labels or labels.get(self._key) not in self._data
else:
- return self._key not in labels if self._operator == "!" else self._key in labels
+ return (
+ self._key not in labels
+ if self._operator == "!"
+ else self._key in labels
+ )
class LabelSelectorFilter(object):
-
def __init__(self, label_selectors):
self.selectors = [Selector(data) for data in label_selectors]
def isMatching(self, definition):
- if "metadata" not in definition or "labels" not in definition['metadata']:
+ if "metadata" not in definition or "labels" not in definition["metadata"]:
return False
- labels = definition['metadata']['labels']
+ labels = definition["metadata"]["labels"]
if not isinstance(labels, dict):
return None
return all(sel.isMatch(labels) for sel in self.selectors)
diff --git a/plugins/modules/helm.py b/plugins/modules/helm.py
index 20de0151fd..7e9a7067a9 100644
--- a/plugins/modules/helm.py
+++ b/plugins/modules/helm.py
@@ -4,10 +4,11 @@
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: helm
@@ -159,9 +160,9 @@
version_added: "2.2.0"
extends_documentation_fragment:
- kubernetes.core.helm_common_options
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Deploy latest version of Prometheus chart inside monitoring namespace (and create it)
kubernetes.core.helm:
name: test
@@ -248,7 +249,7 @@
enabled: True
logging:
enabled: True
-'''
+"""
RETURN = r"""
status:
@@ -311,6 +312,7 @@
try:
import yaml
+
IMP_YAML = True
except ImportError:
IMP_YAML_ERR = traceback.format_exc()
@@ -333,7 +335,7 @@ def get_release(state, release_name):
if state is not None:
for release in state:
- if release['name'] == release_name:
+ if release["name"] == release_name:
return release
return None
@@ -352,7 +354,7 @@ def get_release_status(module, command, release_name):
if release is None: # not install
return None
- release['values'] = get_values(module, command, release_name)
+ release["values"] = get_values(module, command, release_name)
return release
@@ -376,9 +378,23 @@ def fetch_chart_info(module, command, chart_ref):
return yaml.safe_load(out)
-def deploy(command, release_name, release_values, chart_name, wait,
- wait_timeout, disable_hook, force, values_files, history_max, atomic=False,
- create_namespace=False, replace=False, skip_crds=False, timeout=None):
+def deploy(
+ command,
+ release_name,
+ release_values,
+ chart_name,
+ wait,
+ wait_timeout,
+ disable_hook,
+ force,
+ values_files,
+ history_max,
+ atomic=False,
+ create_namespace=False,
+ replace=False,
+ skip_crds=False,
+ timeout=None,
+):
"""
Install/upgrade/rollback release chart
"""
@@ -419,8 +435,8 @@ def deploy(command, release_name, release_values, chart_name, wait,
deploy_command += " --values=" + value_file
if release_values != {}:
- fd, path = tempfile.mkstemp(suffix='.yml')
- with open(path, 'w') as yaml_file:
+ fd, path = tempfile.mkstemp(suffix=".yml")
+ with open(path, "w") as yaml_file:
yaml.dump(release_values, yaml_file, default_flow_style=False)
deploy_command += " -f=" + path
@@ -434,8 +450,7 @@ def deploy(command, release_name, release_values, chart_name, wait,
return deploy_command
-def delete(command, release_name, purge, disable_hook,
- wait, wait_timeout):
+def delete(command, release_name, purge, disable_hook, wait, wait_timeout):
"""
Delete release chart
"""
@@ -462,7 +477,7 @@ def delete(command, release_name, purge, disable_hook,
def load_values_files(values_files):
values = {}
for values_file in values_files or []:
- with open(values_file, 'r') as fd:
+ with open(values_file, "r") as fd:
content = yaml.safe_load(fd)
if not isinstance(content, dict):
continue
@@ -489,8 +504,16 @@ def has_plugin(command, plugin):
return False
-def helmdiff_check(module, helm_cmd, release_name, chart_ref, release_values,
- values_files=None, chart_version=None, replace=False):
+def helmdiff_check(
+ module,
+ helm_cmd,
+ release_name,
+ chart_ref,
+ release_values,
+ values_files=None,
+ chart_version=None,
+ replace=False,
+):
"""
Use helm diff to determine if a release would change by upgrading a chart.
"""
@@ -504,8 +527,8 @@ def helmdiff_check(module, helm_cmd, release_name, chart_ref, release_values,
cmd += " " + "--reset-values"
if release_values != {}:
- fd, path = tempfile.mkstemp(suffix='.yml')
- with open(path, 'w') as yaml_file:
+ fd, path = tempfile.mkstemp(suffix=".yml")
+ with open(path, "w") as yaml_file:
yaml.dump(release_values, yaml_file, default_flow_style=False)
cmd += " -f=" + path
@@ -522,60 +545,83 @@ def default_check(release_status, chart_info, values=None, values_files=None):
Use default check to determine if release would change by upgrading a chart.
"""
# the 'appVersion' specification is optional in a chart
- chart_app_version = chart_info.get('appVersion', None)
- released_app_version = release_status.get('app_version', None)
+ chart_app_version = chart_info.get("appVersion", None)
+ released_app_version = release_status.get("app_version", None)
# when deployed without an 'appVersion' chart value the 'helm list' command will return the entry `app_version: ""`
- appversion_is_same = (chart_app_version == released_app_version) or (chart_app_version is None and released_app_version == "")
+ appversion_is_same = (chart_app_version == released_app_version) or (
+ chart_app_version is None and released_app_version == ""
+ )
if values_files:
- values_match = release_status['values'] == load_values_files(values_files)
+ values_match = release_status["values"] == load_values_files(values_files)
else:
- values_match = release_status['values'] == values
- return not values_match \
- or (chart_info['name'] + '-' + chart_info['version']) != release_status["chart"] \
+ values_match = release_status["values"] == values
+ return (
+ not values_match
+ or (chart_info["name"] + "-" + chart_info["version"]) != release_status["chart"]
or not appversion_is_same
+ )
def main():
global module
module = AnsibleModule(
argument_spec=dict(
- binary_path=dict(type='path'),
- chart_ref=dict(type='path'),
- chart_repo_url=dict(type='str'),
- chart_version=dict(type='str'),
- release_name=dict(type='str', required=True, aliases=['name']),
- release_namespace=dict(type='str', required=True, aliases=['namespace']),
- release_state=dict(default='present', choices=['present', 'absent'], aliases=['state']),
- release_values=dict(type='dict', default={}, aliases=['values']),
- values_files=dict(type='list', default=[], elements='str'),
- update_repo_cache=dict(type='bool', default=False),
-
+ binary_path=dict(type="path"),
+ chart_ref=dict(type="path"),
+ chart_repo_url=dict(type="str"),
+ chart_version=dict(type="str"),
+ release_name=dict(type="str", required=True, aliases=["name"]),
+ release_namespace=dict(type="str", required=True, aliases=["namespace"]),
+ release_state=dict(
+ default="present", choices=["present", "absent"], aliases=["state"]
+ ),
+ release_values=dict(type="dict", default={}, aliases=["values"]),
+ values_files=dict(type="list", default=[], elements="str"),
+ update_repo_cache=dict(type="bool", default=False),
# Helm options
- disable_hook=dict(type='bool', default=False),
- force=dict(type='bool', default=False),
- context=dict(type='str', aliases=['kube_context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])),
- kubeconfig=dict(type='path', aliases=['kubeconfig_path'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])),
- purge=dict(type='bool', default=True),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='str'),
- timeout=dict(type='str'),
- atomic=dict(type='bool', default=False),
- create_namespace=dict(type='bool', default=False),
- replace=dict(type='bool', default=False),
- skip_crds=dict(type='bool', default=False),
- history_max=dict(type='int'),
-
+ disable_hook=dict(type="bool", default=False),
+ force=dict(type="bool", default=False),
+ context=dict(
+ type="str",
+ aliases=["kube_context"],
+ fallback=(env_fallback, ["K8S_AUTH_CONTEXT"]),
+ ),
+ kubeconfig=dict(
+ type="path",
+ aliases=["kubeconfig_path"],
+ fallback=(env_fallback, ["K8S_AUTH_KUBECONFIG"]),
+ ),
+ purge=dict(type="bool", default=True),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="str"),
+ timeout=dict(type="str"),
+ atomic=dict(type="bool", default=False),
+ create_namespace=dict(type="bool", default=False),
+ replace=dict(type="bool", default=False),
+ skip_crds=dict(type="bool", default=False),
+ history_max=dict(type="int"),
# Generic auth key
- host=dict(type='str', fallback=(env_fallback, ['K8S_AUTH_HOST'])),
- ca_cert=dict(type='path', aliases=['ssl_ca_cert'], fallback=(env_fallback, ['K8S_AUTH_SSL_CA_CERT'])),
- validate_certs=dict(type='bool', default=True, aliases=['verify_ssl'], fallback=(env_fallback, ['K8S_AUTH_VERIFY_SSL'])),
- api_key=dict(type='str', no_log=True, fallback=(env_fallback, ['K8S_AUTH_API_KEY']))
+ host=dict(type="str", fallback=(env_fallback, ["K8S_AUTH_HOST"])),
+ ca_cert=dict(
+ type="path",
+ aliases=["ssl_ca_cert"],
+ fallback=(env_fallback, ["K8S_AUTH_SSL_CA_CERT"]),
+ ),
+ validate_certs=dict(
+ type="bool",
+ default=True,
+ aliases=["verify_ssl"],
+ fallback=(env_fallback, ["K8S_AUTH_VERIFY_SSL"]),
+ ),
+ api_key=dict(
+ type="str", no_log=True, fallback=(env_fallback, ["K8S_AUTH_API_KEY"])
+ ),
),
required_if=[
- ('release_state', 'present', ['release_name', 'chart_ref']),
- ('release_state', 'absent', ['release_name'])
+ ("release_state", "present", ["release_name", "chart_ref"]),
+ ("release_state", "absent", ["release_name"]),
],
mutually_exclusive=[
("context", "ca_cert"),
@@ -591,33 +637,33 @@ def main():
changed = False
- bin_path = module.params.get('binary_path')
- chart_ref = module.params.get('chart_ref')
- chart_repo_url = module.params.get('chart_repo_url')
- chart_version = module.params.get('chart_version')
- release_name = module.params.get('release_name')
- release_state = module.params.get('release_state')
- release_values = module.params.get('release_values')
- values_files = module.params.get('values_files')
- update_repo_cache = module.params.get('update_repo_cache')
+ bin_path = module.params.get("binary_path")
+ chart_ref = module.params.get("chart_ref")
+ chart_repo_url = module.params.get("chart_repo_url")
+ chart_version = module.params.get("chart_version")
+ release_name = module.params.get("release_name")
+ release_state = module.params.get("release_state")
+ release_values = module.params.get("release_values")
+ values_files = module.params.get("values_files")
+ update_repo_cache = module.params.get("update_repo_cache")
# Helm options
- disable_hook = module.params.get('disable_hook')
- force = module.params.get('force')
- purge = module.params.get('purge')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- atomic = module.params.get('atomic')
- create_namespace = module.params.get('create_namespace')
- replace = module.params.get('replace')
- skip_crds = module.params.get('skip_crds')
- history_max = module.params.get('history_max')
- timeout = module.params.get('timeout')
+ disable_hook = module.params.get("disable_hook")
+ force = module.params.get("force")
+ purge = module.params.get("purge")
+ wait = module.params.get("wait")
+ wait_timeout = module.params.get("wait_timeout")
+ atomic = module.params.get("atomic")
+ create_namespace = module.params.get("create_namespace")
+ replace = module.params.get("replace")
+ skip_crds = module.params.get("skip_crds")
+ history_max = module.params.get("history_max")
+ timeout = module.params.get("timeout")
if bin_path is not None:
helm_cmd_common = bin_path
else:
- helm_cmd_common = module.get_bin_path('helm', required=True)
+ helm_cmd_common = module.get_bin_path("helm", required=True)
if update_repo_cache:
run_repo_update(module, helm_cmd_common)
@@ -635,11 +681,15 @@ def main():
if wait:
helm_version = get_helm_version(module, helm_cmd_common)
if LooseVersion(helm_version) < LooseVersion("3.7.0"):
- opt_result['warnings'] = []
- opt_result['warnings'].append("helm uninstall support option --wait for helm release >= 3.7.0")
+ opt_result["warnings"] = []
+ opt_result["warnings"].append(
+ "helm uninstall support option --wait for helm release >= 3.7.0"
+ )
wait = False
- helm_cmd = delete(helm_cmd, release_name, purge, disable_hook, wait, wait_timeout)
+ helm_cmd = delete(
+ helm_cmd, release_name, purge, disable_hook, wait, wait_timeout
+ )
changed = True
elif release_state == "present":
@@ -653,54 +703,87 @@ def main():
chart_info = fetch_chart_info(module, helm_cmd, chart_ref)
if release_status is None: # Not installed
- helm_cmd = deploy(helm_cmd, release_name, release_values, chart_ref, wait, wait_timeout,
- disable_hook, False, values_files=values_files, atomic=atomic,
- create_namespace=create_namespace, replace=replace,
- skip_crds=skip_crds, history_max=history_max, timeout=timeout)
+ helm_cmd = deploy(
+ helm_cmd,
+ release_name,
+ release_values,
+ chart_ref,
+ wait,
+ wait_timeout,
+ disable_hook,
+ False,
+ values_files=values_files,
+ atomic=atomic,
+ create_namespace=create_namespace,
+ replace=replace,
+ skip_crds=skip_crds,
+ history_max=history_max,
+ timeout=timeout,
+ )
changed = True
else:
if has_plugin(helm_cmd_common, "diff") and not chart_repo_url:
- would_change = helmdiff_check(module, helm_cmd_common, release_name, chart_ref,
- release_values, values_files, chart_version, replace)
+ would_change = helmdiff_check(
+ module,
+ helm_cmd_common,
+ release_name,
+ chart_ref,
+ release_values,
+ values_files,
+ chart_version,
+ replace,
+ )
else:
- module.warn("The default idempotency check can fail to report changes in certain cases. "
- "Install helm diff for better results.")
- would_change = default_check(release_status, chart_info, release_values, values_files)
+ module.warn(
+ "The default idempotency check can fail to report changes in certain cases. "
+ "Install helm diff for better results."
+ )
+ would_change = default_check(
+ release_status, chart_info, release_values, values_files
+ )
if force or would_change:
- helm_cmd = deploy(helm_cmd, release_name, release_values, chart_ref, wait, wait_timeout,
- disable_hook, force, values_files=values_files, atomic=atomic,
- create_namespace=create_namespace, replace=replace,
- skip_crds=skip_crds, history_max=history_max, timeout=timeout)
+ helm_cmd = deploy(
+ helm_cmd,
+ release_name,
+ release_values,
+ chart_ref,
+ wait,
+ wait_timeout,
+ disable_hook,
+ force,
+ values_files=values_files,
+ atomic=atomic,
+ create_namespace=create_namespace,
+ replace=replace,
+ skip_crds=skip_crds,
+ history_max=history_max,
+ timeout=timeout,
+ )
changed = True
if module.check_mode:
- check_status = {
- 'values': {
- "current": {},
- "declared": {},
- }
- }
+ check_status = {"values": {"current": {}, "declared": {}}}
if release_status:
- check_status['values']['current'] = release_status['values']
- check_status['values']['declared'] = release_status
+ check_status["values"]["current"] = release_status["values"]
+ check_status["values"]["declared"] = release_status
module.exit_json(
changed=changed,
command=helm_cmd,
status=check_status,
- stdout='',
- stderr='',
+ stdout="",
+ stderr="",
**opt_result,
)
elif not changed:
module.exit_json(
changed=False,
status=release_status,
- stdout='',
- stderr='',
+ stdout="",
+ stderr="",
command=helm_cmd,
**opt_result,
)
@@ -717,5 +800,5 @@ def main():
)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/helm_info.py b/plugins/modules/helm_info.py
index 02d735239e..47cc6b4a15 100644
--- a/plugins/modules/helm_info.py
+++ b/plugins/modules/helm_info.py
@@ -4,10 +4,11 @@
# Copyright: (c) 2020, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: helm_info
@@ -40,16 +41,16 @@
aliases: [ namespace ]
extends_documentation_fragment:
- kubernetes.core.helm_common_options
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Deploy latest version of Grafana chart inside monitoring namespace
kubernetes.core.helm_info:
name: test
release_namespace: monitoring
-'''
+"""
-RETURN = r'''
+RETURN = r"""
status:
type: complex
description: A dictionary of status output
@@ -87,26 +88,30 @@
type: str
returned: always
description: Dict of Values used to deploy
-'''
+"""
import traceback
try:
import yaml
+
IMP_YAML = True
except ImportError:
IMP_YAML_ERR = traceback.format_exc()
IMP_YAML = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback
-from ansible_collections.kubernetes.core.plugins.module_utils.helm import run_helm, get_values
+from ansible_collections.kubernetes.core.plugins.module_utils.helm import (
+ run_helm,
+ get_values,
+)
# Get Release from all deployed releases
def get_release(state, release_name):
if state is not None:
for release in state:
- if release['name'] == release_name:
+ if release["name"] == release_name:
return release
return None
@@ -119,8 +124,10 @@ def get_release_status(module, command, release_name):
if rc != 0:
module.fail_json(
- msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err),
- command=list_command
+ msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(
+ rc, out, err
+ ),
+ command=list_command,
)
release = get_release(yaml.safe_load(out), release_name)
@@ -128,7 +135,7 @@ def get_release_status(module, command, release_name):
if release is None: # not install
return None
- release['values'] = get_values(module, command, release_name)
+ release["values"] = get_values(module, command, release_name)
return release
@@ -138,25 +145,42 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- binary_path=dict(type='path'),
- release_name=dict(type='str', required=True, aliases=['name']),
- release_namespace=dict(type='str', required=True, aliases=['namespace']),
-
+ binary_path=dict(type="path"),
+ release_name=dict(type="str", required=True, aliases=["name"]),
+ release_namespace=dict(type="str", required=True, aliases=["namespace"]),
# Helm options
- context=dict(type='str', aliases=['kube_context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])),
- kubeconfig=dict(type='path', aliases=['kubeconfig_path'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])),
-
+ context=dict(
+ type="str",
+ aliases=["kube_context"],
+ fallback=(env_fallback, ["K8S_AUTH_CONTEXT"]),
+ ),
+ kubeconfig=dict(
+ type="path",
+ aliases=["kubeconfig_path"],
+ fallback=(env_fallback, ["K8S_AUTH_KUBECONFIG"]),
+ ),
# Generic auth key
- host=dict(type='str', fallback=(env_fallback, ['K8S_AUTH_HOST'])),
- ca_cert=dict(type='path', aliases=['ssl_ca_cert'], fallback=(env_fallback, ['K8S_AUTH_SSL_CA_CERT'])),
- validate_certs=dict(type='bool', default=True, aliases=['verify_ssl'], fallback=(env_fallback, ['K8S_AUTH_VERIFY_SSL'])),
- api_key=dict(type='str', no_log=True, fallback=(env_fallback, ['K8S_AUTH_API_KEY']))
+ host=dict(type="str", fallback=(env_fallback, ["K8S_AUTH_HOST"])),
+ ca_cert=dict(
+ type="path",
+ aliases=["ssl_ca_cert"],
+ fallback=(env_fallback, ["K8S_AUTH_SSL_CA_CERT"]),
+ ),
+ validate_certs=dict(
+ type="bool",
+ default=True,
+ aliases=["verify_ssl"],
+ fallback=(env_fallback, ["K8S_AUTH_VERIFY_SSL"]),
+ ),
+ api_key=dict(
+ type="str", no_log=True, fallback=(env_fallback, ["K8S_AUTH_API_KEY"])
+ ),
),
mutually_exclusive=[
("context", "ca_cert"),
("context", "validate_certs"),
("kubeconfig", "ca_cert"),
- ("kubeconfig", "validate_certs")
+ ("kubeconfig", "validate_certs"),
],
supports_check_mode=True,
)
@@ -164,13 +188,13 @@ def main():
if not IMP_YAML:
module.fail_json(msg=missing_required_lib("yaml"), exception=IMP_YAML_ERR)
- bin_path = module.params.get('binary_path')
- release_name = module.params.get('release_name')
+ bin_path = module.params.get("binary_path")
+ release_name = module.params.get("release_name")
if bin_path is not None:
helm_cmd_common = bin_path
else:
- helm_cmd_common = module.get_bin_path('helm', required=True)
+ helm_cmd_common = module.get_bin_path("helm", required=True)
release_status = get_release_status(module, helm_cmd_common, release_name)
@@ -180,5 +204,5 @@ def main():
module.exit_json(changed=False)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/helm_plugin.py b/plugins/modules/helm_plugin.py
index f814968742..bb97222f2b 100644
--- a/plugins/modules/helm_plugin.py
+++ b/plugins/modules/helm_plugin.py
@@ -8,7 +8,7 @@
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: helm_plugin
short_description: Manage Helm plugins
@@ -50,9 +50,9 @@
version_added: "2.3.0"
extends_documentation_fragment:
- kubernetes.core.helm_common_options
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Install Helm env plugin
kubernetes.core.helm_plugin:
plugin_path: https://github.com/adamreese/helm-env
@@ -78,9 +78,9 @@
kubernetes.core.helm_plugin:
plugin_name: secrets
state: latest
-'''
+"""
-RETURN = r'''
+RETURN = r"""
stdout:
type: str
description: Full `helm` command stdout, in case you want to display it or examine the event log
@@ -106,33 +106,53 @@
description: Helm plugin command return code
returned: always
sample: 1
-'''
+"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible_collections.kubernetes.core.plugins.module_utils.helm import (
run_helm,
get_helm_plugin_list,
- parse_helm_plugin_list
+ parse_helm_plugin_list,
)
def main():
module = AnsibleModule(
argument_spec=dict(
- binary_path=dict(type='path'),
- state=dict(type='str', default='present', choices=['present', 'absent', 'latest']),
- plugin_path=dict(type='str',),
- plugin_name=dict(type='str',),
- plugin_version=dict(type='str',),
+ binary_path=dict(type="path"),
+ state=dict(
+ type="str", default="present", choices=["present", "absent", "latest"]
+ ),
+ plugin_path=dict(type="str",),
+ plugin_name=dict(type="str",),
+ plugin_version=dict(type="str",),
# Helm options
- context=dict(type='str', aliases=['kube_context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])),
- kubeconfig=dict(type='path', aliases=['kubeconfig_path'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])),
-
+ context=dict(
+ type="str",
+ aliases=["kube_context"],
+ fallback=(env_fallback, ["K8S_AUTH_CONTEXT"]),
+ ),
+ kubeconfig=dict(
+ type="path",
+ aliases=["kubeconfig_path"],
+ fallback=(env_fallback, ["K8S_AUTH_KUBECONFIG"]),
+ ),
# Generic auth key
- host=dict(type='str', fallback=(env_fallback, ['K8S_AUTH_HOST'])),
- ca_cert=dict(type='path', aliases=['ssl_ca_cert'], fallback=(env_fallback, ['K8S_AUTH_SSL_CA_CERT'])),
- validate_certs=dict(type='bool', default=True, aliases=['verify_ssl'], fallback=(env_fallback, ['K8S_AUTH_VERIFY_SSL'])),
- api_key=dict(type='str', no_log=True, fallback=(env_fallback, ['K8S_AUTH_API_KEY']))
+ host=dict(type="str", fallback=(env_fallback, ["K8S_AUTH_HOST"])),
+ ca_cert=dict(
+ type="path",
+ aliases=["ssl_ca_cert"],
+ fallback=(env_fallback, ["K8S_AUTH_SSL_CA_CERT"]),
+ ),
+ validate_certs=dict(
+ type="bool",
+ default=True,
+ aliases=["verify_ssl"],
+ fallback=(env_fallback, ["K8S_AUTH_VERIFY_SSL"]),
+ ),
+ api_key=dict(
+ type="str", no_log=True, fallback=(env_fallback, ["K8S_AUTH_API_KEY"])
+ ),
),
supports_check_mode=True,
required_if=[
@@ -141,37 +161,37 @@ def main():
("state", "latest", ("plugin_name",)),
],
mutually_exclusive=[
- ('plugin_name', 'plugin_path'),
+ ("plugin_name", "plugin_path"),
("context", "ca_cert"),
("context", "validate_certs"),
("kubeconfig", "ca_cert"),
- ("kubeconfig", "validate_certs")
+ ("kubeconfig", "validate_certs"),
],
)
- bin_path = module.params.get('binary_path')
- state = module.params.get('state')
+ bin_path = module.params.get("binary_path")
+ state = module.params.get("state")
if bin_path is not None:
helm_cmd_common = bin_path
else:
- helm_cmd_common = 'helm'
+ helm_cmd_common = "helm"
helm_cmd_common = module.get_bin_path(helm_cmd_common, required=True)
helm_cmd_common += " plugin"
- if state == 'present':
- helm_cmd_common += " install %s" % module.params.get('plugin_path')
- plugin_version = module.params.get('plugin_version')
+ if state == "present":
+ helm_cmd_common += " install %s" % module.params.get("plugin_path")
+ plugin_version = module.params.get("plugin_version")
if plugin_version is not None:
helm_cmd_common += " --version=%s" % plugin_version
if not module.check_mode:
rc, out, err = run_helm(module, helm_cmd_common, fails_on_error=False)
else:
- rc, out, err = (0, '', '')
+ rc, out, err = (0, "", "")
- if rc == 1 and 'plugin already exists' in err:
+ if rc == 1 and "plugin already exists" in err:
module.exit_json(
failed=False,
changed=False,
@@ -179,7 +199,7 @@ def main():
command=helm_cmd_common,
stdout=out,
stderr=err,
- rc=rc
+ rc=rc,
)
elif rc == 0:
module.exit_json(
@@ -199,8 +219,8 @@ def main():
stderr=err,
rc=rc,
)
- elif state == 'absent':
- plugin_name = module.params.get('plugin_name')
+ elif state == "absent":
+ plugin_name = module.params.get("plugin_name")
rc, output, err = get_helm_plugin_list(module, helm_bin=helm_cmd_common)
out = parse_helm_plugin_list(module, output=output.splitlines())
@@ -212,7 +232,7 @@ def main():
command=helm_cmd_common + " list",
stdout=output,
stderr=err,
- rc=rc
+ rc=rc,
)
found = False
@@ -228,14 +248,14 @@ def main():
command=helm_cmd_common + " list",
stdout=output,
stderr=err,
- rc=rc
+ rc=rc,
)
helm_uninstall_cmd = "%s uninstall %s" % (helm_cmd_common, plugin_name)
if not module.check_mode:
rc, out, err = run_helm(module, helm_uninstall_cmd, fails_on_error=False)
else:
- rc, out, err = (0, '', '')
+ rc, out, err = (0, "", "")
if rc == 0:
module.exit_json(
@@ -244,7 +264,7 @@ def main():
command=helm_uninstall_cmd,
stdout=out,
stderr=err,
- rc=rc
+ rc=rc,
)
module.fail_json(
msg="Failed to get Helm plugin uninstall",
@@ -253,8 +273,8 @@ def main():
stderr=err,
rc=rc,
)
- elif state == 'latest':
- plugin_name = module.params.get('plugin_name')
+ elif state == "latest":
+ plugin_name = module.params.get("plugin_name")
rc, output, err = get_helm_plugin_list(module, helm_bin=helm_cmd_common)
out = parse_helm_plugin_list(module, output=output.splitlines())
@@ -266,7 +286,7 @@ def main():
command=helm_cmd_common + " list",
stdout=output,
stderr=err,
- rc=rc
+ rc=rc,
)
found = False
@@ -282,14 +302,14 @@ def main():
command=helm_cmd_common + " list",
stdout=output,
stderr=err,
- rc=rc
+ rc=rc,
)
helm_update_cmd = "%s update %s" % (helm_cmd_common, plugin_name)
if not module.check_mode:
rc, out, err = run_helm(module, helm_update_cmd, fails_on_error=False)
else:
- rc, out, err = (0, '', '')
+ rc, out, err = (0, "", "")
if rc == 0:
module.exit_json(
@@ -298,7 +318,7 @@ def main():
command=helm_update_cmd,
stdout=out,
stderr=err,
- rc=rc
+ rc=rc,
)
module.fail_json(
msg="Failed to get Helm plugin update",
@@ -309,5 +329,5 @@ def main():
)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/helm_plugin_info.py b/plugins/modules/helm_plugin_info.py
index 9d43c594e8..bf14e31111 100644
--- a/plugins/modules/helm_plugin_info.py
+++ b/plugins/modules/helm_plugin_info.py
@@ -8,7 +8,7 @@
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: helm_plugin_info
short_description: Gather information about Helm plugins
@@ -27,18 +27,18 @@
type: str
extends_documentation_fragment:
- kubernetes.core.helm_common_options
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Gather Helm plugin info
kubernetes.core.helm_plugin_info:
- name: Gather Helm env plugin info
kubernetes.core.helm_plugin_info:
plugin_name: env
-'''
+"""
-RETURN = r'''
+RETURN = r"""
stdout:
type: str
description: Full `helm` command stdout, in case you want to display it or examine the event log
@@ -68,7 +68,7 @@
description: Helm plugin command return code
returned: always
sample: 1
-'''
+"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible_collections.kubernetes.core.plugins.module_utils.helm import (
@@ -80,39 +80,57 @@
def main():
module = AnsibleModule(
argument_spec=dict(
- binary_path=dict(type='path'),
- plugin_name=dict(type='str',),
+ binary_path=dict(type="path"),
+ plugin_name=dict(type="str",),
# Helm options
- context=dict(type='str', aliases=['kube_context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])),
- kubeconfig=dict(type='path', aliases=['kubeconfig_path'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])),
-
+ context=dict(
+ type="str",
+ aliases=["kube_context"],
+ fallback=(env_fallback, ["K8S_AUTH_CONTEXT"]),
+ ),
+ kubeconfig=dict(
+ type="path",
+ aliases=["kubeconfig_path"],
+ fallback=(env_fallback, ["K8S_AUTH_KUBECONFIG"]),
+ ),
# Generic auth key
- host=dict(type='str', fallback=(env_fallback, ['K8S_AUTH_HOST'])),
- ca_cert=dict(type='path', aliases=['ssl_ca_cert'], fallback=(env_fallback, ['K8S_AUTH_SSL_CA_CERT'])),
- validate_certs=dict(type='bool', default=True, aliases=['verify_ssl'], fallback=(env_fallback, ['K8S_AUTH_VERIFY_SSL'])),
- api_key=dict(type='str', no_log=True, fallback=(env_fallback, ['K8S_AUTH_API_KEY']))
+ host=dict(type="str", fallback=(env_fallback, ["K8S_AUTH_HOST"])),
+ ca_cert=dict(
+ type="path",
+ aliases=["ssl_ca_cert"],
+ fallback=(env_fallback, ["K8S_AUTH_SSL_CA_CERT"]),
+ ),
+ validate_certs=dict(
+ type="bool",
+ default=True,
+ aliases=["verify_ssl"],
+ fallback=(env_fallback, ["K8S_AUTH_VERIFY_SSL"]),
+ ),
+ api_key=dict(
+ type="str", no_log=True, fallback=(env_fallback, ["K8S_AUTH_API_KEY"])
+ ),
),
mutually_exclusive=[
("context", "ca_cert"),
("context", "validate_certs"),
("kubeconfig", "ca_cert"),
- ("kubeconfig", "validate_certs")
+ ("kubeconfig", "validate_certs"),
],
supports_check_mode=True,
)
- bin_path = module.params.get('binary_path')
+ bin_path = module.params.get("binary_path")
if bin_path is not None:
helm_cmd_common = bin_path
else:
- helm_cmd_common = 'helm'
+ helm_cmd_common = "helm"
helm_cmd_common = module.get_bin_path(helm_cmd_common, required=True)
helm_cmd_common += " plugin"
- plugin_name = module.params.get('plugin_name')
+ plugin_name = module.params.get("plugin_name")
plugin_list = []
@@ -123,21 +141,13 @@ def main():
for line in out:
if plugin_name is None:
plugin_list.append(
- {
- "name": line[0],
- "version": line[1],
- "description": line[2],
- }
+ {"name": line[0], "version": line[1], "description": line[2]}
)
continue
if plugin_name == line[0]:
plugin_list.append(
- {
- "name": line[0],
- "version": line[1],
- "description": line[2],
- }
+ {"name": line[0], "version": line[1], "description": line[2]}
)
break
@@ -151,5 +161,5 @@ def main():
)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/helm_repository.py b/plugins/modules/helm_repository.py
index f125209afc..72045da962 100644
--- a/plugins/modules/helm_repository.py
+++ b/plugins/modules/helm_repository.py
@@ -4,10 +4,11 @@
# Copyright: (c) 2020, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: helm_repository
@@ -64,9 +65,9 @@
default: present
aliases: [ state ]
type: str
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Add a repository
kubernetes.core.helm_repository:
name: stable
@@ -76,9 +77,9 @@
kubernetes.core.helm_repository:
name: redhat-charts
repo_url: https://redhat-developer.github.com/redhat-helm-charts
-'''
+"""
-RETURN = r'''
+RETURN = r"""
stdout:
type: str
description: Full `helm` command stdout, in case you want to display it or examine the event log
@@ -109,12 +110,13 @@
description: Error message returned by `helm` command
returned: on failure
sample: 'Repository already have a repository named bitnami'
-'''
+"""
import traceback
try:
import yaml
+
IMP_YAML = True
except ImportError:
IMP_YAML_ERR = traceback.format_exc()
@@ -128,7 +130,7 @@
def get_repository(state, repo_name):
if state is not None:
for repository in state:
- if repository['name'] == repo_name:
+ if repository["name"] == repo_name:
return repository
return None
@@ -144,15 +146,19 @@ def get_repository_status(module, command, repository_name):
return None
elif rc != 0:
module.fail_json(
- msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err),
- command=list_command
+ msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(
+ rc, out, err
+ ),
+ command=list_command,
)
return get_repository(yaml.safe_load(out), repository_name)
# Install repository
-def install_repository(command, repository_name, repository_url, repository_username, repository_password):
+def install_repository(
+ command, repository_name, repository_url, repository_username, repository_password
+):
install_command = command + " repo add " + repository_name + " " + repository_url
if repository_username is not None and repository_password is not None:
@@ -174,19 +180,17 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- binary_path=dict(type='path'),
- repo_name=dict(type='str', aliases=['name'], required=True),
- repo_url=dict(type='str', aliases=['url']),
- repo_username=dict(type='str', aliases=['username']),
- repo_password=dict(type='str', aliases=['password'], no_log=True),
- repo_state=dict(default='present', choices=['present', 'absent'], aliases=['state']),
+ binary_path=dict(type="path"),
+ repo_name=dict(type="str", aliases=["name"], required=True),
+ repo_url=dict(type="str", aliases=["url"]),
+ repo_username=dict(type="str", aliases=["username"]),
+ repo_password=dict(type="str", aliases=["password"], no_log=True),
+ repo_state=dict(
+ default="present", choices=["present", "absent"], aliases=["state"]
+ ),
),
- required_together=[
- ['repo_username', 'repo_password']
- ],
- required_if=[
- ('repo_state', 'present', ['repo_url']),
- ],
+ required_together=[["repo_username", "repo_password"]],
+ required_if=[("repo_state", "present", ["repo_url"])],
supports_check_mode=True,
)
@@ -195,17 +199,17 @@ def main():
changed = False
- bin_path = module.params.get('binary_path')
- repo_name = module.params.get('repo_name')
- repo_url = module.params.get('repo_url')
- repo_username = module.params.get('repo_username')
- repo_password = module.params.get('repo_password')
- repo_state = module.params.get('repo_state')
+ bin_path = module.params.get("binary_path")
+ repo_name = module.params.get("repo_name")
+ repo_url = module.params.get("repo_url")
+ repo_username = module.params.get("repo_username")
+ repo_password = module.params.get("repo_password")
+ repo_state = module.params.get("repo_state")
if bin_path is not None:
helm_cmd = bin_path
else:
- helm_cmd = module.get_bin_path('helm', required=True)
+ helm_cmd = module.get_bin_path("helm", required=True)
repository_status = get_repository_status(module, helm_cmd, repo_name)
@@ -214,10 +218,14 @@ def main():
changed = True
elif repo_state == "present":
if repository_status is None:
- helm_cmd = install_repository(helm_cmd, repo_name, repo_url, repo_username, repo_password)
+ helm_cmd = install_repository(
+ helm_cmd, repo_name, repo_url, repo_username, repo_password
+ )
changed = True
- elif repository_status['url'] != repo_url:
- module.fail_json(msg="Repository already have a repository named {0}".format(repo_name))
+ elif repository_status["url"] != repo_url:
+ module.fail_json(
+ msg="Repository already have a repository named {0}".format(repo_name)
+ )
if module.check_mode:
module.exit_json(changed=changed)
@@ -227,16 +235,18 @@ def main():
rc, out, err = run_helm(module, helm_cmd)
if repo_password is not None:
- helm_cmd = helm_cmd.replace(repo_password, '******')
+ helm_cmd = helm_cmd.replace(repo_password, "******")
if rc != 0:
module.fail_json(
- msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err),
- command=helm_cmd
+ msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(
+ rc, out, err
+ ),
+ command=helm_cmd,
)
module.exit_json(changed=changed, stdout=out, stderr=err, command=helm_cmd)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/helm_template.py b/plugins/modules/helm_template.py
index 6135dfb31e..d0df4252d5 100644
--- a/plugins/modules/helm_template.py
+++ b/plugins/modules/helm_template.py
@@ -9,7 +9,7 @@
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: helm_template
@@ -79,9 +79,9 @@
- Run C(helm repo update) before the operation. Can be run as part of the template generation or as a separate step.
default: false
type: bool
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Render templates to specified directory
kubernetes.core.helm_template:
chart_ref: stable/prometheus
@@ -96,9 +96,9 @@
copy:
dest: myfile.yaml
content: "{{ result.stdout }}"
-'''
+"""
-RETURN = r'''
+RETURN = r"""
stdout:
type: str
description: Full C(helm) command stdout. If no I(output_dir) has been provided this will contain the rendered templates as concatenated yaml documents.
@@ -114,13 +114,14 @@
description: Full C(helm) command run by this module, in case you want to re-run the command outside the module or debug a problem.
returned: always
sample: helm template --output-dir mychart nginx-stable/nginx-ingress
-'''
+"""
import tempfile
import traceback
try:
import yaml
+
IMP_YAML = True
except ImportError:
IMP_YAML_ERR = traceback.format_exc()
@@ -130,8 +131,16 @@
from ansible_collections.kubernetes.core.plugins.module_utils.helm import run_helm
-def template(cmd, chart_ref, chart_repo_url=None, chart_version=None, output_dir=None,
- release_values=None, values_files=None, include_crds=False):
+def template(
+ cmd,
+ chart_ref,
+ chart_repo_url=None,
+ chart_version=None,
+ output_dir=None,
+ release_values=None,
+ values_files=None,
+ include_crds=False,
+):
cmd += " template " + chart_ref
if chart_repo_url:
@@ -144,8 +153,8 @@ def template(cmd, chart_ref, chart_repo_url=None, chart_version=None, output_dir
cmd += " --output-dir=" + output_dir
if release_values:
- fd, path = tempfile.mkstemp(suffix='.yml')
- with open(path, 'w') as yaml_file:
+ fd, path = tempfile.mkstemp(suffix=".yml")
+ with open(path, "w") as yaml_file:
yaml.dump(release_values, yaml_file, default_flow_style=False)
cmd += " -f=" + path
@@ -162,43 +171,49 @@ def template(cmd, chart_ref, chart_repo_url=None, chart_version=None, output_dir
def main():
module = AnsibleModule(
argument_spec=dict(
- binary_path=dict(type='path'),
- chart_ref=dict(type='path', required=True),
- chart_repo_url=dict(type='str'),
- chart_version=dict(type='str'),
- include_crds=dict(type='bool', default=False),
- output_dir=dict(type='path'),
- release_values=dict(type='dict', default={}, aliases=['values']),
- values_files=dict(type='list', default=[], elements='str'),
- update_repo_cache=dict(type='bool', default=False)
+ binary_path=dict(type="path"),
+ chart_ref=dict(type="path", required=True),
+ chart_repo_url=dict(type="str"),
+ chart_version=dict(type="str"),
+ include_crds=dict(type="bool", default=False),
+ output_dir=dict(type="path"),
+ release_values=dict(type="dict", default={}, aliases=["values"]),
+ values_files=dict(type="list", default=[], elements="str"),
+ update_repo_cache=dict(type="bool", default=False),
),
- supports_check_mode=True
+ supports_check_mode=True,
)
check_mode = module.check_mode
- bin_path = module.params.get('binary_path')
- chart_ref = module.params.get('chart_ref')
- chart_repo_url = module.params.get('chart_repo_url')
- chart_version = module.params.get('chart_version')
- include_crds = module.params.get('include_crds')
- output_dir = module.params.get('output_dir')
- release_values = module.params.get('release_values')
- values_files = module.params.get('values_files')
- update_repo_cache = module.params.get('update_repo_cache')
+ bin_path = module.params.get("binary_path")
+ chart_ref = module.params.get("chart_ref")
+ chart_repo_url = module.params.get("chart_repo_url")
+ chart_version = module.params.get("chart_version")
+ include_crds = module.params.get("include_crds")
+ output_dir = module.params.get("output_dir")
+ release_values = module.params.get("release_values")
+ values_files = module.params.get("values_files")
+ update_repo_cache = module.params.get("update_repo_cache")
if not IMP_YAML:
module.fail_json(msg=missing_required_lib("yaml"), exception=IMP_YAML_ERR)
- helm_cmd = bin_path or module.get_bin_path('helm', required=True)
+ helm_cmd = bin_path or module.get_bin_path("helm", required=True)
if update_repo_cache:
update_cmd = helm_cmd + " repo update"
run_helm(module, update_cmd)
- tmpl_cmd = template(helm_cmd, chart_ref, chart_repo_url=chart_repo_url,
- chart_version=chart_version, output_dir=output_dir,
- release_values=release_values, values_files=values_files,
- include_crds=include_crds)
+ tmpl_cmd = template(
+ helm_cmd,
+ chart_ref,
+ chart_repo_url=chart_repo_url,
+ chart_version=chart_version,
+ output_dir=output_dir,
+ release_values=release_values,
+ values_files=values_files,
+ include_crds=include_crds,
+ )
if not check_mode:
rc, out, err = run_helm(module, tmpl_cmd)
@@ -207,14 +222,9 @@ def main():
rc = 0
module.exit_json(
- failed=False,
- changed=True,
- command=tmpl_cmd,
- stdout=out,
- stderr=err,
- rc=rc
+ failed=False, changed=True, command=tmpl_cmd, stdout=out, stderr=err, rc=rc
)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/k8s.py b/plugins/modules/k8s.py
index ced9c016cc..e805bc58e2 100644
--- a/plugins/modules/k8s.py
+++ b/plugins/modules/k8s.py
@@ -10,7 +10,7 @@
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: k8s
@@ -158,9 +158,9 @@
- "kubernetes >= 12.0.0"
- "PyYAML >= 3.11"
- "jsonpatch"
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create a k8s namespace
kubernetes.core.k8s:
name: testing
@@ -302,9 +302,9 @@
- name: py
image: python:3.7-alpine
imagePullPolicy: IfNotPresent
-'''
+"""
-RETURN = r'''
+RETURN = r"""
result:
description:
- The created, patched, or otherwise present object. Will be empty in the case of a deletion.
@@ -344,20 +344,27 @@
description: error while trying to create/delete the object.
returned: error
type: complex
-'''
+"""
import copy
-from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import AnsibleModule
+from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import (
+ AnsibleModule,
+)
from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
- AUTH_ARG_SPEC, WAIT_ARG_SPEC, NAME_ARG_SPEC, RESOURCE_ARG_SPEC, DELETE_OPTS_ARG_SPEC)
+ AUTH_ARG_SPEC,
+ WAIT_ARG_SPEC,
+ NAME_ARG_SPEC,
+ RESOURCE_ARG_SPEC,
+ DELETE_OPTS_ARG_SPEC,
+)
def validate_spec():
return dict(
- fail_on_error=dict(type='bool'),
+ fail_on_error=dict(type="bool"),
version=dict(),
- strict=dict(type='bool', default=True)
+ strict=dict(type="bool", default=True),
)
@@ -366,17 +373,23 @@ def argspec():
argument_spec.update(copy.deepcopy(RESOURCE_ARG_SPEC))
argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC))
argument_spec.update(copy.deepcopy(WAIT_ARG_SPEC))
- argument_spec['merge_type'] = dict(type='list', elements='str', choices=['json', 'merge', 'strategic-merge'])
- argument_spec['validate'] = dict(type='dict', default=None, options=validate_spec())
- argument_spec['append_hash'] = dict(type='bool', default=False)
- argument_spec['apply'] = dict(type='bool', default=False)
- argument_spec['template'] = dict(type='raw', default=None)
- argument_spec['delete_options'] = dict(type='dict', default=None, options=copy.deepcopy(DELETE_OPTS_ARG_SPEC))
- argument_spec['continue_on_error'] = dict(type='bool', default=False)
- argument_spec['state'] = dict(default='present', choices=['present', 'absent', 'patched'])
- argument_spec['force'] = dict(type='bool', default=False)
- argument_spec['label_selectors'] = dict(type='list', elements='str')
- argument_spec['generate_name'] = dict()
+ argument_spec["merge_type"] = dict(
+ type="list", elements="str", choices=["json", "merge", "strategic-merge"]
+ )
+ argument_spec["validate"] = dict(type="dict", default=None, options=validate_spec())
+ argument_spec["append_hash"] = dict(type="bool", default=False)
+ argument_spec["apply"] = dict(type="bool", default=False)
+ argument_spec["template"] = dict(type="raw", default=None)
+ argument_spec["delete_options"] = dict(
+ type="dict", default=None, options=copy.deepcopy(DELETE_OPTS_ARG_SPEC)
+ )
+ argument_spec["continue_on_error"] = dict(type="bool", default=False)
+ argument_spec["state"] = dict(
+ default="present", choices=["present", "absent", "patched"]
+ )
+ argument_spec["force"] = dict(type="bool", default=False)
+ argument_spec["label_selectors"] = dict(type="list", elements="str")
+ argument_spec["generate_name"] = dict()
return argument_spec
@@ -392,11 +405,11 @@ def execute_module(module, k8s_ansible_mixin):
k8s_ansible_mixin.warn = k8s_ansible_mixin.module.warn
k8s_ansible_mixin.warnings = []
- k8s_ansible_mixin.kind = k8s_ansible_mixin.params.get('kind')
- k8s_ansible_mixin.api_version = k8s_ansible_mixin.params.get('api_version')
- k8s_ansible_mixin.name = k8s_ansible_mixin.params.get('name')
- k8s_ansible_mixin.generate_name = k8s_ansible_mixin.params.get('generate_name')
- k8s_ansible_mixin.namespace = k8s_ansible_mixin.params.get('namespace')
+ k8s_ansible_mixin.kind = k8s_ansible_mixin.params.get("kind")
+ k8s_ansible_mixin.api_version = k8s_ansible_mixin.params.get("api_version")
+ k8s_ansible_mixin.name = k8s_ansible_mixin.params.get("name")
+ k8s_ansible_mixin.generate_name = k8s_ansible_mixin.params.get("generate_name")
+ k8s_ansible_mixin.namespace = k8s_ansible_mixin.params.get("namespace")
k8s_ansible_mixin.check_library_version()
k8s_ansible_mixin.set_resource_definitions(module)
@@ -405,20 +418,26 @@ def execute_module(module, k8s_ansible_mixin):
def main():
mutually_exclusive = [
- ('resource_definition', 'src'),
- ('merge_type', 'apply'),
- ('template', 'resource_definition'),
- ('template', 'src'),
- ('name', 'generate_name'),
+ ("resource_definition", "src"),
+ ("merge_type", "apply"),
+ ("template", "resource_definition"),
+ ("template", "src"),
+ ("name", "generate_name"),
]
- module = AnsibleModule(argument_spec=argspec(), mutually_exclusive=mutually_exclusive, supports_check_mode=True)
+ module = AnsibleModule(
+ argument_spec=argspec(),
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ )
from ansible_collections.kubernetes.core.plugins.module_utils.common import (
- K8sAnsibleMixin, get_api_client)
+ K8sAnsibleMixin,
+ get_api_client,
+ )
k8s_ansible_mixin = K8sAnsibleMixin(module)
k8s_ansible_mixin.client = get_api_client(module=module)
execute_module(module, k8s_ansible_mixin)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/k8s_cluster_info.py b/plugins/modules/k8s_cluster_info.py
index b17f07bec0..e84b9eb748 100644
--- a/plugins/modules/k8s_cluster_info.py
+++ b/plugins/modules/k8s_cluster_info.py
@@ -4,10 +4,11 @@
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: k8s_cluster_info
version_added: "0.11.1"
@@ -36,9 +37,9 @@
- "python >= 3.6"
- "kubernetes >= 12.0.0"
- "PyYAML >= 3.11"
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Get Cluster information
kubernetes.core.k8s_cluster_info:
register: api_status
@@ -47,9 +48,9 @@
kubernetes.core.k8s_cluster_info:
invalidate_cache: False
register: api_status
-'''
+"""
-RETURN = r'''
+RETURN = r"""
connection:
description:
- Connection information
@@ -136,7 +137,7 @@
description: Resource singular name
returned: success
type: str
-'''
+"""
import copy
@@ -145,7 +146,10 @@
HAS_K8S = False
try:
- from ansible_collections.kubernetes.core.plugins.module_utils.client.resource import ResourceList
+ from ansible_collections.kubernetes.core.plugins.module_utils.client.resource import (
+ ResourceList,
+ )
+
HAS_K8S = True
except ImportError as e:
K8S_IMP_ERR = e
@@ -154,12 +158,18 @@
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.parsing.convert_bool import boolean
-from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import AnsibleModule
-from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (AUTH_ARG_SPEC)
+from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import (
+ AnsibleModule,
+)
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
+ AUTH_ARG_SPEC,
+)
def execute_module(module, client):
- invalidate_cache = boolean(module.params.get('invalidate_cache', True), strict=False)
+ invalidate_cache = boolean(
+ module.params.get("invalidate_cache", True), strict=False
+ )
if invalidate_cache:
client.resources.invalidate_cache()
results = defaultdict(dict)
@@ -167,47 +177,60 @@ def execute_module(module, client):
resource = resource[0]
if isinstance(resource, ResourceList):
continue
- key = resource.group_version if resource.group == '' else '/'.join([resource.group, resource.group_version.split('/')[-1]])
+ key = (
+ resource.group_version
+ if resource.group == ""
+ else "/".join([resource.group, resource.group_version.split("/")[-1]])
+ )
results[key][resource.kind] = {
- 'categories': resource.categories if resource.categories else [],
- 'name': resource.name,
- 'namespaced': resource.namespaced,
- 'preferred': resource.preferred,
- 'short_names': resource.short_names if resource.short_names else [],
- 'singular_name': resource.singular_name,
+ "categories": resource.categories if resource.categories else [],
+ "name": resource.name,
+ "namespaced": resource.namespaced,
+ "preferred": resource.preferred,
+ "short_names": resource.short_names if resource.short_names else [],
+ "singular_name": resource.singular_name,
}
configuration = client.configuration
connection = {
- 'cert_file': configuration.cert_file,
- 'host': configuration.host,
- 'password': configuration.password,
- 'proxy': configuration.proxy,
- 'ssl_ca_cert': configuration.ssl_ca_cert,
- 'username': configuration.username,
- 'verify_ssl': configuration.verify_ssl,
+ "cert_file": configuration.cert_file,
+ "host": configuration.host,
+ "password": configuration.password,
+ "proxy": configuration.proxy,
+ "ssl_ca_cert": configuration.ssl_ca_cert,
+ "username": configuration.username,
+ "verify_ssl": configuration.verify_ssl,
}
from kubernetes import __version__ as version
+
version_info = {
- 'client': version,
- 'server': client.version,
+ "client": version,
+ "server": client.version,
}
- module.exit_json(changed=False, apis=results, connection=connection, version=version_info)
+ module.exit_json(
+ changed=False, apis=results, connection=connection, version=version_info
+ )
def argspec():
spec = copy.deepcopy(AUTH_ARG_SPEC)
- spec['invalidate_cache'] = dict(type='bool', default=True)
+ spec["invalidate_cache"] = dict(type="bool", default=True)
return spec
def main():
module = AnsibleModule(argument_spec=argspec(), supports_check_mode=True)
if not HAS_K8S:
- module.fail_json(msg=missing_required_lib('kubernetes'), exception=K8S_IMP_EXC,
- error=to_native(K8S_IMP_ERR))
- from ansible_collections.kubernetes.core.plugins.module_utils.common import get_api_client
+ module.fail_json(
+ msg=missing_required_lib("kubernetes"),
+ exception=K8S_IMP_EXC,
+ error=to_native(K8S_IMP_ERR),
+ )
+ from ansible_collections.kubernetes.core.plugins.module_utils.common import (
+ get_api_client,
+ )
+
execute_module(module, client=get_api_client(module=module))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/k8s_cp.py b/plugins/modules/k8s_cp.py
index b5491a9d31..74d811c885 100644
--- a/plugins/modules/k8s_cp.py
+++ b/plugins/modules/k8s_cp.py
@@ -4,10 +4,11 @@
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: k8s_cp
@@ -78,9 +79,9 @@
notes:
- the tar binary is required on the container when copying from local filesystem to pod.
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# kubectl cp /tmp/foo some-namespace/some-pod:/tmp/bar
- name: Copy /tmp/foo local file to /tmp/bar in a remote pod
kubernetes.core.k8s_cp:
@@ -125,16 +126,16 @@
pod: some-pod
remote_path: /tmp/foo.txt
content: "This content will be copied into remote file"
-'''
+"""
-RETURN = r'''
+RETURN = r"""
result:
description:
- message describing the copy operation successfully done.
returned: success
type: str
-'''
+"""
import copy
import os
@@ -146,13 +147,23 @@
# from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import AnsibleModule
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
-from ansible_collections.kubernetes.core.plugins.module_utils.common import K8sAnsibleMixin, get_api_client
-from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC
+from ansible_collections.kubernetes.core.plugins.module_utils.common import (
+ K8sAnsibleMixin,
+ get_api_client,
+)
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
+ AUTH_ARG_SPEC,
+)
try:
from kubernetes.client.api import core_v1_api
from kubernetes.stream import stream
- from kubernetes.stream.ws_client import STDOUT_CHANNEL, STDERR_CHANNEL, ERROR_CHANNEL, ABNF
+ from kubernetes.stream.ws_client import (
+ STDOUT_CHANNEL,
+ STDERR_CHANNEL,
+ ERROR_CHANNEL,
+ ABNF,
+ )
except ImportError:
pass
@@ -164,22 +175,21 @@
class K8SCopy(metaclass=ABCMeta):
-
def __init__(self, module, client):
self.client = client
self.module = module
self.api_instance = core_v1_api.CoreV1Api(client.client)
- self.local_path = module.params.get('local_path')
- self.name = module.params.get('pod')
- self.namespace = module.params.get('namespace')
- self.remote_path = module.params.get('remote_path')
- self.content = module.params.get('content')
+ self.local_path = module.params.get("local_path")
+ self.name = module.params.get("pod")
+ self.namespace = module.params.get("namespace")
+ self.remote_path = module.params.get("remote_path")
+ self.content = module.params.get("content")
- self.no_preserve = module.params.get('no_preserve')
+ self.no_preserve = module.params.get("no_preserve")
self.container_arg = {}
- if module.params.get('container'):
- self.container_arg['container'] = module.params.get('container')
+ if module.params.get("container"):
+ self.container_arg["container"] = module.params.get("container")
@abstractmethod
def run(self):
@@ -190,6 +200,7 @@ class K8SCopyFromPod(K8SCopy):
"""
Copy files/directory from Pod into local filesystem
"""
+
def __init__(self, module, client):
super(K8SCopyFromPod, self).__init__(module, client)
self.is_remote_path_dir = None
@@ -201,31 +212,48 @@ def list_remote_files(self):
if it is a directory the file list will be updated accordingly
"""
try:
- find_cmd = ['find', self.remote_path, '-type', 'f', '-name', '*']
- response = stream(self.api_instance.connect_get_namespaced_pod_exec,
- self.name,
- self.namespace,
- command=find_cmd,
- stdout=True, stderr=True,
- stdin=False, tty=False,
- _preload_content=False, **self.container_arg)
+ find_cmd = ["find", self.remote_path, "-type", "f", "-name", "*"]
+ response = stream(
+ self.api_instance.connect_get_namespaced_pod_exec,
+ self.name,
+ self.namespace,
+ command=find_cmd,
+ stdout=True,
+ stderr=True,
+ stdin=False,
+ tty=False,
+ _preload_content=False,
+ **self.container_arg
+ )
except Exception as e:
- self.module.fail_json(msg="Failed to execute on pod {0}/{1} due to : {2}".format(self.namespace, self.name, to_native(e)))
+ self.module.fail_json(
+ msg="Failed to execute on pod {0}/{1} due to : {2}".format(
+ self.namespace, self.name, to_native(e)
+ )
+ )
stderr = []
while response.is_open():
response.update(timeout=1)
if response.peek_stdout():
- self.files_to_copy.extend(response.read_stdout().rstrip('\n').split('\n'))
+ self.files_to_copy.extend(
+ response.read_stdout().rstrip("\n").split("\n")
+ )
if response.peek_stderr():
err = response.read_stderr()
if "No such file or directory" in err:
- self.module.fail_json(msg="{0} does not exist in remote pod filesystem".format(self.remote_path))
+ self.module.fail_json(
+ msg="{0} does not exist in remote pod filesystem".format(
+ self.remote_path
+ )
+ )
stderr.append(err)
error = response.read_channel(ERROR_CHANNEL)
response.close()
error = yaml.safe_load(error)
- if error['status'] != 'Success':
- self.module.fail_json(msg="Failed to execute on Pod due to: {0}".format(error))
+ if error["status"] != "Success":
+ self.module.fail_json(
+ msg="Failed to execute on Pod due to: {0}".format(error)
+ )
def read(self):
self.stdout = None
@@ -235,12 +263,15 @@ def read(self):
if not self.response.sock.connected:
self.response._connected = False
else:
- ret, out, err = select((self.response.sock.sock, ), (), (), 0)
+ ret, out, err = select((self.response.sock.sock,), (), (), 0)
if ret:
code, frame = self.response.sock.recv_data_frame(True)
if code == ABNF.OPCODE_CLOSE:
self.response._connected = False
- elif code in (ABNF.OPCODE_BINARY, ABNF.OPCODE_TEXT) and len(frame.data) > 1:
+ elif (
+ code in (ABNF.OPCODE_BINARY, ABNF.OPCODE_TEXT)
+ and len(frame.data) > 1
+ ):
channel = frame.data[0]
content = frame.data[1:]
if content:
@@ -250,7 +281,9 @@ def read(self):
self.stderr = content.decode("utf-8", "replace")
def copy(self):
- is_remote_path_dir = len(self.files_to_copy) > 1 or self.files_to_copy[0] != self.remote_path
+ is_remote_path_dir = (
+ len(self.files_to_copy) > 1 or self.files_to_copy[0] != self.remote_path
+ )
relpath_start = self.remote_path
if is_remote_path_dir and os.path.isdir(self.local_path):
relpath_start = os.path.dirname(self.remote_path)
@@ -258,20 +291,27 @@ def copy(self):
for remote_file in self.files_to_copy:
dest_file = self.local_path
if is_remote_path_dir:
- dest_file = os.path.join(self.local_path, os.path.relpath(remote_file, start=relpath_start))
+ dest_file = os.path.join(
+ self.local_path, os.path.relpath(remote_file, start=relpath_start)
+ )
# create directory to copy file in
os.makedirs(os.path.dirname(dest_file), exist_ok=True)
- pod_command = ['cat', remote_file]
- self.response = stream(self.api_instance.connect_get_namespaced_pod_exec,
- self.name,
- self.namespace,
- command=pod_command,
- stderr=True, stdin=True,
- stdout=True, tty=False,
- _preload_content=False, **self.container_arg)
+ pod_command = ["cat", remote_file]
+ self.response = stream(
+ self.api_instance.connect_get_namespaced_pod_exec,
+ self.name,
+ self.namespace,
+ command=pod_command,
+ stderr=True,
+ stdin=True,
+ stdout=True,
+ tty=False,
+ _preload_content=False,
+ **self.container_arg
+ )
errors = []
- with open(dest_file, 'wb') as fh:
+ with open(dest_file, "wb") as fh:
while self.response._connected:
self.read()
if self.stdout:
@@ -279,35 +319,57 @@ def copy(self):
if self.stderr:
errors.append(self.stderr)
if errors:
- self.module.fail_json(msg="Failed to copy file from Pod: {0}".format(''.join(errors)))
- self.module.exit_json(changed=True, result="{0} successfully copied locally into {1}".format(self.remote_path, self.local_path))
+ self.module.fail_json(
+ msg="Failed to copy file from Pod: {0}".format("".join(errors))
+ )
+ self.module.exit_json(
+ changed=True,
+ result="{0} successfully copied locally into {1}".format(
+ self.remote_path, self.local_path
+ ),
+ )
def run(self):
try:
self.list_remote_files()
if self.files_to_copy == []:
- self.module.exit_json(changed=False, warning="No file found from directory '{0}' into remote Pod.".format(self.remote_path))
+ self.module.exit_json(
+ changed=False,
+ warning="No file found from directory '{0}' into remote Pod.".format(
+ self.remote_path
+ ),
+ )
self.copy()
except Exception as e:
- self.module.fail_json(msg="Failed to copy file/directory from Pod due to: {0}".format(to_native(e)))
+ self.module.fail_json(
+ msg="Failed to copy file/directory from Pod due to: {0}".format(
+ to_native(e)
+ )
+ )
class K8SCopyToPod(K8SCopy):
"""
Copy files/directory from local filesystem into remote Pod
"""
+
def __init__(self, module, client):
super(K8SCopyToPod, self).__init__(module, client)
self.files_to_copy = list()
def run_from_pod(self, command):
- response = stream(self.api_instance.connect_get_namespaced_pod_exec,
- self.name,
- self.namespace,
- command=command,
- stderr=True, stdin=False,
- stdout=True, tty=False,
- _preload_content=False, **self.container_arg)
+ response = stream(
+ self.api_instance.connect_get_namespaced_pod_exec,
+ self.name,
+ self.namespace,
+ command=command,
+ stderr=True,
+ stdin=False,
+ stdout=True,
+ tty=False,
+ _preload_content=False,
+ **self.container_arg
+ )
errors = []
while response.is_open():
response.update(timeout=1)
@@ -317,24 +379,31 @@ def run_from_pod(self, command):
err = response.read_channel(ERROR_CHANNEL)
err = yaml.safe_load(err)
response.close()
- if err['status'] != 'Success':
- self.module.fail_json(msg="Failed to run {0} on Pod.".format(command), errors=errors)
+ if err["status"] != "Success":
+ self.module.fail_json(
+ msg="Failed to run {0} on Pod.".format(command), errors=errors
+ )
def is_remote_path_dir(self):
- pod_command = ['test', '-d', self.remote_path]
- response = stream(self.api_instance.connect_get_namespaced_pod_exec,
- self.name,
- self.namespace,
- command=pod_command,
- stdout=True, stderr=True,
- stdin=False, tty=False,
- _preload_content=False, **self.container_arg)
+ pod_command = ["test", "-d", self.remote_path]
+ response = stream(
+ self.api_instance.connect_get_namespaced_pod_exec,
+ self.name,
+ self.namespace,
+ command=pod_command,
+ stdout=True,
+ stderr=True,
+ stdin=False,
+ tty=False,
+ _preload_content=False,
+ **self.container_arg
+ )
while response.is_open():
response.update(timeout=1)
err = response.read_channel(ERROR_CHANNEL)
err = yaml.safe_load(err)
response.close()
- if err['status'] == 'Success':
+ if err["status"] == "Success":
return True
return False
@@ -355,33 +424,52 @@ def run(self):
src_file = self.named_temp_file.name
else:
if not os.path.exists(self.local_path):
- self.module.fail_json(msg="{0} does not exist in local filesystem".format(self.local_path))
+ self.module.fail_json(
+ msg="{0} does not exist in local filesystem".format(
+ self.local_path
+ )
+ )
if not os.access(self.local_path, os.R_OK):
- self.module.fail_json(msg="{0} not readable".format(self.local_path))
+ self.module.fail_json(
+ msg="{0} not readable".format(self.local_path)
+ )
if self.is_remote_path_dir():
if self.content:
- self.module.fail_json(msg="When content is specified, remote path should not be an existing directory")
+ self.module.fail_json(
+ msg="When content is specified, remote path should not be an existing directory"
+ )
else:
dest_file = os.path.join(dest_file, os.path.basename(src_file))
if self.no_preserve:
- tar_command = ['tar', '--no-same-permissions', '--no-same-owner', '-xmf', '-']
+ tar_command = [
+ "tar",
+ "--no-same-permissions",
+ "--no-same-owner",
+ "-xmf",
+ "-",
+ ]
else:
- tar_command = ['tar', '-xmf', '-']
+ tar_command = ["tar", "-xmf", "-"]
if dest_file.startswith("/"):
- tar_command.extend(['-C', '/'])
-
- response = stream(self.api_instance.connect_get_namespaced_pod_exec,
- self.name,
- self.namespace,
- command=tar_command,
- stderr=True, stdin=True,
- stdout=True, tty=False,
- _preload_content=False, **self.container_arg)
+ tar_command.extend(["-C", "/"])
+
+ response = stream(
+ self.api_instance.connect_get_namespaced_pod_exec,
+ self.name,
+ self.namespace,
+ command=tar_command,
+ stderr=True,
+ stdin=True,
+ stdout=True,
+ tty=False,
+ _preload_content=False,
+ **self.container_arg
+ )
with TemporaryFile() as tar_buffer:
- with tarfile.open(fileobj=tar_buffer, mode='w') as tar:
+ with tarfile.open(fileobj=tar_buffer, mode="w") as tar:
tar.add(src_file, dest_file)
tar_buffer.seek(0)
commands = []
@@ -407,36 +495,59 @@ def run(self):
response.close()
if stderr:
self.close_temp_file()
- self.module.fail_json(command=tar_command, msg="Failed to copy local file/directory into Pod due to: {0}".format(''.join(stderr)))
+ self.module.fail_json(
+ command=tar_command,
+ msg="Failed to copy local file/directory into Pod due to: {0}".format(
+ "".join(stderr)
+ ),
+ )
self.close_temp_file()
if self.content:
- self.module.exit_json(changed=True, result="Content successfully copied into {0} on remote Pod".format(self.remote_path))
- self.module.exit_json(changed=True, result="{0} successfully copied into remote Pod into {1}".format(self.local_path, self.remote_path))
+ self.module.exit_json(
+ changed=True,
+ result="Content successfully copied into {0} on remote Pod".format(
+ self.remote_path
+ ),
+ )
+ self.module.exit_json(
+ changed=True,
+ result="{0} successfully copied into remote Pod into {1}".format(
+ self.local_path, self.remote_path
+ ),
+ )
except Exception as e:
- self.module.fail_json(msg="Failed to copy local file/directory into Pod due to: {0}".format(to_native(e)))
+ self.module.fail_json(
+ msg="Failed to copy local file/directory into Pod due to: {0}".format(
+ to_native(e)
+ )
+ )
def check_pod(k8s_ansible_mixin, module):
resource = k8s_ansible_mixin.find_resource("Pod", None, True)
- namespace = module.params.get('namespace')
- name = module.params.get('pod')
- container = module.params.get('container')
+ namespace = module.params.get("namespace")
+ name = module.params.get("pod")
+ container = module.params.get("container")
def _fail(exc):
arg = {}
- if hasattr(exc, 'body'):
- msg = "Namespace={0} Kind=Pod Name={1}: Failed requested object: {2}".format(namespace, name, exc.body)
+ if hasattr(exc, "body"):
+ msg = "Namespace={0} Kind=Pod Name={1}: Failed requested object: {2}".format(
+ namespace, name, exc.body
+ )
else:
msg = to_native(exc)
- for attr in ['status', 'reason']:
+ for attr in ["status", "reason"]:
if hasattr(exc, attr):
arg[attr] = getattr(exc, attr)
module.fail_json(msg=msg, **arg)
try:
result = resource.get(name=name, namespace=namespace)
- containers = [c['name'] for c in result.to_dict()['status']['containerStatuses']]
+ containers = [
+ c["name"] for c in result.to_dict()["status"]["containerStatuses"]
+ ]
if container and container not in containers:
module.fail_json(msg="Pod has no container {0}".format(container))
return containers
@@ -457,12 +568,14 @@ def execute_module(module):
k8s_ansible_mixin.client = get_api_client(module=module)
containers = check_pod(k8s_ansible_mixin, module)
- if len(containers) > 1 and module.params.get('container') is None:
- module.fail_json(msg="Pod contains more than 1 container, option 'container' should be set")
+ if len(containers) > 1 and module.params.get("container") is None:
+ module.fail_json(
+ msg="Pod contains more than 1 container, option 'container' should be set"
+ )
try:
- load_class = {'to_pod': K8SCopyToPod, 'from_pod': K8SCopyFromPod}
- state = module.params.get('state')
+ load_class = {"to_pod": K8SCopyToPod, "from_pod": K8SCopyFromPod}
+ state = module.params.get("state")
k8s_copy = load_class.get(state)(module, k8s_ansible_mixin.client)
k8s_copy.run()
except Exception as e:
@@ -471,23 +584,29 @@ def execute_module(module):
def main():
argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
- argument_spec['namespace'] = {'type': 'str', 'required': True}
- argument_spec['pod'] = {'type': 'str', 'required': True}
- argument_spec['container'] = {}
- argument_spec['remote_path'] = {'type': 'path', 'required': True}
- argument_spec['local_path'] = {'type': 'path'}
- argument_spec['content'] = {'type': 'str'}
- argument_spec['state'] = {'type': 'str', 'default': 'to_pod', 'choices': ['to_pod', 'from_pod']}
- argument_spec['no_preserve'] = {'type': 'bool', 'default': False}
-
- module = AnsibleModule(argument_spec=argument_spec,
- mutually_exclusive=[('local_path', 'content')],
- required_if=[('state', 'from_pod', ['local_path'])],
- required_one_of=[['local_path', 'content']],
- supports_check_mode=True)
+ argument_spec["namespace"] = {"type": "str", "required": True}
+ argument_spec["pod"] = {"type": "str", "required": True}
+ argument_spec["container"] = {}
+ argument_spec["remote_path"] = {"type": "path", "required": True}
+ argument_spec["local_path"] = {"type": "path"}
+ argument_spec["content"] = {"type": "str"}
+ argument_spec["state"] = {
+ "type": "str",
+ "default": "to_pod",
+ "choices": ["to_pod", "from_pod"],
+ }
+ argument_spec["no_preserve"] = {"type": "bool", "default": False}
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[("local_path", "content")],
+ required_if=[("state", "from_pod", ["local_path"])],
+ required_one_of=[["local_path", "content"]],
+ supports_check_mode=True,
+ )
execute_module(module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/k8s_drain.py b/plugins/modules/k8s_drain.py
index dd4f96413b..28ff24df00 100644
--- a/plugins/modules/k8s_drain.py
+++ b/plugins/modules/k8s_drain.py
@@ -9,7 +9,7 @@
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: k8s_drain
@@ -83,9 +83,9 @@
requirements:
- python >= 3.6
- kubernetes >= 12.0.0
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
kubernetes.core.k8s_drain:
state: drain
@@ -109,20 +109,24 @@
state: cordon
name: foo
-'''
+"""
-RETURN = r'''
+RETURN = r"""
result:
description:
- The node status and the number of pods deleted.
returned: success
type: str
-'''
+"""
import copy
from datetime import datetime
import time
-from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import AnsibleModule
-from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC
+from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import (
+ AnsibleModule,
+)
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
+ AUTH_ARG_SPEC,
+)
from ansible.module_utils._text import to_native
try:
@@ -144,7 +148,7 @@ def filter_pods(pods, force, ignore_daemonset):
continue
# Any finished pod can be deleted
- if pod.status.phase in ('Succeeded', 'Failed'):
+ if pod.status.phase in ("Succeeded", "Failed"):
to_delete.append((pod.metadata.namespace, pod.metadata.name))
continue
@@ -167,19 +171,29 @@ def filter_pods(pods, force, ignore_daemonset):
warnings, errors = [], []
if unmanaged:
- pod_names = ','.join([pod[0] + "/" + pod[1] for pod in unmanaged])
+ pod_names = ",".join([pod[0] + "/" + pod[1] for pod in unmanaged])
if not force:
- errors.append("cannot delete Pods not managed by ReplicationController, ReplicaSet, Job,"
- " DaemonSet or StatefulSet (use option force set to yes): {0}.".format(pod_names))
+ errors.append(
+ "cannot delete Pods not managed by ReplicationController, ReplicaSet, Job,"
+ " DaemonSet or StatefulSet (use option force set to yes): {0}.".format(
+ pod_names
+ )
+ )
else:
# Pod not managed will be deleted as 'force' is true
- warnings.append("Deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet: {0}.".format(pod_names))
+ warnings.append(
+ "Deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet: {0}.".format(
+ pod_names
+ )
+ )
to_delete += unmanaged
# mirror pods warning
if mirror:
- pod_names = ','.join([pod[0] + "/" + pod[1] for pod in mirror])
- warnings.append("cannot delete mirror Pods using API server: {0}.".format(pod_names))
+ pod_names = ",".join([pod[0] + "/" + pod[1] for pod in mirror])
+ warnings.append(
+ "cannot delete mirror Pods using API server: {0}.".format(pod_names)
+ )
# local storage
if localStorage:
@@ -187,19 +201,24 @@ def filter_pods(pods, force, ignore_daemonset):
# DaemonSet managed Pods
if daemonSet:
- pod_names = ','.join([pod[0] + "/" + pod[1] for pod in daemonSet])
+ pod_names = ",".join([pod[0] + "/" + pod[1] for pod in daemonSet])
if not ignore_daemonset:
- errors.append("cannot delete DaemonSet-managed Pods (use option ignore_daemonset set to yes): {0}.".format(pod_names))
+ errors.append(
+ "cannot delete DaemonSet-managed Pods (use option ignore_daemonset set to yes): {0}.".format(
+ pod_names
+ )
+ )
else:
warnings.append("Ignoring DaemonSet-managed Pods: {0}.".format(pod_names))
return to_delete, warnings, errors
class K8sDrainAnsible(object):
-
def __init__(self, module):
from ansible_collections.kubernetes.core.plugins.module_utils.common import (
- K8sAnsibleMixin, get_api_client)
+ K8sAnsibleMixin,
+ get_api_client,
+ )
self._module = module
self._k8s_ansible_mixin = K8sAnsibleMixin(module)
@@ -215,17 +234,25 @@ def __init__(self, module):
self._k8s_ansible_mixin.warn = self._module.warn
self._k8s_ansible_mixin.warnings = []
- self._api_instance = core_v1_api.CoreV1Api(self._k8s_ansible_mixin.client.client)
+ self._api_instance = core_v1_api.CoreV1Api(
+ self._k8s_ansible_mixin.client.client
+ )
self._k8s_ansible_mixin.check_library_version()
# delete options
- self._drain_options = module.params.get('delete_options', {})
+ self._drain_options = module.params.get("delete_options", {})
self._delete_options = None
- if self._drain_options.get('terminate_grace_period'):
+ if self._drain_options.get("terminate_grace_period"):
self._delete_options = {}
- self._delete_options.update({'apiVersion': 'v1'})
- self._delete_options.update({'kind': 'DeleteOptions'})
- self._delete_options.update({'gracePeriodSeconds': self._drain_options.get('terminate_grace_period')})
+ self._delete_options.update({"apiVersion": "v1"})
+ self._delete_options.update({"kind": "DeleteOptions"})
+ self._delete_options.update(
+ {
+ "gracePeriodSeconds": self._drain_options.get(
+ "terminate_grace_period"
+ )
+ }
+ )
self._changed = False
@@ -241,13 +268,17 @@ def _elapsed_time():
if not pod:
pod = pods.pop()
try:
- response = self._api_instance.read_namespaced_pod(namespace=pod[0], name=pod[1])
+ response = self._api_instance.read_namespaced_pod(
+ namespace=pod[0], name=pod[1]
+ )
if not response:
pod = None
time.sleep(wait_sleep)
except ApiException as exc:
if exc.reason != "Not Found":
- self._module.fail_json(msg="Exception raised: {0}".format(exc.reason))
+ self._module.fail_json(
+ msg="Exception raised: {0}".format(exc.reason)
+ )
pod = None
except Exception as e:
self._module.fail_json(msg="Exception raised: {0}".format(to_native(e)))
@@ -257,37 +288,50 @@ def _elapsed_time():
def evict_pods(self, pods):
for namespace, name in pods:
- definition = {
- 'metadata': {
- 'name': name,
- 'namespace': namespace
- }
- }
+ definition = {"metadata": {"name": name, "namespace": namespace}}
if self._delete_options:
- definition.update({'delete_options': self._delete_options})
+ definition.update({"delete_options": self._delete_options})
try:
- if self._drain_options.get('disable_eviction'):
+ if self._drain_options.get("disable_eviction"):
body = V1DeleteOptions(**definition)
- self._api_instance.delete_namespaced_pod(name=name, namespace=namespace, body=body)
+ self._api_instance.delete_namespaced_pod(
+ name=name, namespace=namespace, body=body
+ )
else:
body = V1beta1Eviction(**definition)
- self._api_instance.create_namespaced_pod_eviction(name=name, namespace=namespace, body=body)
+ self._api_instance.create_namespaced_pod_eviction(
+ name=name, namespace=namespace, body=body
+ )
self._changed = True
except ApiException as exc:
if exc.reason != "Not Found":
- self._module.fail_json(msg="Failed to delete pod {0}/{1} due to: {2}".format(namespace, name, exc.reason))
+ self._module.fail_json(
+ msg="Failed to delete pod {0}/{1} due to: {2}".format(
+ namespace, name, exc.reason
+ )
+ )
except Exception as exc:
- self._module.fail_json(msg="Failed to delete pod {0}/{1} due to: {2}".format(namespace, name, to_native(exc)))
+ self._module.fail_json(
+ msg="Failed to delete pod {0}/{1} due to: {2}".format(
+ namespace, name, to_native(exc)
+ )
+ )
def delete_or_evict_pods(self, node_unschedulable):
# Mark node as unschedulable
result = []
if not node_unschedulable:
self.patch_node(unschedulable=True)
- result.append("node {0} marked unschedulable.".format(self._module.params.get('name')))
+ result.append(
+ "node {0} marked unschedulable.".format(self._module.params.get("name"))
+ )
self._changed = True
else:
- result.append("node {0} already marked unschedulable.".format(self._module.params.get('name')))
+ result.append(
+ "node {0} already marked unschedulable.".format(
+ self._module.params.get("name")
+ )
+ )
def _revert_node_patch():
if self._changed:
@@ -295,77 +339,109 @@ def _revert_node_patch():
self.patch_node(unschedulable=False)
try:
- field_selector = "spec.nodeName={name}".format(name=self._module.params.get('name'))
- pod_list = self._api_instance.list_pod_for_all_namespaces(field_selector=field_selector)
+ field_selector = "spec.nodeName={name}".format(
+ name=self._module.params.get("name")
+ )
+ pod_list = self._api_instance.list_pod_for_all_namespaces(
+ field_selector=field_selector
+ )
# Filter pods
- force = self._drain_options.get('force', False)
- ignore_daemonset = self._drain_options.get('ignore_daemonsets', False)
- pods, warnings, errors = filter_pods(pod_list.items, force, ignore_daemonset)
+ force = self._drain_options.get("force", False)
+ ignore_daemonset = self._drain_options.get("ignore_daemonsets", False)
+ pods, warnings, errors = filter_pods(
+ pod_list.items, force, ignore_daemonset
+ )
if errors:
_revert_node_patch()
- self._module.fail_json(msg="Pod deletion errors: {0}".format(" ".join(errors)))
+ self._module.fail_json(
+ msg="Pod deletion errors: {0}".format(" ".join(errors))
+ )
except ApiException as exc:
if exc.reason != "Not Found":
_revert_node_patch()
- self._module.fail_json(msg="Failed to list pod from node {name} due to: {reason}".format(
- name=self._module.params.get('name'), reason=exc.reason), status=exc.status)
+ self._module.fail_json(
+ msg="Failed to list pod from node {name} due to: {reason}".format(
+ name=self._module.params.get("name"), reason=exc.reason
+ ),
+ status=exc.status,
+ )
pods = []
except Exception as exc:
_revert_node_patch()
- self._module.fail_json(msg="Failed to list pod from node {name} due to: {error}".format(
- name=self._module.params.get('name'), error=to_native(exc)))
+ self._module.fail_json(
+ msg="Failed to list pod from node {name} due to: {error}".format(
+ name=self._module.params.get("name"), error=to_native(exc)
+ )
+ )
# Delete Pods
if pods:
self.evict_pods(pods)
number_pod = len(pods)
- if self._drain_options.get('wait_timeout') is not None:
- warn = self.wait_for_pod_deletion(pods,
- self._drain_options.get('wait_timeout'),
- self._drain_options.get('wait_sleep'))
+ if self._drain_options.get("wait_timeout") is not None:
+ warn = self.wait_for_pod_deletion(
+ pods,
+ self._drain_options.get("wait_timeout"),
+ self._drain_options.get("wait_sleep"),
+ )
if warn:
warnings.append(warn)
result.append("{0} Pod(s) deleted from node.".format(number_pod))
if warnings:
- return dict(result=' '.join(result), warnings=warnings)
- return dict(result=' '.join(result))
+ return dict(result=" ".join(result), warnings=warnings)
+ return dict(result=" ".join(result))
def patch_node(self, unschedulable):
- body = {
- 'spec': {'unschedulable': unschedulable}
- }
+ body = {"spec": {"unschedulable": unschedulable}}
try:
- self._api_instance.patch_node(name=self._module.params.get('name'), body=body)
+ self._api_instance.patch_node(
+ name=self._module.params.get("name"), body=body
+ )
except Exception as exc:
- self._module.fail_json(msg="Failed to patch node due to: {0}".format(to_native(exc)))
+ self._module.fail_json(
+ msg="Failed to patch node due to: {0}".format(to_native(exc))
+ )
def execute_module(self):
- state = self._module.params.get('state')
- name = self._module.params.get('name')
+ state = self._module.params.get("state")
+ name = self._module.params.get("name")
try:
node = self._api_instance.read_node(name=name)
except ApiException as exc:
if exc.reason == "Not Found":
self._module.fail_json(msg="Node {0} not found.".format(name))
- self._module.fail_json(msg="Failed to retrieve node '{0}' due to: {1}".format(name, exc.reason), status=exc.status)
+ self._module.fail_json(
+ msg="Failed to retrieve node '{0}' due to: {1}".format(
+ name, exc.reason
+ ),
+ status=exc.status,
+ )
except Exception as exc:
- self._module.fail_json(msg="Failed to retrieve node '{0}' due to: {1}".format(name, to_native(exc)))
+ self._module.fail_json(
+ msg="Failed to retrieve node '{0}' due to: {1}".format(
+ name, to_native(exc)
+ )
+ )
result = {}
if state == "cordon":
if node.spec.unschedulable:
- self._module.exit_json(result="node {0} already marked unschedulable.".format(name))
+ self._module.exit_json(
+ result="node {0} already marked unschedulable.".format(name)
+ )
self.patch_node(unschedulable=True)
- result['result'] = "node {0} marked unschedulable.".format(name)
+ result["result"] = "node {0} marked unschedulable.".format(name)
self._changed = True
elif state == "uncordon":
if not node.spec.unschedulable:
- self._module.exit_json(result="node {0} already marked schedulable.".format(name))
+ self._module.exit_json(
+ result="node {0} already marked schedulable.".format(name)
+ )
self.patch_node(unschedulable=False)
- result['result'] = "node {0} marked schedulable.".format(name)
+ result["result"] = "node {0} marked schedulable.".format(name)
self._changed = True
else:
@@ -384,16 +460,16 @@ def argspec():
state=dict(default="drain", choices=["cordon", "drain", "uncordon"]),
name=dict(required=True),
delete_options=dict(
- type='dict',
+ type="dict",
default={},
options=dict(
- terminate_grace_period=dict(type='int'),
- force=dict(type='bool', default=False),
- ignore_daemonsets=dict(type='bool', default=False),
- disable_eviction=dict(type='bool', default=False),
- wait_timeout=dict(type='int'),
- wait_sleep=dict(type='int', default=5),
- )
+ terminate_grace_period=dict(type="int"),
+ force=dict(type="bool", default=False),
+ ignore_daemonsets=dict(type="bool", default=False),
+ disable_eviction=dict(type="bool", default=False),
+ wait_timeout=dict(type="int"),
+ wait_sleep=dict(type="int", default=5),
+ ),
),
)
)
@@ -407,5 +483,5 @@ def main():
k8s_drain.execute_module()
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/k8s_exec.py b/plugins/modules/k8s_exec.py
index fcd462eb81..fb36a39b32 100644
--- a/plugins/modules/k8s_exec.py
+++ b/plugins/modules/k8s_exec.py
@@ -9,7 +9,7 @@
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: k8s_exec
@@ -63,9 +63,9 @@
- The command to execute
type: str
required: yes
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Execute a command
kubernetes.core.k8s_exec:
namespace: myproject
@@ -84,9 +84,9 @@
debug:
msg: "cmd failed"
when: command_status.rc != 0
-'''
+"""
-RETURN = r'''
+RETURN = r"""
result:
description:
- The command object
@@ -112,7 +112,7 @@
return_code:
description: The command status code. This attribute is deprecated and will be removed in a future release. Please use rc instead.
type: int
-'''
+"""
import copy
import shlex
@@ -123,10 +123,12 @@
# ImportError are managed by the common module already.
pass
-from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import AnsibleModule
+from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import (
+ AnsibleModule,
+)
from ansible.module_utils._text import to_native
from ansible_collections.kubernetes.core.plugins.module_utils.common import (
- AUTH_ARG_SPEC
+ AUTH_ARG_SPEC,
)
try:
@@ -139,10 +141,10 @@
def argspec():
spec = copy.deepcopy(AUTH_ARG_SPEC)
- spec['namespace'] = dict(type='str', required=True)
- spec['pod'] = dict(type='str', required=True)
- spec['container'] = dict(type='str')
- spec['command'] = dict(type='str', required=True)
+ spec["namespace"] = dict(type="str", required=True)
+ spec["pod"] = dict(type="str", required=True)
+ spec["container"] = dict(type="str")
+ spec["command"] = dict(type="str", required=True)
return spec
@@ -153,8 +155,8 @@ def execute_module(module, k8s_ansible_mixin):
# hack because passing the container as None breaks things
optional_kwargs = {}
- if module.params.get('container'):
- optional_kwargs['container'] = module.params['container']
+ if module.params.get("container"):
+ optional_kwargs["container"] = module.params["container"]
try:
resp = stream(
api.connect_get_namespaced_pod_exec,
@@ -165,10 +167,14 @@ def execute_module(module, k8s_ansible_mixin):
stderr=True,
stdin=False,
tty=False,
- _preload_content=False, **optional_kwargs)
+ _preload_content=False,
+ **optional_kwargs
+ )
except Exception as e:
- module.fail_json(msg="Failed to execute on pod %s"
- " due to : %s" % (module.params.get('pod'), to_native(e)))
+ module.fail_json(
+ msg="Failed to execute on pod %s"
+ " due to : %s" % (module.params.get("pod"), to_native(e))
+ )
stdout, stderr, rc = [], [], 0
while resp.is_open():
resp.update(timeout=1)
@@ -178,34 +184,37 @@ def execute_module(module, k8s_ansible_mixin):
stderr.append(resp.read_stderr())
err = resp.read_channel(3)
err = yaml.safe_load(err)
- if err['status'] == 'Success':
+ if err["status"] == "Success":
rc = 0
else:
- rc = int(err['details']['causes'][0]['message'])
+ rc = int(err["details"]["causes"][0]["message"])
- module.deprecate("The 'return_code' return key is deprecated. Please use 'rc' instead.", version="4.0.0", collection_name="kubernetes.core")
+ module.deprecate(
+ "The 'return_code' return key is deprecated. Please use 'rc' instead.",
+ version="4.0.0",
+ collection_name="kubernetes.core",
+ )
module.exit_json(
# Some command might change environment, but ultimately failing at end
changed=True,
stdout="".join(stdout),
stderr="".join(stderr),
rc=rc,
- return_code=rc
+ return_code=rc,
)
def main():
- module = AnsibleModule(
- argument_spec=argspec(),
- supports_check_mode=True,
- )
+ module = AnsibleModule(argument_spec=argspec(), supports_check_mode=True,)
from ansible_collections.kubernetes.core.plugins.module_utils.common import (
- K8sAnsibleMixin, get_api_client)
+ K8sAnsibleMixin,
+ get_api_client,
+ )
k8s_ansible_mixin = K8sAnsibleMixin(module)
k8s_ansible_mixin.client = get_api_client(module=module)
execute_module(module, k8s_ansible_mixin)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/k8s_info.py b/plugins/modules/k8s_info.py
index 50059e4124..6983314d98 100644
--- a/plugins/modules/k8s_info.py
+++ b/plugins/modules/k8s_info.py
@@ -9,7 +9,7 @@
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: k8s_info
short_description: Describe Kubernetes (K8s) objects
@@ -52,9 +52,9 @@
- "python >= 3.6"
- "kubernetes >= 12.0.0"
- "PyYAML >= 3.11"
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Get an existing Service object
kubernetes.core.k8s_info:
api_version: v1
@@ -109,9 +109,9 @@
namespace: default
wait_sleep: 10
wait_timeout: 360
-'''
+"""
-RETURN = r'''
+RETURN = r"""
api_found:
description:
- Whether the specified api_version and kind were successfully mapped to an existing API on the targeted cluster.
@@ -144,12 +144,17 @@
description: Current status details for the object.
returned: success
type: dict
-'''
+"""
import copy
-from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import AnsibleModule
-from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (AUTH_ARG_SPEC, WAIT_ARG_SPEC)
+from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import (
+ AnsibleModule,
+)
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
+ AUTH_ARG_SPEC,
+ WAIT_ARG_SPEC,
+)
def execute_module(module, k8s_ansible_mixin):
@@ -174,11 +179,11 @@ def argspec():
args.update(
dict(
kind=dict(required=True),
- api_version=dict(default='v1', aliases=['api', 'version']),
+ api_version=dict(default="v1", aliases=["api", "version"]),
name=dict(),
namespace=dict(),
- label_selectors=dict(type='list', elements='str', default=[]),
- field_selectors=dict(type='list', elements='str', default=[]),
+ label_selectors=dict(type="list", elements="str", default=[]),
+ field_selectors=dict(type="list", elements="str", default=[]),
)
)
return args
@@ -187,12 +192,14 @@ def argspec():
def main():
module = AnsibleModule(argument_spec=argspec(), supports_check_mode=True)
from ansible_collections.kubernetes.core.plugins.module_utils.common import (
- K8sAnsibleMixin, get_api_client)
+ K8sAnsibleMixin,
+ get_api_client,
+ )
k8s_ansible_mixin = K8sAnsibleMixin(module)
k8s_ansible_mixin.client = get_api_client(module=module)
execute_module(module, k8s_ansible_mixin)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/k8s_json_patch.py b/plugins/modules/k8s_json_patch.py
index f3adf8a39e..bc4442e316 100644
--- a/plugins/modules/k8s_json_patch.py
+++ b/plugins/modules/k8s_json_patch.py
@@ -8,7 +8,7 @@
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: k8s_json_patch
short_description: Apply JSON patch operations to existing objects
@@ -66,9 +66,9 @@
- "kubernetes >= 12.0.0"
- "PyYAML >= 3.11"
- "jsonpatch"
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Apply multiple patch operations to an existing Pod
kubernetes.core.k8s_json_patch:
kind: Pod
@@ -81,9 +81,9 @@
- op: replace
patch: /spec/containers/0/image
value: nginx
-'''
+"""
-RETURN = r'''
+RETURN = r"""
result:
description: The modified object.
returned: success
@@ -122,17 +122,24 @@
"msg": "Failed to import the required Python library (jsonpatch) ...",
"exception": "Traceback (most recent call last): ..."
}
-'''
+"""
import copy
import traceback
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils._text import to_native
-from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import AnsibleModule
-from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC, WAIT_ARG_SPEC
+from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import (
+ AnsibleModule,
+)
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
+ AUTH_ARG_SPEC,
+ WAIT_ARG_SPEC,
+)
from ansible_collections.kubernetes.core.plugins.module_utils.common import (
- get_api_client, K8sAnsibleMixin)
+ get_api_client,
+ K8sAnsibleMixin,
+)
try:
from kubernetes.dynamic.exceptions import DynamicApiError
@@ -143,6 +150,7 @@
JSON_PATCH_IMPORT_ERR = None
try:
import jsonpatch
+
HAS_JSON_PATCH = True
except ImportError:
HAS_JSON_PATCH = False
@@ -150,33 +158,18 @@
JSON_PATCH_ARGS = {
- 'api_version': {
- 'default': 'v1',
- 'aliases': ['api', 'version'],
- },
- "kind": {
- "type": "str",
- "required": True,
- },
- "namespace": {
- "type": "str",
- },
- "name": {
- "type": "str",
- "required": True,
- },
- "patch": {
- "type": "list",
- "required": True,
- "elements": "dict",
- },
+ "api_version": {"default": "v1", "aliases": ["api", "version"]},
+ "kind": {"type": "str", "required": True},
+ "namespace": {"type": "str"},
+ "name": {"type": "str", "required": True},
+ "patch": {"type": "list", "required": True, "elements": "dict"},
}
def json_patch(existing, patch):
if not HAS_JSON_PATCH:
error = {
- "msg": missing_required_lib('jsonpatch'),
+ "msg": missing_required_lib("jsonpatch"),
"exception": JSON_PATCH_IMPORT_ERR,
}
return None, error
@@ -185,16 +178,10 @@ def json_patch(existing, patch):
patched = patch.apply(existing)
return patched, None
except jsonpatch.InvalidJsonPatch as e:
- error = {
- "msg": "Invalid JSON patch",
- "exception": e
- }
+ error = {"msg": "Invalid JSON patch", "exception": e}
return None, error
except jsonpatch.JsonPatchConflict as e:
- error = {
- "msg": "Patch could not be applied due to a conflict",
- "exception": e
- }
+ error = {"msg": "Patch could not be applied due to a conflict", "exception": e}
return None, error
@@ -209,15 +196,14 @@ def execute_module(k8s_module, module):
wait_sleep = module.params.get("wait_sleep")
wait_timeout = module.params.get("wait_timeout")
wait_condition = None
- if module.params.get("wait_condition") and module.params.get("wait_condition").get("type"):
- wait_condition = module.params['wait_condition']
+ if module.params.get("wait_condition") and module.params.get("wait_condition").get(
+ "type"
+ ):
+ wait_condition = module.params["wait_condition"]
# definition is needed for wait
definition = {
"kind": kind,
- "metadata": {
- "name": name,
- "namespace": namespace,
- }
+ "metadata": {"name": name, "namespace": namespace},
}
def build_error_msg(kind, name, msg):
@@ -228,11 +214,18 @@ def build_error_msg(kind, name, msg):
try:
existing = resource.get(name=name, namespace=namespace)
except DynamicApiError as exc:
- msg = 'Failed to retrieve requested object: {0}'.format(exc.body)
- module.fail_json(msg=build_error_msg(kind, name, msg), error=exc.status, status=exc.status, reason=exc.reason)
+ msg = "Failed to retrieve requested object: {0}".format(exc.body)
+ module.fail_json(
+ msg=build_error_msg(kind, name, msg),
+ error=exc.status,
+ status=exc.status,
+ reason=exc.reason,
+ )
except ValueError as exc:
- msg = 'Failed to retrieve requested object: {0}'.format(to_native(exc))
- module.fail_json(msg=build_error_msg(kind, name, msg), error='', status='', reason='')
+ msg = "Failed to retrieve requested object: {0}".format(to_native(exc))
+ module.fail_json(
+ msg=build_error_msg(kind, name, msg), error="", status="", reason=""
+ )
if module.check_mode and not k8s_module.supports_dry_run:
obj, error = json_patch(existing.to_dict(), patch)
@@ -243,18 +236,28 @@ def build_error_msg(kind, name, msg):
if module.check_mode:
params["dry_run"] = "All"
try:
- obj = resource.patch(patch, name=name, namespace=namespace, content_type="application/json-patch+json", **params).to_dict()
+ obj = resource.patch(
+ patch,
+ name=name,
+ namespace=namespace,
+ content_type="application/json-patch+json",
+ **params
+ ).to_dict()
except DynamicApiError as exc:
msg = "Failed to patch existing object: {0}".format(exc.body)
- module.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
+ module.fail_json(
+ msg=msg, error=exc.status, status=exc.status, reason=exc.reason
+ )
except Exception as exc:
msg = "Failed to patch existing object: {0}".format(exc)
- module.fail_json(msg=msg, error=to_native(exc), status='', reason='')
+ module.fail_json(msg=msg, error=to_native(exc), status="", reason="")
success = True
result = {"result": obj}
if wait and not module.check_mode:
- success, result['result'], result['duration'] = k8s_module.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
+ success, result["result"], result["duration"] = k8s_module.wait(
+ resource, definition, wait_sleep, wait_timeout, condition=wait_condition
+ )
match, diffs = k8s_module.diff_objects(existing.to_dict(), obj)
result["changed"] = not match
if module._diff:
diff --git a/plugins/modules/k8s_log.py b/plugins/modules/k8s_log.py
index 0c07ce7077..0ba7791278 100644
--- a/plugins/modules/k8s_log.py
+++ b/plugins/modules/k8s_log.py
@@ -9,7 +9,7 @@
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: k8s_log
short_description: Fetch logs from Kubernetes resources
@@ -65,9 +65,9 @@
- "python >= 3.6"
- "kubernetes >= 12.0.0"
- "PyYAML >= 3.11"
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Get a log from a Pod
kubernetes.core.k8s_log:
name: example-1
@@ -100,9 +100,9 @@
namespace: testing
name: example
register: log
-'''
+"""
-RETURN = r'''
+RETURN = r"""
log:
type: str
description:
@@ -113,15 +113,20 @@
description:
- The log of the object, split on newlines
returned: success
-'''
+"""
import copy
-from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import AnsibleModule
+from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import (
+ AnsibleModule,
+)
from ansible.module_utils.six import PY2
-from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (AUTH_ARG_SPEC, NAME_ARG_SPEC)
+from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
+ AUTH_ARG_SPEC,
+ NAME_ARG_SPEC,
+)
def argspec():
@@ -129,55 +134,62 @@ def argspec():
args.update(NAME_ARG_SPEC)
args.update(
dict(
- kind=dict(type='str', default='Pod'),
+ kind=dict(type="str", default="Pod"),
container=dict(),
since_seconds=dict(),
- label_selectors=dict(type='list', elements='str', default=[]),
+ label_selectors=dict(type="list", elements="str", default=[]),
)
)
return args
def execute_module(module, k8s_ansible_mixin):
- name = module.params.get('name')
- namespace = module.params.get('namespace')
- label_selector = ','.join(module.params.get('label_selectors', {}))
+ name = module.params.get("name")
+ namespace = module.params.get("namespace")
+ label_selector = ",".join(module.params.get("label_selectors", {}))
if name and label_selector:
- module.fail(msg='Only one of name or label_selectors can be provided')
+ module.fail(msg="Only one of name or label_selectors can be provided")
- resource = k8s_ansible_mixin.find_resource(module.params['kind'], module.params['api_version'], fail=True)
- v1_pods = k8s_ansible_mixin.find_resource('Pod', 'v1', fail=True)
+ resource = k8s_ansible_mixin.find_resource(
+ module.params["kind"], module.params["api_version"], fail=True
+ )
+ v1_pods = k8s_ansible_mixin.find_resource("Pod", "v1", fail=True)
- if 'log' not in resource.subresources:
+ if "log" not in resource.subresources:
if not name:
- module.fail(msg='name must be provided for resources that do not support the log subresource')
+ module.fail(
+ msg="name must be provided for resources that do not support the log subresource"
+ )
instance = resource.get(name=name, namespace=namespace)
- label_selector = ','.join(extract_selectors(module, instance))
+ label_selector = ",".join(extract_selectors(module, instance))
resource = v1_pods
if label_selector:
instances = v1_pods.get(namespace=namespace, label_selector=label_selector)
if not instances.items:
- module.fail(msg='No pods in namespace {0} matched selector {1}'.format(namespace, label_selector))
+ module.fail(
+ msg="No pods in namespace {0} matched selector {1}".format(
+ namespace, label_selector
+ )
+ )
# This matches the behavior of kubectl when logging pods via a selector
name = instances.items[0].metadata.name
resource = v1_pods
kwargs = {}
- if module.params.get('container'):
- kwargs['query_params'] = dict(container=module.params['container'])
+ if module.params.get("container"):
+ kwargs["query_params"] = dict(container=module.params["container"])
- if module.params.get('since_seconds'):
- kwargs.setdefault('query_params', {}).update({'sinceSeconds': module.params['since_seconds']})
+ if module.params.get("since_seconds"):
+ kwargs.setdefault("query_params", {}).update(
+ {"sinceSeconds": module.params["since_seconds"]}
+ )
- log = serialize_log(resource.log.get(
- name=name,
- namespace=namespace,
- serialize=False,
- **kwargs
- ))
+ log = serialize_log(
+ resource.log.get(name=name, namespace=namespace, serialize=False, **kwargs)
+ )
- module.exit_json(changed=False, log=log, log_lines=log.split('\n'))
+ module.exit_json(changed=False, log=log, log_lines=log.split("\n"))
def extract_selectors(module, instance):
@@ -185,35 +197,46 @@ def extract_selectors(module, instance):
# https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
selectors = []
if not instance.spec.selector:
- module.fail(msg='{0} {1} does not support the log subresource directly, and no Pod selector was found on the object'.format(
- '/'.join(instance.group, instance.apiVersion), instance.kind))
+ module.fail(
+ msg="{0} {1} does not support the log subresource directly, and no Pod selector was found on the object".format(
+ "/".join(instance.group, instance.apiVersion), instance.kind
+ )
+ )
- if not (instance.spec.selector.matchLabels or instance.spec.selector.matchExpressions):
+ if not (
+ instance.spec.selector.matchLabels or instance.spec.selector.matchExpressions
+ ):
# A few resources (like DeploymentConfigs) just use a simple key:value style instead of supporting expressions
for k, v in dict(instance.spec.selector).items():
- selectors.append('{0}={1}'.format(k, v))
+ selectors.append("{0}={1}".format(k, v))
return selectors
if instance.spec.selector.matchLabels:
for k, v in dict(instance.spec.selector.matchLabels).items():
- selectors.append('{0}={1}'.format(k, v))
+ selectors.append("{0}={1}".format(k, v))
if instance.spec.selector.matchExpressions:
for expression in instance.spec.selector.matchExpressions:
operator = expression.operator
- if operator == 'Exists':
+ if operator == "Exists":
selectors.append(expression.key)
- elif operator == 'DoesNotExist':
- selectors.append('!{0}'.format(expression.key))
- elif operator in ['In', 'NotIn']:
- selectors.append('{key} {operator} {values}'.format(
- key=expression.key,
- operator=operator.lower(),
- values='({0})'.format(', '.join(expression.values))
- ))
+ elif operator == "DoesNotExist":
+ selectors.append("!{0}".format(expression.key))
+ elif operator in ["In", "NotIn"]:
+ selectors.append(
+ "{key} {operator} {values}".format(
+ key=expression.key,
+ operator=operator.lower(),
+ values="({0})".format(", ".join(expression.values)),
+ )
+ )
else:
- module.fail(msg='The k8s_log module does not support the {0} matchExpression operator'.format(operator.lower()))
+ module.fail(
+ msg="The k8s_log module does not support the {0} matchExpression operator".format(
+ operator.lower()
+ )
+ )
return selectors
@@ -221,18 +244,20 @@ def extract_selectors(module, instance):
def serialize_log(response):
if PY2:
return response.data
- return response.data.decode('utf8')
+ return response.data.decode("utf8")
def main():
module = AnsibleModule(argument_spec=argspec(), supports_check_mode=True)
from ansible_collections.kubernetes.core.plugins.module_utils.common import (
- K8sAnsibleMixin, get_api_client)
+ K8sAnsibleMixin,
+ get_api_client,
+ )
k8s_ansible_mixin = K8sAnsibleMixin(module)
k8s_ansible_mixin.client = get_api_client(module=module)
execute_module(module, k8s_ansible_mixin)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/k8s_rollback.py b/plugins/modules/k8s_rollback.py
index f12d3da432..236ec99790 100644
--- a/plugins/modules/k8s_rollback.py
+++ b/plugins/modules/k8s_rollback.py
@@ -5,10 +5,11 @@
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: k8s_rollback
short_description: Rollback Kubernetes (K8S) Deployments and DaemonSets
version_added: "1.0.0"
@@ -34,18 +35,18 @@
- "python >= 3.6"
- "kubernetes >= 12.0.0"
- "PyYAML >= 3.11"
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Rollback a failed deployment
kubernetes.core.k8s_rollback:
api_version: apps/v1
kind: Deployment
name: web
namespace: testing
-'''
+"""
-RETURN = r'''
+RETURN = r"""
rollback_info:
description:
- The object that was rolled back.
@@ -74,25 +75,29 @@
description: Current status details for the object.
returned: success
type: dict
-'''
+"""
import copy
-from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import AnsibleModule
+from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import (
+ AnsibleModule,
+)
from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
- AUTH_ARG_SPEC, NAME_ARG_SPEC)
+ AUTH_ARG_SPEC,
+ NAME_ARG_SPEC,
+)
def get_managed_resource(module):
managed_resource = {}
- kind = module.params['kind']
+ kind = module.params["kind"]
if kind == "DaemonSet":
- managed_resource['kind'] = "ControllerRevision"
- managed_resource['api_version'] = "apps/v1"
+ managed_resource["kind"] = "ControllerRevision"
+ managed_resource["api_version"] = "apps/v1"
elif kind == "Deployment":
- managed_resource['kind'] = "ReplicaSet"
- managed_resource['api_version'] = "apps/v1"
+ managed_resource["kind"] = "ReplicaSet"
+ managed_resource["api_version"] = "apps/v1"
else:
module.fail(msg="Cannot perform rollback on resource of kind {0}".format(kind))
return managed_resource
@@ -102,80 +107,89 @@ def execute_module(module, k8s_ansible_mixin):
results = []
resources = k8s_ansible_mixin.kubernetes_facts(
- module.params['kind'],
- module.params['api_version'],
- module.params['name'],
- module.params['namespace'],
- module.params['label_selectors'],
- module.params['field_selectors'])
-
- for resource in resources['resources']:
+ module.params["kind"],
+ module.params["api_version"],
+ module.params["name"],
+ module.params["namespace"],
+ module.params["label_selectors"],
+ module.params["field_selectors"],
+ )
+
+ for resource in resources["resources"]:
result = perform_action(module, k8s_ansible_mixin, resource)
results.append(result)
- module.exit_json(**{
- 'changed': True,
- 'rollback_info': results
- })
+ module.exit_json(**{"changed": True, "rollback_info": results})
def perform_action(module, k8s_ansible_mixin, resource):
- if module.params['kind'] == "DaemonSet":
- current_revision = resource['metadata']['generation']
- elif module.params['kind'] == "Deployment":
- current_revision = resource['metadata']['annotations']['deployment.kubernetes.io/revision']
+ if module.params["kind"] == "DaemonSet":
+ current_revision = resource["metadata"]["generation"]
+ elif module.params["kind"] == "Deployment":
+ current_revision = resource["metadata"]["annotations"][
+ "deployment.kubernetes.io/revision"
+ ]
managed_resource = get_managed_resource(module)
managed_resources = k8s_ansible_mixin.kubernetes_facts(
- managed_resource['kind'],
- managed_resource['api_version'],
- '',
- module.params['namespace'],
- resource['spec']
- ['selector']
- ['matchLabels'],
- '')
-
- prev_managed_resource = get_previous_revision(managed_resources['resources'],
- current_revision)
-
- if module.params['kind'] == "Deployment":
- del prev_managed_resource['spec']['template']['metadata']['labels']['pod-template-hash']
-
- resource_patch = [{
- "op": "replace",
- "path": "/spec/template",
- "value": prev_managed_resource['spec']['template']
- }, {
- "op": "replace",
- "path": "/metadata/annotations",
- "value": {
- "deployment.kubernetes.io/revision": prev_managed_resource['metadata']['annotations']['deployment.kubernetes.io/revision']
- }
- }]
-
- api_target = 'deployments'
- content_type = 'application/json-patch+json'
- elif module.params['kind'] == "DaemonSet":
+ managed_resource["kind"],
+ managed_resource["api_version"],
+ "",
+ module.params["namespace"],
+ resource["spec"]["selector"]["matchLabels"],
+ "",
+ )
+
+ prev_managed_resource = get_previous_revision(
+ managed_resources["resources"], current_revision
+ )
+
+ if module.params["kind"] == "Deployment":
+ del prev_managed_resource["spec"]["template"]["metadata"]["labels"][
+ "pod-template-hash"
+ ]
+
+ resource_patch = [
+ {
+ "op": "replace",
+ "path": "/spec/template",
+ "value": prev_managed_resource["spec"]["template"],
+ },
+ {
+ "op": "replace",
+ "path": "/metadata/annotations",
+ "value": {
+ "deployment.kubernetes.io/revision": prev_managed_resource[
+ "metadata"
+ ]["annotations"]["deployment.kubernetes.io/revision"]
+ },
+ },
+ ]
+
+ api_target = "deployments"
+ content_type = "application/json-patch+json"
+ elif module.params["kind"] == "DaemonSet":
resource_patch = prev_managed_resource["data"]
- api_target = 'daemonsets'
- content_type = 'application/strategic-merge-patch+json'
+ api_target = "daemonsets"
+ content_type = "application/strategic-merge-patch+json"
rollback = k8s_ansible_mixin.client.request(
"PATCH",
- "/apis/{0}/namespaces/{1}/{2}/{3}"
- .format(module.params['api_version'],
- module.params['namespace'],
- api_target,
- module.params['name']),
+ "/apis/{0}/namespaces/{1}/{2}/{3}".format(
+ module.params["api_version"],
+ module.params["namespace"],
+ api_target,
+ module.params["name"],
+ ),
body=resource_patch,
- content_type=content_type)
+ content_type=content_type,
+ )
- result = {'changed': True}
- result['method'] = 'patch'
- result['body'] = resource_patch
- result['resources'] = rollback.to_dict()
+ result = {"changed": True}
+ result["method"] = "patch"
+ result["body"] = resource_patch
+ result["resources"] = rollback.to_dict()
return result
@@ -184,8 +198,8 @@ def argspec():
args.update(NAME_ARG_SPEC)
args.update(
dict(
- label_selectors=dict(type='list', elements='str', default=[]),
- field_selectors=dict(type='list', elements='str', default=[]),
+ label_selectors=dict(type="list", elements="str", default=[]),
+ field_selectors=dict(type="list", elements="str", default=[]),
)
)
return args
@@ -193,27 +207,40 @@ def argspec():
def get_previous_revision(all_resources, current_revision):
for resource in all_resources:
- if resource['kind'] == 'ReplicaSet':
- if int(resource['metadata']
- ['annotations']
- ['deployment.kubernetes.io/revision']) == int(current_revision) - 1:
+ if resource["kind"] == "ReplicaSet":
+ if (
+ int(
+ resource["metadata"]["annotations"][
+ "deployment.kubernetes.io/revision"
+ ]
+ )
+ == int(current_revision) - 1
+ ):
return resource
- elif resource['kind'] == 'ControllerRevision':
- if int(resource['metadata']
- ['annotations']
- ['deprecated.daemonset.template.generation']) == int(current_revision) - 1:
+ elif resource["kind"] == "ControllerRevision":
+ if (
+ int(
+ resource["metadata"]["annotations"][
+ "deprecated.daemonset.template.generation"
+ ]
+ )
+ == int(current_revision) - 1
+ ):
return resource
return None
def main():
module = AnsibleModule(argument_spec=argspec(), supports_check_mode=True)
- from ansible_collections.kubernetes.core.plugins.module_utils.common import (K8sAnsibleMixin, get_api_client)
+ from ansible_collections.kubernetes.core.plugins.module_utils.common import (
+ K8sAnsibleMixin,
+ get_api_client,
+ )
k8s_ansible_mixin = K8sAnsibleMixin(module)
k8s_ansible_mixin.client = get_api_client(module=module)
execute_module(module, k8s_ansible_mixin)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/k8s_scale.py b/plugins/modules/k8s_scale.py
index 58d178eebc..49f139377c 100644
--- a/plugins/modules/k8s_scale.py
+++ b/plugins/modules/k8s_scale.py
@@ -10,7 +10,7 @@
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: k8s_scale
@@ -48,9 +48,9 @@
- "python >= 3.6"
- "kubernetes >= 12.0.0"
- "PyYAML >= 3.11"
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Scale deployment up, and extend timeout
kubernetes.core.k8s_scale:
api_version: v1
@@ -105,9 +105,9 @@
label_selectors:
- app=test
continue_on_error: true
-'''
+"""
-RETURN = r'''
+RETURN = r"""
result:
description:
- If a change was made, will return the patched object, otherwise returns the existing object.
@@ -139,133 +139,166 @@
returned: when C(wait) is true
type: int
sample: 48
-'''
+"""
import copy
-from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import AnsibleModule
+from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import (
+ AnsibleModule,
+)
from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
- AUTH_ARG_SPEC, RESOURCE_ARG_SPEC, NAME_ARG_SPEC)
+ AUTH_ARG_SPEC,
+ RESOURCE_ARG_SPEC,
+ NAME_ARG_SPEC,
+)
SCALE_ARG_SPEC = {
- 'replicas': {'type': 'int', 'required': True},
- 'current_replicas': {'type': 'int'},
- 'resource_version': {},
- 'wait': {'type': 'bool', 'default': True},
- 'wait_timeout': {'type': 'int', 'default': 20},
- 'wait_sleep': {'type': 'int', 'default': 5},
+ "replicas": {"type": "int", "required": True},
+ "current_replicas": {"type": "int"},
+ "resource_version": {},
+ "wait": {"type": "bool", "default": True},
+ "wait_timeout": {"type": "int", "default": 20},
+ "wait_sleep": {"type": "int", "default": 5},
}
-def execute_module(module, k8s_ansible_mixin,):
+def execute_module(
+ module, k8s_ansible_mixin,
+):
k8s_ansible_mixin.set_resource_definitions(module)
definition = k8s_ansible_mixin.resource_definitions[0]
- name = definition['metadata']['name']
- namespace = definition['metadata'].get('namespace')
- api_version = definition['apiVersion']
- kind = definition['kind']
- current_replicas = module.params.get('current_replicas')
- replicas = module.params.get('replicas')
- resource_version = module.params.get('resource_version')
+ name = definition["metadata"]["name"]
+ namespace = definition["metadata"].get("namespace")
+ api_version = definition["apiVersion"]
+ kind = definition["kind"]
+ current_replicas = module.params.get("current_replicas")
+ replicas = module.params.get("replicas")
+ resource_version = module.params.get("resource_version")
- label_selectors = module.params.get('label_selectors')
+ label_selectors = module.params.get("label_selectors")
if not label_selectors:
label_selectors = []
- continue_on_error = module.params.get('continue_on_error')
+ continue_on_error = module.params.get("continue_on_error")
- wait = module.params.get('wait')
- wait_time = module.params.get('wait_timeout')
- wait_sleep = module.params.get('wait_sleep')
+ wait = module.params.get("wait")
+ wait_time = module.params.get("wait_timeout")
+ wait_sleep = module.params.get("wait_sleep")
existing = None
existing_count = None
return_attributes = dict(result=dict())
if module._diff:
- return_attributes['diff'] = dict()
+ return_attributes["diff"] = dict()
if wait:
- return_attributes['duration'] = 0
+ return_attributes["duration"] = 0
resource = k8s_ansible_mixin.find_resource(kind, api_version, fail=True)
- from ansible_collections.kubernetes.core.plugins.module_utils.common import NotFoundError
+ from ansible_collections.kubernetes.core.plugins.module_utils.common import (
+ NotFoundError,
+ )
multiple_scale = False
try:
- existing = resource.get(name=name, namespace=namespace, label_selector=','.join(label_selectors))
- if existing.kind.endswith('List'):
+ existing = resource.get(
+ name=name, namespace=namespace, label_selector=",".join(label_selectors)
+ )
+ if existing.kind.endswith("List"):
existing_items = existing.items
multiple_scale = len(existing_items) > 1
else:
existing_items = [existing]
except NotFoundError as exc:
- module.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc),
- error=exc.value.get('status'))
+ module.fail_json(
+ msg="Failed to retrieve requested object: {0}".format(exc),
+ error=exc.value.get("status"),
+ )
if multiple_scale:
# when scaling multiple resource, the 'result' is changed to 'results' and is a list
- return_attributes = {'results': []}
+ return_attributes = {"results": []}
changed = False
def _continue_or_fail(error):
if multiple_scale and continue_on_error:
if "errors" not in return_attributes:
- return_attributes['errors'] = []
- return_attributes['errors'].append({'error': error, 'failed': True})
+ return_attributes["errors"] = []
+ return_attributes["errors"].append({"error": error, "failed": True})
else:
module.fail_json(msg=error, **return_attributes)
def _continue_or_exit(warn):
if multiple_scale:
- return_attributes['results'].append({'warning': warn, 'changed': False})
+ return_attributes["results"].append({"warning": warn, "changed": False})
else:
module.exit_json(warning=warn, **return_attributes)
for existing in existing_items:
- if module.params['kind'] == 'job':
+ if module.params["kind"] == "job":
existing_count = existing.spec.parallelism
- elif hasattr(existing.spec, 'replicas'):
+ elif hasattr(existing.spec, "replicas"):
existing_count = existing.spec.replicas
if existing_count is None:
- error = 'Failed to retrieve the available count for object kind={0} name={1} namespace={2}.'.format(
- existing.kind, existing.metadata.name, existing.metadata.namespace)
+ error = "Failed to retrieve the available count for object kind={0} name={1} namespace={2}.".format(
+ existing.kind, existing.metadata.name, existing.metadata.namespace
+ )
_continue_or_fail(error)
continue
if resource_version and resource_version != existing.metadata.resourceVersion:
- warn = 'expected resource version {0} does not match with actual {1} for object kind={2} name={3} namespace={4}.'.format(
- resource_version, existing.metadata.resourceVersion, existing.kind, existing.metadata.name, existing.metadata.namespace)
+ warn = "expected resource version {0} does not match with actual {1} for object kind={2} name={3} namespace={4}.".format(
+ resource_version,
+ existing.metadata.resourceVersion,
+ existing.kind,
+ existing.metadata.name,
+ existing.metadata.namespace,
+ )
_continue_or_exit(warn)
continue
if current_replicas is not None and existing_count != current_replicas:
- warn = 'current replicas {0} does not match with actual {1} for object kind={2} name={3} namespace={4}.'.format(
- current_replicas, existing_count, existing.kind, existing.metadata.name, existing.metadata.namespace)
+ warn = "current replicas {0} does not match with actual {1} for object kind={2} name={3} namespace={4}.".format(
+ current_replicas,
+ existing_count,
+ existing.kind,
+ existing.metadata.name,
+ existing.metadata.namespace,
+ )
_continue_or_exit(warn)
continue
if existing_count != replicas:
if not module.check_mode:
- if module.params['kind'] == 'job':
+ if module.params["kind"] == "job":
existing.spec.parallelism = replicas
result = resource.patch(existing.to_dict()).to_dict()
else:
- result = scale(module, k8s_ansible_mixin, resource, existing, replicas, wait, wait_time, wait_sleep)
- changed = changed or result['changed']
+ result = scale(
+ module,
+ k8s_ansible_mixin,
+ resource,
+ existing,
+ replicas,
+ wait,
+ wait_time,
+ wait_sleep,
+ )
+ changed = changed or result["changed"]
else:
name = existing.metadata.name
namespace = existing.metadata.namespace
existing = resource.get(name=name, namespace=namespace)
- result = {'changed': False, 'result': existing.to_dict()}
+ result = {"changed": False, "result": existing.to_dict()}
if module._diff:
- result['diff'] = {}
+ result["diff"] = {}
if wait:
- result['duration'] = 0
+ result["duration"] = 0
# append result to the return attribute
if multiple_scale:
- return_attributes['results'].append(result)
+ return_attributes["results"].append(result)
else:
module.exit_json(**result)
@@ -277,22 +310,35 @@ def argspec():
args.update(RESOURCE_ARG_SPEC)
args.update(NAME_ARG_SPEC)
args.update(AUTH_ARG_SPEC)
- args.update({'label_selectors': {'type': 'list', 'elements': 'str', 'default': []}})
- args.update(({'continue_on_error': {'type': 'bool', 'default': False}}))
+ args.update({"label_selectors": {"type": "list", "elements": "str", "default": []}})
+ args.update(({"continue_on_error": {"type": "bool", "default": False}}))
return args
-def scale(module, k8s_ansible_mixin, resource, existing_object, replicas, wait, wait_time, wait_sleep):
+def scale(
+ module,
+ k8s_ansible_mixin,
+ resource,
+ existing_object,
+ replicas,
+ wait,
+ wait_time,
+ wait_sleep,
+):
name = existing_object.metadata.name
namespace = existing_object.metadata.namespace
kind = existing_object.kind
- if not hasattr(resource, 'scale'):
+ if not hasattr(resource, "scale"):
module.fail_json(
msg="Cannot perform scale on resource of kind {0}".format(resource.kind)
)
- scale_obj = {'kind': kind, 'metadata': {'name': name, 'namespace': namespace}, 'spec': {'replicas': replicas}}
+ scale_obj = {
+ "kind": kind,
+ "metadata": {"name": name, "namespace": namespace},
+ "spec": {"replicas": replicas},
+ }
existing = resource.get(name=name, namespace=namespace)
@@ -304,13 +350,15 @@ def scale(module, k8s_ansible_mixin, resource, existing_object, replicas, wait,
k8s_obj = resource.get(name=name, namespace=namespace).to_dict()
match, diffs = k8s_ansible_mixin.diff_objects(existing.to_dict(), k8s_obj)
result = dict()
- result['result'] = k8s_obj
- result['changed'] = not match
+ result["result"] = k8s_obj
+ result["changed"] = not match
if module._diff:
- result['diff'] = diffs
+ result["diff"] = diffs
if wait:
- success, result['result'], result['duration'] = k8s_ansible_mixin.wait(resource, scale_obj, wait_sleep, wait_time)
+ success, result["result"], result["duration"] = k8s_ansible_mixin.wait(
+ resource, scale_obj, wait_sleep, wait_time
+ )
if not success:
module.fail_json(msg="Resource scaling timed out", **result)
return result
@@ -318,15 +366,22 @@ def scale(module, k8s_ansible_mixin, resource, existing_object, replicas, wait,
def main():
mutually_exclusive = [
- ('resource_definition', 'src'),
+ ("resource_definition", "src"),
]
- module = AnsibleModule(argument_spec=argspec(), mutually_exclusive=mutually_exclusive, supports_check_mode=True)
+ module = AnsibleModule(
+ argument_spec=argspec(),
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ )
from ansible_collections.kubernetes.core.plugins.module_utils.common import (
- K8sAnsibleMixin, get_api_client)
+ K8sAnsibleMixin,
+ get_api_client,
+ )
+
k8s_ansible_mixin = K8sAnsibleMixin(module)
k8s_ansible_mixin.client = get_api_client(module=module)
execute_module(module, k8s_ansible_mixin)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/plugins/modules/k8s_service.py b/plugins/modules/k8s_service.py
index d94ba5c017..a3bbc41ae7 100644
--- a/plugins/modules/k8s_service.py
+++ b/plugins/modules/k8s_service.py
@@ -9,7 +9,7 @@
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
module: k8s_service
@@ -85,9 +85,9 @@
requirements:
- python >= 3.6
- kubernetes >= 12.0.0
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Expose https port with ClusterIP
kubernetes.core.k8s_service:
state: present
@@ -111,9 +111,9 @@
protocol: TCP
selector:
key: special
-'''
+"""
-RETURN = r'''
+RETURN = r"""
result:
description:
- The created, patched, or otherwise present Service object. Will be empty in the case of a deletion.
@@ -140,32 +140,36 @@
description: Current status details for the object.
returned: success
type: complex
-'''
+"""
import copy
from collections import defaultdict
-from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import AnsibleModule
+from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import (
+ AnsibleModule,
+)
from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
- AUTH_ARG_SPEC, COMMON_ARG_SPEC, RESOURCE_ARG_SPEC)
+ AUTH_ARG_SPEC,
+ COMMON_ARG_SPEC,
+ RESOURCE_ARG_SPEC,
+)
SERVICE_ARG_SPEC = {
- 'apply': {
- 'type': 'bool',
- 'default': False,
+ "apply": {"type": "bool", "default": False},
+ "name": {"required": True},
+ "namespace": {"required": True},
+ "merge_type": {
+ "type": "list",
+ "elements": "str",
+ "choices": ["json", "merge", "strategic-merge"],
},
- 'name': {'required': True},
- 'namespace': {'required': True},
- 'merge_type': {'type': 'list', 'elements': 'str', 'choices': ['json', 'merge', 'strategic-merge']},
- 'selector': {'type': 'dict'},
- 'type': {
- 'type': 'str',
- 'choices': [
- 'NodePort', 'ClusterIP', 'LoadBalancer', 'ExternalName'
- ],
+ "selector": {"type": "dict"},
+ "type": {
+ "type": "str",
+ "choices": ["NodePort", "ClusterIP", "LoadBalancer", "ExternalName"],
},
- 'ports': {'type': 'list', 'elements': 'dict'},
+ "ports": {"type": "list", "elements": "dict"},
}
@@ -195,29 +199,31 @@ def execute_module(module, k8s_ansible_mixin):
""" Module execution """
k8s_ansible_mixin.set_resource_definitions(module)
- api_version = 'v1'
- selector = module.params.get('selector')
- service_type = module.params.get('type')
- ports = module.params.get('ports')
+ api_version = "v1"
+ selector = module.params.get("selector")
+ service_type = module.params.get("type")
+ ports = module.params.get("ports")
definition = defaultdict(defaultdict)
- definition['kind'] = 'Service'
- definition['apiVersion'] = api_version
+ definition["kind"] = "Service"
+ definition["apiVersion"] = api_version
- def_spec = definition['spec']
- def_spec['type'] = service_type
- def_spec['ports'] = ports
- def_spec['selector'] = selector
+ def_spec = definition["spec"]
+ def_spec["type"] = service_type
+ def_spec["ports"] = ports
+ def_spec["selector"] = selector
- def_meta = definition['metadata']
- def_meta['name'] = module.params.get('name')
- def_meta['namespace'] = module.params.get('namespace')
+ def_meta = definition["metadata"]
+ def_meta["name"] = module.params.get("name")
+ def_meta["namespace"] = module.params.get("namespace")
# 'resource_definition:' has lower priority than module parameters
- definition = dict(merge_dicts(k8s_ansible_mixin.resource_definitions[0], definition))
+ definition = dict(
+ merge_dicts(k8s_ansible_mixin.resource_definitions[0], definition)
+ )
- resource = k8s_ansible_mixin.find_resource('Service', api_version, fail=True)
+ resource = k8s_ansible_mixin.find_resource("Service", api_version, fail=True)
definition = k8s_ansible_mixin.set_defaults(resource, definition)
result = k8s_ansible_mixin.perform_action(resource, definition)
@@ -227,12 +233,14 @@ def execute_module(module, k8s_ansible_mixin):
def main():
module = AnsibleModule(argument_spec=argspec(), supports_check_mode=True)
from ansible_collections.kubernetes.core.plugins.module_utils.common import (
- K8sAnsibleMixin, get_api_client)
+ K8sAnsibleMixin,
+ get_api_client,
+ )
k8s_ansible_mixin = K8sAnsibleMixin(module)
k8s_ansible_mixin.client = get_api_client(module=module)
execute_module(module, k8s_ansible_mixin)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/tests/integration/targets/kubernetes/library/test_tempfile.py b/tests/integration/targets/kubernetes/library/test_tempfile.py
index c89f5a3196..ad7811d3cb 100644
--- a/tests/integration/targets/kubernetes/library/test_tempfile.py
+++ b/tests/integration/targets/kubernetes/library/test_tempfile.py
@@ -6,10 +6,11 @@
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = """
---
module: test_tempfile
@@ -50,7 +51,7 @@
author:
- Krzysztof Magosa (@krzysztof-magosa)
-'''
+"""
EXAMPLES = """
- name: create temporary build directory
@@ -71,13 +72,13 @@
when: tempfile_1.path is defined
"""
-RETURN = '''
+RETURN = """
path:
description: Path to created file or directory
returned: success
type: str
sample: "/tmp/ansible.bMlvdk"
-'''
+"""
from os import close
from tempfile import mkstemp, mkdtemp
@@ -90,26 +91,26 @@
def main():
module = AnsibleModule(
argument_spec=dict(
- state=dict(type='str', default='file', choices=['file', 'directory']),
- path=dict(type='path'),
- prefix=dict(type='str', default='ansible.'),
- suffix=dict(type='str', default=''),
+ state=dict(type="str", default="file", choices=["file", "directory"]),
+ path=dict(type="path"),
+ prefix=dict(type="str", default="ansible."),
+ suffix=dict(type="str", default=""),
),
)
try:
- if module.params['state'] == 'file':
+ if module.params["state"] == "file":
handle, path = mkstemp(
- prefix=module.params['prefix'],
- suffix=module.params['suffix'],
- dir=module.params['path'],
+ prefix=module.params["prefix"],
+ suffix=module.params["suffix"],
+ dir=module.params["path"],
)
close(handle)
- elif module.params['state'] == 'directory':
+ elif module.params["state"] == "directory":
path = mkdtemp(
- prefix=module.params['prefix'],
- suffix=module.params['suffix'],
- dir=module.params['path'],
+ prefix=module.params["prefix"],
+ suffix=module.params["suffix"],
+ dir=module.params["path"],
)
module.exit_json(changed=True, path=path)
@@ -117,5 +118,5 @@ def main():
module.fail_json(msg=to_native(e), exception=format_exc())
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/tests/unit/module_utils/test_apply.py b/tests/unit/module_utils/test_apply.py
index b9ba7f7deb..cecf5a8485 100644
--- a/tests/unit/module_utils/test_apply.py
+++ b/tests/unit/module_utils/test_apply.py
@@ -14,408 +14,477 @@
from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
-from ansible_collections.kubernetes.core.plugins.module_utils.apply import merge, apply_patch
+from ansible_collections.kubernetes.core.plugins.module_utils.apply import (
+ merge,
+ apply_patch,
+)
tests = [
dict(
last_applied=dict(
- kind="ConfigMap",
- metadata=dict(name="foo"),
- data=dict(one="1", two="2")
+ kind="ConfigMap", metadata=dict(name="foo"), data=dict(one="1", two="2")
),
desired=dict(
- kind="ConfigMap",
- metadata=dict(name="foo"),
- data=dict(one="1", two="2")
+ kind="ConfigMap", metadata=dict(name="foo"), data=dict(one="1", two="2")
),
- expected={}
+ expected={},
),
dict(
last_applied=dict(
- kind="ConfigMap",
- metadata=dict(name="foo"),
- data=dict(one="1", two="2")
+ kind="ConfigMap", metadata=dict(name="foo"), data=dict(one="1", two="2")
),
desired=dict(
kind="ConfigMap",
metadata=dict(name="foo"),
- data=dict(one="1", two="2", three="3")
+ data=dict(one="1", two="2", three="3"),
),
- expected=dict(data=dict(three="3"))
+ expected=dict(data=dict(three="3")),
),
dict(
last_applied=dict(
- kind="ConfigMap",
- metadata=dict(name="foo"),
- data=dict(one="1", two="2")
+ kind="ConfigMap", metadata=dict(name="foo"), data=dict(one="1", two="2")
),
desired=dict(
- kind="ConfigMap",
- metadata=dict(name="foo"),
- data=dict(one="1", three="3")
+ kind="ConfigMap", metadata=dict(name="foo"), data=dict(one="1", three="3")
),
- expected=dict(data=dict(two=None, three="3"))
+ expected=dict(data=dict(two=None, three="3")),
),
dict(
last_applied=dict(
kind="ConfigMap",
metadata=dict(name="foo", annotations=dict(this="one", hello="world")),
- data=dict(one="1", two="2")
+ data=dict(one="1", two="2"),
),
desired=dict(
- kind="ConfigMap",
- metadata=dict(name="foo"),
- data=dict(one="1", three="3")
+ kind="ConfigMap", metadata=dict(name="foo"), data=dict(one="1", three="3")
),
- expected=dict(metadata=dict(annotations=None), data=dict(two=None, three="3"))
+ expected=dict(metadata=dict(annotations=None), data=dict(two=None, three="3")),
),
-
dict(
last_applied=dict(
kind="Service",
metadata=dict(name="foo"),
- spec=dict(ports=[dict(port=8080, name="http")])
+ spec=dict(ports=[dict(port=8080, name="http")]),
),
actual=dict(
kind="Service",
metadata=dict(name="foo"),
- spec=dict(ports=[dict(port=8080, protocol='TCP', name="http")])
+ spec=dict(ports=[dict(port=8080, protocol="TCP", name="http")]),
),
desired=dict(
kind="Service",
metadata=dict(name="foo"),
- spec=dict(ports=[dict(port=8080, name="http")])
+ spec=dict(ports=[dict(port=8080, name="http")]),
),
- expected=dict(spec=dict(ports=[dict(port=8080, protocol='TCP', name="http")]))
+ expected=dict(spec=dict(ports=[dict(port=8080, protocol="TCP", name="http")])),
),
dict(
last_applied=dict(
kind="Service",
metadata=dict(name="foo"),
- spec=dict(ports=[dict(port=8080, name="http")])
+ spec=dict(ports=[dict(port=8080, name="http")]),
),
actual=dict(
kind="Service",
metadata=dict(name="foo"),
- spec=dict(ports=[dict(port=8080, protocol='TCP', name="http")])
+ spec=dict(ports=[dict(port=8080, protocol="TCP", name="http")]),
),
desired=dict(
kind="Service",
metadata=dict(name="foo"),
- spec=dict(ports=[dict(port=8081, name="http")])
+ spec=dict(ports=[dict(port=8081, name="http")]),
),
- expected=dict(spec=dict(ports=[dict(port=8081, name="http")]))
+ expected=dict(spec=dict(ports=[dict(port=8081, name="http")])),
),
dict(
last_applied=dict(
kind="Service",
metadata=dict(name="foo"),
- spec=dict(ports=[dict(port=8080, name="http")])
+ spec=dict(ports=[dict(port=8080, name="http")]),
),
actual=dict(
kind="Service",
metadata=dict(name="foo"),
- spec=dict(ports=[dict(port=8080, protocol='TCP', name="http")])
+ spec=dict(ports=[dict(port=8080, protocol="TCP", name="http")]),
),
desired=dict(
kind="Service",
metadata=dict(name="foo"),
- spec=dict(ports=[dict(port=8443, name="https"), dict(port=8080, name="http")])
+ spec=dict(
+ ports=[dict(port=8443, name="https"), dict(port=8080, name="http")]
+ ),
+ ),
+ expected=dict(
+ spec=dict(
+ ports=[
+ dict(port=8443, name="https"),
+ dict(port=8080, name="http", protocol="TCP"),
+ ]
+ )
),
- expected=dict(spec=dict(ports=[dict(port=8443, name="https"), dict(port=8080, name="http", protocol='TCP')]))
),
dict(
last_applied=dict(
kind="Service",
metadata=dict(name="foo"),
- spec=dict(ports=[dict(port=8443, name="https"), dict(port=8080, name="http")])
+ spec=dict(
+ ports=[dict(port=8443, name="https"), dict(port=8080, name="http")]
+ ),
),
actual=dict(
kind="Service",
metadata=dict(name="foo"),
- spec=dict(ports=[dict(port=8443, protocol='TCP', name="https"), dict(port=8080, protocol='TCP', name='http')])
+ spec=dict(
+ ports=[
+ dict(port=8443, protocol="TCP", name="https"),
+ dict(port=8080, protocol="TCP", name="http"),
+ ]
+ ),
),
desired=dict(
kind="Service",
metadata=dict(name="foo"),
- spec=dict(ports=[dict(port=8080, name="http")])
+ spec=dict(ports=[dict(port=8080, name="http")]),
),
- expected=dict(spec=dict(ports=[dict(port=8080, name="http", protocol='TCP')]))
+ expected=dict(spec=dict(ports=[dict(port=8080, name="http", protocol="TCP")])),
),
dict(
last_applied=dict(
kind="Service",
metadata=dict(name="foo"),
- spec=dict(ports=[dict(port=8443, name="https", madeup="xyz"), dict(port=8080, name="http")])
+ spec=dict(
+ ports=[
+ dict(port=8443, name="https", madeup="xyz"),
+ dict(port=8080, name="http"),
+ ]
+ ),
),
actual=dict(
kind="Service",
metadata=dict(name="foo"),
- spec=dict(ports=[dict(port=8443, protocol='TCP', name="https", madeup="xyz"), dict(port=8080, protocol='TCP', name='http')])
+ spec=dict(
+ ports=[
+ dict(port=8443, protocol="TCP", name="https", madeup="xyz"),
+ dict(port=8080, protocol="TCP", name="http"),
+ ]
+ ),
),
desired=dict(
kind="Service",
metadata=dict(name="foo"),
- spec=dict(ports=[dict(port=8443, name="https")])
+ spec=dict(ports=[dict(port=8443, name="https")]),
+ ),
+ expected=dict(
+ spec=dict(
+ ports=[dict(madeup=None, port=8443, name="https", protocol="TCP")]
+ )
),
- expected=dict(spec=dict(ports=[dict(madeup=None, port=8443, name="https", protocol='TCP')]))
),
dict(
last_applied=dict(
kind="Pod",
metadata=dict(name="foo"),
- spec=dict(containers=[dict(name="busybox", image="busybox",
- resources=dict(requests=dict(cpu="100m", memory="100Mi"), limits=dict(cpu="100m", memory="100Mi")))])
+ spec=dict(
+ containers=[
+ dict(
+ name="busybox",
+ image="busybox",
+ resources=dict(
+ requests=dict(cpu="100m", memory="100Mi"),
+ limits=dict(cpu="100m", memory="100Mi"),
+ ),
+ )
+ ]
+ ),
),
actual=dict(
kind="Pod",
metadata=dict(name="foo"),
- spec=dict(containers=[dict(name="busybox", image="busybox",
- resources=dict(requests=dict(cpu="100m", memory="100Mi"), limits=dict(cpu="100m", memory="100Mi")))])
+ spec=dict(
+ containers=[
+ dict(
+ name="busybox",
+ image="busybox",
+ resources=dict(
+ requests=dict(cpu="100m", memory="100Mi"),
+ limits=dict(cpu="100m", memory="100Mi"),
+ ),
+ )
+ ]
+ ),
),
desired=dict(
kind="Pod",
metadata=dict(name="foo"),
- spec=dict(containers=[dict(name="busybox", image="busybox",
- resources=dict(requests=dict(cpu="50m", memory="50Mi"), limits=dict(memory="50Mi")))])
+ spec=dict(
+ containers=[
+ dict(
+ name="busybox",
+ image="busybox",
+ resources=dict(
+ requests=dict(cpu="50m", memory="50Mi"),
+ limits=dict(memory="50Mi"),
+ ),
+ )
+ ]
+ ),
+ ),
+ expected=dict(
+ spec=dict(
+ containers=[
+ dict(
+ name="busybox",
+ image="busybox",
+ resources=dict(
+ requests=dict(cpu="50m", memory="50Mi"),
+ limits=dict(cpu=None, memory="50Mi"),
+ ),
+ )
+ ]
+ )
),
- expected=dict(spec=dict(containers=[dict(name="busybox", image="busybox",
- resources=dict(requests=dict(cpu="50m", memory="50Mi"), limits=dict(cpu=None, memory="50Mi")))]))
),
dict(
- desired=dict(kind='Pod',
- spec=dict(containers=[
- dict(name='hello',
- volumeMounts=[dict(name="test", mountPath="/test")])
- ],
- volumes=[
- dict(name="test", configMap=dict(name="test")),
- ])),
- last_applied=dict(kind='Pod',
- spec=dict(containers=[
- dict(name='hello',
- volumeMounts=[dict(name="test", mountPath="/test")])
- ],
- volumes=[
- dict(name="test", configMap=dict(name="test"))])),
- actual=dict(kind='Pod',
- spec=dict(containers=[
- dict(name='hello',
- volumeMounts=[
- dict(name="test", mountPath="/test"),
- dict(mountPath="/var/run/secrets/kubernetes.io/serviceaccount", name="default-token-xyz")])
- ],
- volumes=[
- dict(name="test", configMap=dict(name="test")),
- dict(name="default-token-xyz", secret=dict(secretName="default-token-xyz")),
- ])),
- expected=dict(spec=dict(containers=[dict(name='hello',
- volumeMounts=[dict(name="test", mountPath="/test"),
- dict(mountPath="/var/run/secrets/kubernetes.io/serviceaccount", name="default-token-xyz")])],
- volumes=[dict(name="test", configMap=dict(name="test")),
- dict(name="default-token-xyz", secret=dict(secretName="default-token-xyz"))])),
+ desired=dict(
+ kind="Pod",
+ spec=dict(
+ containers=[
+ dict(
+ name="hello",
+ volumeMounts=[dict(name="test", mountPath="/test")],
+ )
+ ],
+ volumes=[dict(name="test", configMap=dict(name="test"))],
+ ),
+ ),
+ last_applied=dict(
+ kind="Pod",
+ spec=dict(
+ containers=[
+ dict(
+ name="hello",
+ volumeMounts=[dict(name="test", mountPath="/test")],
+ )
+ ],
+ volumes=[dict(name="test", configMap=dict(name="test"))],
+ ),
+ ),
+ actual=dict(
+ kind="Pod",
+ spec=dict(
+ containers=[
+ dict(
+ name="hello",
+ volumeMounts=[
+ dict(name="test", mountPath="/test"),
+ dict(
+ mountPath="/var/run/secrets/kubernetes.io/serviceaccount",
+ name="default-token-xyz",
+ ),
+ ],
+ )
+ ],
+ volumes=[
+ dict(name="test", configMap=dict(name="test")),
+ dict(
+ name="default-token-xyz",
+ secret=dict(secretName="default-token-xyz"),
+ ),
+ ],
+ ),
+ ),
+ expected=dict(
+ spec=dict(
+ containers=[
+ dict(
+ name="hello",
+ volumeMounts=[
+ dict(name="test", mountPath="/test"),
+ dict(
+ mountPath="/var/run/secrets/kubernetes.io/serviceaccount",
+ name="default-token-xyz",
+ ),
+ ],
+ )
+ ],
+ volumes=[
+ dict(name="test", configMap=dict(name="test")),
+ dict(
+ name="default-token-xyz",
+ secret=dict(secretName="default-token-xyz"),
+ ),
+ ],
+ )
+ ),
),
-
# This next one is based on a real world case where definition was mostly
# str type and everything else was mostly unicode type (don't ask me how)
dict(
last_applied={
- u'kind': u'ConfigMap',
- u'data': {u'one': '1', 'three': '3', 'two': '2'},
- u'apiVersion': u'v1',
- u'metadata': {u'namespace': u'apply', u'name': u'apply-configmap'}
+ u"kind": u"ConfigMap",
+ u"data": {u"one": "1", "three": "3", "two": "2"},
+ u"apiVersion": u"v1",
+ u"metadata": {u"namespace": u"apply", u"name": u"apply-configmap"},
},
actual={
- u'kind': u'ConfigMap',
- u'data': {u'one': '1', 'three': '3', 'two': '2'},
- u'apiVersion': u'v1',
- u'metadata': {u'namespace': u'apply', u'name': u'apply-configmap',
- u'resourceVersion': '1714994',
- u'creationTimestamp': u'2019-08-17T05:08:05Z', u'annotations': {},
- u'selfLink': u'/api/v1/namespaces/apply/configmaps/apply-configmap',
- u'uid': u'fed45fb0-c0ac-11e9-9d95-025000000001'}
+ u"kind": u"ConfigMap",
+ u"data": {u"one": "1", "three": "3", "two": "2"},
+ u"apiVersion": u"v1",
+ u"metadata": {
+ u"namespace": u"apply",
+ u"name": u"apply-configmap",
+ u"resourceVersion": "1714994",
+ u"creationTimestamp": u"2019-08-17T05:08:05Z",
+ u"annotations": {},
+ u"selfLink": u"/api/v1/namespaces/apply/configmaps/apply-configmap",
+ u"uid": u"fed45fb0-c0ac-11e9-9d95-025000000001",
+ },
},
desired={
- 'kind': u'ConfigMap',
- 'data': {'one': '1', 'three': '3', 'two': '2'},
- 'apiVersion': 'v1',
- 'metadata': {'namespace': 'apply', 'name': 'apply-configmap'}
+ "kind": u"ConfigMap",
+ "data": {"one": "1", "three": "3", "two": "2"},
+ "apiVersion": "v1",
+ "metadata": {"namespace": "apply", "name": "apply-configmap"},
},
- expected=dict()
+ expected=dict(),
),
# apply a Deployment, then scale the Deployment (which doesn't affect last-applied)
# then apply the Deployment again. Should un-scale the Deployment
dict(
last_applied={
- 'kind': u'Deployment',
- 'spec': {
- 'replicas': 1,
- 'template': {
- 'spec': {
- 'containers': [
+ "kind": u"Deployment",
+ "spec": {
+ "replicas": 1,
+ "template": {
+ "spec": {
+ "containers": [
{
- 'name': 'this_must_exist',
- 'envFrom': [
- {
- 'configMapRef': {
- 'name': 'config-xyz'
- }
- },
- {
- 'secretRef': {
- 'name': 'config-wxy'
- }
- }
- ]
+ "name": "this_must_exist",
+ "envFrom": [
+ {"configMapRef": {"name": "config-xyz"}},
+ {"secretRef": {"name": "config-wxy"}},
+ ],
}
]
}
- }
+ },
},
- 'metadata': {
- 'namespace': 'apply',
- 'name': u'apply-deployment'
- }
+ "metadata": {"namespace": "apply", "name": u"apply-deployment"},
},
actual={
- 'kind': u'Deployment',
- 'spec': {
- 'replicas': 0,
- 'template': {
- 'spec': {
- 'containers': [
+ "kind": u"Deployment",
+ "spec": {
+ "replicas": 0,
+ "template": {
+ "spec": {
+ "containers": [
{
- 'name': 'this_must_exist',
- 'envFrom': [
- {
- 'configMapRef': {
- 'name': 'config-xyz'
- }
- },
- {
- 'secretRef': {
- 'name': 'config-wxy'
- }
- }
- ]
+ "name": "this_must_exist",
+ "envFrom": [
+ {"configMapRef": {"name": "config-xyz"}},
+ {"secretRef": {"name": "config-wxy"}},
+ ],
}
]
}
- }
+ },
},
- 'metadata': {
- 'namespace': 'apply',
- 'name': u'apply-deployment'
- }
+ "metadata": {"namespace": "apply", "name": u"apply-deployment"},
},
desired={
- 'kind': u'Deployment',
- 'spec': {
- 'replicas': 1,
- 'template': {
- 'spec': {
- 'containers': [
+ "kind": u"Deployment",
+ "spec": {
+ "replicas": 1,
+ "template": {
+ "spec": {
+ "containers": [
{
- 'name': 'this_must_exist',
- 'envFrom': [
- {
- 'configMapRef': {
- 'name': 'config-abc'
- }
- }
- ]
+ "name": "this_must_exist",
+ "envFrom": [{"configMapRef": {"name": "config-abc"}}],
}
]
}
- }
+ },
},
- 'metadata': {
- 'namespace': 'apply',
- 'name': u'apply-deployment'
- }
+ "metadata": {"namespace": "apply", "name": u"apply-deployment"},
},
expected={
- 'spec': {
- 'replicas': 1,
- 'template': {
- 'spec': {
- 'containers': [
+ "spec": {
+ "replicas": 1,
+ "template": {
+ "spec": {
+ "containers": [
{
- 'name': 'this_must_exist',
- 'envFrom': [
- {
- 'configMapRef': {
- 'name': 'config-abc'
- }
- }
- ]
+ "name": "this_must_exist",
+ "envFrom": [{"configMapRef": {"name": "config-abc"}}],
}
]
}
- }
+ },
}
- }
+ },
),
dict(
- last_applied={
- 'kind': 'MadeUp',
- 'toplevel': {
- 'original': 'entry'
- }
- },
+ last_applied={"kind": "MadeUp", "toplevel": {"original": "entry"}},
actual={
- 'kind': 'MadeUp',
- 'toplevel': {
- 'original': 'entry',
- 'another': {
- 'nested': {
- 'entry': 'value'
- }
- }
- }
+ "kind": "MadeUp",
+ "toplevel": {
+ "original": "entry",
+ "another": {"nested": {"entry": "value"}},
+ },
},
desired={
- 'kind': 'MadeUp',
- 'toplevel': {
- 'original': 'entry',
- 'another': {
- 'nested': {
- 'entry': 'value'
- }
- }
- }
+ "kind": "MadeUp",
+ "toplevel": {
+ "original": "entry",
+ "another": {"nested": {"entry": "value"}},
+ },
},
- expected={}
- )
+ expected={},
+ ),
]
def test_merges():
for test in tests:
- assert(merge(test['last_applied'], test['desired'], test.get('actual', test['last_applied'])) == test['expected'])
+ assert (
+ merge(
+ test["last_applied"],
+ test["desired"],
+ test.get("actual", test["last_applied"]),
+ )
+ == test["expected"]
+ )
def test_apply_patch():
actual = dict(
kind="ConfigMap",
- metadata=dict(name="foo",
- annotations={'kubectl.kubernetes.io/last-applied-configuration':
- '{"data":{"one":"1","two":"2"},"kind":"ConfigMap",'
- '"metadata":{"annotations":{"hello":"world","this":"one"},"name":"foo"}}',
- 'this': 'one', 'hello': 'world'}),
- data=dict(one="1", two="2")
+ metadata=dict(
+ name="foo",
+ annotations={
+ "kubectl.kubernetes.io/last-applied-configuration": '{"data":{"one":"1","two":"2"},"kind":"ConfigMap",'
+ '"metadata":{"annotations":{"hello":"world","this":"one"},"name":"foo"}}',
+ "this": "one",
+ "hello": "world",
+ },
+ ),
+ data=dict(one="1", two="2"),
)
desired = dict(
- kind="ConfigMap",
- metadata=dict(name="foo"),
- data=dict(one="1", three="3")
+ kind="ConfigMap", metadata=dict(name="foo"), data=dict(one="1", three="3")
)
expected = dict(
metadata=dict(
- annotations={'kubectl.kubernetes.io/last-applied-configuration': '{"data":{"one":"1","three":"3"},"kind":"ConfigMap","metadata":{"name":"foo"}}',
- 'this': None, 'hello': None}),
- data=dict(two=None, three="3")
+ annotations={
+ "kubectl.kubernetes.io/last-applied-configuration": '{"data":{"one":"1","three":"3"},"kind":"ConfigMap","metadata":{"name":"foo"}}',
+ "this": None,
+ "hello": None,
+ }
+ ),
+ data=dict(two=None, three="3"),
)
- assert(apply_patch(actual, desired) == (actual, expected))
+ assert apply_patch(actual, desired) == (actual, expected)
diff --git a/tests/unit/module_utils/test_common.py b/tests/unit/module_utils/test_common.py
index 9444da2d9c..f4e5028bac 100644
--- a/tests/unit/module_utils/test_common.py
+++ b/tests/unit/module_utils/test_common.py
@@ -17,9 +17,7 @@ def test_encode_stringdata_modifies_definition():
"apiVersion": "v1",
"kind": "Secret",
"type": "Opaque",
- "stringData": {
- "mydata": "ansiβle"
- }
+ "stringData": {"mydata": "ansiβle"},
}
res = _encode_stringdata(definition)
assert "stringData" not in res
@@ -31,9 +29,7 @@ def test_encode_stringdata_does_not_modify_data():
"apiVersion": "v1",
"kind": "Secret",
"type": "Opaque",
- "data": {
- "mydata": "Zm9vYmFy"
- }
+ "data": {"mydata": "Zm9vYmFy"},
}
res = _encode_stringdata(definition)
assert res["data"]["mydata"] == "Zm9vYmFy"
diff --git a/tests/unit/module_utils/test_discoverer.py b/tests/unit/module_utils/test_discoverer.py
index 63a8a9f4c0..b23a7a9aa6 100644
--- a/tests/unit/module_utils/test_discoverer.py
+++ b/tests/unit/module_utils/test_discoverer.py
@@ -18,85 +18,101 @@
from kubernetes.client import ApiClient
from kubernetes.dynamic import Resource
-from ansible_collections.kubernetes.core.plugins.module_utils.k8sdynamicclient import K8SDynamicClient
-from ansible_collections.kubernetes.core.plugins.module_utils.client.discovery import LazyDiscoverer
-from ansible_collections.kubernetes.core.plugins.module_utils.client.resource import ResourceList
-
-
-@pytest.fixture(scope='module')
+from ansible_collections.kubernetes.core.plugins.module_utils.k8sdynamicclient import (
+ K8SDynamicClient,
+)
+from ansible_collections.kubernetes.core.plugins.module_utils.client.discovery import (
+ LazyDiscoverer,
+)
+from ansible_collections.kubernetes.core.plugins.module_utils.client.resource import (
+ ResourceList,
+)
+
+
+@pytest.fixture(scope="module")
def mock_namespace():
return Resource(
- api_version='v1',
- kind='Namespace',
- name='namespaces',
+ api_version="v1",
+ kind="Namespace",
+ name="namespaces",
namespaced=False,
preferred=True,
- prefix='api',
- shorter_names=['ns'],
- shortNames=['ns'],
- singularName='namespace',
- verbs=['create', 'delete', 'get', 'list', 'patch', 'update', 'watch']
+ prefix="api",
+ shorter_names=["ns"],
+ shortNames=["ns"],
+ singularName="namespace",
+ verbs=["create", "delete", "get", "list", "patch", "update", "watch"],
)
-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
def mock_templates():
return Resource(
- api_version='v1',
- kind='Template',
- name='templates',
+ api_version="v1",
+ kind="Template",
+ name="templates",
namespaced=True,
preferred=True,
- prefix='api',
+ prefix="api",
shorter_names=[],
shortNames=[],
- verbs=['create', 'delete', 'get', 'list', 'patch', 'update', 'watch']
+ verbs=["create", "delete", "get", "list", "patch", "update", "watch"],
)
-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
def mock_processedtemplates():
return Resource(
- api_version='v1',
- kind='Template',
- name='processedtemplates',
+ api_version="v1",
+ kind="Template",
+ name="processedtemplates",
namespaced=True,
preferred=True,
- prefix='api',
+ prefix="api",
shorter_names=[],
shortNames=[],
- verbs=['create', 'delete', 'get', 'list', 'patch', 'update', 'watch']
+ verbs=["create", "delete", "get", "list", "patch", "update", "watch"],
)
-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
def mock_namespace_list(mock_namespace):
- ret = ResourceList(mock_namespace.client, mock_namespace.group, mock_namespace.api_version, mock_namespace.kind)
+ ret = ResourceList(
+ mock_namespace.client,
+ mock_namespace.group,
+ mock_namespace.api_version,
+ mock_namespace.kind,
+ )
ret._ResourceList__base_resource = mock_namespace
return ret
-@pytest.fixture(scope='function', autouse=True)
-def setup_client_monkeypatch(monkeypatch, mock_namespace, mock_namespace_list, mock_templates, mock_processedtemplates):
-
+@pytest.fixture(scope="function", autouse=True)
+def setup_client_monkeypatch(
+ monkeypatch,
+ mock_namespace,
+ mock_namespace_list,
+ mock_templates,
+ mock_processedtemplates,
+):
def mock_load_server_info(self):
- self.__version = {'kubernetes': 'mock-k8s-version'}
+ self.__version = {"kubernetes": "mock-k8s-version"}
def mock_parse_api_groups(self, request_resources=False):
return {
- 'api': {
- '': {
- 'v1': {
- 'Namespace': [mock_namespace],
- 'NamespaceList': [mock_namespace_list],
- 'Template': [mock_templates, mock_processedtemplates],
+ "api": {
+ "": {
+ "v1": {
+ "Namespace": [mock_namespace],
+ "NamespaceList": [mock_namespace_list],
+ "Template": [mock_templates, mock_processedtemplates],
}
}
}
}
- monkeypatch.setattr(LazyDiscoverer, '_load_server_info', mock_load_server_info)
- monkeypatch.setattr(LazyDiscoverer, 'parse_api_groups', mock_parse_api_groups)
+ monkeypatch.setattr(LazyDiscoverer, "_load_server_info", mock_load_server_info)
+ monkeypatch.setattr(LazyDiscoverer, "parse_api_groups", mock_parse_api_groups)
@pytest.fixture
@@ -104,39 +120,45 @@ def client(request):
return K8SDynamicClient(ApiClient(), discoverer=LazyDiscoverer)
-@pytest.mark.parametrize(("attribute", "value"), [
- ('name', 'namespaces'),
- ('singular_name', 'namespace'),
- ('short_names', ['ns'])
-])
-def test_search_returns_single_and_list(client, mock_namespace, mock_namespace_list, attribute, value):
- resources = client.resources.search(**{'api_version': 'v1', attribute: value})
+@pytest.mark.parametrize(
+ ("attribute", "value"),
+ [("name", "namespaces"), ("singular_name", "namespace"), ("short_names", ["ns"])],
+)
+def test_search_returns_single_and_list(
+ client, mock_namespace, mock_namespace_list, attribute, value
+):
+ resources = client.resources.search(**{"api_version": "v1", attribute: value})
assert len(resources) == 2
assert mock_namespace in resources
assert mock_namespace_list in resources
-@pytest.mark.parametrize(("attribute", "value"), [
- ('kind', 'Namespace'),
- ('name', 'namespaces'),
- ('singular_name', 'namespace'),
- ('short_names', ['ns'])
-])
+@pytest.mark.parametrize(
+ ("attribute", "value"),
+ [
+ ("kind", "Namespace"),
+ ("name", "namespaces"),
+ ("singular_name", "namespace"),
+ ("short_names", ["ns"]),
+ ],
+)
def test_get_returns_only_single(client, mock_namespace, attribute, value):
- resource = client.resources.get(**{'api_version': 'v1', attribute: value})
+ resource = client.resources.get(**{"api_version": "v1", attribute: value})
assert resource == mock_namespace
def test_get_namespace_list_kind(client, mock_namespace_list):
- resource = client.resources.get(api_version='v1', kind='NamespaceList')
+ resource = client.resources.get(api_version="v1", kind="NamespaceList")
assert resource == mock_namespace_list
-def test_search_multiple_resources_for_template(client, mock_templates, mock_processedtemplates):
- resources = client.resources.search(api_version='v1', kind='Template')
+def test_search_multiple_resources_for_template(
+ client, mock_templates, mock_processedtemplates
+):
+ resources = client.resources.search(api_version="v1", kind="Template")
assert len(resources) == 2
assert mock_templates in resources
diff --git a/tests/unit/module_utils/test_hashes.py b/tests/unit/module_utils/test_hashes.py
index 4c58237b67..f338cb0c30 100644
--- a/tests/unit/module_utils/test_hashes.py
+++ b/tests/unit/module_utils/test_hashes.py
@@ -15,26 +15,22 @@
# Test ConfigMapHash and SecretHash equivalents
# tests based on https://github.com/kubernetes/kubernetes/pull/49961
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
-from ansible_collections.kubernetes.core.plugins.module_utils.hashes import generate_hash
+from ansible_collections.kubernetes.core.plugins.module_utils.hashes import (
+ generate_hash,
+)
tests = [
dict(
- resource=dict(
- kind="ConfigMap",
- metadata=dict(name="foo"),
- data=dict()
- ),
+ resource=dict(kind="ConfigMap", metadata=dict(name="foo"), data=dict()),
expected="867km9574f",
),
dict(
resource=dict(
- kind="ConfigMap",
- metadata=dict(name="foo"),
- type="my-type",
- data=dict()
+ kind="ConfigMap", metadata=dict(name="foo"), type="my-type", data=dict()
),
expected="867km9574f",
),
@@ -42,44 +38,31 @@
resource=dict(
kind="ConfigMap",
metadata=dict(name="foo"),
- data=dict(
- key1="value1",
- key2="value2")
+ data=dict(key1="value1", key2="value2"),
),
expected="gcb75dd9gb",
),
dict(
- resource=dict(
- kind="Secret",
- metadata=dict(name="foo"),
- data=dict()
- ),
+ resource=dict(kind="Secret", metadata=dict(name="foo"), data=dict()),
expected="949tdgdkgg",
),
dict(
resource=dict(
- kind="Secret",
- metadata=dict(name="foo"),
- type="my-type",
- data=dict()
+ kind="Secret", metadata=dict(name="foo"), type="my-type", data=dict()
),
expected="dg474f9t76",
),
-
dict(
resource=dict(
kind="Secret",
metadata=dict(name="foo"),
- data=dict(
- key1="dmFsdWUx",
- key2="dmFsdWUy")
+ data=dict(key1="dmFsdWUx", key2="dmFsdWUy"),
),
expected="tf72c228m4",
- )
-
+ ),
]
def test_hashes():
for test in tests:
- assert(generate_hash(test['resource']) == test['expected'])
+ assert generate_hash(test["resource"]) == test["expected"]
diff --git a/tests/unit/module_utils/test_marshal.py b/tests/unit/module_utils/test_marshal.py
index 7d48cea7cc..301212ac54 100644
--- a/tests/unit/module_utils/test_marshal.py
+++ b/tests/unit/module_utils/test_marshal.py
@@ -15,78 +15,55 @@
# Test ConfigMap and Secret marshalling
# tests based on https://github.com/kubernetes/kubernetes/pull/49961
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
-from ansible_collections.kubernetes.core.plugins.module_utils.hashes import marshal, sorted_dict
+from ansible_collections.kubernetes.core.plugins.module_utils.hashes import (
+ marshal,
+ sorted_dict,
+)
tests = [
dict(
- resource=dict(
- kind="ConfigMap",
- name="",
- data=dict(),
- ),
- expected=b'{"data":{},"kind":"ConfigMap","name":""}'
+ resource=dict(kind="ConfigMap", name="", data=dict(),),
+ expected=b'{"data":{},"kind":"ConfigMap","name":""}',
),
dict(
- resource=dict(
- kind="ConfigMap",
- name="",
- data=dict(
- one=""
- ),
- ),
- expected=b'{"data":{"one":""},"kind":"ConfigMap","name":""}'
+ resource=dict(kind="ConfigMap", name="", data=dict(one=""),),
+ expected=b'{"data":{"one":""},"kind":"ConfigMap","name":""}',
),
dict(
resource=dict(
- kind="ConfigMap",
- name="",
- data=dict(
- two="2",
- one="",
- three="3",
- ),
+ kind="ConfigMap", name="", data=dict(two="2", one="", three="3",),
),
- expected=b'{"data":{"one":"","three":"3","two":"2"},"kind":"ConfigMap","name":""}'
+ expected=b'{"data":{"one":"","three":"3","two":"2"},"kind":"ConfigMap","name":""}',
),
dict(
- resource=dict(
- kind="Secret",
- type="my-type",
- name="",
- data=dict(),
- ),
- expected=b'{"data":{},"kind":"Secret","name":"","type":"my-type"}'
+ resource=dict(kind="Secret", type="my-type", name="", data=dict(),),
+ expected=b'{"data":{},"kind":"Secret","name":"","type":"my-type"}',
),
dict(
- resource=dict(
- kind="Secret",
- type="my-type",
- name="",
- data=dict(
- one=""
- ),
- ),
- expected=b'{"data":{"one":""},"kind":"Secret","name":"","type":"my-type"}'
+ resource=dict(kind="Secret", type="my-type", name="", data=dict(one=""),),
+ expected=b'{"data":{"one":""},"kind":"Secret","name":"","type":"my-type"}',
),
dict(
resource=dict(
kind="Secret",
type="my-type",
name="",
- data=dict(
- two="Mg==",
- one="",
- three="Mw==",
- ),
+ data=dict(two="Mg==", one="", three="Mw==",),
),
- expected=b'{"data":{"one":"","three":"Mw==","two":"Mg=="},"kind":"Secret","name":"","type":"my-type"}'
+ expected=b'{"data":{"one":"","three":"Mw==","two":"Mg=="},"kind":"Secret","name":"","type":"my-type"}',
),
]
def test_marshal():
for test in tests:
- assert(marshal(sorted_dict(test['resource']), sorted(list(test['resource'].keys()))) == test['expected'])
+ assert (
+ marshal(
+ sorted_dict(test["resource"]), sorted(list(test["resource"].keys()))
+ )
+ == test["expected"]
+ )
diff --git a/tests/unit/module_utils/test_selector.py b/tests/unit/module_utils/test_selector.py
index 9f14b768e3..466ac09616 100644
--- a/tests/unit/module_utils/test_selector.py
+++ b/tests/unit/module_utils/test_selector.py
@@ -12,54 +12,57 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from ansible_collections.kubernetes.core.plugins.module_utils.selector import LabelSelectorFilter, Selector
+from ansible_collections.kubernetes.core.plugins.module_utils.selector import (
+ LabelSelectorFilter,
+ Selector,
+)
prod_definition = {
- 'apiVersion': 'v1',
- 'kind': 'Pod',
- 'metadata': {
- 'name': 'test',
- 'labels': {
- 'environment': 'production',
- 'app': 'nginx',
- }
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "name": "test",
+ "labels": {"environment": "production", "app": "nginx"},
},
- 'spec': {
- 'containers': [
- {'name': 'nginx', 'image': 'nginx:1.14.2', 'command': ['/bin/sh', '-c', 'sleep 10']}
+ "spec": {
+ "containers": [
+ {
+ "name": "nginx",
+ "image": "nginx:1.14.2",
+ "command": ["/bin/sh", "-c", "sleep 10"],
+ }
]
- }
+ },
}
no_label_definition = {
- 'apiVersion': 'v1',
- 'kind': 'Pod',
- 'metadata': {
- 'name': 'test',
- 'labels': {}
- },
- 'spec': {
- 'containers': [
- {'name': 'nginx', 'image': 'nginx:1.14.2', 'command': ['/bin/sh', '-c', 'sleep 10']}
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {"name": "test", "labels": {}},
+ "spec": {
+ "containers": [
+ {
+ "name": "nginx",
+ "image": "nginx:1.14.2",
+ "command": ["/bin/sh", "-c", "sleep 10"],
+ }
]
- }
+ },
}
test_definition = {
- 'apiVersion': 'v1',
- 'kind': 'Pod',
- 'metadata': {
- 'name': 'test',
- 'labels': {
- 'environment': 'test',
- 'app': 'nginx',
- }
- },
- 'spec': {
- 'containers': [
- {'name': 'nginx', 'image': 'nginx:1.15.2', 'command': ['/bin/sh', '-c', 'sleep 10']}
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {"name": "test", "labels": {"environment": "test", "app": "nginx"}},
+ "spec": {
+ "containers": [
+ {
+ "name": "nginx",
+ "image": "nginx:1.15.2",
+ "command": ["/bin/sh", "-c", "sleep 10"],
+ }
]
- }
+ },
}
@@ -75,13 +78,27 @@ def test_selector_parser():
assert sel._operator == "in" and sel._data == ["true"] and sel._key == "environment"
f_selector = "environment!=false"
sel = Selector(f_selector)
- assert sel._operator == "notin" and sel._data == ["false"] and sel._key == "environment"
+ assert (
+ sel._operator == "notin"
+ and sel._data == ["false"]
+ and sel._key == "environment"
+ )
f_selector = "environment notin (true, false)"
sel = Selector(f_selector)
- assert sel._operator == "notin" and "true" in sel._data and "false" in sel._data and sel._key == "environment"
+ assert (
+ sel._operator == "notin"
+ and "true" in sel._data
+ and "false" in sel._data
+ and sel._key == "environment"
+ )
f_selector = "environment in (true, false)"
sel = Selector(f_selector)
- assert sel._operator == "in" and "true" in sel._data and "false" in sel._data and sel._key == "environment"
+ assert (
+ sel._operator == "in"
+ and "true" in sel._data
+ and "false" in sel._data
+ and sel._key == "environment"
+ )
f_selector = "environmentin(true, false)"
sel = Selector(f_selector)
assert not sel._operator and not sel._data and sel._key == f_selector
@@ -97,91 +114,91 @@ def test_selector_parser():
def test_label_selector_without_operator():
- label_selector = ['environment', 'app']
+ label_selector = ["environment", "app"]
assert LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert not LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert LabelSelectorFilter(label_selector).isMatching(test_definition)
def test_label_selector_equal_operator():
- label_selector = ['environment==test']
+ label_selector = ["environment==test"]
assert not LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert not LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert LabelSelectorFilter(label_selector).isMatching(test_definition)
- label_selector = ['environment=production']
+ label_selector = ["environment=production"]
assert LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert not LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert not LabelSelectorFilter(label_selector).isMatching(test_definition)
- label_selector = ['environment=production', 'app==mongodb']
+ label_selector = ["environment=production", "app==mongodb"]
assert not LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert not LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert not LabelSelectorFilter(label_selector).isMatching(test_definition)
- label_selector = ['environment=production', 'app==nginx']
+ label_selector = ["environment=production", "app==nginx"]
assert LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert not LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert not LabelSelectorFilter(label_selector).isMatching(test_definition)
- label_selector = ['environment', 'app==nginx']
+ label_selector = ["environment", "app==nginx"]
assert LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert not LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert LabelSelectorFilter(label_selector).isMatching(test_definition)
def test_label_selector_notequal_operator():
- label_selector = ['environment!=test']
+ label_selector = ["environment!=test"]
assert LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert not LabelSelectorFilter(label_selector).isMatching(test_definition)
- label_selector = ['environment!=production']
+ label_selector = ["environment!=production"]
assert not LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert LabelSelectorFilter(label_selector).isMatching(test_definition)
- label_selector = ['environment=production', 'app!=mongodb']
+ label_selector = ["environment=production", "app!=mongodb"]
assert LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert not LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert not LabelSelectorFilter(label_selector).isMatching(test_definition)
- label_selector = ['environment=production', 'app!=nginx']
+ label_selector = ["environment=production", "app!=nginx"]
assert not LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert not LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert not LabelSelectorFilter(label_selector).isMatching(test_definition)
- label_selector = ['environment', 'app!=nginx']
+ label_selector = ["environment", "app!=nginx"]
assert not LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert not LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert not LabelSelectorFilter(label_selector).isMatching(test_definition)
def test_label_selector_conflicting_definition():
- label_selector = ['environment==test', 'environment!=test']
+ label_selector = ["environment==test", "environment!=test"]
assert not LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert not LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert not LabelSelectorFilter(label_selector).isMatching(test_definition)
- label_selector = ['environment==test', 'environment==production']
+ label_selector = ["environment==test", "environment==production"]
assert not LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert not LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert not LabelSelectorFilter(label_selector).isMatching(test_definition)
def test_set_based_requirement():
- label_selector = ['environment in (production)']
+ label_selector = ["environment in (production)"]
assert LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert not LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert not LabelSelectorFilter(label_selector).isMatching(test_definition)
- label_selector = ['environment in (production, test)']
+ label_selector = ["environment in (production, test)"]
assert LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert not LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert LabelSelectorFilter(label_selector).isMatching(test_definition)
- label_selector = ['environment notin (production)']
+ label_selector = ["environment notin (production)"]
assert not LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert LabelSelectorFilter(label_selector).isMatching(test_definition)
- label_selector = ['environment notin (production, test)']
+ label_selector = ["environment notin (production, test)"]
assert not LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert not LabelSelectorFilter(label_selector).isMatching(test_definition)
- label_selector = ['environment']
+ label_selector = ["environment"]
assert LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert not LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert LabelSelectorFilter(label_selector).isMatching(test_definition)
- label_selector = ['!environment']
+ label_selector = ["!environment"]
assert not LabelSelectorFilter(label_selector).isMatching(prod_definition)
assert LabelSelectorFilter(label_selector).isMatching(no_label_definition)
assert not LabelSelectorFilter(label_selector).isMatching(test_definition)
diff --git a/tox.ini b/tox.ini
index 7d71a8116d..198de64628 100644
--- a/tox.ini
+++ b/tox.ini
@@ -34,11 +34,21 @@ commands=
deps = git+https://github.com/ansible-network/collection_prep
commands = collection_prep_add_docs -p .
+[testenv:black]
+deps =
+ black==19.10b0
+
+commands =
+ black -v --check {toxinidir}/plugins {toxinidir}/tests
+
[testenv:linters]
-deps = yamllint
- flake8
+deps =
+ yamllint
+ flake8
+ black==19.10b0
commands =
+ black -v --check {toxinidir}/plugins {toxinidir}/tests
yamllint -s {toxinidir}
flake8 {toxinidir}