diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index cd4bdfee65c..1dc9ab7d715 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -7,3 +7,6 @@ d032de3b16eed11ea3a31cd3d96d78f7c46a2ee0
 e8f965fbf8154ea177c6622da149f2ae8533bd3c
 e938ca5f20651abc160ee6aba10014013d04dcc1
 eaa5e07b2866e05b6c7b5628ca92e9cb1142d008
+
+# Code reformatting
+d80f0a1dba05ce7da41e64afc275335b7154708a
diff --git a/antsibull-nox.toml b/antsibull-nox.toml
index 7446748d832..dbb5985e1de 100644
--- a/antsibull-nox.toml
+++ b/antsibull-nox.toml
@@ -24,6 +24,8 @@ run_isort = false
 run_black = false
 run_ruff_check = true
 ruff_check_config = "ruff.toml"
+run_ruff_format = true
+ruff_format_config = "ruff.toml"
 run_flake8 = false
 run_pylint = false
 run_yamllint = true
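Reviewer note: the reformatting commit is added to `.git-blame-ignore-revs` so `git blame` can skip it, and `run_ruff_format = true` makes the formatter part of the lint session. A minimal sketch of how a contributor would wire this up locally — assuming `ruff` is installed and `ruff.toml` is the config referenced above:

```python
# Illustrative helper, not part of the change itself.
import subprocess

# One-time per clone: let git blame skip the commits listed in the ignore file.
subprocess.run(
    ["git", "config", "blame.ignoreRevsFile", ".git-blame-ignore-revs"],
    check=True,
)

# Fail if any file would be reformatted; this mirrors what the new
# run_ruff_format/ruff_format_config settings enforce in the nox session.
subprocess.run(
    ["ruff", "format", "--check", "--config", "ruff.toml", "."],
    check=True,
)
```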
diff --git a/plugins/action/iptables_state.py b/plugins/action/iptables_state.py
index 5649ed6f475..371a732fcce 100644
--- a/plugins/action/iptables_state.py
+++ b/plugins/action/iptables_state.py
@@ -15,9 +15,8 @@ class ActionModule(ActionBase):
-    # Keep internal params away from user interactions
-    _VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait'))
+    _VALID_ARGS = frozenset(("path", "state", "table", "noflush", "counters", "modprobe", "ip_version", "wait"))
 
     DEFAULT_SUDOABLE = True
 
     @staticmethod
@@ -27,7 +26,8 @@ def msg_error__async_and_poll_not_zero(task_poll, task_async, max_timeout):
             "is set to 'restored'. To enable its rollback feature (that needs the "
             "module to run asynchronously on the remote), please set task attribute "
             f"'poll' (={task_poll}) to 0, and 'async' (={task_async}) to a value >2 and not greater than "
-            f"'ansible_timeout' (={max_timeout}) (recommended).")
+            f"'ansible_timeout' (={max_timeout}) (recommended)."
+        )
 
     @staticmethod
     def msg_warning__no_async_is_no_rollback(task_poll, task_async, max_timeout):
@@ -37,7 +37,8 @@
             "regain it before fixing firewall rules through a serial console, or any "
             f"other way except SSH. Please set task attribute 'poll' (={task_poll}) to 0, and "
             f"'async' (={task_async}) to a value >2 and not greater than 'ansible_timeout' (={max_timeout}) "
-            "(recommended).")
+            "(recommended)."
+        )
 
     @staticmethod
     def msg_warning__async_greater_than_timeout(task_poll, task_async, max_timeout):
@@ -46,44 +47,48 @@
             "but with settings that will lead this rollback to happen AFTER that the "
             "controller will reach its own timeout. Please set task attribute 'poll' "
             f"(={task_poll}) to 0, and 'async' (={task_async}) to a value >2 and not greater than "
-            f"'ansible_timeout' (={max_timeout}) (recommended).")
+            f"'ansible_timeout' (={max_timeout}) (recommended)."
+        )
 
     def _async_result(self, async_status_args, task_vars, timeout):
-        '''
+        """
        Retrieve results of the asynchronous task, and display them
        in place of the async wrapper results (those with the ansible_job_id key).
-        '''
+        """
         async_status = self._task.copy()
         async_status.args = async_status_args
-        async_status.action = 'ansible.builtin.async_status'
+        async_status.action = "ansible.builtin.async_status"
         async_status.async_val = 0
 
         async_action = self._shared_loader_obj.action_loader.get(
-            async_status.action, task=async_status, connection=self._connection,
-            play_context=self._play_context, loader=self._loader, templar=self._templar,
-            shared_loader_obj=self._shared_loader_obj)
-
-        if async_status.args['mode'] == 'cleanup':
+            async_status.action,
+            task=async_status,
+            connection=self._connection,
+            play_context=self._play_context,
+            loader=self._loader,
+            templar=self._templar,
+            shared_loader_obj=self._shared_loader_obj,
+        )
+
+        if async_status.args["mode"] == "cleanup":
             return async_action.run(task_vars=task_vars)
 
         # At least one iteration is required, even if timeout is 0.
         for dummy in range(max(1, timeout)):
             async_result = async_action.run(task_vars=task_vars)
-            if async_result.get('finished', 0) == 1:
+            if async_result.get("finished", 0) == 1:
                 break
             time.sleep(min(1, timeout))
 
         return async_result
 
     def run(self, tmp=None, task_vars=None):
-
         self._supports_check_mode = True
         self._supports_async = True
 
         result = super().run(tmp, task_vars)
         del tmp  # tmp no longer has any effect
 
-        if not result.get('skipped'):
-
+        if not result.get("skipped"):
             # FUTURE: better to let _execute_module calculate this internally?
             wrap_async = self._task.async_val and not self._connection.has_native_async
@@ -98,41 +103,38 @@ def run(self, tmp=None, task_vars=None):
             starter_cmd = None
             confirm_cmd = None
 
-            if module_args.get('state', None) == 'restored':
+            if module_args.get("state", None) == "restored":
                 if not wrap_async:
                     if not check_mode:
-                        display.warning(self.msg_error__async_and_poll_not_zero(
-                            task_poll,
-                            task_async,
-                            max_timeout))
+                        display.warning(self.msg_error__async_and_poll_not_zero(task_poll, task_async, max_timeout))
                     elif task_poll:
-                        raise AnsibleActionFail(self.msg_warning__no_async_is_no_rollback(
-                            task_poll,
-                            task_async,
-                            max_timeout))
+                        raise AnsibleActionFail(
+                            self.msg_warning__no_async_is_no_rollback(task_poll, task_async, max_timeout)
+                        )
                 else:
                     if task_async > max_timeout and not check_mode:
-                        display.warning(self.msg_warning__async_greater_than_timeout(
-                            task_poll,
-                            task_async,
-                            max_timeout))
+                        display.warning(
+                            self.msg_warning__async_greater_than_timeout(task_poll, task_async, max_timeout)
+                        )
 
                 # inject the async directory based on the shell option into the
                 # module args
-                async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
+                async_dir = self.get_shell_option("async_dir", default="~/.ansible_async")
 
                 # Bind the loop max duration to consistent values on both
                 # remote and local sides (if not the same, make the loop
                 # longer on the controller); and set a backup file path.
-                module_args['_timeout'] = task_async
-                module_args['_back'] = f'{async_dir}/iptables.state'
-                async_status_args = dict(mode='status')
+                module_args["_timeout"] = task_async
+                module_args["_back"] = f"{async_dir}/iptables.state"
+                async_status_args = dict(mode="status")
                 confirm_cmd = f"rm -f {module_args['_back']}"
                 starter_cmd = f"touch {module_args['_back']}.starter"
                 remaining_time = max(task_async, max_timeout)
 
             # do work!
-            result = merge_hash(result, self._execute_module(module_args=module_args, task_vars=task_vars, wrap_async=wrap_async))
+            result = merge_hash(
+                result, self._execute_module(module_args=module_args, task_vars=task_vars, wrap_async=wrap_async)
+            )
 
             # Then the 3-steps "go ahead or rollback":
             # 1. Catch early errors of the module (in asynchronous task) if any.
@@ -140,9 +142,9 @@ def run(self, tmp=None, task_vars=None):
             # 2. Reset connection to ensure a persistent one will not be reused.
             # 3. Confirm the restored state by removing the backup on the remote.
             #    Retrieve the results of the asynchronous task to return them.
-            if '_back' in module_args:
-                async_status_args['jid'] = result.get('ansible_job_id', None)
-                if async_status_args['jid'] is None:
+            if "_back" in module_args:
+                async_status_args["jid"] = result.get("ansible_job_id", None)
+                if async_status_args["jid"] is None:
                     raise AnsibleActionFail("Unable to get 'ansible_job_id'.")
 
                 # Catch early errors due to missing mandatory option, bad
@@ -156,7 +158,7 @@ def run(self, tmp=None, task_vars=None):
 
                 # As the main command is not yet executed on the target, here
                 # 'finished' means 'failed before main command be executed'.
-                if not result['finished']:
+                if not result["finished"]:
                     try:
                         self._connection.reset()
                     except AttributeError:
@@ -178,16 +180,16 @@ def run(self, tmp=None, task_vars=None):
                 result = merge_hash(result, self._async_result(async_status_args, task_vars, remaining_time))
 
             # Cleanup async related stuff and internal params
-            for key in ('ansible_job_id', 'results_file', 'started', 'finished'):
+            for key in ("ansible_job_id", "results_file", "started", "finished"):
                 if result.get(key):
                     del result[key]
 
-            if result.get('invocation', {}).get('module_args'):
-                for key in ('_back', '_timeout', '_async_dir', 'jid'):
-                    if result['invocation']['module_args'].get(key):
-                        del result['invocation']['module_args'][key]
+            if result.get("invocation", {}).get("module_args"):
+                for key in ("_back", "_timeout", "_async_dir", "jid"):
+                    if result["invocation"]["module_args"].get(key):
+                        del result["invocation"]["module_args"][key]
 
-            async_status_args['mode'] = 'cleanup'
+            async_status_args["mode"] = "cleanup"
             dummy = self._async_result(async_status_args, task_vars, 0)
 
         if not wrap_async:
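Reviewer note: `_async_result` above polls the `async_status` action until the job reports `finished`, sleeping at most one second between attempts. A standalone sketch of the same polling pattern, with a hypothetical `check_job` callable standing in for the async_status action:

```python
import time


def wait_for_job(check_job, timeout):
    """Poll check_job() until it reports finished, at most max(1, timeout) times."""
    result = {}
    # At least one iteration is required, even if timeout is 0.
    for _ in range(max(1, timeout)):
        result = check_job()
        if result.get("finished", 0) == 1:
            break
        # With timeout == 0 this sleeps 0 seconds, i.e. a single immediate check.
        time.sleep(min(1, timeout))
    return result
```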
"alpine": "", + "void": '-h +{delay_min} "{message}"', + "freebsd": '-p +{delay_sec}s "{message}"', + "linux": DEFAULT_SHUTDOWN_COMMAND_ARGS, + "macosx": '-h +{delay_min} "{message}"', + "openbsd": '-h +{delay_min} "{message}"', + "solaris": '-y -g {delay_sec} -i 5 "{message}"', + "sunos": '-y -g {delay_sec} -i 5 "{message}"', + "vmkernel": "-d {delay_sec}", + "aix": "-Fh", } def __init__(self, *args, **kwargs): @@ -62,7 +58,7 @@ def __init__(self, *args, **kwargs): @property def delay(self): - return self._check_delay('delay', self.DEFAULT_PRE_SHUTDOWN_DELAY) + return self._check_delay("delay", self.DEFAULT_PRE_SHUTDOWN_DELAY) def _check_delay(self, key, default): """Ensure that the value is positive or zero""" @@ -75,29 +71,28 @@ def _get_value_from_facts(self, variable_name, distribution, default_value): """Get dist+version specific args first, then distribution, then family, lastly use default""" attr = getattr(self, variable_name) value = attr.get( - distribution['name'] + distribution['version'], - attr.get( - distribution['name'], - attr.get( - distribution['family'], - getattr(self, default_value)))) + distribution["name"] + distribution["version"], + attr.get(distribution["name"], attr.get(distribution["family"], getattr(self, default_value))), + ) return value def get_distribution(self, task_vars): # FIXME: only execute the module if we don't already have the facts we need distribution = {} - display.debug(f'{self._task.action}: running setup module to get distribution') + display.debug(f"{self._task.action}: running setup module to get distribution") module_output = self._execute_module( - task_vars=task_vars, - module_name='ansible.legacy.setup', - module_args={'gather_subset': 'min'}) + task_vars=task_vars, module_name="ansible.legacy.setup", module_args={"gather_subset": "min"} + ) try: - if module_output.get('failed', False): - raise AnsibleError(f"Failed to determine system distribution. {fmt(module_output, 'module_stdout')}, {fmt(module_output, 'module_stderr')}") - distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower() - distribution['version'] = to_text( - module_output['ansible_facts']['ansible_distribution_version'].split('.')[0]) - distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower()) + if module_output.get("failed", False): + raise AnsibleError( + f"Failed to determine system distribution. 
{fmt(module_output, 'module_stdout')}, {fmt(module_output, 'module_stderr')}" + ) + distribution["name"] = module_output["ansible_facts"]["ansible_distribution"].lower() + distribution["version"] = to_text( + module_output["ansible_facts"]["ansible_distribution_version"].split(".")[0] + ) + distribution["family"] = to_text(module_output["ansible_facts"]["ansible_os_family"].lower()) display.debug(f"{self._task.action}: distribution: {distribution}") return distribution except KeyError as ke: @@ -105,22 +100,20 @@ def get_distribution(self, task_vars): def get_shutdown_command(self, task_vars, distribution): def find_command(command, find_search_paths): - display.debug(f'{self._task.action}: running find module looking in {find_search_paths} to get path for "{command}"') + display.debug( + f'{self._task.action}: running find module looking in {find_search_paths} to get path for "{command}"' + ) find_result = self._execute_module( task_vars=task_vars, # prevent collection search by calling with ansible.legacy (still allows library/ override of find) - module_name='ansible.legacy.find', - module_args={ - 'paths': find_search_paths, - 'patterns': [command], - 'file_type': 'any' - } + module_name="ansible.legacy.find", + module_args={"paths": find_search_paths, "patterns": [command], "file_type": "any"}, ) - return [x['path'] for x in find_result['files']] + return [x["path"] for x in find_result["files"]] - shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND') - default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin'] - search_paths = self._task.args.get('search_paths', default_search_paths) + shutdown_bin = self._get_value_from_facts("SHUTDOWN_COMMANDS", distribution, "DEFAULT_SHUTDOWN_COMMAND") + default_search_paths = ["/sbin", "/usr/sbin", "/usr/local/sbin"] + search_paths = self._task.args.get("search_paths", default_search_paths) # FIXME: switch all this to user arg spec validation methods when they are available # Convert bare strings to a list @@ -138,26 +131,28 @@ def find_command(command, find_search_paths): full_path = find_command(shutdown_bin, search_paths) # find the path to the shutdown command if not full_path: # if we could not find the shutdown command - # tell the user we will try with systemd - display.vvv(f'Unable to find command "{shutdown_bin}" in search paths: {search_paths}, will attempt a shutdown using systemd directly.') - systemctl_search_paths = ['/bin', '/usr/bin'] - full_path = find_command('systemctl', systemctl_search_paths) # find the path to the systemctl command + display.vvv( + f'Unable to find command "{shutdown_bin}" in search paths: {search_paths}, will attempt a shutdown using systemd directly.' + ) + systemctl_search_paths = ["/bin", "/usr/bin"] + full_path = find_command("systemctl", systemctl_search_paths) # find the path to the systemctl command if not full_path: # if we couldn't find systemctl raise AnsibleError( f'Could not find command "{shutdown_bin}" in search paths: {search_paths} or systemctl' - f' command in search paths: {systemctl_search_paths}, unable to shutdown.') # we give up here + f" command in search paths: {systemctl_search_paths}, unable to shutdown." 
+ ) # we give up here else: return f"{full_path[0]} poweroff" # done, since we cannot use args with systemd shutdown # systemd case taken care of, here we add args to the command - args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS') + args = self._get_value_from_facts("SHUTDOWN_COMMAND_ARGS", distribution, "DEFAULT_SHUTDOWN_COMMAND_ARGS") # Convert seconds to minutes. If less that 60, set it to 0. delay_sec = self.delay - shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE) + shutdown_message = self._task.args.get("msg", self.DEFAULT_SHUTDOWN_MESSAGE) af = args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message) - return f'{full_path[0]} {af}' + return f"{full_path[0]} {af}" def perform_shutdown(self, task_vars, distribution): result = {} @@ -169,23 +164,24 @@ def perform_shutdown(self, task_vars, distribution): display.vvv(f"{self._task.action}: shutting down server...") display.debug(f"{self._task.action}: shutting down server with command '{shutdown_command_exec}'") if self._play_context.check_mode: - shutdown_result['rc'] = 0 + shutdown_result["rc"] = 0 else: shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE) except AnsibleConnectionFailure as e: # If the connection is closed too quickly due to the system being shutdown, carry on - display.debug( - f'{self._task.action}: AnsibleConnectionFailure caught and handled: {e}') - shutdown_result['rc'] = 0 - - if shutdown_result['rc'] != 0: - result['failed'] = True - result['shutdown'] = False - result['msg'] = f"Shutdown command failed. Error was {fmt(shutdown_result, 'stdout')}, {fmt(shutdown_result, 'stderr')}" + display.debug(f"{self._task.action}: AnsibleConnectionFailure caught and handled: {e}") + shutdown_result["rc"] = 0 + + if shutdown_result["rc"] != 0: + result["failed"] = True + result["shutdown"] = False + result["msg"] = ( + f"Shutdown command failed. Error was {fmt(shutdown_result, 'stdout')}, {fmt(shutdown_result, 'stderr')}" + ) return result - result['failed'] = False - result['shutdown_command'] = shutdown_command_exec + result["failed"] = False + result["shutdown_command"] = shutdown_command_exec return result def run(self, tmp=None, task_vars=None): @@ -193,16 +189,16 @@ def run(self, tmp=None, task_vars=None): self._supports_async = True # If running with local connection, fail so we don't shutdown ourself - if self._connection.transport == 'local' and (not self._play_context.check_mode): - msg = f'Running {self._task.action} with local connection would shutdown the control node.' - return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg} + if self._connection.transport == "local" and (not self._play_context.check_mode): + msg = f"Running {self._task.action} with local connection would shutdown the control node." 
+ return {"changed": False, "elapsed": 0, "shutdown": False, "failed": True, "msg": msg} if task_vars is None: task_vars = {} result = super().run(tmp, task_vars) - if result.get('skipped', False) or result.get('failed', False): + if result.get("skipped", False) or result.get("failed", False): return result distribution = self.get_distribution(task_vars) @@ -210,12 +206,12 @@ def run(self, tmp=None, task_vars=None): # Initiate shutdown shutdown_result = self.perform_shutdown(task_vars, distribution) - if shutdown_result['failed']: + if shutdown_result["failed"]: result = shutdown_result return result - result['shutdown'] = True - result['changed'] = True - result['shutdown_command'] = shutdown_result['shutdown_command'] + result["shutdown"] = True + result["changed"] = True + result["shutdown_command"] = shutdown_result["shutdown_command"] return result diff --git a/plugins/become/doas.py b/plugins/become/doas.py index 5eb7be61291..868094a7aa8 100644 --- a/plugins/become/doas.py +++ b/plugins/become/doas.py @@ -94,23 +94,22 @@ class BecomeModule(BecomeBase): - - name = 'community.general.doas' + name = "community.general.doas" # messages for detecting prompted password issues - fail = ('Permission denied',) - missing = ('Authorization required',) + fail = ("Permission denied",) + missing = ("Authorization required",) # See https://github.com/ansible-collections/community.general/issues/9977, # https://github.com/ansible/ansible/pull/78111 pipelining = False def check_password_prompt(self, b_output): - ''' checks if the expected password prompt exists in b_output ''' + """checks if the expected password prompt exists in b_output""" # FIXME: more accurate would be: 'doas (%s@' % remote_user # however become plugins don't have that information currently - b_prompts = [to_bytes(p) for p in self.get_option('prompt_l10n')] or [br'doas \(', br'Password:'] + b_prompts = [to_bytes(p) for p in self.get_option("prompt_l10n")] or [rb"doas \(", rb"Password:"] b_prompt = b"|".join(b_prompts) return bool(re.match(b_prompt, b_output)) @@ -123,16 +122,16 @@ def build_become_command(self, cmd, shell): self.prompt = True - become_exe = self.get_option('become_exe') + become_exe = self.get_option("become_exe") - flags = self.get_option('become_flags') - if not self.get_option('become_pass') and '-n' not in flags: - flags += ' -n' + flags = self.get_option("become_flags") + if not self.get_option("become_pass") and "-n" not in flags: + flags += " -n" - become_user = self.get_option('become_user') - user = f'-u {become_user}' if become_user else '' + become_user = self.get_option("become_user") + user = f"-u {become_user}" if become_user else "" success_cmd = self._build_success_command(cmd, shell, noexe=True) - executable = getattr(shell, 'executable', shell.SHELL_FAMILY) + executable = getattr(shell, "executable", shell.SHELL_FAMILY) - return f'{become_exe} {flags} {user} {executable} -c {success_cmd}' + return f"{become_exe} {flags} {user} {executable} -c {success_cmd}" diff --git a/plugins/become/dzdo.py b/plugins/become/dzdo.py index eac5dbd82af..0a56ed49dda 100644 --- a/plugins/become/dzdo.py +++ b/plugins/become/dzdo.py @@ -74,11 +74,10 @@ class BecomeModule(BecomeBase): - - name = 'community.general.dzdo' + name = "community.general.dzdo" # messages for detecting prompted password issues - fail = ('Sorry, try again.',) + fail = ("Sorry, try again.",) def build_become_command(self, cmd, shell): super().build_become_command(cmd, shell) @@ -86,14 +85,14 @@ def build_become_command(self, cmd, shell): if not 
diff --git a/plugins/become/doas.py b/plugins/become/doas.py
index 5eb7be61291..868094a7aa8 100644
--- a/plugins/become/doas.py
+++ b/plugins/become/doas.py
@@ -94,23 +94,22 @@
 
 
 class BecomeModule(BecomeBase):
-
-    name = 'community.general.doas'
+    name = "community.general.doas"
 
     # messages for detecting prompted password issues
-    fail = ('Permission denied',)
-    missing = ('Authorization required',)
+    fail = ("Permission denied",)
+    missing = ("Authorization required",)
 
     # See https://github.com/ansible-collections/community.general/issues/9977,
     # https://github.com/ansible/ansible/pull/78111
     pipelining = False
 
     def check_password_prompt(self, b_output):
-        ''' checks if the expected password prompt exists in b_output '''
+        """checks if the expected password prompt exists in b_output"""
 
         # FIXME: more accurate would be: 'doas (%s@' % remote_user
         # however become plugins don't have that information currently
-        b_prompts = [to_bytes(p) for p in self.get_option('prompt_l10n')] or [br'doas \(', br'Password:']
+        b_prompts = [to_bytes(p) for p in self.get_option("prompt_l10n")] or [rb"doas \(", rb"Password:"]
         b_prompt = b"|".join(b_prompts)
 
         return bool(re.match(b_prompt, b_output))
@@ -123,16 +122,16 @@ def build_become_command(self, cmd, shell):
 
         self.prompt = True
 
-        become_exe = self.get_option('become_exe')
+        become_exe = self.get_option("become_exe")
 
-        flags = self.get_option('become_flags')
-        if not self.get_option('become_pass') and '-n' not in flags:
-            flags += ' -n'
+        flags = self.get_option("become_flags")
+        if not self.get_option("become_pass") and "-n" not in flags:
+            flags += " -n"
 
-        become_user = self.get_option('become_user')
-        user = f'-u {become_user}' if become_user else ''
+        become_user = self.get_option("become_user")
+        user = f"-u {become_user}" if become_user else ""
 
         success_cmd = self._build_success_command(cmd, shell, noexe=True)
-        executable = getattr(shell, 'executable', shell.SHELL_FAMILY)
+        executable = getattr(shell, "executable", shell.SHELL_FAMILY)
 
-        return f'{become_exe} {flags} {user} {executable} -c {success_cmd}'
+        return f"{become_exe} {flags} {user} {executable} -c {success_cmd}"
diff --git a/plugins/become/dzdo.py b/plugins/become/dzdo.py
index eac5dbd82af..0a56ed49dda 100644
--- a/plugins/become/dzdo.py
+++ b/plugins/become/dzdo.py
@@ -74,11 +74,10 @@
 
 
 class BecomeModule(BecomeBase):
-
-    name = 'community.general.dzdo'
+    name = "community.general.dzdo"
 
     # messages for detecting prompted password issues
-    fail = ('Sorry, try again.',)
+    fail = ("Sorry, try again.",)
 
     def build_become_command(self, cmd, shell):
         super().build_become_command(cmd, shell)
@@ -86,14 +85,14 @@ def build_become_command(self, cmd, shell):
         if not cmd:
             return cmd
 
-        becomecmd = self.get_option('become_exe')
+        becomecmd = self.get_option("become_exe")
 
-        flags = self.get_option('become_flags')
-        if self.get_option('become_pass'):
-            self.prompt = f'[dzdo via ansible, key={self._id}] password:'
-            flags = f"{flags.replace('-n', '')} -p \"{self.prompt}\""
+        flags = self.get_option("become_flags")
+        if self.get_option("become_pass"):
+            self.prompt = f"[dzdo via ansible, key={self._id}] password:"
+            flags = f'{flags.replace("-n", "")} -p "{self.prompt}"'
 
-        become_user = self.get_option('become_user')
-        user = f'-u {become_user}' if become_user else ''
+        become_user = self.get_option("become_user")
+        user = f"-u {become_user}" if become_user else ""
 
         return f"{becomecmd} {flags} {user} {self._build_success_command(cmd, shell)}"
diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py
index 1f0788944fa..d1ecc56f18d 100644
--- a/plugins/become/ksu.py
+++ b/plugins/become/ksu.py
@@ -92,23 +92,21 @@
 
 
 class BecomeModule(BecomeBase):
-
-    name = 'community.general.ksu'
+    name = "community.general.ksu"
 
     # messages for detecting prompted password issues
-    fail = ('Password incorrect',)
-    missing = ('No password given',)
+    fail = ("Password incorrect",)
+    missing = ("No password given",)
 
     def check_password_prompt(self, b_output):
-        ''' checks if the expected password prompt exists in b_output '''
+        """checks if the expected password prompt exists in b_output"""
 
-        prompts = self.get_option('prompt_l10n') or ["Kerberos password for .*@.*:"]
+        prompts = self.get_option("prompt_l10n") or ["Kerberos password for .*@.*:"]
         b_prompt = b"|".join(to_bytes(p) for p in prompts)
 
         return bool(re.match(b_prompt, b_output))
 
     def build_become_command(self, cmd, shell):
-
         super().build_become_command(cmd, shell)
 
         # Prompt handling for ``ksu`` is more complicated, this
@@ -118,8 +116,8 @@ def build_become_command(self, cmd, shell):
         if not cmd:
             return cmd
 
-        exe = self.get_option('become_exe')
+        exe = self.get_option("become_exe")
 
-        flags = self.get_option('become_flags')
-        user = self.get_option('become_user')
-        return f'{exe} {user} {flags} -e {self._build_success_command(cmd, shell)} '
+        flags = self.get_option("become_flags")
+        user = self.get_option("become_user")
+        return f"{exe} {user} {flags} -e {self._build_success_command(cmd, shell)} "
diff --git a/plugins/become/machinectl.py b/plugins/become/machinectl.py
index 40fc72c216f..9130ca146d7 100644
--- a/plugins/become/machinectl.py
+++ b/plugins/become/machinectl.py
@@ -96,16 +96,15 @@
 from ansible.module_utils.common.text.converters import to_bytes
 
 
-ansi_color_codes = re_compile(to_bytes(r'\x1B\[[0-9;]+m'))
+ansi_color_codes = re_compile(to_bytes(r"\x1B\[[0-9;]+m"))
 
 
 class BecomeModule(BecomeBase):
+    name = "community.general.machinectl"
 
-    name = 'community.general.machinectl'
-
-    prompt = 'Password: '
-    fail = ('==== AUTHENTICATION FAILED ====',)
-    success = ('==== AUTHENTICATION COMPLETE ====',)
+    prompt = "Password: "
+    fail = ("==== AUTHENTICATION FAILED ====",)
+    success = ("==== AUTHENTICATION COMPLETE ====",)
     require_tty = True  # see https://github.com/ansible-collections/community.general/issues/6932
 
     # See https://github.com/ansible/ansible/issues/81254,
@@ -122,11 +121,11 @@ def build_become_command(self, cmd, shell):
         if not cmd:
             return cmd
 
-        become = self.get_option('become_exe')
+        become = self.get_option("become_exe")
 
-        flags = self.get_option('become_flags')
-        user = self.get_option('become_user')
-        return f'{become} -q shell {flags} {user}@ {self._build_success_command(cmd, shell)}'
+        flags = self.get_option("become_flags")
+        user = self.get_option("become_user")
+        return f"{become} -q shell {flags} {user}@ {self._build_success_command(cmd, shell)}"
 
     def check_success(self, b_output):
         b_output = self.remove_ansi_codes(b_output)
diff --git a/plugins/become/pbrun.py b/plugins/become/pbrun.py
index b7af868d8f4..caa820da9da 100644
--- a/plugins/become/pbrun.py
+++ b/plugins/become/pbrun.py
@@ -86,10 +86,9 @@
 
 
 class BecomeModule(BecomeBase):
+    name = "community.general.pbrun"
 
-    name = 'community.general.pbrun'
-
-    prompt = 'Password:'
+    prompt = "Password:"
 
     def build_become_command(self, cmd, shell):
         super().build_become_command(cmd, shell)
@@ -97,11 +96,11 @@ def build_become_command(self, cmd, shell):
         if not cmd:
             return cmd
 
-        become_exe = self.get_option('become_exe')
+        become_exe = self.get_option("become_exe")
 
-        flags = self.get_option('become_flags')
-        become_user = self.get_option('become_user')
-        user = f'-u {become_user}' if become_user else ''
-        noexe = not self.get_option('wrap_exe')
+        flags = self.get_option("become_flags")
+        become_user = self.get_option("become_user")
+        user = f"-u {become_user}" if become_user else ""
+        noexe = not self.get_option("wrap_exe")
 
         return f"{become_exe} {flags} {user} {self._build_success_command(cmd, shell, noexe=noexe)}"
diff --git a/plugins/become/pfexec.py b/plugins/become/pfexec.py
index 7c242c8d76b..8863b946d73 100644
--- a/plugins/become/pfexec.py
+++ b/plugins/become/pfexec.py
@@ -91,8 +91,7 @@
 
 
 class BecomeModule(BecomeBase):
-
-    name = 'community.general.pfexec'
+    name = "community.general.pfexec"
 
     def build_become_command(self, cmd, shell):
         super().build_become_command(cmd, shell)
@@ -100,8 +99,8 @@ def build_become_command(self, cmd, shell):
         if not cmd:
             return cmd
 
-        exe = self.get_option('become_exe')
+        exe = self.get_option("become_exe")
 
-        flags = self.get_option('become_flags')
-        noexe = not self.get_option('wrap_exe')
-        return f'{exe} {flags} {self._build_success_command(cmd, shell, noexe=noexe)}'
+        flags = self.get_option("become_flags")
+        noexe = not self.get_option("wrap_exe")
+        return f"{exe} {flags} {self._build_success_command(cmd, shell, noexe=noexe)}"
diff --git a/plugins/become/pmrun.py b/plugins/become/pmrun.py
index 6f2e4abab0b..0f29396a52f 100644
--- a/plugins/become/pmrun.py
+++ b/plugins/become/pmrun.py
@@ -63,9 +63,8 @@
 
 
 class BecomeModule(BecomeBase):
-
-    name = 'community.general.pmrun'
-    prompt = 'Enter UPM user password:'
+    name = "community.general.pmrun"
+    prompt = "Enter UPM user password:"
 
     def build_become_command(self, cmd, shell):
         super().build_become_command(cmd, shell)
@@ -73,7 +72,7 @@ def build_become_command(self, cmd, shell):
         if not cmd:
             return cmd
 
-        become = self.get_option('become_exe')
+        become = self.get_option("become_exe")
 
-        flags = self.get_option('become_flags')
-        return f'{become} {flags} {shlex_quote(self._build_success_command(cmd, shell))}'
+        flags = self.get_option("become_flags")
+        return f"{become} {flags} {shlex_quote(self._build_success_command(cmd, shell))}"
diff --git a/plugins/become/run0.py b/plugins/become/run0.py
index 4362d53ebfa..d2780337a8b 100644
--- a/plugins/become/run0.py
+++ b/plugins/become/run0.py
@@ -85,15 +85,12 @@
 
 
 class BecomeModule(BecomeBase):
-
     name = "community.general.run0"
 
     prompt = "Password: "
     fail = ("==== AUTHENTICATION FAILED ====",)
     success = ("==== AUTHENTICATION COMPLETE ====",)
-    require_tty = (
-        True  # see https://github.com/ansible-collections/community.general/issues/6932
-    )
+    require_tty = True  # see https://github.com/ansible-collections/community.general/issues/6932
 
     @staticmethod
     def remove_ansi_codes(line):
@@ -109,9 +106,7 @@ def build_become_command(self, cmd, shell):
         flags = self.get_option("become_flags")
         user = self.get_option("become_user")
 
-        return (
-            f"{become} --user={user} {flags} {self._build_success_command(cmd, shell)}"
-        )
+        return f"{become} --user={user} {flags} {self._build_success_command(cmd, shell)}"
 
     def check_success(self, b_output):
         b_output = self.remove_ansi_codes(b_output)
diff --git a/plugins/become/sesu.py b/plugins/become/sesu.py
index 1b2b3f35d9b..a6c47c8c331 100644
--- a/plugins/become/sesu.py
+++ b/plugins/become/sesu.py
@@ -75,11 +75,10 @@
 
 
 class BecomeModule(BecomeBase):
+    name = "community.general.sesu"
 
-    name = 'community.general.sesu'
-
-    prompt = 'Please enter your password:'
-    fail = missing = ('Sorry, try again with sesu.',)
+    prompt = "Please enter your password:"
+    fail = missing = ("Sorry, try again with sesu.",)
 
     def build_become_command(self, cmd, shell):
         super().build_become_command(cmd, shell)
@@ -87,8 +86,8 @@ def build_become_command(self, cmd, shell):
         if not cmd:
             return cmd
 
-        become = self.get_option('become_exe')
+        become = self.get_option("become_exe")
 
-        flags = self.get_option('become_flags')
-        user = self.get_option('become_user')
-        return f'{become} {flags} {user} -c {self._build_success_command(cmd, shell)}'
+        flags = self.get_option("become_flags")
+        user = self.get_option("become_user")
+        return f"{become} {flags} {user} -c {self._build_success_command(cmd, shell)}"
diff --git a/plugins/become/sudosu.py b/plugins/become/sudosu.py
index 8e353ca7a83..0f1902c9b46 100644
--- a/plugins/become/sudosu.py
+++ b/plugins/become/sudosu.py
@@ -79,12 +79,11 @@
 
 
 class BecomeModule(BecomeBase):
-
-    name = 'community.general.sudosu'
+    name = "community.general.sudosu"
 
     # messages for detecting prompted password issues
-    fail = ('Sorry, try again.',)
-    missing = ('Sorry, a password is required to run sudo', 'sudo: a password is required')
+    fail = ("Sorry, try again.",)
+    missing = ("Sorry, a password is required to run sudo", "sudo: a password is required")
 
     def build_become_command(self, cmd, shell):
         super().build_become_command(cmd, shell)
@@ -92,21 +91,21 @@ def build_become_command(self, cmd, shell):
         if not cmd:
             return cmd
 
-        becomecmd = 'sudo'
+        becomecmd = "sudo"
 
-        flags = self.get_option('become_flags') or ''
-        prompt = ''
-        if self.get_option('become_pass'):
-            self.prompt = f'[sudo via ansible, key={self._id}] password:'
+        flags = self.get_option("become_flags") or ""
+        prompt = ""
+        if self.get_option("become_pass"):
+            self.prompt = f"[sudo via ansible, key={self._id}] password:"
             if flags:  # this could be simplified, but kept as is for now for backwards string matching
-                flags = flags.replace('-n', '')
+                flags = flags.replace("-n", "")
             prompt = f'-p "{self.prompt}"'
 
-        user = self.get_option('become_user') or ''
+        user = self.get_option("become_user") or ""
         if user:
-            user = f'{user}'
+            user = f"{user}"
 
-        if self.get_option('alt_method'):
+        if self.get_option("alt_method"):
             return f"{becomecmd} {flags} {prompt} su -l {user} -c {self._build_success_command(cmd, shell, True)}"
         else:
             return f"{becomecmd} {flags} {prompt} su -l {user} {self._build_success_command(cmd, shell)}"
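Reviewer note: every become plugin above follows the same recipe in `build_become_command`: read `become_exe`, `become_flags`, and `become_user` from the plugin options, then wrap the original command. A schematic, plugin-free sketch of that recipe — the `options` dict is a stand-in for `BecomeBase.get_option()`, and the plain pass-through of `cmd` replaces the real `self._build_success_command()` wrapping:

```python
def build_become_command(cmd, options):
    """Schematic sketch of the common become-plugin command recipe."""
    if not cmd:
        return cmd
    become_exe = options.get("become_exe", "doas")
    flags = options.get("become_flags", "")
    become_user = options.get("become_user", "")
    user = f"-u {become_user}" if become_user else ""
    # Real plugins wrap cmd so that a success marker can be detected in the
    # output stream; the raw command is shown here for brevity.
    return " ".join(part for part in (become_exe, flags, user, cmd) if part)


print(build_become_command("whoami", {"become_user": "root", "become_flags": "-n"}))
# -> "doas -n -u root whoami"
```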
diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py
index 75b405da1aa..a2151c1d44e 100644
--- a/plugins/cache/memcached.py
+++ b/plugins/cache/memcached.py
@@ -59,6 +59,7 @@
 
 try:
     import memcache
+
     HAS_MEMCACHE = True
 except ImportError:
     HAS_MEMCACHE = False
@@ -75,7 +76,7 @@ class ProxyClientPool:
     """
 
     def __init__(self, *args, **kwargs):
-        self.max_connections = kwargs.pop('max_connections', 1024)
+        self.max_connections = kwargs.pop("max_connections", 1024)
         self.connection_args = args
         self.connection_kwargs = kwargs
         self.reset()
@@ -123,6 +124,7 @@ def disconnect_all(self):
     def __getattr__(self, name):
         def wrapped(*args, **kwargs):
             return self._proxy_client(name, *args, **kwargs)
+
         return wrapped
 
     def _proxy_client(self, name, *args, **kwargs):
@@ -139,7 +141,8 @@ class CacheModuleKeys(MutableSet):
     A set subclass that keeps track of insertion time and persists
     the set in memcached.
     """
-    PREFIX = 'ansible_cache_keys'
+
+    PREFIX = "ansible_cache_keys"
 
     def __init__(self, cache, *args, **kwargs):
         self._cache = cache
@@ -171,15 +174,14 @@ def remove_by_timerange(self, s_min, s_max):
 
 
 class CacheModule(BaseCacheModule):
-
     def __init__(self, *args, **kwargs):
-        connection = ['127.0.0.1:11211']
+        connection = ["127.0.0.1:11211"]
 
         super().__init__(*args, **kwargs)
-        if self.get_option('_uri'):
-            connection = self.get_option('_uri')
-        self._timeout = self.get_option('_timeout')
-        self._prefix = self.get_option('_prefix')
+        if self.get_option("_uri"):
+            connection = self.get_option("_uri")
+        self._timeout = self.get_option("_timeout")
+        self._prefix = self.get_option("_prefix")
 
         if not HAS_MEMCACHE:
             raise AnsibleError("python-memcached is required for the memcached fact cache")
diff --git a/plugins/cache/pickle.py b/plugins/cache/pickle.py
index 6c053138c82..9ea9b05bb03 100644
--- a/plugins/cache/pickle.py
+++ b/plugins/cache/pickle.py
@@ -51,14 +51,15 @@ class CacheModule(BaseFileCacheModule):
     """
     A caching module backed by pickle files.
     """
+
     _persistent = False  # prevent unnecessary JSON serialization and key munging
 
     def _load(self, filepath):
         # Pickle is a binary format
-        with open(filepath, 'rb') as f:
-            return pickle.load(f, encoding='bytes')
+        with open(filepath, "rb") as f:
+            return pickle.load(f, encoding="bytes")
 
     def _dump(self, value, filepath):
-        with open(filepath, 'wb') as f:
+        with open(filepath, "wb") as f:
             # Use pickle protocol 2 which is compatible with Python 2.3+.
             pickle.dump(value, f, protocol=2)
diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py
index b8f6021c793..f075f4720d0 100644
--- a/plugins/cache/redis.py
+++ b/plugins/cache/redis.py
@@ -77,6 +77,7 @@
 
 try:
     from redis import StrictRedis, VERSION
+
     HAS_REDIS = True
 except ImportError:
     HAS_REDIS = False
@@ -93,32 +94,35 @@ class CacheModule(BaseCacheModule):
     to expire keys.  This mechanism is used or a pattern matched 'scan' for performance.
     """
+
     _sentinel_service_name = None
-    re_url_conn = re.compile(r'^([^:]+|\[[^]]+\]):(\d+):(\d+)(?::(.*))?$')
-    re_sent_conn = re.compile(r'^(.*):(\d+)$')
+    re_url_conn = re.compile(r"^([^:]+|\[[^]]+\]):(\d+):(\d+)(?::(.*))?$")
+    re_sent_conn = re.compile(r"^(.*):(\d+)$")
 
     def __init__(self, *args, **kwargs):
-        uri = ''
+        uri = ""
 
         super().__init__(*args, **kwargs)
-        if self.get_option('_uri'):
-            uri = self.get_option('_uri')
-        self._timeout = float(self.get_option('_timeout'))
-        self._prefix = self.get_option('_prefix')
-        self._keys_set = self.get_option('_keyset_name')
-        self._sentinel_service_name = self.get_option('_sentinel_service_name')
+        if self.get_option("_uri"):
+            uri = self.get_option("_uri")
+        self._timeout = float(self.get_option("_timeout"))
+        self._prefix = self.get_option("_prefix")
+        self._keys_set = self.get_option("_keyset_name")
+        self._sentinel_service_name = self.get_option("_sentinel_service_name")
 
         if not HAS_REDIS:
-            raise AnsibleError("The 'redis' python module (version 2.4.5 or newer) is required for the redis fact cache, 'pip install redis'")
+            raise AnsibleError(
+                "The 'redis' python module (version 2.4.5 or newer) is required for the redis fact cache, 'pip install redis'"
+            )
 
         self._cache = {}
         kw = {}
 
         # tls connection
-        tlsprefix = 'tls://'
+        tlsprefix = "tls://"
         if uri.startswith(tlsprefix):
-            kw['ssl'] = True
-            uri = uri[len(tlsprefix):]
+            kw["ssl"] = True
+            uri = uri[len(tlsprefix) :]
 
         # redis sentinel connection
         if self._sentinel_service_name:
@@ -128,7 +132,7 @@ def __init__(self, *args, **kwargs):
             connection = self._parse_connection(self.re_url_conn, uri)
             self._db = StrictRedis(*connection, **kw)
 
-        display.vv(f'Redis connection: {self._db}')
+        display.vv(f"Redis connection: {self._db}")
 
     @staticmethod
     def _parse_connection(re_patt, uri):
@@ -146,33 +150,32 @@ def _get_sentinel_connection(self, uri, kw):
         except ImportError:
             raise AnsibleError("The 'redis' python module (version 2.9.0 or newer) is required to use redis sentinel.")
 
-        if ';' not in uri:
-            raise AnsibleError('_uri does not have sentinel syntax.')
+        if ";" not in uri:
+            raise AnsibleError("_uri does not have sentinel syntax.")
 
         # format: "localhost:26379;localhost2:26379;0:changeme"
-        connections = uri.split(';')
+        connections = uri.split(";")
         connection_args = connections.pop(-1)
         if len(connection_args) > 0:  # handle if no db nr is given
-            connection_args = connection_args.split(':')
-            kw['db'] = connection_args.pop(0)
+            connection_args = connection_args.split(":")
+            kw["db"] = connection_args.pop(0)
             try:
-                kw['password'] = connection_args.pop(0)
+                kw["password"] = connection_args.pop(0)
             except IndexError:
                 pass  # password is optional
 
         sentinels = [self._parse_connection(self.re_sent_conn, shost) for shost in connections]
-        display.vv(f'\nUsing redis sentinels: {sentinels}')
+        display.vv(f"\nUsing redis sentinels: {sentinels}")
         scon = Sentinel(sentinels, **kw)
         try:
             return scon.master_for(self._sentinel_service_name, socket_timeout=0.2)
         except Exception as exc:
-            raise AnsibleError(f'Could not connect to redis sentinel: {exc}')
+            raise AnsibleError(f"Could not connect to redis sentinel: {exc}")
 
     def _make_key(self, key):
         return self._prefix + key
 
     def get(self, key):
-
         if key not in self._cache:
             value = self._db.get(self._make_key(key))
             # guard against the key not being removed from the zset;
@@ -186,7 +189,6 @@ def get(self, key):
         return self._cache.get(key)
 
     def set(self, key, value):
-
         value2 = json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4)
         if self._timeout > 0:  # a timeout of 0 is handled as meaning 'never expire'
             self._db.setex(self._make_key(key), int(self._timeout), value2)
@@ -210,7 +212,7 @@ def keys(self):
 
     def contains(self, key):
         self._expire_keys()
-        return (self._db.zrank(self._keys_set, key) is not None)
+        return self._db.zrank(self._keys_set, key) is not None
 
     def delete(self, key):
         if key in self._cache:
diff --git a/plugins/cache/yaml.py b/plugins/cache/yaml.py
index 52cbf887dec..f91f3fe0b46 100644
--- a/plugins/cache/yaml.py
+++ b/plugins/cache/yaml.py
@@ -58,9 +58,9 @@ class CacheModule(BaseFileCacheModule):
     """
 
     def _load(self, filepath):
-        with open(os.path.abspath(filepath), 'r', encoding='utf-8') as f:
+        with open(os.path.abspath(filepath), "r", encoding="utf-8") as f:
             return AnsibleLoader(f).get_single_data()
 
     def _dump(self, value, filepath):
-        with open(os.path.abspath(filepath), 'w', encoding='utf-8') as f:
+        with open(os.path.abspath(filepath), "w", encoding="utf-8") as f:
             yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False)
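Reviewer note: the redis cache accepts `host:port:db[:password]` URIs plus a semicolon-separated sentinel form, and the two regexes in the diff do the splitting. A runnable illustration using the exact same patterns:

```python
import re

re_url_conn = re.compile(r"^([^:]+|\[[^]]+\]):(\d+):(\d+)(?::(.*))?$")
re_sent_conn = re.compile(r"^(.*):(\d+)$")

# Plain connection URI: host, port, db, optional password.
print(re_url_conn.match("localhost:6379:0:changeme").groups())
# -> ('localhost', '6379', '0', 'changeme')

# Sentinel URI, format: "localhost:26379;localhost2:26379;0:changeme" —
# the last segment carries the db number and optional password.
uri = "localhost:26379;localhost2:26379;0:changeme"
*hosts, db_and_password = uri.split(";")
print([re_sent_conn.match(h).groups() for h in hosts])
# -> [('localhost', '26379'), ('localhost2', '26379')]
```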
""" + CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'aggregate' - CALLBACK_NAME = 'community.general.context_demo' + CALLBACK_TYPE = "aggregate" + CALLBACK_NAME = "community.general.context_demo" CALLBACK_NEEDS_WHITELIST = True def __init__(self, *args, **kwargs): @@ -40,11 +41,11 @@ def v2_on_any(self, *args, **kwargs): self._display.display(" --- ARGS ") for i, a in enumerate(args): - self._display.display(f' {i}: {a}') + self._display.display(f" {i}: {a}") self._display.display(" --- KWARGS ") for k in kwargs: - self._display.display(f' {k}: {kwargs[k]}') + self._display.display(f" {k}: {kwargs[k]}") def v2_playbook_on_play_start(self, play): self.play = play diff --git a/plugins/callback/counter_enabled.py b/plugins/callback/counter_enabled.py index 71ad915640c..b02429581a6 100644 --- a/plugins/callback/counter_enabled.py +++ b/plugins/callback/counter_enabled.py @@ -1,9 +1,9 @@ # Copyright (c) 2018, Ivan Aragones Muniesa # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -''' - Counter enabled Ansible callback plugin (See DOCUMENTATION for more information) -''' +""" +Counter enabled Ansible callback plugin (See DOCUMENTATION for more information) +""" from __future__ import annotations @@ -29,15 +29,14 @@ class CallbackModule(CallbackBase): - - ''' + """ This is the default callback interface, which simply prints messages to stdout when new callback events are received. - ''' + """ CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'community.general.counter_enabled' + CALLBACK_TYPE = "stdout" + CALLBACK_NAME = "community.general.counter_enabled" _task_counter = 1 _task_total = 0 @@ -55,11 +54,7 @@ def __init__(self): def _all_vars(self, host=None, task=None): # host and task need to be specified in case 'magic variables' (host vars, group vars, etc) # need to be loaded as well - return self._play.get_variable_manager().get_vars( - play=self._play, - host=host, - task=task - ) + return self._play.get_variable_manager().get_vars(play=self._play, host=host, task=task) def v2_playbook_on_start(self, playbook): self._playbook = playbook @@ -77,8 +72,8 @@ def v2_playbook_on_play_start(self, play): self._play = play self._previous_batch_total = self._current_batch_total - self._current_batch_total = self._previous_batch_total + len(self._all_vars()['vars']['ansible_play_batch']) - self._host_total = len(self._all_vars()['vars']['ansible_play_hosts_all']) + self._current_batch_total = self._previous_batch_total + len(self._all_vars()["vars"]["ansible_play_batch"]) + self._host_total = len(self._all_vars()["vars"]["ansible_play_hosts_all"]) self._task_total = len(self._play.get_tasks()[0]) self._task_counter = 1 @@ -93,39 +88,39 @@ def v2_playbook_on_stats(self, stats): f"{hostcolor(host, stat)} : {colorize('ok', stat['ok'], C.COLOR_OK)} {colorize('changed', stat['changed'], C.COLOR_CHANGED)} " f"{colorize('unreachable', stat['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', stat['failures'], C.COLOR_ERROR)} " f"{colorize('rescued', stat['rescued'], C.COLOR_OK)} {colorize('ignored', stat['ignored'], C.COLOR_WARN)}", - screen_only=True + screen_only=True, ) self._display.display( f"{hostcolor(host, stat, False)} : {colorize('ok', stat['ok'], None)} {colorize('changed', stat['changed'], None)} " f"{colorize('unreachable', stat['unreachable'], None)} {colorize('failed', stat['failures'], None)} " f"{colorize('rescued', stat['rescued'], None)} {colorize('ignored', 
stat['ignored'], None)}", - log_only=True + log_only=True, ) self._display.display("", screen_only=True) # print custom stats - if self._plugin_options.get('show_custom_stats', C.SHOW_CUSTOM_STATS) and stats.custom: + if self._plugin_options.get("show_custom_stats", C.SHOW_CUSTOM_STATS) and stats.custom: # fallback on constants for inherited plugins missing docs self._display.banner("CUSTOM STATS: ") # per host # TODO: come up with 'pretty format' for k in sorted(stats.custom.keys()): - if k == '_run': + if k == "_run": continue - _custom_stats = self._dump_results(stats.custom[k], indent=1).replace('\n', '') - self._display.display(f'\t{k}: {_custom_stats}') + _custom_stats = self._dump_results(stats.custom[k], indent=1).replace("\n", "") + self._display.display(f"\t{k}: {_custom_stats}") # print per run custom stats - if '_run' in stats.custom: + if "_run" in stats.custom: self._display.display("", screen_only=True) - _custom_stats_run = self._dump_results(stats.custom['_run'], indent=1).replace('\n', '') - self._display.display(f'\tRUN: {_custom_stats_run}') + _custom_stats_run = self._dump_results(stats.custom["_run"], indent=1).replace("\n", "") + self._display.display(f"\tRUN: {_custom_stats_run}") self._display.display("", screen_only=True) def v2_playbook_on_task_start(self, task, is_conditional): - args = '' + args = "" # args can be specified as no_log in several places: in the task or in # the argument spec. We can check whether the task is no_log but the # argument spec can't be because that is only run on the target @@ -135,8 +130,8 @@ def v2_playbook_on_task_start(self, task, is_conditional): # that they can secure this if they feel that their stdout is insecure # (shoulder surfing, logging stdout straight to a file, etc). if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT: - args = ', '.join(('{k}={v}' for k, v in task.args.items())) - args = f' {args}' + args = ", ".join(("{k}={v}" for k, v in task.args.items())) + args = f" {args}" self._display.banner(f"TASK {self._task_counter}/{self._task_total} [{task.get_name().strip()}{args}]") if self._display.verbosity >= 2: path = task.get_path() @@ -146,17 +141,16 @@ def v2_playbook_on_task_start(self, task, is_conditional): self._task_counter += 1 def v2_runner_on_ok(self, result): - self._host_counter += 1 - delegated_vars = result._result.get('_ansible_delegated_vars', None) + delegated_vars = result._result.get("_ansible_delegated_vars", None) - if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid: + if self._play.strategy == "free" and self._last_task_banner != result._task._uuid: self._print_task_banner(result._task) if isinstance(result._task, TaskInclude): return - elif result._result.get('changed', False): + elif result._result.get("changed", False): if delegated_vars: msg = f"changed: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> {delegated_vars['ansible_host']}]" else: @@ -171,7 +165,7 @@ def v2_runner_on_ok(self, result): self._handle_warnings(result._result) - if result._task.loop and 'results' in result._result: + if result._task.loop and "results" in result._result: self._process_items(result) else: self._clean_results(result._result, result._task.action) @@ -181,19 +175,18 @@ def v2_runner_on_ok(self, result): self._display.display(msg, color=color) def v2_runner_on_failed(self, result, ignore_errors=False): - self._host_counter += 1 - delegated_vars = result._result.get('_ansible_delegated_vars', None) + delegated_vars = 
result._result.get("_ansible_delegated_vars", None) self._clean_results(result._result, result._task.action) - if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid: + if self._play.strategy == "free" and self._last_task_banner != result._task._uuid: self._print_task_banner(result._task) self._handle_exception(result._result) self._handle_warnings(result._result) - if result._task.loop and 'results' in result._result: + if result._task.loop and "results" in result._result: self._process_items(result) else: @@ -201,12 +194,12 @@ def v2_runner_on_failed(self, result, ignore_errors=False): self._display.display( f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> " f"{delegated_vars['ansible_host']}]: FAILED! => {self._dump_results(result._result)}", - color=C.COLOR_ERROR + color=C.COLOR_ERROR, ) else: self._display.display( f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()}]: FAILED! => {self._dump_results(result._result)}", - color=C.COLOR_ERROR + color=C.COLOR_ERROR, ) if ignore_errors: @@ -215,14 +208,15 @@ def v2_runner_on_failed(self, result, ignore_errors=False): def v2_runner_on_skipped(self, result): self._host_counter += 1 - if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS): # fallback on constants for inherited plugins missing docs - + if self._plugin_options.get( + "show_skipped_hosts", C.DISPLAY_SKIPPED_HOSTS + ): # fallback on constants for inherited plugins missing docs self._clean_results(result._result, result._task.action) - if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid: + if self._play.strategy == "free" and self._last_task_banner != result._task._uuid: self._print_task_banner(result._task) - if result._task.loop and 'results' in result._result: + if result._task.loop and "results" in result._result: self._process_items(result) else: msg = f"skipping: {self._host_counter}/{self._host_total} [{result._host.get_name()}]" @@ -233,18 +227,18 @@ def v2_runner_on_skipped(self, result): def v2_runner_on_unreachable(self, result): self._host_counter += 1 - if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid: + if self._play.strategy == "free" and self._last_task_banner != result._task._uuid: self._print_task_banner(result._task) - delegated_vars = result._result.get('_ansible_delegated_vars', None) + delegated_vars = result._result.get("_ansible_delegated_vars", None) if delegated_vars: self._display.display( f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> " f"{delegated_vars['ansible_host']}]: UNREACHABLE! => {self._dump_results(result._result)}", - color=C.COLOR_UNREACHABLE + color=C.COLOR_UNREACHABLE, ) else: self._display.display( f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()}]: UNREACHABLE! 
=> {self._dump_results(result._result)}", - color=C.COLOR_UNREACHABLE + color=C.COLOR_UNREACHABLE, ) diff --git a/plugins/callback/default_without_diff.py b/plugins/callback/default_without_diff.py index b0315829b52..efffb2b4774 100644 --- a/plugins/callback/default_without_diff.py +++ b/plugins/callback/default_without_diff.py @@ -1,4 +1,3 @@ - # Copyright (c) 2024, Felix Fontein # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -36,8 +35,8 @@ class CallbackModule(Default): CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'community.general.default_without_diff' + CALLBACK_TYPE = "stdout" + CALLBACK_NAME = "community.general.default_without_diff" def v2_on_file_diff(self, result): pass diff --git a/plugins/callback/dense.py b/plugins/callback/dense.py index 15e4ee10a6c..63ff88b4d61 100644 --- a/plugins/callback/dense.py +++ b/plugins/callback/dense.py @@ -22,6 +22,7 @@ HAS_OD = False try: from collections import OrderedDict + HAS_OD = True except ImportError: pass @@ -69,66 +70,66 @@ # FIXME: Importing constants as C simply does not work, beats me :-/ # from ansible import constants as C class C: - COLOR_HIGHLIGHT = 'white' - COLOR_VERBOSE = 'blue' - COLOR_WARN = 'bright purple' - COLOR_ERROR = 'red' - COLOR_DEBUG = 'dark gray' - COLOR_DEPRECATE = 'purple' - COLOR_SKIP = 'cyan' - COLOR_UNREACHABLE = 'bright red' - COLOR_OK = 'green' - COLOR_CHANGED = 'yellow' + COLOR_HIGHLIGHT = "white" + COLOR_VERBOSE = "blue" + COLOR_WARN = "bright purple" + COLOR_ERROR = "red" + COLOR_DEBUG = "dark gray" + COLOR_DEPRECATE = "purple" + COLOR_SKIP = "cyan" + COLOR_UNREACHABLE = "bright red" + COLOR_OK = "green" + COLOR_CHANGED = "yellow" # Taken from Dstat class vt100: - black = '\033[0;30m' - darkred = '\033[0;31m' - darkgreen = '\033[0;32m' - darkyellow = '\033[0;33m' - darkblue = '\033[0;34m' - darkmagenta = '\033[0;35m' - darkcyan = '\033[0;36m' - gray = '\033[0;37m' - - darkgray = '\033[1;30m' - red = '\033[1;31m' - green = '\033[1;32m' - yellow = '\033[1;33m' - blue = '\033[1;34m' - magenta = '\033[1;35m' - cyan = '\033[1;36m' - white = '\033[1;37m' - - blackbg = '\033[40m' - redbg = '\033[41m' - greenbg = '\033[42m' - yellowbg = '\033[43m' - bluebg = '\033[44m' - magentabg = '\033[45m' - cyanbg = '\033[46m' - whitebg = '\033[47m' - - reset = '\033[0;0m' - bold = '\033[1m' - reverse = '\033[2m' - underline = '\033[4m' - - clear = '\033[2J' -# clearline = '\033[K' - clearline = '\033[2K' - save = '\033[s' - restore = '\033[u' - save_all = '\0337' - restore_all = '\0338' - linewrap = '\033[7h' - nolinewrap = '\033[7l' - - up = '\033[1A' - down = '\033[1B' - right = '\033[1C' - left = '\033[1D' + black = "\033[0;30m" + darkred = "\033[0;31m" + darkgreen = "\033[0;32m" + darkyellow = "\033[0;33m" + darkblue = "\033[0;34m" + darkmagenta = "\033[0;35m" + darkcyan = "\033[0;36m" + gray = "\033[0;37m" + + darkgray = "\033[1;30m" + red = "\033[1;31m" + green = "\033[1;32m" + yellow = "\033[1;33m" + blue = "\033[1;34m" + magenta = "\033[1;35m" + cyan = "\033[1;36m" + white = "\033[1;37m" + + blackbg = "\033[40m" + redbg = "\033[41m" + greenbg = "\033[42m" + yellowbg = "\033[43m" + bluebg = "\033[44m" + magentabg = "\033[45m" + cyanbg = "\033[46m" + whitebg = "\033[47m" + + reset = "\033[0;0m" + bold = "\033[1m" + reverse = "\033[2m" + underline = "\033[4m" + + clear = "\033[2J" + # clearline = '\033[K' + clearline = "\033[2K" + save = "\033[s" + restore = "\033[u" + save_all = 
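Reviewer note: `counter_enabled` tracks progress by incrementing counters inside the `v2_runner_on_*` event hooks, and `default_without_diff` silences one event by overriding it with `pass`. A schematic, stand-alone sketch of both ideas — plain classes, not wired into Ansible's callback loader:

```python
class StdoutCallback:
    """Stand-in for the default stdout callback's event surface."""

    def v2_runner_on_ok(self, result):
        print("ok")

    def v2_on_file_diff(self, result):
        print("diff output")


class CountingCallback(StdoutCallback):
    """Counts hosts per task, like counter_enabled's 'ok: n/total' lines."""

    def __init__(self, host_total):
        self.host_counter = 0
        self.host_total = host_total

    def v2_runner_on_ok(self, result):
        self.host_counter += 1
        print(f"ok: {self.host_counter}/{self.host_total}")

    def v2_on_file_diff(self, result):
        pass  # suppress diff output, as default_without_diff does


cb = CountingCallback(host_total=3)
cb.v2_runner_on_ok(result=None)  # -> "ok: 1/3"
```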
"\0337" + restore_all = "\0338" + linewrap = "\033[7h" + nolinewrap = "\033[7l" + + up = "\033[1A" + down = "\033[1B" + right = "\033[1C" + left = "\033[1D" colors = dict( @@ -140,26 +141,23 @@ class vt100: unreachable=vt100.red, ) -states = ('skipped', 'ok', 'changed', 'failed', 'unreachable') +states = ("skipped", "ok", "changed", "failed", "unreachable") class CallbackModule(CallbackModule_default): - - ''' + """ This is the dense callback interface, where screen estate is still valued. - ''' + """ CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'dense' + CALLBACK_TYPE = "stdout" + CALLBACK_NAME = "dense" def __init__(self): - # From CallbackModule self._display = display if HAS_OD: - self.disabled = False self.super_ref = super() self.super_ref.__init__() @@ -167,14 +165,14 @@ def __init__(self): # Attributes to remove from results for more density self.removed_attributes = ( # 'changed', - 'delta', + "delta", # 'diff', - 'end', - 'failed', - 'failed_when_result', - 'invocation', - 'start', - 'stdout_lines', + "end", + "failed", + "failed_when_result", + "invocation", + "start", + "stdout_lines", ) # Initiate data structures @@ -182,13 +180,15 @@ def __init__(self): self.keep = False self.shown_title = False self.count = dict(play=0, handler=0, task=0) - self.type = 'foo' + self.type = "foo" # Start immediately on the first line sys.stdout.write(vt100.reset + vt100.save + vt100.clearline) sys.stdout.flush() else: - display.warning("The 'dense' callback plugin requires OrderedDict which is not available in this version of python, disabling.") + display.warning( + "The 'dense' callback plugin requires OrderedDict which is not available in this version of python, disabling." + ) self.disabled = True def __del__(self): @@ -198,27 +198,27 @@ def _add_host(self, result, status): name = result._host.get_name() # Add a new status in case a failed task is ignored - if status == 'failed' and result._task.ignore_errors: - status = 'ignored' + if status == "failed" and result._task.ignore_errors: + status = "ignored" # Check if we have to update an existing state (when looping over items) if name not in self.hosts: self.hosts[name] = dict(state=status) - elif states.index(self.hosts[name]['state']) < states.index(status): - self.hosts[name]['state'] = status + elif states.index(self.hosts[name]["state"]) < states.index(status): + self.hosts[name]["state"] = status # Store delegated hostname, if needed - delegated_vars = result._result.get('_ansible_delegated_vars', None) + delegated_vars = result._result.get("_ansible_delegated_vars", None) if delegated_vars: - self.hosts[name]['delegate'] = delegated_vars['ansible_host'] + self.hosts[name]["delegate"] = delegated_vars["ansible_host"] # Print progress bar self._display_progress(result) -# # Ensure that tasks with changes/failures stay on-screen, and during diff-mode -# if status in ['changed', 'failed', 'unreachable'] or (result.get('_diff_mode', False) and result._resultget('diff', False)): + # # Ensure that tasks with changes/failures stay on-screen, and during diff-mode + # if status in ['changed', 'failed', 'unreachable'] or (result.get('_diff_mode', False) and result._resultget('diff', False)): # Ensure that tasks with changes/failures stay on-screen - if status in ['changed', 'failed', 'unreachable']: + if status in ["changed", "failed", "unreachable"]: self.keep = True if self._display.verbosity == 1: @@ -239,9 +239,9 @@ def _clean_results(self, result): del result[attr] def _handle_exceptions(self, result): - if 
'exception' in result: + if "exception" in result: # Remove the exception from the result so it is not shown every time - del result['exception'] + del result["exception"] if self._display.verbosity == 1: return "An exception occurred during task execution. To see the full traceback, use -vvv." @@ -249,16 +249,16 @@ def _handle_exceptions(self, result): def _display_progress(self, result=None): # Always rewrite the complete line sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline) - sys.stdout.write(f'{self.type} {self.count[self.type]}:') + sys.stdout.write(f"{self.type} {self.count[self.type]}:") sys.stdout.write(vt100.reset) sys.stdout.flush() # Print out each host in its own status-color for name in self.hosts: - sys.stdout.write(' ') - if self.hosts[name].get('delegate', None): + sys.stdout.write(" ") + if self.hosts[name].get("delegate", None): sys.stdout.write(f"{self.hosts[name]['delegate']}>") - sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset) + sys.stdout.write(colors[self.hosts[name]["state"]] + name + vt100.reset) sys.stdout.flush() sys.stdout.write(vt100.linewrap) @@ -267,7 +267,7 @@ def _display_task_banner(self): if not self.shown_title: self.shown_title = True sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline) - sys.stdout.write(f'{self.type} {self.count[self.type]}: {self.task.get_name().strip()}') + sys.stdout.write(f"{self.type} {self.count[self.type]}: {self.task.get_name().strip()}") sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() else: @@ -284,29 +284,31 @@ def _display_results(self, result, status): self._clean_results(result._result) - dump = '' - if result._task.action == 'include': + dump = "" + if result._task.action == "include": return - elif status == 'ok': + elif status == "ok": return - elif status == 'ignored': + elif status == "ignored": dump = self._handle_exceptions(result._result) - elif status == 'failed': + elif status == "failed": dump = self._handle_exceptions(result._result) - elif status == 'unreachable': - dump = result._result['msg'] + elif status == "unreachable": + dump = result._result["msg"] if not dump: dump = self._dump_results(result._result) - if result._task.loop and 'results' in result._result: + if result._task.loop and "results" in result._result: self._process_items(result) else: sys.stdout.write(f"{colors[status] + status}: ") - delegated_vars = result._result.get('_ansible_delegated_vars', None) + delegated_vars = result._result.get("_ansible_delegated_vars", None) if delegated_vars: - sys.stdout.write(f"{vt100.reset}{result._host.get_name()}>{colors[status]}{delegated_vars['ansible_host']}") + sys.stdout.write( + f"{vt100.reset}{result._host.get_name()}>{colors[status]}{delegated_vars['ansible_host']}" + ) else: sys.stdout.write(result._host.get_name()) @@ -314,7 +316,7 @@ def _display_results(self, result, status): sys.stdout.write(f"{vt100.reset}{vt100.save}{vt100.clearline}") sys.stdout.flush() - if status == 'changed': + if status == "changed": self._handle_warnings(result._result) def v2_playbook_on_play_start(self, play): @@ -327,13 +329,13 @@ def v2_playbook_on_play_start(self, play): # Reset at the start of each play self.keep = False self.count.update(dict(handler=0, task=0)) - self.count['play'] += 1 + self.count["play"] += 1 self.play = play # Write the next play on screen IN UPPERCASE, and make it permanent name = play.get_name().strip() if not name: - name = 
'unnamed' + name = "unnamed" sys.stdout.write(f"PLAY {self.count['play']}: {name.upper()}") sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() @@ -351,14 +353,14 @@ def v2_playbook_on_task_start(self, task, is_conditional): self.shown_title = False self.hosts = OrderedDict() self.task = task - self.type = 'task' + self.type = "task" # Enumerate task if not setup (task names are too long for dense output) - if task.get_name() != 'setup': - self.count['task'] += 1 + if task.get_name() != "setup": + self.count["task"] += 1 # Write the next task on screen (behind the prompt is the previous output) - sys.stdout.write(f'{self.type} {self.count[self.type]}.') + sys.stdout.write(f"{self.type} {self.count[self.type]}.") sys.stdout.write(vt100.reset) sys.stdout.flush() @@ -374,36 +376,36 @@ def v2_playbook_on_handler_task_start(self, task): self.shown_title = False self.hosts = OrderedDict() self.task = task - self.type = 'handler' + self.type = "handler" # Enumerate handler if not setup (handler names may be too long for dense output) - if task.get_name() != 'setup': + if task.get_name() != "setup": self.count[self.type] += 1 # Write the next task on screen (behind the prompt is the previous output) - sys.stdout.write(f'{self.type} {self.count[self.type]}.') + sys.stdout.write(f"{self.type} {self.count[self.type]}.") sys.stdout.write(vt100.reset) sys.stdout.flush() def v2_playbook_on_cleanup_task_start(self, task): # TBD - sys.stdout.write('cleanup.') + sys.stdout.write("cleanup.") sys.stdout.flush() def v2_runner_on_failed(self, result, ignore_errors=False): - self._add_host(result, 'failed') + self._add_host(result, "failed") def v2_runner_on_ok(self, result): - if result._result.get('changed', False): - self._add_host(result, 'changed') + if result._result.get("changed", False): + self._add_host(result, "changed") else: - self._add_host(result, 'ok') + self._add_host(result, "ok") def v2_runner_on_skipped(self, result): - self._add_host(result, 'skipped') + self._add_host(result, "skipped") def v2_runner_on_unreachable(self, result): - self._add_host(result, 'unreachable') + self._add_host(result, "unreachable") def v2_runner_on_include(self, included_file): pass @@ -423,24 +425,24 @@ def v2_playbook_item_on_ok(self, result): self.v2_runner_item_on_ok(result) def v2_runner_item_on_ok(self, result): - if result._result.get('changed', False): - self._add_host(result, 'changed') + if result._result.get("changed", False): + self._add_host(result, "changed") else: - self._add_host(result, 'ok') + self._add_host(result, "ok") # Old definition in v2.0 def v2_playbook_item_on_failed(self, result): self.v2_runner_item_on_failed(result) def v2_runner_item_on_failed(self, result): - self._add_host(result, 'failed') + self._add_host(result, "failed") # Old definition in v2.0 def v2_playbook_item_on_skipped(self, result): self.v2_runner_item_on_skipped(result) def v2_runner_item_on_skipped(self, result): - self._add_host(result, 'skipped') + self._add_host(result, "skipped") def v2_playbook_on_no_hosts_remaining(self): if self._display.verbosity == 0 and self.keep: @@ -467,7 +469,7 @@ def v2_playbook_on_stats(self, stats): return sys.stdout.write(vt100.bold + vt100.underline) - sys.stdout.write('SUMMARY') + sys.stdout.write("SUMMARY") sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() @@ -479,7 +481,7 @@ def v2_playbook_on_stats(self, stats): f"{hostcolor(h, t)} : {colorize('ok', t['ok'], C.COLOR_OK)} 
{colorize('changed', t['changed'], C.COLOR_CHANGED)} " f"{colorize('unreachable', t['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', t['failures'], C.COLOR_ERROR)} " f"{colorize('rescued', t['rescued'], C.COLOR_OK)} {colorize('ignored', t['ignored'], C.COLOR_WARN)}", - screen_only=True + screen_only=True, ) diff --git a/plugins/callback/diy.py b/plugins/callback/diy.py index d668319b713..a747442abb6 100644 --- a/plugins/callback/diy.py +++ b/plugins/callback/diy.py @@ -1,4 +1,3 @@ - # Copyright (c) 2019, Trevor Highfill # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -786,6 +785,7 @@ try: from ansible.template import trust_as_template # noqa: F401, pylint: disable=unused-import + SUPPORTS_DATA_TAGGING = True except ImportError: SUPPORTS_DATA_TAGGING = False @@ -806,11 +806,12 @@ class CallbackModule(Default): """ Callback plugin that allows you to supply your own custom callback templates to be output. """ + CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'community.general.diy' + CALLBACK_TYPE = "stdout" + CALLBACK_NAME = "community.general.diy" - DIY_NS = 'ansible_callback_diy' + DIY_NS = "ansible_callback_diy" @contextmanager def _suppress_stdout(self, enabled): @@ -823,50 +824,48 @@ def _suppress_stdout(self, enabled): def _get_output_specification(self, loader, variables): _ret = {} _calling_method = sys._getframe(1).f_code.co_name - _callback_type = (_calling_method[3:] if _calling_method[:3] == "v2_" else _calling_method) - _callback_options = ['msg', 'msg_color'] + _callback_type = _calling_method[3:] if _calling_method[:3] == "v2_" else _calling_method + _callback_options = ["msg", "msg_color"] for option in _callback_options: - _option_name = f'{_callback_type}_{option}' - _option_template = variables.get( - f"{self.DIY_NS}_{_option_name}", - self.get_option(_option_name) - ) - _ret.update({option: self._template( - loader=loader, - template=_option_template, - variables=variables - )}) + _option_name = f"{_callback_type}_{option}" + _option_template = variables.get(f"{self.DIY_NS}_{_option_name}", self.get_option(_option_name)) + _ret.update({option: self._template(loader=loader, template=_option_template, variables=variables)}) - _ret.update({'vars': variables}) + _ret.update({"vars": variables}) return _ret def _using_diy(self, spec): sentinel = object() - omit = spec['vars'].get('omit', sentinel) + omit = spec["vars"].get("omit", sentinel) # With Data Tagging, omit is sentinel - return (spec['msg'] is not None) and (spec['msg'] != omit or omit is sentinel) + return (spec["msg"] is not None) and (spec["msg"] != omit or omit is sentinel) def _parent_has_callback(self): return hasattr(super(), sys._getframe(1).f_code.co_name) def _template(self, loader, template, variables): _templar = Templar(loader=loader, variables=variables) - return _templar.template( - template, - preserve_trailing_newlines=True, - convert_data=False, - escape_backslashes=True - ) + return _templar.template(template, preserve_trailing_newlines=True, convert_data=False, escape_backslashes=True) def _output(self, spec, stderr=False): - _msg = to_text(spec['msg']) + _msg = to_text(spec["msg"]) if len(_msg) > 0: - self._display.display(msg=_msg, color=spec['msg_color'], stderr=stderr) - - def _get_vars(self, playbook, play=None, host=None, task=None, included_file=None, - handler=None, result=None, stats=None, remove_attr_ref_loop=True): + 
self._display.display(msg=_msg, color=spec["msg_color"], stderr=stderr) + + def _get_vars( + self, + playbook, + play=None, + host=None, + task=None, + included_file=None, + handler=None, + result=None, + stats=None, + remove_attr_ref_loop=True, + ): def _get_value(obj, attr=None, method=None): if attr: return getattr(obj, attr, getattr(obj, f"_{attr}", None)) @@ -876,8 +875,8 @@ def _get_value(obj, attr=None, method=None): return _method() def _remove_attr_ref_loop(obj, attributes): - _loop_var = getattr(obj, 'loop_control', None) - _loop_var = (_loop_var or 'item') + _loop_var = getattr(obj, "loop_control", None) + _loop_var = _loop_var or "item" for attr in attributes: if str(_loop_var) in str(_get_value(obj=obj, attr=attr)): @@ -896,56 +895,128 @@ def __deepcopy__(self, memo): _all = _variable_manager.get_vars() if play: _all = play.get_variable_manager().get_vars( - play=play, - host=(host if host else getattr(result, '_host', None)), - task=(handler if handler else task) + play=play, host=(host if host else getattr(result, "_host", None)), task=(handler if handler else task) ) _ret.update(_all) _ret.update(_ret.get(self.DIY_NS, {self.DIY_NS: {} if SUPPORTS_DATA_TAGGING else CallbackDIYDict()})) - _ret[self.DIY_NS].update({'playbook': {}}) - _playbook_attributes = ['entries', 'file_name', 'basedir'] + _ret[self.DIY_NS].update({"playbook": {}}) + _playbook_attributes = ["entries", "file_name", "basedir"] for attr in _playbook_attributes: - _ret[self.DIY_NS]['playbook'].update({attr: _get_value(obj=playbook, attr=attr)}) + _ret[self.DIY_NS]["playbook"].update({attr: _get_value(obj=playbook, attr=attr)}) if play: - _ret[self.DIY_NS].update({'play': {}}) - _play_attributes = ['any_errors_fatal', 'become', 'become_flags', 'become_method', - 'become_user', 'check_mode', 'collections', 'connection', - 'debugger', 'diff', 'environment', 'fact_path', 'finalized', - 'force_handlers', 'gather_facts', 'gather_subset', - 'gather_timeout', 'handlers', 'hosts', 'ignore_errors', - 'ignore_unreachable', 'included_conditional', 'included_path', - 'max_fail_percentage', 'module_defaults', 'name', 'no_log', - 'only_tags', 'order', 'port', 'post_tasks', 'pre_tasks', - 'remote_user', 'removed_hosts', 'roles', 'run_once', 'serial', - 'skip_tags', 'squashed', 'strategy', 'tags', 'tasks', 'uuid', - 'validated', 'vars_files', 'vars_prompt'] + _ret[self.DIY_NS].update({"play": {}}) + _play_attributes = [ + "any_errors_fatal", + "become", + "become_flags", + "become_method", + "become_user", + "check_mode", + "collections", + "connection", + "debugger", + "diff", + "environment", + "fact_path", + "finalized", + "force_handlers", + "gather_facts", + "gather_subset", + "gather_timeout", + "handlers", + "hosts", + "ignore_errors", + "ignore_unreachable", + "included_conditional", + "included_path", + "max_fail_percentage", + "module_defaults", + "name", + "no_log", + "only_tags", + "order", + "port", + "post_tasks", + "pre_tasks", + "remote_user", + "removed_hosts", + "roles", + "run_once", + "serial", + "skip_tags", + "squashed", + "strategy", + "tags", + "tasks", + "uuid", + "validated", + "vars_files", + "vars_prompt", + ] for attr in _play_attributes: - _ret[self.DIY_NS]['play'].update({attr: _get_value(obj=play, attr=attr)}) + _ret[self.DIY_NS]["play"].update({attr: _get_value(obj=play, attr=attr)}) if host: - _ret[self.DIY_NS].update({'host': {}}) - _host_attributes = ['name', 'uuid', 'address', 'implicit'] + _ret[self.DIY_NS].update({"host": {}}) + _host_attributes = ["name", "uuid", "address", "implicit"] 
for attr in _host_attributes: - _ret[self.DIY_NS]['host'].update({attr: _get_value(obj=host, attr=attr)}) + _ret[self.DIY_NS]["host"].update({attr: _get_value(obj=host, attr=attr)}) if task: - _ret[self.DIY_NS].update({'task': {}}) - _task_attributes = ['action', 'any_errors_fatal', 'args', 'async', 'async_val', - 'become', 'become_flags', 'become_method', 'become_user', - 'changed_when', 'check_mode', 'collections', 'connection', - 'debugger', 'delay', 'delegate_facts', 'delegate_to', 'diff', - 'environment', 'failed_when', 'finalized', 'ignore_errors', - 'ignore_unreachable', 'loop', 'loop_control', 'loop_with', - 'module_defaults', 'name', 'no_log', 'notify', 'parent', 'poll', - 'port', 'register', 'remote_user', 'retries', 'role', 'run_once', - 'squashed', 'tags', 'untagged', 'until', 'uuid', 'validated', - 'when'] + _ret[self.DIY_NS].update({"task": {}}) + _task_attributes = [ + "action", + "any_errors_fatal", + "args", + "async", + "async_val", + "become", + "become_flags", + "become_method", + "become_user", + "changed_when", + "check_mode", + "collections", + "connection", + "debugger", + "delay", + "delegate_facts", + "delegate_to", + "diff", + "environment", + "failed_when", + "finalized", + "ignore_errors", + "ignore_unreachable", + "loop", + "loop_control", + "loop_with", + "module_defaults", + "name", + "no_log", + "notify", + "parent", + "poll", + "port", + "register", + "remote_user", + "retries", + "role", + "run_once", + "squashed", + "tags", + "untagged", + "until", + "uuid", + "validated", + "when", + ] # remove arguments that reference a loop var because they cause templating issues in # callbacks that do not have the loop context(e.g. playbook_on_task_start) @@ -953,74 +1024,114 @@ def __deepcopy__(self, memo): _task_attributes = _remove_attr_ref_loop(obj=task, attributes=_task_attributes) for attr in _task_attributes: - _ret[self.DIY_NS]['task'].update({attr: _get_value(obj=task, attr=attr)}) + _ret[self.DIY_NS]["task"].update({attr: _get_value(obj=task, attr=attr)}) if included_file: - _ret[self.DIY_NS].update({'included_file': {}}) - _included_file_attributes = ['args', 'filename', 'hosts', 'is_role', 'task'] + _ret[self.DIY_NS].update({"included_file": {}}) + _included_file_attributes = ["args", "filename", "hosts", "is_role", "task"] for attr in _included_file_attributes: - _ret[self.DIY_NS]['included_file'].update({attr: _get_value( - obj=included_file, - attr=attr - )}) + _ret[self.DIY_NS]["included_file"].update({attr: _get_value(obj=included_file, attr=attr)}) if handler: - _ret[self.DIY_NS].update({'handler': {}}) - _handler_attributes = ['action', 'any_errors_fatal', 'args', 'async', 'async_val', - 'become', 'become_flags', 'become_method', 'become_user', - 'changed_when', 'check_mode', 'collections', 'connection', - 'debugger', 'delay', 'delegate_facts', 'delegate_to', 'diff', - 'environment', 'failed_when', 'finalized', 'ignore_errors', - 'ignore_unreachable', 'listen', 'loop', 'loop_control', - 'loop_with', 'module_defaults', 'name', 'no_log', - 'notified_hosts', 'notify', 'parent', 'poll', 'port', - 'register', 'remote_user', 'retries', 'role', 'run_once', - 'squashed', 'tags', 'untagged', 'until', 'uuid', 'validated', - 'when'] + _ret[self.DIY_NS].update({"handler": {}}) + _handler_attributes = [ + "action", + "any_errors_fatal", + "args", + "async", + "async_val", + "become", + "become_flags", + "become_method", + "become_user", + "changed_when", + "check_mode", + "collections", + "connection", + "debugger", + "delay", + "delegate_facts", + 
"delegate_to", + "diff", + "environment", + "failed_when", + "finalized", + "ignore_errors", + "ignore_unreachable", + "listen", + "loop", + "loop_control", + "loop_with", + "module_defaults", + "name", + "no_log", + "notified_hosts", + "notify", + "parent", + "poll", + "port", + "register", + "remote_user", + "retries", + "role", + "run_once", + "squashed", + "tags", + "untagged", + "until", + "uuid", + "validated", + "when", + ] if handler.loop and remove_attr_ref_loop: - _handler_attributes = _remove_attr_ref_loop(obj=handler, - attributes=_handler_attributes) + _handler_attributes = _remove_attr_ref_loop(obj=handler, attributes=_handler_attributes) for attr in _handler_attributes: - _ret[self.DIY_NS]['handler'].update({attr: _get_value(obj=handler, attr=attr)}) + _ret[self.DIY_NS]["handler"].update({attr: _get_value(obj=handler, attr=attr)}) - _ret[self.DIY_NS]['handler'].update({'is_host_notified': handler.is_host_notified(host)}) + _ret[self.DIY_NS]["handler"].update({"is_host_notified": handler.is_host_notified(host)}) if result: - _ret[self.DIY_NS].update({'result': {}}) - _result_attributes = ['host', 'task', 'task_name'] + _ret[self.DIY_NS].update({"result": {}}) + _result_attributes = ["host", "task", "task_name"] for attr in _result_attributes: - _ret[self.DIY_NS]['result'].update({attr: _get_value(obj=result, attr=attr)}) + _ret[self.DIY_NS]["result"].update({attr: _get_value(obj=result, attr=attr)}) - _result_methods = ['is_changed', 'is_failed', 'is_skipped', 'is_unreachable'] + _result_methods = ["is_changed", "is_failed", "is_skipped", "is_unreachable"] for method in _result_methods: - _ret[self.DIY_NS]['result'].update({method: _get_value(obj=result, method=method)}) + _ret[self.DIY_NS]["result"].update({method: _get_value(obj=result, method=method)}) - _ret[self.DIY_NS]['result'].update({'output': getattr(result, '_result', None)}) + _ret[self.DIY_NS]["result"].update({"output": getattr(result, "_result", None)}) _ret.update(result._result) if stats: - _ret[self.DIY_NS].update({'stats': {}}) - _stats_attributes = ['changed', 'custom', 'dark', 'failures', 'ignored', - 'ok', 'processed', 'rescued', 'skipped'] + _ret[self.DIY_NS].update({"stats": {}}) + _stats_attributes = [ + "changed", + "custom", + "dark", + "failures", + "ignored", + "ok", + "processed", + "rescued", + "skipped", + ] for attr in _stats_attributes: - _ret[self.DIY_NS]['stats'].update({attr: _get_value(obj=stats, attr=attr)}) + _ret[self.DIY_NS]["stats"].update({attr: _get_value(obj=stats, attr=attr)}) - _ret[self.DIY_NS].update({'top_level_var_names': list(_ret.keys())}) + _ret[self.DIY_NS].update({"top_level_var_names": list(_ret.keys())}) return _ret def v2_on_any(self, *args, **kwargs): - self._diy_spec = self._get_output_specification( - loader=self._diy_loader, - variables=self._diy_spec['vars'] - ) + self._diy_spec = self._get_output_specification(loader=self._diy_loader, variables=self._diy_spec["vars"]) if self._using_diy(spec=self._diy_spec): self._output(spec=self._diy_spec) @@ -1033,11 +1144,8 @@ def v2_runner_on_failed(self, result, ignore_errors=False): self._diy_spec = self._get_output_specification( loader=self._diy_loader, variables=self._get_vars( - playbook=self._diy_playbook, - play=self._diy_play, - task=self._diy_task, - result=result - ) + playbook=self._diy_playbook, play=self._diy_play, task=self._diy_task, result=result + ), ) if self._using_diy(spec=self._diy_spec): @@ -1051,11 +1159,8 @@ def v2_runner_on_ok(self, result): self._diy_spec = self._get_output_specification( 
loader=self._diy_loader, variables=self._get_vars( - playbook=self._diy_playbook, - play=self._diy_play, - task=self._diy_task, - result=result - ) + playbook=self._diy_playbook, play=self._diy_play, task=self._diy_task, result=result + ), ) if self._using_diy(spec=self._diy_spec): @@ -1069,11 +1174,8 @@ def v2_runner_on_skipped(self, result): self._diy_spec = self._get_output_specification( loader=self._diy_loader, variables=self._get_vars( - playbook=self._diy_playbook, - play=self._diy_play, - task=self._diy_task, - result=result - ) + playbook=self._diy_playbook, play=self._diy_play, task=self._diy_task, result=result + ), ) if self._using_diy(spec=self._diy_spec): @@ -1087,11 +1189,8 @@ def v2_runner_on_unreachable(self, result): self._diy_spec = self._get_output_specification( loader=self._diy_loader, variables=self._get_vars( - playbook=self._diy_playbook, - play=self._diy_play, - task=self._diy_task, - result=result - ) + playbook=self._diy_playbook, play=self._diy_play, task=self._diy_task, result=result + ), ) if self._using_diy(spec=self._diy_spec): @@ -1121,8 +1220,8 @@ def v2_runner_item_on_ok(self, result): play=self._diy_play, task=self._diy_task, result=result, - remove_attr_ref_loop=False - ) + remove_attr_ref_loop=False, + ), ) if self._using_diy(spec=self._diy_spec): @@ -1140,8 +1239,8 @@ def v2_runner_item_on_failed(self, result): play=self._diy_play, task=self._diy_task, result=result, - remove_attr_ref_loop=False - ) + remove_attr_ref_loop=False, + ), ) if self._using_diy(spec=self._diy_spec): @@ -1159,8 +1258,8 @@ def v2_runner_item_on_skipped(self, result): play=self._diy_play, task=self._diy_task, result=result, - remove_attr_ref_loop=False - ) + remove_attr_ref_loop=False, + ), ) if self._using_diy(spec=self._diy_spec): @@ -1174,11 +1273,8 @@ def v2_runner_retry(self, result): self._diy_spec = self._get_output_specification( loader=self._diy_loader, variables=self._get_vars( - playbook=self._diy_playbook, - play=self._diy_play, - task=self._diy_task, - result=result - ) + playbook=self._diy_playbook, play=self._diy_play, task=self._diy_task, result=result + ), ) if self._using_diy(spec=self._diy_spec): @@ -1195,11 +1291,8 @@ def v2_runner_on_start(self, host, task): self._diy_spec = self._get_output_specification( loader=self._diy_loader, variables=self._get_vars( - playbook=self._diy_playbook, - play=self._diy_play, - host=self._diy_host, - task=self._diy_task - ) + playbook=self._diy_playbook, play=self._diy_play, host=self._diy_host, task=self._diy_task + ), ) if self._using_diy(spec=self._diy_spec): @@ -1214,10 +1307,7 @@ def v2_playbook_on_start(self, playbook): self._diy_loader = self._diy_playbook.get_loader() self._diy_spec = self._get_output_specification( - loader=self._diy_loader, - variables=self._get_vars( - playbook=self._diy_playbook - ) + loader=self._diy_loader, variables=self._get_vars(playbook=self._diy_playbook) ) if self._using_diy(spec=self._diy_spec): @@ -1234,11 +1324,8 @@ def v2_playbook_on_notify(self, handler, host): self._diy_spec = self._get_output_specification( loader=self._diy_loader, variables=self._get_vars( - playbook=self._diy_playbook, - play=self._diy_play, - host=self._diy_host, - handler=self._diy_handler - ) + playbook=self._diy_playbook, play=self._diy_play, host=self._diy_host, handler=self._diy_handler + ), ) if self._using_diy(spec=self._diy_spec): @@ -1249,10 +1336,7 @@ def v2_playbook_on_notify(self, handler, host): super().v2_playbook_on_notify(handler, host) def v2_playbook_on_no_hosts_matched(self): - 
self._diy_spec = self._get_output_specification( - loader=self._diy_loader, - variables=self._diy_spec['vars'] - ) + self._diy_spec = self._get_output_specification(loader=self._diy_loader, variables=self._diy_spec["vars"]) if self._using_diy(spec=self._diy_spec): self._output(spec=self._diy_spec) @@ -1262,10 +1346,7 @@ def v2_playbook_on_no_hosts_matched(self): super().v2_playbook_on_no_hosts_matched() def v2_playbook_on_no_hosts_remaining(self): - self._diy_spec = self._get_output_specification( - loader=self._diy_loader, - variables=self._diy_spec['vars'] - ) + self._diy_spec = self._get_output_specification(loader=self._diy_loader, variables=self._diy_spec["vars"]) if self._using_diy(spec=self._diy_spec): self._output(spec=self._diy_spec) @@ -1279,11 +1360,7 @@ def v2_playbook_on_task_start(self, task, is_conditional): self._diy_spec = self._get_output_specification( loader=self._diy_loader, - variables=self._get_vars( - playbook=self._diy_playbook, - play=self._diy_play, - task=self._diy_task - ) + variables=self._get_vars(playbook=self._diy_playbook, play=self._diy_play, task=self._diy_task), ) if self._using_diy(spec=self._diy_spec): @@ -1302,11 +1379,7 @@ def v2_playbook_on_handler_task_start(self, task): self._diy_spec = self._get_output_specification( loader=self._diy_loader, - variables=self._get_vars( - playbook=self._diy_playbook, - play=self._diy_play, - task=self._diy_task - ) + variables=self._get_vars(playbook=self._diy_playbook, play=self._diy_play, task=self._diy_task), ) if self._using_diy(spec=self._diy_spec): @@ -1316,13 +1389,19 @@ def v2_playbook_on_handler_task_start(self, task): with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)): super().v2_playbook_on_handler_task_start(task) - def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, - confirm=False, salt_size=None, salt=None, default=None, - unsafe=None): - self._diy_spec = self._get_output_specification( - loader=self._diy_loader, - variables=self._diy_spec['vars'] - ) + def v2_playbook_on_vars_prompt( + self, + varname, + private=True, + prompt=None, + encrypt=None, + confirm=False, + salt_size=None, + salt=None, + default=None, + unsafe=None, + ): + self._diy_spec = self._get_output_specification(loader=self._diy_loader, variables=self._diy_spec["vars"]) if self._using_diy(spec=self._diy_spec): self._output(spec=self._diy_spec) @@ -1330,9 +1409,7 @@ def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt if self._parent_has_callback(): with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)): super().v2_playbook_on_vars_prompt( - varname, private, prompt, encrypt, - confirm, salt_size, salt, default, - unsafe + varname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe ) # not implemented as the call to this is not implemented yet @@ -1347,11 +1424,7 @@ def v2_playbook_on_play_start(self, play): self._diy_play = play self._diy_spec = self._get_output_specification( - loader=self._diy_loader, - variables=self._get_vars( - playbook=self._diy_playbook, - play=self._diy_play - ) + loader=self._diy_loader, variables=self._get_vars(playbook=self._diy_playbook, play=self._diy_play) ) if self._using_diy(spec=self._diy_spec): @@ -1366,11 +1439,7 @@ def v2_playbook_on_stats(self, stats): self._diy_spec = self._get_output_specification( loader=self._diy_loader, - variables=self._get_vars( - playbook=self._diy_playbook, - play=self._diy_play, - stats=self._diy_stats - ) + 
variables=self._get_vars(playbook=self._diy_playbook, play=self._diy_play, stats=self._diy_stats), ) if self._using_diy(spec=self._diy_spec): @@ -1389,8 +1458,8 @@ def v2_playbook_on_include(self, included_file): playbook=self._diy_playbook, play=self._diy_play, task=self._diy_included_file._task, - included_file=self._diy_included_file - ) + included_file=self._diy_included_file, + ), ) if self._using_diy(spec=self._diy_spec): @@ -1404,11 +1473,8 @@ def v2_on_file_diff(self, result): self._diy_spec = self._get_output_specification( loader=self._diy_loader, variables=self._get_vars( - playbook=self._diy_playbook, - play=self._diy_play, - task=self._diy_task, - result=result - ) + playbook=self._diy_playbook, play=self._diy_play, task=self._diy_task, result=result + ), ) if self._using_diy(spec=self._diy_spec): diff --git a/plugins/callback/elastic.py b/plugins/callback/elastic.py index 5d98032bf32..f30d8156889 100644 --- a/plugins/callback/elastic.py +++ b/plugins/callback/elastic.py @@ -116,9 +116,9 @@ def __init__(self, uuid, name, path, play, action, args): def add_host(self, host): if host.uuid in self.host_data: - if host.status == 'included': + if host.status == "included": # concatenate task include output from multiple items - host.result = f'{self.host_data[host.uuid].result}\n{host.result}' + host.result = f"{self.host_data[host.uuid].result}\n{host.result}" else: return @@ -152,7 +152,7 @@ def __init__(self, display): self._display = display def start_task(self, tasks_data, hide_task_arguments, play_name, task): - """ record the start of a task for one or more hosts """ + """record the start of a task for one or more hosts""" uuid = task._uuid @@ -165,29 +165,39 @@ def start_task(self, tasks_data, hide_task_arguments, play_name, task): args = None if not task.no_log and not hide_task_arguments: - args = ', '.join((f'{k}={v}' for k, v in task.args.items())) + args = ", ".join((f"{k}={v}" for k, v in task.args.items())) tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args) def finish_task(self, tasks_data, status, result): - """ record the results of a task for a single host """ + """record the results of a task for a single host""" task_uuid = result._task._uuid - if hasattr(result, '_host') and result._host is not None: + if hasattr(result, "_host") and result._host is not None: host_uuid = result._host._uuid host_name = result._host.name else: - host_uuid = 'include' - host_name = 'include' + host_uuid = "include" + host_name = "include" task = tasks_data[task_uuid] task.add_host(HostData(host_uuid, host_name, status, result)) - def generate_distributed_traces(self, tasks_data, status, end_time, traceparent, apm_service_name, - apm_server_url, apm_verify_server_cert, apm_secret_token, apm_api_key): - """ generate distributed traces from the collected TaskData and HostData """ + def generate_distributed_traces( + self, + tasks_data, + status, + end_time, + traceparent, + apm_service_name, + apm_server_url, + apm_verify_server_cert, + apm_secret_token, + apm_api_key, + ): + """generate distributed traces from the collected TaskData and HostData""" tasks = [] parent_start_time = None @@ -196,7 +206,9 @@ def generate_distributed_traces(self, tasks_data, status, end_time, traceparent, parent_start_time = task.start tasks.append(task) - apm_cli = self.init_apm_client(apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key) + apm_cli = self.init_apm_client( + apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, 
apm_api_key + ) if apm_cli: with closing(apm_cli): instrument() # Only call this once, as early as possible. @@ -218,72 +230,80 @@ def generate_distributed_traces(self, tasks_data, status, end_time, traceparent, apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time) def create_span_data(self, apm_cli, task_data, host_data): - """ create the span with the given TaskData and HostData """ + """create the span with the given TaskData and HostData""" - name = f'[{host_data.name}] {task_data.play}: {task_data.name}' + name = f"[{host_data.name}] {task_data.play}: {task_data.name}" message = "success" status = "success" enriched_error_message = None - if host_data.status == 'included': + if host_data.status == "included": rc = 0 else: res = host_data.result._result - rc = res.get('rc', 0) - if host_data.status == 'failed': + rc = res.get("rc", 0) + if host_data.status == "failed": message = self.get_error_message(res) enriched_error_message = self.enrich_error_message(res) status = "failure" - elif host_data.status == 'skipped': - if 'skip_reason' in res: - message = res['skip_reason'] + elif host_data.status == "skipped": + if "skip_reason" in res: + message = res["skip_reason"] else: - message = 'skipped' + message = "skipped" status = "unknown" - with capture_span(task_data.name, - start=task_data.start, - span_type="ansible.task.run", - duration=host_data.finish - task_data.start, - labels={"ansible.task.args": task_data.args, - "ansible.task.message": message, - "ansible.task.module": task_data.action, - "ansible.task.name": name, - "ansible.task.result": rc, - "ansible.task.host.name": host_data.name, - "ansible.task.host.status": host_data.status}) as span: + with capture_span( + task_data.name, + start=task_data.start, + span_type="ansible.task.run", + duration=host_data.finish - task_data.start, + labels={ + "ansible.task.args": task_data.args, + "ansible.task.message": message, + "ansible.task.module": task_data.action, + "ansible.task.name": name, + "ansible.task.result": rc, + "ansible.task.host.name": host_data.name, + "ansible.task.host.status": host_data.status, + }, + ) as span: span.outcome = status - if 'failure' in status: - exception = AnsibleRuntimeError(message=f"{task_data.action}: {name} failed with error message {enriched_error_message}") + if "failure" in status: + exception = AnsibleRuntimeError( + message=f"{task_data.action}: {name} failed with error message {enriched_error_message}" + ) apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True) def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key): if apm_server_url: - return Client(service_name=apm_service_name, - server_url=apm_server_url, - verify_server_cert=False, - secret_token=apm_secret_token, - api_key=apm_api_key, - use_elastic_traceparent_header=True, - debug=True) + return Client( + service_name=apm_service_name, + server_url=apm_server_url, + verify_server_cert=False, + secret_token=apm_secret_token, + api_key=apm_api_key, + use_elastic_traceparent_header=True, + debug=True, + ) @staticmethod def get_error_message(result): - if result.get('exception') is not None: - return ElasticSource._last_line(result['exception']) - return result.get('msg', 'failed') + if result.get("exception") is not None: + return ElasticSource._last_line(result["exception"]) + return result.get("msg", "failed") @staticmethod def _last_line(text): - lines = text.strip().split('\n') + lines 
= text.strip().split("\n") return lines[-1] @staticmethod def enrich_error_message(result): - message = result.get('msg', 'failed') - exception = result.get('exception') - stderr = result.get('stderr') - return f"message: \"{message}\"\nexception: \"{exception}\"\nstderr: \"{stderr}\"" + message = result.get("msg", "failed") + exception = result.get("exception") + stderr = result.get("stderr") + return f'message: "{message}"\nexception: "{exception}"\nstderr: "{stderr}"' class CallbackModule(CallbackBase): @@ -292,8 +312,8 @@ class CallbackModule(CallbackBase): """ CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.elastic' + CALLBACK_TYPE = "notification" + CALLBACK_NAME = "community.general.elastic" CALLBACK_NEEDS_ENABLED = True def __init__(self, display=None): @@ -308,7 +328,9 @@ def __init__(self, display=None): self.disabled = False if ELASTIC_LIBRARY_IMPORT_ERROR: - raise AnsibleError('The `elastic-apm` must be installed to use this plugin') from ELASTIC_LIBRARY_IMPORT_ERROR + raise AnsibleError( + "The `elastic-apm` must be installed to use this plugin" + ) from ELASTIC_LIBRARY_IMPORT_ERROR self.tasks_data = OrderedDict() @@ -317,17 +339,17 @@ def __init__(self, display=None): def set_options(self, task_keys=None, var_options=None, direct=None): super().set_options(task_keys=task_keys, var_options=var_options, direct=direct) - self.hide_task_arguments = self.get_option('hide_task_arguments') + self.hide_task_arguments = self.get_option("hide_task_arguments") - self.apm_service_name = self.get_option('apm_service_name') + self.apm_service_name = self.get_option("apm_service_name") if not self.apm_service_name: - self.apm_service_name = 'ansible' + self.apm_service_name = "ansible" - self.apm_server_url = self.get_option('apm_server_url') - self.apm_secret_token = self.get_option('apm_secret_token') - self.apm_api_key = self.get_option('apm_api_key') - self.apm_verify_server_cert = self.get_option('apm_verify_server_cert') - self.traceparent = self.get_option('traceparent') + self.apm_server_url = self.get_option("apm_server_url") + self.apm_secret_token = self.get_option("apm_secret_token") + self.apm_api_key = self.get_option("apm_api_key") + self.apm_verify_server_cert = self.get_option("apm_verify_server_cert") + self.traceparent = self.get_option("traceparent") def v2_playbook_on_start(self, playbook): self.ansible_playbook = basename(playbook._file_name) @@ -336,65 +358,29 @@ def v2_playbook_on_play_start(self, play): self.play_name = play.get_name() def v2_runner_on_no_hosts(self, task): - self.elastic.start_task( - self.tasks_data, - self.hide_task_arguments, - self.play_name, - task - ) + self.elastic.start_task(self.tasks_data, self.hide_task_arguments, self.play_name, task) def v2_playbook_on_task_start(self, task, is_conditional): - self.elastic.start_task( - self.tasks_data, - self.hide_task_arguments, - self.play_name, - task - ) + self.elastic.start_task(self.tasks_data, self.hide_task_arguments, self.play_name, task) def v2_playbook_on_cleanup_task_start(self, task): - self.elastic.start_task( - self.tasks_data, - self.hide_task_arguments, - self.play_name, - task - ) + self.elastic.start_task(self.tasks_data, self.hide_task_arguments, self.play_name, task) def v2_playbook_on_handler_task_start(self, task): - self.elastic.start_task( - self.tasks_data, - self.hide_task_arguments, - self.play_name, - task - ) + self.elastic.start_task(self.tasks_data, self.hide_task_arguments, self.play_name, task) def v2_runner_on_failed(self, 
result, ignore_errors=False): self.errors += 1 - self.elastic.finish_task( - self.tasks_data, - 'failed', - result - ) + self.elastic.finish_task(self.tasks_data, "failed", result) def v2_runner_on_ok(self, result): - self.elastic.finish_task( - self.tasks_data, - 'ok', - result - ) + self.elastic.finish_task(self.tasks_data, "ok", result) def v2_runner_on_skipped(self, result): - self.elastic.finish_task( - self.tasks_data, - 'skipped', - result - ) + self.elastic.finish_task(self.tasks_data, "skipped", result) def v2_playbook_on_include(self, included_file): - self.elastic.finish_task( - self.tasks_data, - 'included', - included_file - ) + self.elastic.finish_task(self.tasks_data, "included", included_file) def v2_playbook_on_stats(self, stats): if self.errors == 0: @@ -410,7 +396,7 @@ def v2_playbook_on_stats(self, stats): self.apm_server_url, self.apm_verify_server_cert, self.apm_secret_token, - self.apm_api_key + self.apm_api_key, ) def v2_runner_on_async_failed(self, result, **kwargs): diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py index 9d85be923cb..cb8c8114570 100644 --- a/plugins/callback/jabber.py +++ b/plugins/callback/jabber.py @@ -54,29 +54,31 @@ class CallbackModule(CallbackBase): - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.jabber' + CALLBACK_TYPE = "notification" + CALLBACK_NAME = "community.general.jabber" CALLBACK_NEEDS_WHITELIST = True def __init__(self, display=None): - super().__init__(display=display) if not HAS_XMPP: - self._display.warning("The required python xmpp library (xmpppy) is not installed. " - "pip install git+https://github.com/ArchipelProject/xmpppy") + self._display.warning( + "The required python xmpp library (xmpppy) is not installed. " + "pip install git+https://github.com/ArchipelProject/xmpppy" + ) self.disabled = True - self.serv = os.getenv('JABBER_SERV') - self.j_user = os.getenv('JABBER_USER') - self.j_pass = os.getenv('JABBER_PASS') - self.j_to = os.getenv('JABBER_TO') + self.serv = os.getenv("JABBER_SERV") + self.j_user = os.getenv("JABBER_USER") + self.j_pass = os.getenv("JABBER_PASS") + self.j_to = os.getenv("JABBER_TO") if (self.j_user or self.j_pass or self.serv or self.j_to) is None: self.disabled = True - self._display.warning('Jabber CallBack wants the JABBER_SERV, JABBER_USER, JABBER_PASS and JABBER_TO environment variables') + self._display.warning( + "Jabber CallBack wants the JABBER_SERV, JABBER_USER, JABBER_PASS and JABBER_TO environment variables" + ) def send_msg(self, msg): """Send message""" @@ -85,7 +87,7 @@ def send_msg(self, msg): client.connect(server=(self.serv, 5222)) client.auth(jid.getNode(), self.j_pass, resource=jid.getResource()) message = xmpp.Message(self.j_to, msg) - message.setAttr('type', 'chat') + message.setAttr("type", "chat") client.send(message) client.disconnect() @@ -109,9 +111,9 @@ def playbook_on_stats(self, stats): unreachable = False for h in hosts: s = stats.summarize(h) - if s['failures'] > 0: + if s["failures"] > 0: failures = True - if s['unreachable'] > 0: + if s["unreachable"] > 0: unreachable = True if failures or unreachable: diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py index ff2198364d4..5666153d2f6 100644 --- a/plugins/callback/log_plays.py +++ b/plugins/callback/log_plays.py @@ -49,9 +49,10 @@ class CallbackModule(CallbackBase): """ logs playbook results, per host, in /var/log/ansible/hosts """ + CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 
'community.general.log_plays' + CALLBACK_TYPE = "notification" + CALLBACK_NAME = "community.general.log_plays" CALLBACK_NEEDS_WHITELIST = True TIME_FORMAT = "%b %d %Y %H:%M:%S" @@ -61,7 +62,6 @@ def _make_msg(now, playbook, task_name, task_action, category, data): return f"{now} - {playbook} - {task_name} - {task_action} - {category} - {data}\n\n" def __init__(self): - super().__init__() def set_options(self, task_keys=None, var_options=None, direct=None): @@ -75,12 +75,12 @@ def set_options(self, task_keys=None, var_options=None, direct=None): def log(self, result, category): data = result._result if isinstance(data, MutableMapping): - if '_ansible_verbose_override' in data: + if "_ansible_verbose_override" in data: # avoid logging extraneous data - data = 'omitted' + data = "omitted" else: data = data.copy() - invocation = data.pop('invocation', None) + invocation = data.pop("invocation", None) data = json.dumps(data, cls=AnsibleJSONEncoder) if invocation is not None: data = f"{json.dumps(invocation)} => {data} " @@ -93,25 +93,25 @@ def log(self, result, category): fd.write(msg) def v2_runner_on_failed(self, result, ignore_errors=False): - self.log(result, 'FAILED') + self.log(result, "FAILED") def v2_runner_on_ok(self, result): - self.log(result, 'OK') + self.log(result, "OK") def v2_runner_on_skipped(self, result): - self.log(result, 'SKIPPED') + self.log(result, "SKIPPED") def v2_runner_on_unreachable(self, result): - self.log(result, 'UNREACHABLE') + self.log(result, "UNREACHABLE") def v2_runner_on_async_failed(self, result): - self.log(result, 'ASYNC_FAILED') + self.log(result, "ASYNC_FAILED") def v2_playbook_on_start(self, playbook): self.playbook = playbook._file_name def v2_playbook_on_import_for_host(self, result, imported_file): - self.log(result, 'IMPORTED', imported_file) + self.log(result, "IMPORTED", imported_file) def v2_playbook_on_not_import_for_host(self, result, missing_file): - self.log(result, 'NOTIMPORTED', missing_file) + self.log(result, "NOTIMPORTED", missing_file) diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py index 3ee0f3ad490..6bf7fa0fc1b 100644 --- a/plugins/callback/loganalytics.py +++ b/plugins/callback/loganalytics.py @@ -83,11 +83,10 @@ def __init__(self): def __build_signature(self, date, workspace_id, shared_key, content_length): # Build authorisation signature for Azure log analytics API call sigs = f"POST\n{content_length}\napplication/json\nx-ms-date:{date}\n/api/logs" - utf8_sigs = sigs.encode('utf-8') + utf8_sigs = sigs.encode("utf-8") decoded_shared_key = base64.b64decode(shared_key) - hmac_sha256_sigs = hmac.new( - decoded_shared_key, utf8_sigs, digestmod=hashlib.sha256).digest() - encoded_hash = base64.b64encode(hmac_sha256_sigs).decode('utf-8') + hmac_sha256_sigs = hmac.new(decoded_shared_key, utf8_sigs, digestmod=hashlib.sha256).digest() + encoded_hash = base64.b64encode(hmac_sha256_sigs).decode("utf-8") signature = f"SharedKey {workspace_id}:{encoded_hash}" return signature @@ -95,10 +94,10 @@ def __build_workspace_url(self, workspace_id): return f"https://{workspace_id}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01" def __rfc1123date(self): - return now().strftime('%a, %d %b %Y %H:%M:%S GMT') + return now().strftime("%a, %d %b %Y %H:%M:%S GMT") def send_event(self, workspace_id, shared_key, state, result, runtime): - if result._task_fields['args'].get('_ansible_check_mode') is True: + if result._task_fields["args"].get("_ansible_check_mode") is True: self.ansible_check_mode = True if 
result._task._role: @@ -107,31 +106,31 @@ def send_event(self, workspace_id, shared_key, state, result, runtime): ansible_role = None data = {} - data['uuid'] = result._task._uuid - data['session'] = self.session - data['status'] = state - data['timestamp'] = self.__rfc1123date() - data['host'] = self.host - data['user'] = self.user - data['runtime'] = runtime - data['ansible_version'] = ansible_version - data['ansible_check_mode'] = self.ansible_check_mode - data['ansible_host'] = result._host.name - data['ansible_playbook'] = self.ansible_playbook - data['ansible_role'] = ansible_role - data['ansible_task'] = result._task_fields + data["uuid"] = result._task._uuid + data["session"] = self.session + data["status"] = state + data["timestamp"] = self.__rfc1123date() + data["host"] = self.host + data["user"] = self.user + data["runtime"] = runtime + data["ansible_version"] = ansible_version + data["ansible_check_mode"] = self.ansible_check_mode + data["ansible_host"] = result._host.name + data["ansible_playbook"] = self.ansible_playbook + data["ansible_role"] = ansible_role + data["ansible_task"] = result._task_fields # Removing args since it can contain sensitive data - if 'args' in data['ansible_task']: - data['ansible_task'].pop('args') - data['ansible_result'] = result._result - if 'content' in data['ansible_result']: - data['ansible_result'].pop('content') + if "args" in data["ansible_task"]: + data["ansible_task"].pop("args") + data["ansible_result"] = result._result + if "content" in data["ansible_result"]: + data["ansible_result"].pop("content") # Adding extra vars info - data['extra_vars'] = self.extra_vars + data["extra_vars"] = self.extra_vars # Preparing the playbook logs as JSON format and send to Azure log analytics - jsondata = json.dumps({'event': data}, cls=AnsibleJSONEncoder, sort_keys=True) + jsondata = json.dumps({"event": data}, cls=AnsibleJSONEncoder, sort_keys=True) content_length = len(jsondata) rfc1123date = self.__rfc1123date() signature = self.__build_signature(rfc1123date, workspace_id, shared_key, content_length) @@ -141,19 +140,19 @@ def send_event(self, workspace_id, shared_key, state, result, runtime): workspace_url, jsondata, headers={ - 'content-type': 'application/json', - 'Authorization': signature, - 'Log-Type': 'ansible_playbook', - 'x-ms-date': rfc1123date + "content-type": "application/json", + "Authorization": signature, + "Log-Type": "ansible_playbook", + "x-ms-date": rfc1123date, }, - method='POST' + method="POST", ) class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'loganalytics' + CALLBACK_TYPE = "notification" + CALLBACK_NAME = "loganalytics" CALLBACK_NEEDS_WHITELIST = True def __init__(self, display=None): @@ -164,15 +163,12 @@ def __init__(self, display=None): self.loganalytics = AzureLogAnalyticsSource() def _seconds_since_start(self, result): - return ( - now() - - self.start_datetimes[result._task._uuid] - ).total_seconds() + return (now() - self.start_datetimes[result._task._uuid]).total_seconds() def set_options(self, task_keys=None, var_options=None, direct=None): super().set_options(task_keys=task_keys, var_options=var_options, direct=direct) - self.workspace_id = self.get_option('workspace_id') - self.shared_key = self.get_option('shared_key') + self.workspace_id = self.get_option("workspace_id") + self.shared_key = self.get_option("shared_key") def v2_playbook_on_play_start(self, play): vm = play.get_variable_manager() @@ -190,45 +186,25 @@ def 
v2_playbook_on_handler_task_start(self, task): def v2_runner_on_ok(self, result, **kwargs): self.loganalytics.send_event( - self.workspace_id, - self.shared_key, - 'OK', - result, - self._seconds_since_start(result) + self.workspace_id, self.shared_key, "OK", result, self._seconds_since_start(result) ) def v2_runner_on_skipped(self, result, **kwargs): self.loganalytics.send_event( - self.workspace_id, - self.shared_key, - 'SKIPPED', - result, - self._seconds_since_start(result) + self.workspace_id, self.shared_key, "SKIPPED", result, self._seconds_since_start(result) ) def v2_runner_on_failed(self, result, **kwargs): self.loganalytics.send_event( - self.workspace_id, - self.shared_key, - 'FAILED', - result, - self._seconds_since_start(result) + self.workspace_id, self.shared_key, "FAILED", result, self._seconds_since_start(result) ) def runner_on_async_failed(self, result, **kwargs): self.loganalytics.send_event( - self.workspace_id, - self.shared_key, - 'FAILED', - result, - self._seconds_since_start(result) + self.workspace_id, self.shared_key, "FAILED", result, self._seconds_since_start(result) ) def v2_runner_on_unreachable(self, result, **kwargs): self.loganalytics.send_event( - self.workspace_id, - self.shared_key, - 'UNREACHABLE', - result, - self._seconds_since_start(result) + self.workspace_id, self.shared_key, "UNREACHABLE", result, self._seconds_since_start(result) ) diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py index 3ab87274fe6..6aed1796f82 100644 --- a/plugins/callback/logdna.py +++ b/plugins/callback/logdna.py @@ -64,6 +64,7 @@ try: from logdna import LogDNAHandler + HAS_LOGDNA = True except ImportError: HAS_LOGDNA = False @@ -72,12 +73,12 @@ # Getting MAC Address of system: def get_mac(): mac = f"{getnode():012x}" - return ":".join(map(lambda index: mac[index:index + 2], range(int(len(mac) / 2)))) + return ":".join(map(lambda index: mac[index : index + 2], range(int(len(mac) / 2)))) # Getting hostname of system: def get_hostname(): - return str(socket.gethostname()).split('.local', 1)[0] + return str(socket.gethostname()).split(".local", 1)[0] # Getting IP of system: @@ -87,10 +88,10 @@ def get_ip(): except Exception: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: - s.connect(('10.255.255.255', 1)) + s.connect(("10.255.255.255", 1)) IP = s.getsockname()[0] except Exception: - IP = '127.0.0.1' + IP = "127.0.0.1" finally: s.close() return IP @@ -107,10 +108,9 @@ def isJSONable(obj): # LogDNA Callback Module: class CallbackModule(CallbackBase): - CALLBACK_VERSION = 0.1 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.logdna' + CALLBACK_TYPE = "notification" + CALLBACK_NAME = "community.general.logdna" CALLBACK_NEEDS_WHITELIST = True def __init__(self, display=None): @@ -127,27 +127,27 @@ def __init__(self, display=None): def set_options(self, task_keys=None, var_options=None, direct=None): super().set_options(task_keys=task_keys, var_options=var_options, direct=direct) - self.conf_key = self.get_option('conf_key') - self.plugin_ignore_errors = self.get_option('plugin_ignore_errors') - self.conf_hostname = self.get_option('conf_hostname') - self.conf_tags = self.get_option('conf_tags') + self.conf_key = self.get_option("conf_key") + self.plugin_ignore_errors = self.get_option("plugin_ignore_errors") + self.conf_hostname = self.get_option("conf_hostname") + self.conf_tags = self.get_option("conf_tags") self.mac = get_mac() self.ip = get_ip() if self.conf_hostname is None: self.conf_hostname = get_hostname() - self.conf_tags 
= self.conf_tags.split(',') + self.conf_tags = self.conf_tags.split(",") if HAS_LOGDNA: - self.log = logging.getLogger('logdna') + self.log = logging.getLogger("logdna") self.log.setLevel(logging.INFO) - self.options = {'hostname': self.conf_hostname, 'mac': self.mac, 'index_meta': True} + self.options = {"hostname": self.conf_hostname, "mac": self.mac, "index_meta": True} self.log.addHandler(LogDNAHandler(self.conf_key, self.options)) self.disabled = False else: self.disabled = True - self._display.warning('WARNING:\nPlease, install LogDNA Python Package: `pip install logdna`') + self._display.warning("WARNING:\nPlease, install LogDNA Python Package: `pip install logdna`") def metaIndexing(self, meta): invalidKeys = [] @@ -159,25 +159,25 @@ def metaIndexing(self, meta): if ninvalidKeys > 0: for key in invalidKeys: del meta[key] - meta['__errors'] = f"These keys have been sanitized: {', '.join(invalidKeys)}" + meta["__errors"] = f"These keys have been sanitized: {', '.join(invalidKeys)}" return meta def sanitizeJSON(self, data): try: return json.loads(json.dumps(data, sort_keys=True, cls=AnsibleJSONEncoder)) except Exception: - return {'warnings': ['JSON Formatting Issue', json.dumps(data, sort_keys=True, cls=AnsibleJSONEncoder)]} + return {"warnings": ["JSON Formatting Issue", json.dumps(data, sort_keys=True, cls=AnsibleJSONEncoder)]} def flush(self, log, options): if HAS_LOGDNA: self.log.info(json.dumps(log), options) def sendLog(self, host, category, logdata): - options = {'app': 'ansible', 'meta': {'playbook': self.playbook_name, 'host': host, 'category': category}} - logdata['info'].pop('invocation', None) - warnings = logdata['info'].pop('warnings', None) + options = {"app": "ansible", "meta": {"playbook": self.playbook_name, "host": host, "category": category}} + logdata["info"].pop("invocation", None) + warnings = logdata["info"].pop("warnings", None) if warnings is not None: - self.flush({'warn': warnings}, options) + self.flush({"warn": warnings}, options) self.flush(logdata, options) def v2_playbook_on_start(self, playbook): @@ -188,21 +188,21 @@ def v2_playbook_on_stats(self, stats): result = dict() for host in stats.processed.keys(): result[host] = stats.summarize(host) - self.sendLog(self.conf_hostname, 'STATS', {'info': self.sanitizeJSON(result)}) + self.sendLog(self.conf_hostname, "STATS", {"info": self.sanitizeJSON(result)}) def runner_on_failed(self, host, res, ignore_errors=False): if self.plugin_ignore_errors: ignore_errors = self.plugin_ignore_errors - self.sendLog(host, 'FAILED', {'info': self.sanitizeJSON(res), 'ignore_errors': ignore_errors}) + self.sendLog(host, "FAILED", {"info": self.sanitizeJSON(res), "ignore_errors": ignore_errors}) def runner_on_ok(self, host, res): - self.sendLog(host, 'OK', {'info': self.sanitizeJSON(res)}) + self.sendLog(host, "OK", {"info": self.sanitizeJSON(res)}) def runner_on_unreachable(self, host, res): - self.sendLog(host, 'UNREACHABLE', {'info': self.sanitizeJSON(res)}) + self.sendLog(host, "UNREACHABLE", {"info": self.sanitizeJSON(res)}) def runner_on_async_failed(self, host, res, jid): - self.sendLog(host, 'ASYNC_FAILED', {'info': self.sanitizeJSON(res), 'job_id': jid}) + self.sendLog(host, "ASYNC_FAILED", {"info": self.sanitizeJSON(res), "job_id": jid}) def runner_on_async_ok(self, host, res, jid): - self.sendLog(host, 'ASYNC_OK', {'info': self.sanitizeJSON(res), 'job_id': jid}) + self.sendLog(host, "ASYNC_OK", {"info": self.sanitizeJSON(res), "job_id": jid}) diff --git a/plugins/callback/logentries.py 
b/plugins/callback/logentries.py index 469e31965d2..5ce8711b372 100644 --- a/plugins/callback/logentries.py +++ b/plugins/callback/logentries.py @@ -103,12 +103,14 @@ try: import certifi + HAS_CERTIFI = True except ImportError: HAS_CERTIFI = False try: import flatdict + HAS_FLATDICT = True except ImportError: HAS_FLATDICT = False @@ -121,8 +123,7 @@ class PlainTextSocketAppender: - def __init__(self, display, LE_API='data.logentries.com', LE_PORT=80, LE_TLS_PORT=443): - + def __init__(self, display, LE_API="data.logentries.com", LE_PORT=80, LE_TLS_PORT=443): self.LE_API = LE_API self.LE_PORT = LE_PORT self.LE_TLS_PORT = LE_TLS_PORT @@ -131,7 +132,7 @@ def __init__(self, display, LE_API='data.logentries.com', LE_PORT=80, LE_TLS_POR # Error message displayed when an incorrect Token has been detected self.INVALID_TOKEN = "\n\nIt appears the LOGENTRIES_TOKEN parameter you entered is incorrect!\n\n" # Unicode Line separator character \u2028 - self.LINE_SEP = '\u2028' + self.LINE_SEP = "\u2028" self._display = display self._conn = None @@ -170,13 +171,13 @@ def close_connection(self): def put(self, data): # Replace newlines with Unicode line separator # for multi-line events - data = to_text(data, errors='surrogate_or_strict') - multiline = data.replace('\n', self.LINE_SEP) + data = to_text(data, errors="surrogate_or_strict") + multiline = data.replace("\n", self.LINE_SEP) multiline += "\n" # Send data, reconnect if needed while True: try: - self._conn.send(to_bytes(multiline, errors='surrogate_or_strict')) + self._conn.send(to_bytes(multiline, errors="surrogate_or_strict")) except socket.error: self.reopen_connection() continue @@ -187,6 +188,7 @@ def put(self, data): try: import ssl + HAS_SSL = True except ImportError: # for systems without TLS support. SocketAppender = PlainTextSocketAppender @@ -198,11 +200,13 @@ def open_connection(self): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) context = ssl.create_default_context( purpose=ssl.Purpose.SERVER_AUTH, - cafile=certifi.where(), ) + cafile=certifi.where(), + ) sock = context.wrap_socket( sock=sock, do_handshake_on_connect=True, - suppress_ragged_eofs=True, ) + suppress_ragged_eofs=True, + ) sock.connect((self.LE_API, self.LE_TLS_PORT)) self._conn = sock @@ -211,12 +215,11 @@ def open_connection(self): class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.logentries' + CALLBACK_TYPE = "notification" + CALLBACK_NAME = "community.general.logentries" CALLBACK_NEEDS_WHITELIST = True def __init__(self): - # TODO: allow for alternate posting methods (REST/UDP/agent/etc) super().__init__() @@ -226,7 +229,9 @@ def __init__(self): if not HAS_CERTIFI: self.disabled = True - self._display.warning('The `certifi` python module is not installed.\nDisabling the Logentries callback plugin.') + self._display.warning( + "The `certifi` python module is not installed.\nDisabling the Logentries callback plugin." 
+ ) self.le_jobid = str(uuid.uuid4()) @@ -234,41 +239,47 @@ def __init__(self): self.timeout = 10 def set_options(self, task_keys=None, var_options=None, direct=None): - super().set_options(task_keys=task_keys, var_options=var_options, direct=direct) # get options try: - self.api_url = self.get_option('api') - self.api_port = self.get_option('port') - self.api_tls_port = self.get_option('tls_port') - self.use_tls = self.get_option('use_tls') - self.flatten = self.get_option('flatten') + self.api_url = self.get_option("api") + self.api_port = self.get_option("port") + self.api_tls_port = self.get_option("tls_port") + self.use_tls = self.get_option("use_tls") + self.flatten = self.get_option("flatten") except KeyError as e: self._display.warning(f"Missing option for Logentries callback plugin: {e}") self.disabled = True try: - self.token = self.get_option('token') + self.token = self.get_option("token") except KeyError as e: - self._display.warning('Logentries token was not provided, this is required for this callback to operate, disabling') + self._display.warning( + "Logentries token was not provided, this is required for this callback to operate, disabling" + ) self.disabled = True if self.flatten and not HAS_FLATDICT: self.disabled = True - self._display.warning('You have chosen to flatten and the `flatdict` python module is not installed.\nDisabling the Logentries callback plugin.') + self._display.warning( + "You have chosen to flatten and the `flatdict` python module is not installed.\nDisabling the Logentries callback plugin." + ) self._initialize_connections() def _initialize_connections(self): - if not self.disabled: if self.use_tls: self._display.vvvv(f"Connecting to {self.api_url}:{self.api_tls_port} with TLS") - self._appender = TLSSocketAppender(display=self._display, LE_API=self.api_url, LE_TLS_PORT=self.api_tls_port) + self._appender = TLSSocketAppender( + display=self._display, LE_API=self.api_url, LE_TLS_PORT=self.api_tls_port + ) else: self._display.vvvv(f"Connecting to {self.api_url}:{self.api_port}") - self._appender = PlainTextSocketAppender(display=self._display, LE_API=self.api_url, LE_PORT=self.api_port) + self._appender = PlainTextSocketAppender( + display=self._display, LE_API=self.api_url, LE_PORT=self.api_port + ) self._appender.reopen_connection() def emit_formatted(self, record): @@ -279,50 +290,50 @@ def emit_formatted(self, record): self.emit(self._dump_results(record)) def emit(self, record): - msg = record.rstrip('\n') + msg = record.rstrip("\n") msg = f"{self.token} {msg}" self._appender.put(msg) self._display.vvvv("Sent event to logentries") def _set_info(self, host, res): - return {'le_jobid': self.le_jobid, 'hostname': host, 'results': res} + return {"le_jobid": self.le_jobid, "hostname": host, "results": res} def runner_on_ok(self, host, res): results = self._set_info(host, res) - results['status'] = 'OK' + results["status"] = "OK" self.emit_formatted(results) def runner_on_failed(self, host, res, ignore_errors=False): results = self._set_info(host, res) - results['status'] = 'FAILED' + results["status"] = "FAILED" self.emit_formatted(results) def runner_on_skipped(self, host, item=None): results = self._set_info(host, item) - del results['results'] - results['status'] = 'SKIPPED' + del results["results"] + results["status"] = "SKIPPED" self.emit_formatted(results) def runner_on_unreachable(self, host, res): results = self._set_info(host, res) - results['status'] = 'UNREACHABLE' + results["status"] = "UNREACHABLE" self.emit_formatted(results) def 
runner_on_async_failed(self, host, res, jid): results = self._set_info(host, res) - results['jid'] = jid - results['status'] = 'ASYNC_FAILED' + results["jid"] = jid + results["status"] = "ASYNC_FAILED" self.emit_formatted(results) def v2_playbook_on_play_start(self, play): results = {} - results['le_jobid'] = self.le_jobid - results['started_by'] = os.getlogin() + results["le_jobid"] = self.le_jobid + results["started_by"] = os.getlogin() if play.name: - results['play'] = play.name - results['hosts'] = play.hosts + results["play"] = play.name + results["hosts"] = play.hosts self.emit_formatted(results) def playbook_on_stats(self, stats): - """ close connection """ + """close connection""" self._appender.close_connection()
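The U+2028 framing done by put() above is easy to check in isolation; a small self-contained sketch (the sample event text is invented):

LINE_SEP = "\u2028"  # Unicode line separator, as in the plugin

def frame(event: str) -> bytes:
    # Newlines inside an event become U+2028 so the whole event stays on one
    # wire line; the trailing "\n" is what terminates the event.
    return (event.replace("\n", LINE_SEP) + "\n").encode("utf-8")

print(frame("line one\nline two"))
# b'line one\xe2\x80\xa8line two\n'

diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py index 16d474fd386..c17f3191726 100644 --- a/plugins/callback/logstash.py +++ b/plugins/callback/logstash.py @@ -103,6 +103,7 @@ try: import logstash + HAS_LOGSTASH = True except ImportError: HAS_LOGSTASH = False @@ -115,10 +116,9 @@ class CallbackModule(CallbackBase): - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.logstash' + CALLBACK_TYPE = "notification" + CALLBACK_NAME = "community.general.logstash" CALLBACK_NEEDS_WHITELIST = True def __init__(self): @@ -132,14 +132,11 @@ def _init_plugin(self): if not self.disabled: - self.logger = logging.getLogger('python-logstash-logger') + self.logger = logging.getLogger("python-logstash-logger") self.logger.setLevel(logging.DEBUG) self.handler = logstash.TCPLogstashHandler( - self.ls_server, - self.ls_port, - version=1, - message_type=self.ls_type + self.ls_server, self.ls_port, version=1, message_type=self.ls_type ) self.logger.addHandler(self.handler) @@ -147,42 +144,36 @@ def _init_plugin(self): self.session = str(uuid.uuid4()) self.errors = 0 - self.base_data = { - 'session': self.session, - 'host': self.hostname - } + self.base_data = {"session": self.session, "host": self.hostname} if self.ls_pre_command is not None: - self.base_data['ansible_pre_command_output'] = os.popen( - self.ls_pre_command).read() + self.base_data["ansible_pre_command_output"] = os.popen(self.ls_pre_command).read() if context.CLIARGS is not None: - self.base_data['ansible_checkmode'] = context.CLIARGS.get('check') - self.base_data['ansible_tags'] = context.CLIARGS.get('tags') - self.base_data['ansible_skip_tags'] = context.CLIARGS.get('skip_tags') - self.base_data['inventory'] = context.CLIARGS.get('inventory') + self.base_data["ansible_checkmode"] = context.CLIARGS.get("check") + self.base_data["ansible_tags"] = context.CLIARGS.get("tags") + self.base_data["ansible_skip_tags"] = context.CLIARGS.get("skip_tags") + self.base_data["inventory"] = context.CLIARGS.get("inventory") def set_options(self, task_keys=None, var_options=None, direct=None): super().set_options(task_keys=task_keys, var_options=var_options, direct=direct) - self.ls_server = self.get_option('server') - self.ls_port = int(self.get_option('port')) - self.ls_type = self.get_option('type') - self.ls_pre_command = self.get_option('pre_command') - self.ls_format_version = self.get_option('format_version') + self.ls_server = self.get_option("server") + self.ls_port = int(self.get_option("port")) + self.ls_type = self.get_option("type") + self.ls_pre_command = self.get_option("pre_command") + self.ls_format_version = self.get_option("format_version") self._init_plugin() def v2_playbook_on_start(self, playbook): data = self.base_data.copy() -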
data['ansible_type'] = "start" - data['status'] = "OK" - data['ansible_playbook'] = playbook._file_name + data["ansible_type"] = "start" + data["status"] = "OK" + data["ansible_playbook"] = playbook._file_name if self.ls_format_version == "v2": - self.logger.info( - "START PLAYBOOK | %s", data['ansible_playbook'], extra=data - ) + self.logger.info("START PLAYBOOK | %s", data["ansible_playbook"], extra=data) else: self.logger.info("ansible start", extra=data) @@ -199,15 +190,13 @@ def v2_playbook_on_stats(self, stats): status = "FAILED" data = self.base_data.copy() - data['ansible_type'] = "finish" - data['status'] = status - data['ansible_playbook_duration'] = runtime.total_seconds() - data['ansible_result'] = json.dumps(summarize_stat) # deprecated field + data["ansible_type"] = "finish" + data["status"] = status + data["ansible_playbook_duration"] = runtime.total_seconds() + data["ansible_result"] = json.dumps(summarize_stat) # deprecated field if self.ls_format_version == "v2": - self.logger.info( - "FINISH PLAYBOOK | %s", json.dumps(summarize_stat), extra=data - ) + self.logger.info("FINISH PLAYBOOK | %s", json.dumps(summarize_stat), extra=data) else: self.logger.info("ansible stats", extra=data) @@ -218,10 +207,10 @@ def v2_playbook_on_play_start(self, play): self.play_name = play.name data = self.base_data.copy() - data['ansible_type'] = "start" - data['status'] = "OK" - data['ansible_play_id'] = self.play_id - data['ansible_play_name'] = self.play_name + data["ansible_type"] = "start" + data["status"] = "OK" + data["ansible_play_id"] = self.play_id + data["ansible_play_name"] = self.play_name if self.ls_format_version == "v2": self.logger.info("START PLAY | %s", self.play_name, extra=data) @@ -231,64 +220,61 @@ def v2_playbook_on_play_start(self, play): def v2_playbook_on_task_start(self, task, is_conditional): self.task_id = str(task._uuid) - ''' + """ Tasks and handler tasks are dealt with here - ''' + """ def v2_runner_on_ok(self, result, **kwargs): - task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '') + task_name = str(result._task).replace("TASK: ", "").replace("HANDLER: ", "") data = self.base_data.copy() - if task_name == 'setup': - data['ansible_type'] = "setup" - data['status'] = "OK" - data['ansible_host'] = result._host.name - data['ansible_play_id'] = self.play_id - data['ansible_play_name'] = self.play_name - data['ansible_task'] = task_name - data['ansible_facts'] = self._dump_results(result._result) + if task_name == "setup": + data["ansible_type"] = "setup" + data["status"] = "OK" + data["ansible_host"] = result._host.name + data["ansible_play_id"] = self.play_id + data["ansible_play_name"] = self.play_name + data["ansible_task"] = task_name + data["ansible_facts"] = self._dump_results(result._result) if self.ls_format_version == "v2": - self.logger.info( - "SETUP FACTS | %s", self._dump_results(result._result), extra=data - ) + self.logger.info("SETUP FACTS | %s", self._dump_results(result._result), extra=data) else: self.logger.info("ansible facts", extra=data) else: - if 'changed' in result._result.keys(): - data['ansible_changed'] = result._result['changed'] + if "changed" in result._result.keys(): + data["ansible_changed"] = result._result["changed"] else: - data['ansible_changed'] = False + data["ansible_changed"] = False - data['ansible_type'] = "task" - data['status'] = "OK" - data['ansible_host'] = result._host.name - data['ansible_play_id'] = self.play_id - data['ansible_play_name'] = self.play_name - data['ansible_task'] = 
task_name - data['ansible_task_id'] = self.task_id - data['ansible_result'] = self._dump_results(result._result) + data["ansible_type"] = "task" + data["status"] = "OK" + data["ansible_host"] = result._host.name + data["ansible_play_id"] = self.play_id + data["ansible_play_name"] = self.play_name + data["ansible_task"] = task_name + data["ansible_task_id"] = self.task_id + data["ansible_result"] = self._dump_results(result._result) if self.ls_format_version == "v2": self.logger.info( - "TASK OK | %s | RESULT | %s", - task_name, self._dump_results(result._result), extra=data + "TASK OK | %s | RESULT | %s", task_name, self._dump_results(result._result), extra=data ) else: self.logger.info("ansible ok", extra=data) def v2_runner_on_skipped(self, result, **kwargs): - task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '') + task_name = str(result._task).replace("TASK: ", "").replace("HANDLER: ", "") data = self.base_data.copy() - data['ansible_type'] = "task" - data['status'] = "SKIPPED" - data['ansible_host'] = result._host.name - data['ansible_play_id'] = self.play_id - data['ansible_play_name'] = self.play_name - data['ansible_task'] = task_name - data['ansible_task_id'] = self.task_id - data['ansible_result'] = self._dump_results(result._result) + data["ansible_type"] = "task" + data["status"] = "SKIPPED" + data["ansible_host"] = result._host.name + data["ansible_play_id"] = self.play_id + data["ansible_play_name"] = self.play_name + data["ansible_task"] = task_name + data["ansible_task_id"] = self.task_id + data["ansible_result"] = self._dump_results(result._result) if self.ls_format_version == "v2": self.logger.info("TASK SKIPPED | %s", task_name, extra=data) @@ -297,12 +283,12 @@ def v2_runner_on_skipped(self, result, **kwargs): def v2_playbook_on_import_for_host(self, result, imported_file): data = self.base_data.copy() - data['ansible_type'] = "import" - data['status'] = "IMPORTED" - data['ansible_host'] = result._host.name - data['ansible_play_id'] = self.play_id - data['ansible_play_name'] = self.play_name - data['imported_file'] = imported_file + data["ansible_type"] = "import" + data["status"] = "IMPORTED" + data["ansible_host"] = result._host.name + data["ansible_play_id"] = self.play_id + data["ansible_play_name"] = self.play_name + data["imported_file"] = imported_file if self.ls_format_version == "v2": self.logger.info("IMPORT | %s", imported_file, extra=data) @@ -311,12 +297,12 @@ def v2_playbook_on_import_for_host(self, result, imported_file): def v2_playbook_on_not_import_for_host(self, result, missing_file): data = self.base_data.copy() - data['ansible_type'] = "import" - data['status'] = "NOT IMPORTED" - data['ansible_host'] = result._host.name - data['ansible_play_id'] = self.play_id - data['ansible_play_name'] = self.play_name - data['imported_file'] = missing_file + data["ansible_type"] = "import" + data["status"] = "NOT IMPORTED" + data["ansible_host"] = result._host.name + data["ansible_play_id"] = self.play_id + data["ansible_play_name"] = self.play_name + data["imported_file"] = missing_file if self.ls_format_version == "v2": self.logger.info("NOT IMPORTED | %s", missing_file, extra=data) @@ -324,75 +310,81 @@ def v2_playbook_on_not_import_for_host(self, result, missing_file): self.logger.info("ansible import", extra=data) def v2_runner_on_failed(self, result, **kwargs): - task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '') + task_name = str(result._task).replace("TASK: ", "").replace("HANDLER: ", "") data = 
self.base_data.copy() - if 'changed' in result._result.keys(): - data['ansible_changed'] = result._result['changed'] + if "changed" in result._result.keys(): + data["ansible_changed"] = result._result["changed"] else: - data['ansible_changed'] = False + data["ansible_changed"] = False - data['ansible_type'] = "task" - data['status'] = "FAILED" - data['ansible_host'] = result._host.name - data['ansible_play_id'] = self.play_id - data['ansible_play_name'] = self.play_name - data['ansible_task'] = task_name - data['ansible_task_id'] = self.task_id - data['ansible_result'] = self._dump_results(result._result) + data["ansible_type"] = "task" + data["status"] = "FAILED" + data["ansible_host"] = result._host.name + data["ansible_play_id"] = self.play_id + data["ansible_play_name"] = self.play_name + data["ansible_task"] = task_name + data["ansible_task_id"] = self.task_id + data["ansible_result"] = self._dump_results(result._result) self.errors += 1 if self.ls_format_version == "v2": self.logger.error( "TASK FAILED | %s | HOST | %s | RESULT | %s", - task_name, self.hostname, - self._dump_results(result._result), extra=data + task_name, + self.hostname, + self._dump_results(result._result), + extra=data, ) else: self.logger.error("ansible failed", extra=data) def v2_runner_on_unreachable(self, result, **kwargs): - task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '') + task_name = str(result._task).replace("TASK: ", "").replace("HANDLER: ", "") data = self.base_data.copy() - data['ansible_type'] = "task" - data['status'] = "UNREACHABLE" - data['ansible_host'] = result._host.name - data['ansible_play_id'] = self.play_id - data['ansible_play_name'] = self.play_name - data['ansible_task'] = task_name - data['ansible_task_id'] = self.task_id - data['ansible_result'] = self._dump_results(result._result) + data["ansible_type"] = "task" + data["status"] = "UNREACHABLE" + data["ansible_host"] = result._host.name + data["ansible_play_id"] = self.play_id + data["ansible_play_name"] = self.play_name + data["ansible_task"] = task_name + data["ansible_task_id"] = self.task_id + data["ansible_result"] = self._dump_results(result._result) self.errors += 1 if self.ls_format_version == "v2": self.logger.error( "UNREACHABLE | %s | HOST | %s | RESULT | %s", - task_name, self.hostname, - self._dump_results(result._result), extra=data + task_name, + self.hostname, + self._dump_results(result._result), + extra=data, ) else: self.logger.error("ansible unreachable", extra=data) def v2_runner_on_async_failed(self, result, **kwargs): - task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '') + task_name = str(result._task).replace("TASK: ", "").replace("HANDLER: ", "") data = self.base_data.copy() - data['ansible_type'] = "task" - data['status'] = "FAILED" - data['ansible_host'] = result._host.name - data['ansible_play_id'] = self.play_id - data['ansible_play_name'] = self.play_name - data['ansible_task'] = task_name - data['ansible_task_id'] = self.task_id - data['ansible_result'] = self._dump_results(result._result) + data["ansible_type"] = "task" + data["status"] = "FAILED" + data["ansible_host"] = result._host.name + data["ansible_play_id"] = self.play_id + data["ansible_play_name"] = self.play_name + data["ansible_task"] = task_name + data["ansible_task_id"] = self.task_id + data["ansible_result"] = self._dump_results(result._result) self.errors += 1 if self.ls_format_version == "v2": self.logger.error( "ASYNC FAILED | %s | HOST | %s | RESULT | %s", - task_name, 
self.hostname, - self._dump_results(result._result), extra=data + task_name, + self.hostname, + self._dump_results(result._result), + extra=data, ) else: self.logger.error("ansible async", extra=data)
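A minimal sketch of the same python-logstash wiring done in _init_plugin() above, outside Ansible; the server address, port, and extra fields are placeholders, and the python-logstash package is assumed to be installed:

import logging

import logstash  # pip install python-logstash

logger = logging.getLogger("python-logstash-logger")
logger.setLevel(logging.DEBUG)
# Same handler setup as the plugin: TCP transport, logstash event format v1.
logger.addHandler(logstash.TCPLogstashHandler("localhost", 5000, version=1, message_type="ansible"))

# Keys passed via extra= become top-level fields of the logstash event.
logger.info("ansible start", extra={"session": "0000", "host": "controller", "status": "OK"})

diff --git a/plugins/callback/mail.py b/plugins/callback/mail.py index 40a0f093322..400aade4b7f 100644 --- a/plugins/callback/mail.py +++ b/plugins/callback/mail.py @@ -1,4 +1,3 @@ - # Copyright (c) 2012, Dag Wieers <dag@wieers.com> # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -92,33 +91,33 @@ class CallbackModule(CallbackBase): - ''' This Ansible callback plugin mails errors to interested parties. ''' + """This Ansible callback plugin mails errors to interested parties.""" + CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.mail' + CALLBACK_TYPE = "notification" + CALLBACK_NAME = "community.general.mail" CALLBACK_NEEDS_WHITELIST = True def __init__(self, display=None): super().__init__(display=display) self.sender = None - self.to = 'root' - self.smtphost = os.getenv('SMTPHOST', 'localhost') + self.to = "root" + self.smtphost = os.getenv("SMTPHOST", "localhost") self.smtpport = 25 self.cc = None self.bcc = None def set_options(self, task_keys=None, var_options=None, direct=None): - super().set_options(task_keys=task_keys, var_options=var_options, direct=direct) - self.sender = self.get_option('sender') - self.to = self.get_option('to') - self.smtphost = self.get_option('mta') - self.smtpport = self.get_option('mtaport') - self.cc = self.get_option('cc') - self.bcc = self.get_option('bcc') + self.sender = self.get_option("sender") + self.to = self.get_option("to") + self.smtphost = self.get_option("mta") + self.smtpport = self.get_option("mtaport") + self.cc = self.get_option("cc") + self.bcc = self.get_option("bcc") - def mail(self, subject='Ansible error mail', body=None): + def mail(self, subject="Ansible error mail", body=None): if body is None: body = subject @@ -132,14 +131,14 @@ def mail(self, subject='Ansible error mail', body=None): if self.bcc: bcc_addresses = email.utils.getaddresses(self.bcc) - content = f'Date: {email.utils.formatdate()}\n' - content += f'From: {email.utils.formataddr(sender_address)}\n' + content = f"Date: {email.utils.formatdate()}\n" + content += f"From: {email.utils.formataddr(sender_address)}\n" if self.to: content += f"To: {', '.join([email.utils.formataddr(pair) for pair in to_addresses])}\n" if self.cc: content += f"Cc: {', '.join([email.utils.formataddr(pair) for pair in cc_addresses])}\n" content += f"Message-ID: {email.utils.make_msgid(domain=self.get_option('message_id_domain'))}\n" - content += f'Subject: {subject.strip()}\n\n' + content += f"Subject: {subject.strip()}\n\n" content += body addresses = to_addresses @@ -149,23 +148,23 @@ def mail(self, subject='Ansible error mail', body=None): addresses += bcc_addresses if not addresses: - self._display.warning('No receiver has been specified for the mail callback plugin.') + self._display.warning("No receiver has been specified for the mail callback plugin.") smtp.sendmail(self.sender, [address for name, address in addresses], to_bytes(content)) smtp.quit() def subject_msg(self, multiline, failtype, linenr): - msg = multiline.strip('\r\n').splitlines()[linenr] - return f'{failtype}: {msg}' + msg = multiline.strip("\r\n").splitlines()[linenr] + return f"{failtype}: {msg}" def indent(self, multiline, indent=8): - return re.sub('^', ' ' * indent, multiline,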
flags=re.MULTILINE) + return re.sub("^", " " * indent, multiline, flags=re.MULTILINE) def body_blob(self, multiline, texttype): - ''' Turn some text output in a well-indented block for sending in a mail body ''' - intro = f'with the following {texttype}:\n\n' - blob = "\n".join(multiline.strip('\r\n').splitlines()) + """Turn some text output in a well-indented block for sending in a mail body""" + intro = f"with the following {texttype}:\n\n" + blob = "\n".join(multiline.strip("\r\n").splitlines()) return f"{intro}{self.indent(blob)}\n" def mail_result(self, result, failtype): @@ -176,83 +175,87 @@ def mail_result(self, result, failtype): # Add subject if self.itembody: subject = self.itemsubject - elif result._result.get('failed_when_result') is True: + elif result._result.get("failed_when_result") is True: subject = "Failed due to 'failed_when' condition" - elif result._result.get('msg'): - subject = self.subject_msg(result._result['msg'], failtype, 0) - elif result._result.get('stderr'): - subject = self.subject_msg(result._result['stderr'], failtype, -1) - elif result._result.get('stdout'): - subject = self.subject_msg(result._result['stdout'], failtype, -1) - elif result._result.get('exception'): # Unrelated exceptions are added to output :-/ - subject = self.subject_msg(result._result['exception'], failtype, -1) + elif result._result.get("msg"): + subject = self.subject_msg(result._result["msg"], failtype, 0) + elif result._result.get("stderr"): + subject = self.subject_msg(result._result["stderr"], failtype, -1) + elif result._result.get("stdout"): + subject = self.subject_msg(result._result["stdout"], failtype, -1) + elif result._result.get("exception"): # Unrelated exceptions are added to output :-/ + subject = self.subject_msg(result._result["exception"], failtype, -1) else: - subject = f'{failtype}: {result._task.name or result._task.action}' + subject = f"{failtype}: {result._task.name or result._task.action}" # Make playbook name visible (e.g. 
in Outlook/Gmail condensed view) - body = f'Playbook: {os.path.basename(self.playbook._file_name)}\n' + body = f"Playbook: {os.path.basename(self.playbook._file_name)}\n" if result._task.name: - body += f'Task: {result._task.name}\n' - body += f'Module: {result._task.action}\n' - body += f'Host: {host}\n' - body += '\n' + body += f"Task: {result._task.name}\n" + body += f"Module: {result._task.action}\n" + body += f"Host: {host}\n" + body += "\n" # Add task information (as much as possible) - body += 'The following task failed:\n\n' - if 'invocation' in result._result: - body += self.indent(f"{result._task.action}: {json.dumps(result._result['invocation']['module_args'], indent=4)}\n") + body += "The following task failed:\n\n" + if "invocation" in result._result: + body += self.indent( + f"{result._task.action}: {json.dumps(result._result['invocation']['module_args'], indent=4)}\n" + ) elif result._task.name: - body += self.indent(f'{result._task.name} ({result._task.action})\n') + body += self.indent(f"{result._task.name} ({result._task.action})\n") else: - body += self.indent(f'{result._task.action}\n') - body += '\n' + body += self.indent(f"{result._task.action}\n") + body += "\n" # Add item / message if self.itembody: body += self.itembody - elif result._result.get('failed_when_result') is True: - fail_cond_list = '\n- '.join(result._task.failed_when) + elif result._result.get("failed_when_result") is True: + fail_cond_list = "\n- ".join(result._task.failed_when) fail_cond = self.indent(f"failed_when:\n- {fail_cond_list}") body += f"due to the following condition:\n\n{fail_cond}\n\n" - elif result._result.get('msg'): - body += self.body_blob(result._result['msg'], 'message') + elif result._result.get("msg"): + body += self.body_blob(result._result["msg"], "message") # Add stdout / stderr / exception / warnings / deprecations - if result._result.get('stdout'): - body += self.body_blob(result._result['stdout'], 'standard output') - if result._result.get('stderr'): - body += self.body_blob(result._result['stderr'], 'error output') - if result._result.get('exception'): # Unrelated exceptions are added to output :-/ - body += self.body_blob(result._result['exception'], 'exception') - if result._result.get('warnings'): - for i in range(len(result._result.get('warnings'))): - body += self.body_blob(result._result['warnings'][i], f'exception {i + 1}') - if result._result.get('deprecations'): - for i in range(len(result._result.get('deprecations'))): - body += self.body_blob(result._result['deprecations'][i], f'exception {i + 1}') - - body += 'and a complete dump of the error:\n\n' - body += self.indent(f'{failtype}: {json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)}') + if result._result.get("stdout"): + body += self.body_blob(result._result["stdout"], "standard output") + if result._result.get("stderr"): + body += self.body_blob(result._result["stderr"], "error output") + if result._result.get("exception"): # Unrelated exceptions are added to output :-/ + body += self.body_blob(result._result["exception"], "exception") + if result._result.get("warnings"): + for i in range(len(result._result.get("warnings"))): + body += self.body_blob(result._result["warnings"][i], f"exception {i + 1}") + if result._result.get("deprecations"): + for i in range(len(result._result.get("deprecations"))): + body += self.body_blob(result._result["deprecations"][i], f"exception {i + 1}") + + body += "and a complete dump of the error:\n\n" + body += self.indent(f"{failtype}: {json.dumps(result._result, 
cls=AnsibleJSONEncoder, indent=4)}") self.mail(subject=subject, body=body) def v2_playbook_on_start(self, playbook): self.playbook = playbook - self.itembody = '' + self.itembody = "" def v2_runner_on_failed(self, result, ignore_errors=False): if ignore_errors: return - self.mail_result(result, 'Failed') + self.mail_result(result, "Failed") def v2_runner_on_unreachable(self, result): - self.mail_result(result, 'Unreachable') + self.mail_result(result, "Unreachable") def v2_runner_on_async_failed(self, result): - self.mail_result(result, 'Async failure') + self.mail_result(result, "Async failure") def v2_runner_item_on_failed(self, result): # Pass item information to task failure - self.itemsubject = result._result['msg'] - self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), f"failed item dump '{result._result['item']}'") + self.itemsubject = result._result["msg"] + self.itembody += self.body_blob( + json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), f"failed item dump '{result._result['item']}'" + )
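The mail() method above assembles message headers by hand with email.utils; a small sketch of that pattern with throwaway addresses (SMTP delivery itself is omitted):

import email.utils

sender = "ansible@example.com"                      # placeholder addresses throughout
to = ["root@example.com", "Ops <ops@example.com>"]

content = f"Date: {email.utils.formatdate()}\n"
content += f"From: {email.utils.formataddr(email.utils.parseaddr(sender))}\n"
content += f"To: {', '.join(email.utils.formataddr(p) for p in email.utils.getaddresses(to))}\n"
content += f"Message-ID: {email.utils.make_msgid(domain='example.com')}\n"
content += "Subject: Ansible error mail\n\n"
content += "The following task failed: ...\n"
print(content)

diff --git a/plugins/callback/nrdp.py b/plugins/callback/nrdp.py index 9f49ed6bcd3..e0fba85d376 100644 --- a/plugins/callback/nrdp.py +++ b/plugins/callback/nrdp.py @@ -73,13 +73,13 @@ class CallbackModule(CallbackBase): - ''' + """ send ansible-playbook to Nagios server using nrdp protocol - ''' + """ CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.nrdp' + CALLBACK_TYPE = "notification" + CALLBACK_NAME = "community.general.nrdp" CALLBACK_NEEDS_WHITELIST = True # Nagios states @@ -98,25 +98,26 @@ def __init__(self): def set_options(self, task_keys=None, var_options=None, direct=None): super().set_options(task_keys=task_keys, var_options=var_options, direct=direct) - self.url = self.get_option('url') - if not self.url.endswith('/'): - self.url += '/' - self.token = self.get_option('token') - self.hostname = self.get_option('hostname') - self.servicename = self.get_option('servicename') - self.validate_nrdp_certs = self.get_option('validate_certs') - - if (self.url or self.token or self.hostname or - self.servicename) is None: - self._display.warning("NRDP callback wants the NRDP_URL," - " NRDP_TOKEN, NRDP_HOSTNAME," - " NRDP_SERVICENAME" - " environment variables'." - " The NRDP callback plugin is disabled.") + self.url = self.get_option("url") + if not self.url.endswith("/"): + self.url += "/" + self.token = self.get_option("token") + self.hostname = self.get_option("hostname") + self.servicename = self.get_option("servicename") + self.validate_nrdp_certs = self.get_option("validate_certs") + + if (self.url or self.token or self.hostname or self.servicename) is None: + self._display.warning( + "NRDP callback wants the NRDP_URL," + " NRDP_TOKEN, NRDP_HOSTNAME," + " NRDP_SERVICENAME" + " environment variables'." + " The NRDP callback plugin is disabled."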
+ ) self.disabled = True def _send_nrdp(self, state, msg): - ''' + """ nrdp service check sends XMLDATA like this: <?xml version='1.0'?> <checkresults> @@ -127,7 +128,7 @@ def _send_nrdp(self, state, msg): <output>WARNING: Danger Will Robinson!|perfdata</output> </checkresult> </checkresults> - ''' + """ xmldata = "<?xml version='1.0'?>\n" xmldata += "<checkresults>\n" xmldata += "<checkresult type='service'>\n" @@ -138,31 +139,24 @@ xmldata += "</checkresult>\n" xmldata += "</checkresults>\n" - body = { - 'cmd': 'submitcheck', - 'token': self.token, - 'XMLDATA': to_bytes(xmldata) - } + body = {"cmd": "submitcheck", "token": self.token, "XMLDATA": to_bytes(xmldata)} try: - response = open_url(self.url, - data=urlencode(body), - method='POST', - validate_certs=self.validate_nrdp_certs) + response = open_url(self.url, data=urlencode(body), method="POST", validate_certs=self.validate_nrdp_certs) return response.read() except Exception as ex: self._display.warning(f"NRDP callback cannot send result {ex}") def v2_playbook_on_play_start(self, play): - ''' + """ Display Playbook and play start messages - ''' + """ self.play = play def v2_playbook_on_stats(self, stats): - ''' + """ Display info about playbook statistics - ''' + """ name = self.play gstats = "" hosts = sorted(stats.processed.keys()) @@ -170,13 +164,14 @@ for host in hosts: stat = stats.summarize(host) gstats += ( - f"'{host}_ok'={stat['ok']} '{host}_changed'={stat['changed']} '{host}_unreachable'={stat['unreachable']} '{host}_failed'={stat['failures']} " + f"'{host}_ok'={stat['ok']} '{host}_changed'={stat['changed']}" + f" '{host}_unreachable'={stat['unreachable']} '{host}_failed'={stat['failures']} " ) # Critical when failed tasks or unreachable host - critical += stat['failures'] - critical += stat['unreachable'] + critical += stat["failures"] + critical += stat["unreachable"] # Warning when changed tasks - warning += stat['changed'] + warning += stat["changed"] msg = f"{name} | {gstats}" if critical:
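To make the payload of _send_nrdp() concrete, a rough standalone sketch; the hostname/servicename/state/output builder lines fall in the part of the file the hunks above skip, so they are reconstructed, the token and URL are dummies, and plain urllib stands in for Ansible's open_url:

from urllib.parse import urlencode
from urllib.request import urlopen  # stand-in for ansible.module_utils.urls.open_url

def build_xmldata(hostname, servicename, state, output):
    # Mirrors the line-by-line construction in _send_nrdp().
    return (
        "<?xml version='1.0'?>\n"
        "<checkresults>\n"
        "<checkresult type='service'>\n"
        f"<hostname>{hostname}</hostname>\n"
        f"<servicename>{servicename}</servicename>\n"
        f"<state>{state}</state>\n"
        f"<output>{output}</output>\n"
        "</checkresult>\n"
        "</checkresults>\n"
    )

body = {"cmd": "submitcheck", "token": "dummy-token", "XMLDATA": build_xmldata("web01", "ansible", 2, "3 tasks failed")}
data = urlencode(body).encode()
# urlopen("https://nagios.example.com/nrdp/", data=data)  # POST; left commented out
print(data[:80])

diff --git a/plugins/callback/null.py b/plugins/callback/null.py index 3074a698d02..16b74ae23da 100644 --- a/plugins/callback/null.py +++ b/plugins/callback/null.py @@ -20,11 +20,10 @@ class CallbackModule(CallbackBase): - - ''' + """ This callback won't print messages to stdout when new callback events are received.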
- ''' + """ CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'community.general.null' + CALLBACK_TYPE = "stdout" + CALLBACK_NAME = "community.general.null" diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index 8cb37c80c67..ab5d6cd8a3a 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -155,13 +155,8 @@ from opentelemetry.trace.status import Status, StatusCode from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from opentelemetry.sdk.trace import TracerProvider - from opentelemetry.sdk.trace.export import ( - BatchSpanProcessor, - SimpleSpanProcessor - ) - from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( - InMemorySpanExporter - ) + from opentelemetry.sdk.trace.export import BatchSpanProcessor, SimpleSpanProcessor + from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter except ImportError as imp_exc: OTEL_LIBRARY_IMPORT_ERROR = imp_exc else: @@ -186,9 +181,9 @@ def __init__(self, uuid, name, path, play, action, args): def add_host(self, host): if host.uuid in self.host_data: - if host.status == 'included': + if host.status == "included": # concatenate task include output from multiple items - host.result = f'{self.host_data[host.uuid].result}\n{host.result}' + host.result = f"{self.host_data[host.uuid].result}\n{host.result}" else: return @@ -223,11 +218,11 @@ def __init__(self, display): def traceparent_context(self, traceparent): carrier = dict() - carrier['traceparent'] = traceparent + carrier["traceparent"] = traceparent return TraceContextTextMapPropagator().extract(carrier=carrier) def start_task(self, tasks_data, hide_task_arguments, play_name, task): - """ record the start of a task for one or more hosts """ + """record the start of a task for one or more hosts""" uuid = task._uuid @@ -245,33 +240,35 @@ def start_task(self, tasks_data, hide_task_arguments, play_name, task): tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args) def finish_task(self, tasks_data, status, result, dump): - """ record the results of a task for a single host """ + """record the results of a task for a single host""" task_uuid = result._task._uuid - if hasattr(result, '_host') and result._host is not None: + if hasattr(result, "_host") and result._host is not None: host_uuid = result._host._uuid host_name = result._host.name else: - host_uuid = 'include' - host_name = 'include' + host_uuid = "include" + host_name = "include" task = tasks_data[task_uuid] task.dump = dump task.add_host(HostData(host_uuid, host_name, status, result)) - def generate_distributed_traces(self, - otel_service_name, - ansible_playbook, - tasks_data, - status, - traceparent, - disable_logs, - disable_attributes_in_logs, - otel_exporter_otlp_traces_protocol, - store_spans_in_file): - """ generate distributed traces from the collected TaskData and HostData """ + def generate_distributed_traces( + self, + otel_service_name, + ansible_playbook, + tasks_data, + status, + traceparent, + disable_logs, + disable_attributes_in_logs, + otel_exporter_otlp_traces_protocol, + store_spans_in_file, + ): + """generate distributed traces from the collected TaskData and HostData""" tasks = [] parent_start_time = None @@ -280,18 +277,14 @@ def generate_distributed_traces(self, parent_start_time = task.start tasks.append(task) - trace.set_tracer_provider( - TracerProvider( - resource=Resource.create({SERVICE_NAME: otel_service_name}) - ) - ) + 
trace.set_tracer_provider(TracerProvider(resource=Resource.create({SERVICE_NAME: otel_service_name}))) otel_exporter = None if store_spans_in_file: otel_exporter = InMemorySpanExporter() processor = SimpleSpanProcessor(otel_exporter) else: - if otel_exporter_otlp_traces_protocol == 'grpc': + if otel_exporter_otlp_traces_protocol == "grpc": otel_exporter = GRPCOTLPSpanExporter() else: otel_exporter = HTTPOTLPSpanExporter() @@ -301,8 +294,12 @@ def generate_distributed_traces(self, tracer = trace.get_tracer(__name__) - with tracer.start_as_current_span(ansible_playbook, context=self.traceparent_context(traceparent), - start_time=parent_start_time, kind=SpanKind.SERVER) as parent: + with tracer.start_as_current_span( + ansible_playbook, + context=self.traceparent_context(traceparent), + start_time=parent_start_time, + kind=SpanKind.SERVER, + ) as parent: parent.set_status(status) # Populate trace metadata attributes parent.set_attribute("ansible.version", ansible_version) @@ -319,36 +316,38 @@ def generate_distributed_traces(self, return otel_exporter def update_span_data(self, task_data, host_data, span, disable_logs, disable_attributes_in_logs): - """ update the span with the given TaskData and HostData """ + """update the span with the given TaskData and HostData""" - name = f'[{host_data.name}] {task_data.play}: {task_data.name}' + name = f"[{host_data.name}] {task_data.play}: {task_data.name}" - message = 'success' + message = "success" res = {} rc = 0 status = Status(status_code=StatusCode.OK) - if host_data.status != 'included': + if host_data.status != "included": # Support loops enriched_error_message = None - if 'results' in host_data.result._result: - if host_data.status == 'failed': - message = self.get_error_message_from_results(host_data.result._result['results'], task_data.action) - enriched_error_message = self.enrich_error_message_from_results(host_data.result._result['results'], task_data.action) + if "results" in host_data.result._result: + if host_data.status == "failed": + message = self.get_error_message_from_results(host_data.result._result["results"], task_data.action) + enriched_error_message = self.enrich_error_message_from_results( + host_data.result._result["results"], task_data.action + ) else: res = host_data.result._result - rc = res.get('rc', 0) - if host_data.status == 'failed': + rc = res.get("rc", 0) + if host_data.status == "failed": message = self.get_error_message(res) enriched_error_message = self.enrich_error_message(res) - if host_data.status == 'failed': + if host_data.status == "failed": status = Status(status_code=StatusCode.ERROR, description=message) # Record an exception with the task message span.record_exception(BaseException(enriched_error_message)) - elif host_data.status == 'skipped': - message = res['skip_reason'] if 'skip_reason' in res else 'skipped' + elif host_data.status == "skipped": + message = res["skip_reason"] if "skip_reason" in res else "skipped" status = Status(status_code=StatusCode.UNSET) - elif host_data.status == 'ignored': + elif host_data.status == "ignored": status = Status(status_code=StatusCode.UNSET) span.set_status(status) @@ -360,7 +359,7 @@ def update_span_data(self, task_data, host_data, span, disable_logs, disable_att "ansible.task.name": name, "ansible.task.result": rc, "ansible.task.host.name": host_data.name, - "ansible.task.host.status": host_data.status + "ansible.task.host.status": host_data.status, } if isinstance(task_data.args, dict) and "gather_facts" not in task_data.action: names = 
tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.keys()) @@ -380,10 +379,10 @@ def update_span_data(self, task_data, host_data, span, disable_logs, disable_att span.end(end_time=host_data.finish) def set_span_attributes(self, span, attributes): - """ update the span attributes with the given attributes if not None """ + """update the span attributes with the given attributes if not None""" if span is None and self._display is not None: - self._display.warning('span object is None. Please double check if that is expected.') + self._display.warning("span object is None. Please double check if that is expected.") else: if attributes is not None: span.set_attributes(attributes) @@ -411,7 +410,18 @@ def parse_and_redact_url_if_possible(args): @staticmethod def url_from_args(args): # the order matters - url_args = ("url", "api_url", "baseurl", "repo", "server_url", "chart_repo_url", "registry_url", "endpoint", "uri", "updates_url") + url_args = ( + "url", + "api_url", + "baseurl", + "repo", + "server_url", + "chart_repo_url", + "registry_url", + "endpoint", + "uri", + "updates_url", + ) for arg in url_args: if args is not None and args.get(arg): return args.get(arg) @@ -436,33 +446,33 @@ def transform_ansible_unicode_to_str(value): @staticmethod def get_error_message(result): - if result.get('exception') is not None: - return OpenTelemetrySource._last_line(result['exception']) - return result.get('msg', 'failed') + if result.get("exception") is not None: + return OpenTelemetrySource._last_line(result["exception"]) + return result.get("msg", "failed") @staticmethod def get_error_message_from_results(results, action): for result in results: - if result.get('failed', False): + if result.get("failed", False): return f"{action}({result.get('item', 'none')}) - {OpenTelemetrySource.get_error_message(result)}" @staticmethod def _last_line(text): - lines = text.strip().split('\n') + lines = text.strip().split("\n") return lines[-1] @staticmethod def enrich_error_message(result): - message = result.get('msg', 'failed') - exception = result.get('exception') - stderr = result.get('stderr') - return f"message: \"{message}\"\nexception: \"{exception}\"\nstderr: \"{stderr}\"" + message = result.get("msg", "failed") + exception = result.get("exception") + stderr = result.get("stderr") + return f'message: "{message}"\nexception: "{exception}"\nstderr: "{stderr}"' @staticmethod def enrich_error_message_from_results(results, action): message = "" for result in results: - if result.get('failed', False): + if result.get("failed", False): message = f"{action}({result.get('item', 'none')}) - {OpenTelemetrySource.enrich_error_message(result)}\n{message}" return message @@ -473,8 +483,8 @@ class CallbackModule(CallbackBase): """ CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.opentelemetry' + CALLBACK_TYPE = "notification" + CALLBACK_NAME = "community.general.opentelemetry" CALLBACK_NEEDS_ENABLED = True def __init__(self, display=None): @@ -494,7 +504,7 @@ def __init__(self, display=None): if OTEL_LIBRARY_IMPORT_ERROR: raise AnsibleError( - 'The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin' + "The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin" ) from OTEL_LIBRARY_IMPORT_ERROR self.tasks_data = OrderedDict() @@ -504,33 +514,33 @@ def __init__(self, display=None): def set_options(self, task_keys=None, var_options=None, 
direct=None): super().set_options(task_keys=task_keys, var_options=var_options, direct=direct) - environment_variable = self.get_option('enable_from_environment') - if environment_variable is not None and os.environ.get(environment_variable, 'false').lower() != 'true': + environment_variable = self.get_option("enable_from_environment") + if environment_variable is not None and os.environ.get(environment_variable, "false").lower() != "true": self.disabled = True self._display.warning( f"The `enable_from_environment` option has been set and {environment_variable} is not enabled. Disabling the `opentelemetry` callback plugin." ) - self.hide_task_arguments = self.get_option('hide_task_arguments') + self.hide_task_arguments = self.get_option("hide_task_arguments") - self.disable_attributes_in_logs = self.get_option('disable_attributes_in_logs') + self.disable_attributes_in_logs = self.get_option("disable_attributes_in_logs") - self.disable_logs = self.get_option('disable_logs') + self.disable_logs = self.get_option("disable_logs") - self.store_spans_in_file = self.get_option('store_spans_in_file') + self.store_spans_in_file = self.get_option("store_spans_in_file") - self.otel_service_name = self.get_option('otel_service_name') + self.otel_service_name = self.get_option("otel_service_name") if not self.otel_service_name: - self.otel_service_name = 'ansible' + self.otel_service_name = "ansible" # See https://github.com/open-telemetry/opentelemetry-specification/issues/740 - self.traceparent = self.get_option('traceparent') + self.traceparent = self.get_option("traceparent") - self.otel_exporter_otlp_traces_protocol = self.get_option('otel_exporter_otlp_traces_protocol') + self.otel_exporter_otlp_traces_protocol = self.get_option("otel_exporter_otlp_traces_protocol") def dump_results(self, task, result): - """ dump the results if disable_logs is not enabled """ + """dump the results if disable_logs is not enabled""" if self.disable_logs: return "" # ansible.builtin.uri contains the response in the json field @@ -550,74 +560,40 @@ def v2_playbook_on_play_start(self, play): self.play_name = play.get_name() def v2_runner_on_no_hosts(self, task): - self.opentelemetry.start_task( - self.tasks_data, - self.hide_task_arguments, - self.play_name, - task - ) + self.opentelemetry.start_task(self.tasks_data, self.hide_task_arguments, self.play_name, task) def v2_playbook_on_task_start(self, task, is_conditional): - self.opentelemetry.start_task( - self.tasks_data, - self.hide_task_arguments, - self.play_name, - task - ) + self.opentelemetry.start_task(self.tasks_data, self.hide_task_arguments, self.play_name, task) def v2_playbook_on_cleanup_task_start(self, task): - self.opentelemetry.start_task( - self.tasks_data, - self.hide_task_arguments, - self.play_name, - task - ) + self.opentelemetry.start_task(self.tasks_data, self.hide_task_arguments, self.play_name, task) def v2_playbook_on_handler_task_start(self, task): - self.opentelemetry.start_task( - self.tasks_data, - self.hide_task_arguments, - self.play_name, - task - ) + self.opentelemetry.start_task(self.tasks_data, self.hide_task_arguments, self.play_name, task) def v2_runner_on_failed(self, result, ignore_errors=False): if ignore_errors: - status = 'ignored' + status = "ignored" else: - status = 'failed' + status = "failed" self.errors += 1 self.opentelemetry.finish_task( - self.tasks_data, - status, - result, - self.dump_results(self.tasks_data[result._task._uuid], result) + self.tasks_data, status, result, 
self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_runner_on_ok(self, result): self.opentelemetry.finish_task( - self.tasks_data, - 'ok', - result, - self.dump_results(self.tasks_data[result._task._uuid], result) + self.tasks_data, "ok", result, self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_runner_on_skipped(self, result): self.opentelemetry.finish_task( - self.tasks_data, - 'skipped', - result, - self.dump_results(self.tasks_data[result._task._uuid], result) + self.tasks_data, "skipped", result, self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_playbook_on_include(self, included_file): - self.opentelemetry.finish_task( - self.tasks_data, - 'included', - included_file, - "" - ) + self.opentelemetry.finish_task(self.tasks_data, "included", included_file, "") def v2_playbook_on_stats(self, stats): if self.errors == 0: @@ -633,7 +609,7 @@ def v2_playbook_on_stats(self, stats): self.disable_logs, self.disable_attributes_in_logs, self.otel_exporter_otlp_traces_protocol, - self.store_spans_in_file + self.store_spans_in_file, ) if self.store_spans_in_file:
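For reference, a minimal sketch of the in-memory exporter path the callback takes when store_spans_in_file is set; the span name and attribute values are invented:

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

provider = TracerProvider()
exporter = InMemorySpanExporter()
provider.add_span_processor(SimpleSpanProcessor(exporter))
trace.set_tracer_provider(provider)

tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span("site.yml") as span:  # plays/tasks would be child spans
    span.set_attribute("ansible.version", "2.17")

print([s.name for s in exporter.get_finished_spans()])

diff --git a/plugins/callback/print_task.py b/plugins/callback/print_task.py index c73cdb8cec5..6caeee2556b 100644 --- a/plugins/callback/print_task.py +++ b/plugins/callback/print_task.py @@ -37,9 +37,10 @@ class CallbackModule(CallbackBase): """ This callback module tells you how long your plays ran for. """ + CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'aggregate' - CALLBACK_NAME = 'community.general.print_task' + CALLBACK_TYPE = "aggregate" + CALLBACK_NAME = "community.general.print_task" CALLBACK_NEEDS_ENABLED = True @@ -48,7 +49,7 @@ def __init__(self): self._printed_message = False def _print_task(self, task): - if hasattr(task, '_ds'): + if hasattr(task, "_ds"): task_snippet = load(str([task._ds.copy()]), Loader=SafeLoader) task_yaml = dump(task_snippet, sort_keys=False, Dumper=SafeDumper) self._display.display(f"\n{task_yaml}\n") diff --git a/plugins/callback/say.py b/plugins/callback/say.py index d27b4592edc..28748c32690 100644 --- a/plugins/callback/say.py +++ b/plugins/callback/say.py @@ -30,13 +30,13 @@ class CallbackModule(CallbackBase): """ makes Ansible much more exciting.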
""" + CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.say' + CALLBACK_TYPE = "notification" + CALLBACK_NAME = "community.general.say" CALLBACK_NEEDS_WHITELIST = True def __init__(self): - super().__init__() self.FAILED_VOICE = None @@ -45,21 +45,23 @@ def __init__(self): self.LASER_VOICE = None try: - self.synthesizer = get_bin_path('say') - if platform.system() != 'Darwin': + self.synthesizer = get_bin_path("say") + if platform.system() != "Darwin": # 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter - self._display.warning(f"'say' executable found but system is '{platform.system()}': ignoring voice parameter") + self._display.warning( + f"'say' executable found but system is '{platform.system()}': ignoring voice parameter" + ) else: - self.FAILED_VOICE = 'Zarvox' - self.REGULAR_VOICE = 'Trinoids' - self.HAPPY_VOICE = 'Cellos' - self.LASER_VOICE = 'Princess' + self.FAILED_VOICE = "Zarvox" + self.REGULAR_VOICE = "Trinoids" + self.HAPPY_VOICE = "Cellos" + self.LASER_VOICE = "Princess" except ValueError: try: - self.synthesizer = get_bin_path('espeak') - self.FAILED_VOICE = 'klatt' - self.HAPPY_VOICE = 'f5' - self.LASER_VOICE = 'whisper' + self.synthesizer = get_bin_path("espeak") + self.FAILED_VOICE = "klatt" + self.HAPPY_VOICE = "f5" + self.LASER_VOICE = "whisper" except ValueError: self.synthesizer = None @@ -67,12 +69,14 @@ def __init__(self): # ansible will not call any callback if disabled is set to True if not self.synthesizer: self.disabled = True - self._display.warning(f"Unable to find either 'say' or 'espeak' executable, plugin {os.path.basename(__file__)} disabled") + self._display.warning( + f"Unable to find either 'say' or 'espeak' executable, plugin {os.path.basename(__file__)} disabled" + ) def say(self, msg, voice): cmd = [self.synthesizer, msg] if voice: - cmd.extend(('-v', voice)) + cmd.extend(("-v", voice)) subprocess.call(cmd) def runner_on_failed(self, host, res, ignore_errors=False): diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py index 207b814bc32..863f1318428 100644 --- a/plugins/callback/selective.py +++ b/plugins/callback/selective.py @@ -45,14 +45,14 @@ DONT_COLORIZE = False COLORS = { - 'normal': '\033[0m', - 'ok': f'\x1b[{C.COLOR_CODES[C.COLOR_OK]}m', # type: ignore - 'bold': '\033[1m', - 'not_so_bold': '\033[1m\033[34m', - 'changed': f'\x1b[{C.COLOR_CODES[C.COLOR_CHANGED]}m', # type: ignore - 'failed': f'\x1b[{C.COLOR_CODES[C.COLOR_ERROR]}m', # type: ignore - 'endc': '\033[0m', - 'skipped': f'\x1b[{C.COLOR_CODES[C.COLOR_SKIP]}m', # type: ignore + "normal": "\033[0m", + "ok": f"\x1b[{C.COLOR_CODES[C.COLOR_OK]}m", # type: ignore + "bold": "\033[1m", + "not_so_bold": "\033[1m\033[34m", + "changed": f"\x1b[{C.COLOR_CODES[C.COLOR_CHANGED]}m", # type: ignore + "failed": f"\x1b[{C.COLOR_CODES[C.COLOR_ERROR]}m", # type: ignore + "endc": "\033[0m", + "skipped": f"\x1b[{C.COLOR_CODES[C.COLOR_SKIP]}m", # type: ignore } @@ -78,8 +78,8 @@ class CallbackModule(CallbackBase): """selective.py callback plugin.""" CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'community.general.selective' + CALLBACK_TYPE = "stdout" + CALLBACK_NAME = "community.general.selective" def __init__(self, display=None): """selective.py callback plugin.""" @@ -89,11 +89,10 @@ def __init__(self, display=None): self.printed_last_task = False def set_options(self, task_keys=None, var_options=None, direct=None): - super().set_options(task_keys=task_keys, 
var_options=var_options, direct=direct) global DONT_COLORIZE - DONT_COLORIZE = self.get_option('nocolor') + DONT_COLORIZE = self.get_option("nocolor") def _print_task(self, task_name=None): if task_name is None: @@ -105,7 +104,7 @@ def _print_task(self, task_name=None): if self.last_skipped: print() line = f"# {task_name} " - msg = colorize(f"{line}{'*' * (line_length - len(line))}", 'bold') + msg = colorize(f"{line}{'*' * (line_length - len(line))}", "bold") print(msg) def _indent_text(self, text, indent_level): @@ -113,48 +112,51 @@ def _indent_text(self, text, indent_level): result_lines = [] for l in lines: result_lines.append(f"{' ' * indent_level}{l}") - return '\n'.join(result_lines) + return "\n".join(result_lines) def _print_diff(self, diff, indent_level): if isinstance(diff, dict): try: - diff = '\n'.join(difflib.unified_diff(diff['before'].splitlines(), - diff['after'].splitlines(), - fromfile=diff.get('before_header', - 'new_file'), - tofile=diff['after_header'])) + diff = "\n".join( + difflib.unified_diff( + diff["before"].splitlines(), + diff["after"].splitlines(), + fromfile=diff.get("before_header", "new_file"), + tofile=diff["after_header"], + ) + ) except AttributeError: - diff = dict_diff(diff['before'], diff['after']) + diff = dict_diff(diff["before"], diff["after"]) if diff: - diff = colorize(str(diff), 'changed') + diff = colorize(str(diff), "changed") print(self._indent_text(diff, indent_level + 4)) def _print_host_or_item(self, host_or_item, changed, msg, diff, is_host, error, stdout, stderr): if is_host: indent_level = 0 - name = colorize(host_or_item.name, 'not_so_bold') + name = colorize(host_or_item.name, "not_so_bold") else: indent_level = 4 if isinstance(host_or_item, dict): - if 'key' in host_or_item.keys(): - host_or_item = host_or_item['key'] - name = colorize(to_text(host_or_item), 'bold') + if "key" in host_or_item.keys(): + host_or_item = host_or_item["key"] + name = colorize(to_text(host_or_item), "bold") if error: - color = 'failed' - change_string = colorize('FAILED!!!', color) + color = "failed" + change_string = colorize("FAILED!!!", color) else: - color = 'changed' if changed else 'ok' + color = "changed" if changed else "ok" change_string = colorize(f"changed={changed}", color) msg = colorize(msg, color) line_length = 120 - spaces = ' ' * (40 - len(name) - indent_level) + spaces = " " * (40 - len(name) - indent_level) line = f"{' ' * indent_level} * {name}{spaces}- {change_string}" if len(msg) < 50: - line += f' -- {msg}' + line += f" -- {msg}" print(f"{line} {'-' * (line_length - len(line))}---------") else: print(f"{line} {'-' * (line_length - len(line))}") @@ -163,10 +165,10 @@ def _print_host_or_item(self, host_or_item, changed, msg, diff, is_host, error, if diff: self._print_diff(diff, indent_level) if stdout: - stdout = colorize(stdout, 'failed') + stdout = colorize(stdout, "failed") print(self._indent_text(stdout, indent_level + 4)) if stderr: - stderr = colorize(stderr, 'failed') + stderr = colorize(stderr, "failed") print(self._indent_text(stderr, indent_level + 4)) def v2_playbook_on_play_start(self, play): @@ -181,61 +183,61 @@ def v2_playbook_on_task_start(self, task, **kwargs): def _print_task_result(self, result, error=False, **kwargs): """Run when a task finishes correctly.""" - if 'print_action' in result._task.tags or error or self._display.verbosity > 1: + if "print_action" in result._task.tags or error or self._display.verbosity > 1: self._print_task() self.last_skipped = False - msg = to_text(result._result.get('msg', '')) or\ 
- to_text(result._result.get('reason', '')) + msg = to_text(result._result.get("msg", "")) or to_text(result._result.get("reason", "")) - stderr = [result._result.get('exception', None), - result._result.get('module_stderr', None)] + stderr = [result._result.get("exception", None), result._result.get("module_stderr", None)] stderr = "\n".join([e for e in stderr if e]).strip() - self._print_host_or_item(result._host, - result._result.get('changed', False), - msg, - result._result.get('diff', None), - is_host=True, - error=error, - stdout=result._result.get('module_stdout', None), - stderr=stderr.strip(), - ) - if 'results' in result._result: - for r in result._result['results']: - failed = 'failed' in r and r['failed'] - - stderr = [r.get('exception', None), r.get('module_stderr', None)] + self._print_host_or_item( + result._host, + result._result.get("changed", False), + msg, + result._result.get("diff", None), + is_host=True, + error=error, + stdout=result._result.get("module_stdout", None), + stderr=stderr.strip(), + ) + if "results" in result._result: + for r in result._result["results"]: + failed = "failed" in r and r["failed"] + + stderr = [r.get("exception", None), r.get("module_stderr", None)] stderr = "\n".join([e for e in stderr if e]).strip() - self._print_host_or_item(r[r['ansible_loop_var']], - r.get('changed', False), - to_text(r.get('msg', '')), - r.get('diff', None), - is_host=False, - error=failed, - stdout=r.get('module_stdout', None), - stderr=stderr.strip(), - ) + self._print_host_or_item( + r[r["ansible_loop_var"]], + r.get("changed", False), + to_text(r.get("msg", "")), + r.get("diff", None), + is_host=False, + error=failed, + stdout=r.get("module_stdout", None), + stderr=stderr.strip(), + ) else: self.last_skipped = True - print('.', end="") + print(".", end="") def v2_playbook_on_stats(self, stats): """Display info about playbook statistics.""" print() self.printed_last_task = False - self._print_task('STATS') + self._print_task("STATS") hosts = sorted(stats.processed.keys()) for host in hosts: s = stats.summarize(host) - if s['failures'] or s['unreachable']: - color = 'failed' - elif s['changed']: - color = 'changed' + if s["failures"] or s["unreachable"]: + color = "failed" + elif s["changed"]: + color = "changed" else: - color = 'ok' + color = "ok" msg = ( f"{host} : ok={s['ok']}\tchanged={s['changed']}\tfailed={s['failures']}\tunreachable=" @@ -250,14 +252,13 @@ def v2_runner_on_skipped(self, result, **kwargs): self.last_skipped = False line_length = 120 - spaces = ' ' * (31 - len(result._host.name) - 4) + spaces = " " * (31 - len(result._host.name) - 4) line = f" * {colorize(result._host.name, 'not_so_bold')}{spaces}- {colorize('skipped', 'skipped')}" - reason = result._result.get('skipped_reason', '') or \ - result._result.get('skip_reason', '') + reason = result._result.get("skipped_reason", "") or result._result.get("skip_reason", "") if len(reason) < 50: - line += f' -- {reason}' + line += f" -- {reason}" print(f"{line} {'-' * (line_length - len(line))}---------") else: print(f"{line} {'-' * (line_length - len(line))}")
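The dict branch of _print_diff() above boils down to difflib.unified_diff over the before/after texts; a tiny sketch with invented before/after content:

import difflib

diff = {"before": "enabled: true\nport: 80\n", "after": "enabled: true\nport: 443\n",
        "before_header": "old", "after_header": "new"}
print("\n".join(difflib.unified_diff(diff["before"].splitlines(),
                                     diff["after"].splitlines(),
                                     fromfile=diff.get("before_header", "new_file"),
                                     tofile=diff["after_header"])))

diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py index 7570c981bdc..5c4ae9750be 100644 --- a/plugins/callback/slack.py +++ b/plugins/callback/slack.py @@ -70,6 +70,7 @@ try: import prettytable + HAS_PRETTYTABLE = True except ImportError: HAS_PRETTYTABLE = False @@ -79,20 +80,20 @@ class CallbackModule(CallbackBase): """This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution.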
""" + CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.slack' + CALLBACK_TYPE = "notification" + CALLBACK_NAME = "community.general.slack" CALLBACK_NEEDS_WHITELIST = True def __init__(self, display=None): - super().__init__(display=display) if not HAS_PRETTYTABLE: self.disabled = True - self._display.warning('The `prettytable` python module is not ' - 'installed. Disabling the Slack callback ' - 'plugin.') + self._display.warning( + "The `prettytable` python module is not installed. Disabling the Slack callback plugin." + ) self.playbook_name = None @@ -102,34 +103,34 @@ def __init__(self, display=None): self.guid = uuid.uuid4().hex[:6] def set_options(self, task_keys=None, var_options=None, direct=None): - super().set_options(task_keys=task_keys, var_options=var_options, direct=direct) - self.webhook_url = self.get_option('webhook_url') - self.channel = self.get_option('channel') - self.username = self.get_option('username') - self.show_invocation = (self._display.verbosity > 1) - self.validate_certs = self.get_option('validate_certs') - self.http_agent = self.get_option('http_agent') + self.webhook_url = self.get_option("webhook_url") + self.channel = self.get_option("channel") + self.username = self.get_option("username") + self.show_invocation = self._display.verbosity > 1 + self.validate_certs = self.get_option("validate_certs") + self.http_agent = self.get_option("http_agent") if self.webhook_url is None: self.disabled = True - self._display.warning('Slack Webhook URL was not provided. The ' - 'Slack Webhook URL can be provided using ' - 'the `SLACK_WEBHOOK_URL` environment ' - 'variable.') + self._display.warning( + "Slack Webhook URL was not provided. The " + "Slack Webhook URL can be provided using " + "the `SLACK_WEBHOOK_URL` environment " + "variable." 
+ ) def send_msg(self, attachments): headers = { - 'Content-type': 'application/json', + "Content-type": "application/json", } payload = { - 'channel': self.channel, - 'username': self.username, - 'attachments': attachments, - 'parse': 'none', - 'icon_url': ('https://cdn2.hubspot.net/hub/330046/' - 'file-449187601-png/ansible_badge.png'), + "channel": self.channel, + "username": self.username, + "attachments": attachments, + "parse": "none", + "icon_url": ("https://cdn2.hubspot.net/hub/330046/file-449187601-png/ansible_badge.png"), } data = json.dumps(payload) @@ -145,67 +146,63 @@ def send_msg(self, attachments): ) return response.read() except Exception as e: - self._display.warning(f'Could not submit message to Slack: {e}') + self._display.warning(f"Could not submit message to Slack: {e}") def v2_playbook_on_start(self, playbook): self.playbook_name = os.path.basename(playbook._file_name) - title = [ - f'*Playbook initiated* (_{self.guid}_)' - ] + title = [f"*Playbook initiated* (_{self.guid}_)"] invocation_items = [] if context.CLIARGS and self.show_invocation: - tags = context.CLIARGS['tags'] - skip_tags = context.CLIARGS['skip_tags'] - extra_vars = context.CLIARGS['extra_vars'] - subset = context.CLIARGS['subset'] - inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']] + tags = context.CLIARGS["tags"] + skip_tags = context.CLIARGS["skip_tags"] + extra_vars = context.CLIARGS["extra_vars"] + subset = context.CLIARGS["subset"] + inventory = [os.path.abspath(i) for i in context.CLIARGS["inventory"]] invocation_items.append(f"Inventory: {', '.join(inventory)}") - if tags and tags != ['all']: + if tags and tags != ["all"]: invocation_items.append(f"Tags: {', '.join(tags)}") if skip_tags: invocation_items.append(f"Skip Tags: {', '.join(skip_tags)}") if subset: - invocation_items.append(f'Limit: {subset}') + invocation_items.append(f"Limit: {subset}") if extra_vars: invocation_items.append(f"Extra Vars: {' '.join(extra_vars)}") title.append(f"by *{context.CLIARGS['remote_user']}*") - title.append(f'\n\n*{self.playbook_name}*') - msg_items = [' '.join(title)] + title.append(f"\n\n*{self.playbook_name}*") + msg_items = [" ".join(title)] if invocation_items: - _inv_item = '\n'.join(invocation_items) - msg_items.append(f'```\n{_inv_item}\n```') - - msg = '\n'.join(msg_items) - - attachments = [{ - 'fallback': msg, - 'fields': [ - { - 'value': msg - } - ], - 'color': 'warning', - 'mrkdwn_in': ['text', 'fallback', 'fields'], - }] + _inv_item = "\n".join(invocation_items) + msg_items.append(f"```\n{_inv_item}\n```") + + msg = "\n".join(msg_items) + + attachments = [ + { + "fallback": msg, + "fields": [{"value": msg}], + "color": "warning", + "mrkdwn_in": ["text", "fallback", "fields"], + } + ] self.send_msg(attachments=attachments) def v2_playbook_on_play_start(self, play): """Display Play start messages""" - name = play.name or f'Play name not specified ({play._uuid})' - msg = f'*Starting play* (_{self.guid}_)\n\n*{name}*' + name = play.name or f"Play name not specified ({play._uuid})" + msg = f"*Starting play* (_{self.guid}_)\n\n*{name}*" attachments = [ { - 'fallback': msg, - 'text': msg, - 'color': 'warning', - 'mrkdwn_in': ['text', 'fallback', 'fields'], + "fallback": msg, + "text": msg, + "color": "warning", + "mrkdwn_in": ["text", "fallback", "fields"], } ] self.send_msg(attachments=attachments) @@ -215,8 +212,7 @@ def v2_playbook_on_stats(self, stats): hosts = sorted(stats.processed.keys()) - t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable', - 'Failures', 
'Rescued', 'Ignored']) + t = prettytable.PrettyTable(["Host", "Ok", "Changed", "Unreachable", "Failures", "Rescued", "Ignored"]) failures = False unreachable = False @@ -224,38 +220,28 @@ def v2_playbook_on_stats(self, stats): for h in hosts: s = stats.summarize(h) - if s['failures'] > 0: + if s["failures"] > 0: failures = True - if s['unreachable'] > 0: + if s["unreachable"] > 0: unreachable = True - t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable', - 'failures', 'rescued', 'ignored']]) + t.add_row([h] + [s[k] for k in ["ok", "changed", "unreachable", "failures", "rescued", "ignored"]]) attachments = [] - msg_items = [ - f'*Playbook Complete* (_{self.guid}_)' - ] + msg_items = [f"*Playbook Complete* (_{self.guid}_)"] if failures or unreachable: - color = 'danger' - msg_items.append('\n*Failed!*') + color = "danger" + msg_items.append("\n*Failed!*") else: - color = 'good' - msg_items.append('\n*Success!*') - - msg_items.append(f'```\n{t}\n```') - - msg = '\n'.join(msg_items) - - attachments.append({ - 'fallback': msg, - 'fields': [ - { - 'value': msg - } - ], - 'color': color, - 'mrkdwn_in': ['text', 'fallback', 'fields'] - }) + color = "good" + msg_items.append("\n*Success!*") + + msg_items.append(f"```\n{t}\n```") + + msg = "\n".join(msg_items) + + attachments.append( + {"fallback": msg, "fields": [{"value": msg}], "color": color, "mrkdwn_in": ["text", "fallback", "fields"]} + ) self.send_msg(attachments=attachments) diff --git a/plugins/callback/splunk.py b/plugins/callback/splunk.py index 2a3dcaef4bf..0cf23b70dcc 100644 --- a/plugins/callback/splunk.py +++ b/plugins/callback/splunk.py @@ -110,7 +110,7 @@ def __init__(self): self.user = getpass.getuser() def send_event(self, url, authtoken, validate_certs, include_milliseconds, batch, state, result, runtime): - if result._task_fields['args'].get('_ansible_check_mode') is True: + if result._task_fields["args"].get("_ansible_check_mode") is True: self.ansible_check_mode = True if result._task._role: @@ -118,33 +118,33 @@ def send_event(self, url, authtoken, validate_certs, include_milliseconds, batch else: ansible_role = None - if 'args' in result._task_fields: - del result._task_fields['args'] + if "args" in result._task_fields: + del result._task_fields["args"] data = {} - data['uuid'] = result._task._uuid - data['session'] = self.session + data["uuid"] = result._task._uuid + data["session"] = self.session if batch is not None: - data['batch'] = batch - data['status'] = state + data["batch"] = batch + data["status"] = state if include_milliseconds: - time_format = '%Y-%m-%d %H:%M:%S.%f +0000' + time_format = "%Y-%m-%d %H:%M:%S.%f +0000" else: - time_format = '%Y-%m-%d %H:%M:%S +0000' - - data['timestamp'] = now().strftime(time_format) - data['host'] = self.host - data['ip_address'] = self.ip_address - data['user'] = self.user - data['runtime'] = runtime - data['ansible_version'] = ansible_version - data['ansible_check_mode'] = self.ansible_check_mode - data['ansible_host'] = result._host.name - data['ansible_playbook'] = self.ansible_playbook - data['ansible_role'] = ansible_role - data['ansible_task'] = result._task_fields - data['ansible_result'] = result._result + time_format = "%Y-%m-%d %H:%M:%S +0000" + + data["timestamp"] = now().strftime(time_format) + data["host"] = self.host + data["ip_address"] = self.ip_address + data["user"] = self.user + data["runtime"] = runtime + data["ansible_version"] = ansible_version + data["ansible_check_mode"] = self.ansible_check_mode + data["ansible_host"] = result._host.name + 
data["ansible_playbook"] = self.ansible_playbook + data["ansible_role"] = ansible_role + data["ansible_task"] = result._task_fields + data["ansible_result"] = result._result # This wraps the json payload in and outer json event needed by Splunk jsondata = json.dumps({"event": data}, cls=AnsibleJSONEncoder, sort_keys=True) @@ -152,19 +152,16 @@ def send_event(self, url, authtoken, validate_certs, include_milliseconds, batch open_url( url, jsondata, - headers={ - 'Content-type': 'application/json', - 'Authorization': f"Splunk {authtoken}" - }, - method='POST', - validate_certs=validate_certs + headers={"Content-type": "application/json", "Authorization": f"Splunk {authtoken}"}, + method="POST", + validate_certs=validate_certs, ) class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.splunk' + CALLBACK_TYPE = "notification" + CALLBACK_NAME = "community.general.splunk" CALLBACK_NEEDS_WHITELIST = True def __init__(self, display=None): @@ -178,39 +175,40 @@ def __init__(self, display=None): self.splunk = SplunkHTTPCollectorSource() def _runtime(self, result): - return ( - now() - - self.start_datetimes[result._task._uuid] - ).total_seconds() + return (now() - self.start_datetimes[result._task._uuid]).total_seconds() def set_options(self, task_keys=None, var_options=None, direct=None): super().set_options(task_keys=task_keys, var_options=var_options, direct=direct) - self.url = self.get_option('url') + self.url = self.get_option("url") if self.url is None: self.disabled = True - self._display.warning('Splunk HTTP collector source URL was ' - 'not provided. The Splunk HTTP collector ' - 'source URL can be provided using the ' - '`SPLUNK_URL` environment variable or ' - 'in the ansible.cfg file.') + self._display.warning( + "Splunk HTTP collector source URL was " + "not provided. The Splunk HTTP collector " + "source URL can be provided using the " + "`SPLUNK_URL` environment variable or " + "in the ansible.cfg file." + ) - self.authtoken = self.get_option('authtoken') + self.authtoken = self.get_option("authtoken") if self.authtoken is None: self.disabled = True - self._display.warning('Splunk HTTP collector requires an authentication' - 'token. The Splunk HTTP collector ' - 'authentication token can be provided using the ' - '`SPLUNK_AUTHTOKEN` environment variable or ' - 'in the ansible.cfg file.') + self._display.warning( + "Splunk HTTP collector requires an authentication" + "token. The Splunk HTTP collector " + "authentication token can be provided using the " + "`SPLUNK_AUTHTOKEN` environment variable or " + "in the ansible.cfg file." 
+ ) - self.validate_certs = self.get_option('validate_certs') + self.validate_certs = self.get_option("validate_certs") - self.include_milliseconds = self.get_option('include_milliseconds') + self.include_milliseconds = self.get_option("include_milliseconds") - self.batch = self.get_option('batch') + self.batch = self.get_option("batch") def v2_playbook_on_start(self, playbook): self.splunk.ansible_playbook = basename(playbook._file_name) @@ -228,9 +226,9 @@ def v2_runner_on_ok(self, result, **kwargs): self.validate_certs, self.include_milliseconds, self.batch, - 'OK', + "OK", result, - self._runtime(result) + self._runtime(result), ) def v2_runner_on_skipped(self, result, **kwargs): @@ -240,9 +238,9 @@ def v2_runner_on_skipped(self, result, **kwargs): self.validate_certs, self.include_milliseconds, self.batch, - 'SKIPPED', + "SKIPPED", result, - self._runtime(result) + self._runtime(result), ) def v2_runner_on_failed(self, result, **kwargs): @@ -252,9 +250,9 @@ def v2_runner_on_failed(self, result, **kwargs): self.validate_certs, self.include_milliseconds, self.batch, - 'FAILED', + "FAILED", result, - self._runtime(result) + self._runtime(result), ) def runner_on_async_failed(self, result, **kwargs): @@ -264,9 +262,9 @@ def runner_on_async_failed(self, result, **kwargs): self.validate_certs, self.include_milliseconds, self.batch, - 'FAILED', + "FAILED", result, - self._runtime(result) + self._runtime(result), ) def v2_runner_on_unreachable(self, result, **kwargs): @@ -276,7 +274,7 @@ def v2_runner_on_unreachable(self, result, **kwargs): self.validate_certs, self.include_milliseconds, self.batch, - 'UNREACHABLE', + "UNREACHABLE", result, - self._runtime(result) + self._runtime(result), ) diff --git a/plugins/callback/sumologic.py b/plugins/callback/sumologic.py index 15928cacc8b..ea2bb9c91ea 100644 --- a/plugins/callback/sumologic.py +++ b/plugins/callback/sumologic.py @@ -67,7 +67,7 @@ def __init__(self): self.user = getpass.getuser() def send_event(self, url, state, result, runtime): - if result._task_fields['args'].get('_ansible_check_mode') is True: + if result._task_fields["args"].get("_ansible_check_mode") is True: self.ansible_check_mode = True if result._task._role: @@ -75,41 +75,38 @@ def send_event(self, url, state, result, runtime): else: ansible_role = None - if 'args' in result._task_fields: - del result._task_fields['args'] + if "args" in result._task_fields: + del result._task_fields["args"] data = {} - data['uuid'] = result._task._uuid - data['session'] = self.session - data['status'] = state - data['timestamp'] = now().strftime('%Y-%m-%d %H:%M:%S +0000') - data['host'] = self.host - data['ip_address'] = self.ip_address - data['user'] = self.user - data['runtime'] = runtime - data['ansible_version'] = ansible_version - data['ansible_check_mode'] = self.ansible_check_mode - data['ansible_host'] = result._host.name - data['ansible_playbook'] = self.ansible_playbook - data['ansible_role'] = ansible_role - data['ansible_task'] = result._task_fields - data['ansible_result'] = result._result + data["uuid"] = result._task._uuid + data["session"] = self.session + data["status"] = state + data["timestamp"] = now().strftime("%Y-%m-%d %H:%M:%S +0000") + data["host"] = self.host + data["ip_address"] = self.ip_address + data["user"] = self.user + data["runtime"] = runtime + data["ansible_version"] = ansible_version + data["ansible_check_mode"] = self.ansible_check_mode + data["ansible_host"] = result._host.name + data["ansible_playbook"] = self.ansible_playbook + data["ansible_role"] = 
ansible_role + data["ansible_task"] = result._task_fields + data["ansible_result"] = result._result open_url( url, data=json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True), - headers={ - 'Content-type': 'application/json', - 'X-Sumo-Host': data['ansible_host'] - }, - method='POST' + headers={"Content-type": "application/json", "X-Sumo-Host": data["ansible_host"]}, + method="POST", ) class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.sumologic' + CALLBACK_TYPE = "notification" + CALLBACK_NAME = "community.general.sumologic" CALLBACK_NEEDS_WHITELIST = True def __init__(self, display=None): @@ -119,23 +116,22 @@ def __init__(self, display=None): self.sumologic = SumologicHTTPCollectorSource() def _runtime(self, result): - return ( - now() - - self.start_datetimes[result._task._uuid] - ).total_seconds() + return (now() - self.start_datetimes[result._task._uuid]).total_seconds() def set_options(self, task_keys=None, var_options=None, direct=None): super().set_options(task_keys=task_keys, var_options=var_options, direct=direct) - self.url = self.get_option('url') + self.url = self.get_option("url") if self.url is None: self.disabled = True - self._display.warning('Sumologic HTTP collector source URL was ' - 'not provided. The Sumologic HTTP collector ' - 'source URL can be provided using the ' - '`SUMOLOGIC_URL` environment variable or ' - 'in the ansible.cfg file.') + self._display.warning( + "Sumologic HTTP collector source URL was " + "not provided. The Sumologic HTTP collector " + "source URL can be provided using the " + "`SUMOLOGIC_URL` environment variable or " + "in the ansible.cfg file." + ) def v2_playbook_on_start(self, playbook): self.sumologic.ansible_playbook = basename(playbook._file_name) @@ -147,41 +143,16 @@ def v2_playbook_on_handler_task_start(self, task): self.start_datetimes[task._uuid] = now() def v2_runner_on_ok(self, result, **kwargs): - self.sumologic.send_event( - self.url, - 'OK', - result, - self._runtime(result) - ) + self.sumologic.send_event(self.url, "OK", result, self._runtime(result)) def v2_runner_on_skipped(self, result, **kwargs): - self.sumologic.send_event( - self.url, - 'SKIPPED', - result, - self._runtime(result) - ) + self.sumologic.send_event(self.url, "SKIPPED", result, self._runtime(result)) def v2_runner_on_failed(self, result, **kwargs): - self.sumologic.send_event( - self.url, - 'FAILED', - result, - self._runtime(result) - ) + self.sumologic.send_event(self.url, "FAILED", result, self._runtime(result)) def runner_on_async_failed(self, result, **kwargs): - self.sumologic.send_event( - self.url, - 'FAILED', - result, - self._runtime(result) - ) + self.sumologic.send_event(self.url, "FAILED", result, self._runtime(result)) def v2_runner_on_unreachable(self, result, **kwargs): - self.sumologic.send_event( - self.url, - 'UNREACHABLE', - result, - self._runtime(result) - ) + self.sumologic.send_event(self.url, "UNREACHABLE", result, self._runtime(result)) diff --git a/plugins/callback/syslog_json.py b/plugins/callback/syslog_json.py index 3dc53c0c303..21755f6049a 100644 --- a/plugins/callback/syslog_json.py +++ b/plugins/callback/syslog_json.py @@ -68,62 +68,89 @@ class CallbackModule(CallbackBase): """ CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.syslog_json' + CALLBACK_TYPE = "notification" + CALLBACK_NAME = "community.general.syslog_json" CALLBACK_NEEDS_WHITELIST = True def __init__(self): - super().__init__() def 
set_options(self, task_keys=None, var_options=None, direct=None): - super().set_options(task_keys=task_keys, var_options=var_options, direct=direct) syslog_host = self.get_option("server") syslog_port = int(self.get_option("port")) syslog_facility = self.get_option("facility") - self.logger = logging.getLogger('ansible logger') + self.logger = logging.getLogger("ansible logger") self.logger.setLevel(logging.DEBUG) - self.handler = logging.handlers.SysLogHandler( - address=(syslog_host, syslog_port), - facility=syslog_facility - ) + self.handler = logging.handlers.SysLogHandler(address=(syslog_host, syslog_port), facility=syslog_facility) self.logger.addHandler(self.handler) self.hostname = socket.gethostname() def v2_runner_on_failed(self, result, ignore_errors=False): res = result._result host = result._host.get_name() - self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res)) + self.logger.error( + "%s ansible-command: task execution FAILED; host: %s; message: %s", + self.hostname, + host, + self._dump_results(res), + ) def v2_runner_on_ok(self, result): res = result._result host = result._host.get_name() if result._task.action != "gather_facts" or self.get_option("setup"): - self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s', self.hostname, host, self._dump_results(res)) + self.logger.info( + "%s ansible-command: task execution OK; host: %s; message: %s", + self.hostname, + host, + self._dump_results(res), + ) def v2_runner_on_skipped(self, result): host = result._host.get_name() - self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s', self.hostname, host, 'skipped') + self.logger.info( + "%s ansible-command: task execution SKIPPED; host: %s; message: %s", self.hostname, host, "skipped" + ) def v2_runner_on_unreachable(self, result): res = result._result host = result._host.get_name() - self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s', self.hostname, host, self._dump_results(res)) + self.logger.error( + "%s ansible-command: task execution UNREACHABLE; host: %s; message: %s", + self.hostname, + host, + self._dump_results(res), + ) def v2_runner_on_async_failed(self, result): res = result._result host = result._host.get_name() - jid = result._result.get('ansible_job_id') - self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res)) + jid = result._result.get("ansible_job_id") + self.logger.error( + "%s ansible-command: task execution FAILED; host: %s; message: %s", + self.hostname, + host, + self._dump_results(res), + ) def v2_playbook_on_import_for_host(self, result, imported_file): host = result._host.get_name() - self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s', self.hostname, host, imported_file) + self.logger.info( + "%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s", + self.hostname, + host, + imported_file, + ) def v2_playbook_on_not_import_for_host(self, result, missing_file): host = result._host.get_name() - self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s', self.hostname, host, missing_file) + self.logger.info( + "%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s", + self.hostname, + host, + missing_file, + ) diff --git a/plugins/callback/tasks_only.py b/plugins/callback/tasks_only.py index 
7f9498cebec..7b184be4358 100644 --- a/plugins/callback/tasks_only.py +++ b/plugins/callback/tasks_only.py @@ -1,4 +1,3 @@ - # Copyright (c) 2025, Felix Fontein # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -51,8 +50,8 @@ class CallbackModule(Default): CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'community.general.tasks_only' + CALLBACK_TYPE = "stdout" + CALLBACK_NAME = "community.general.tasks_only" def v2_playbook_on_play_start(self, play): pass diff --git a/plugins/callback/timestamp.py b/plugins/callback/timestamp.py index a7de50ceffd..36282c833e7 100644 --- a/plugins/callback/timestamp.py +++ b/plugins/callback/timestamp.py @@ -1,4 +1,3 @@ - # Copyright (c) 2024, kurokobo # Copyright (c) 2014, Michael DeHaan # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py index d155aefc66f..4d478d0f772 100644 --- a/plugins/callback/unixy.py +++ b/plugins/callback/unixy.py @@ -28,8 +28,7 @@ class CallbackModule(CallbackModule_default): - - ''' + """ Design goals: - Print consolidated output that looks like a *NIX startup log - Defaults should avoid displaying unnecessary information wherever possible @@ -39,14 +38,16 @@ class CallbackModule(CallbackModule_default): - Add option to display all hostnames on a single line in the appropriate result color (failures may have a separate line) - Consolidate stats display - Don't show play name if no hosts found - ''' + """ CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'community.general.unixy' + CALLBACK_TYPE = "stdout" + CALLBACK_NAME = "community.general.unixy" def _run_is_verbose(self, result): - return ((self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result) + return ( + self._display.verbosity > 0 or "_ansible_verbose_always" in result._result + ) and "_ansible_verbose_override" not in result._result def _get_task_display_name(self, task): self.task_display_name = None @@ -59,8 +60,8 @@ def _get_task_display_name(self, task): self.task_display_name = task_display_name def _preprocess_result(self, result): - self.delegated_vars = result._result.get('_ansible_delegated_vars', None) - self._handle_exception(result._result, use_stderr=self.get_option('display_failed_stderr')) + self.delegated_vars = result._result.get("_ansible_delegated_vars", None) + self._handle_exception(result._result, use_stderr=self.get_option("display_failed_stderr")) self._handle_warnings(result._result) def _process_result_output(self, result, msg): @@ -72,16 +73,16 @@ def _process_result_output(self, result, msg): return task_result if self.delegated_vars: - task_delegate_host = self.delegated_vars['ansible_host'] + task_delegate_host = self.delegated_vars["ansible_host"] task_result = f"{task_host} -> {task_delegate_host} {msg}" - if result._result.get('msg') and result._result.get('msg') != "All items completed": + if result._result.get("msg") and result._result.get("msg") != "All items completed": task_result += f" | msg: {to_text(result._result.get('msg'))}" - if result._result.get('stdout'): + if result._result.get("stdout"): task_result += f" | stdout: {result._result.get('stdout')}" - if result._result.get('stderr'): + if result._result.get("stderr"): task_result += f" | stderr: {result._result.get('stderr')}" 
return task_result @@ -89,7 +90,7 @@ def _process_result_output(self, result, msg): def v2_playbook_on_task_start(self, task, is_conditional): self._get_task_display_name(task) if self.task_display_name is not None: - if task.check_mode and self.get_option('check_mode_markers'): + if task.check_mode and self.get_option("check_mode_markers"): self._display.display(f"{self.task_display_name} (check mode)...") else: self._display.display(f"{self.task_display_name}...") @@ -97,14 +98,14 @@ def v2_playbook_on_task_start(self, task, is_conditional): def v2_playbook_on_handler_task_start(self, task): self._get_task_display_name(task) if self.task_display_name is not None: - if task.check_mode and self.get_option('check_mode_markers'): + if task.check_mode and self.get_option("check_mode_markers"): self._display.display(f"{self.task_display_name} (via handler in check mode)... ") else: self._display.display(f"{self.task_display_name} (via handler)... ") def v2_playbook_on_play_start(self, play): name = play.get_name().strip() - if play.check_mode and self.get_option('check_mode_markers'): + if play.check_mode and self.get_option("check_mode_markers"): if name and play.hosts: msg = f"\n- {name} (in check mode) on hosts: {','.join(play.hosts)} -" else: @@ -118,7 +119,7 @@ def v2_playbook_on_play_start(self, play): self._display.display(msg) def v2_runner_on_skipped(self, result, ignore_errors=False): - if self.get_option('display_skipped_hosts'): + if self.get_option("display_skipped_hosts"): self._preprocess_result(result) display_color = C.COLOR_SKIP msg = "skipped" @@ -137,12 +138,12 @@ def v2_runner_on_failed(self, result, ignore_errors=False): msg += f" | item: {item_value}" task_result = self._process_result_output(result, msg) - self._display.display(f" {task_result}", display_color, stderr=self.get_option('display_failed_stderr')) + self._display.display(f" {task_result}", display_color, stderr=self.get_option("display_failed_stderr")) def v2_runner_on_ok(self, result, msg="ok", display_color=C.COLOR_OK): self._preprocess_result(result) - result_was_changed = ('changed' in result._result and result._result['changed']) + result_was_changed = "changed" in result._result and result._result["changed"] if result_was_changed: msg = "done" item_value = self._get_item_label(result._result) @@ -151,7 +152,7 @@ def v2_runner_on_ok(self, result, msg="ok", display_color=C.COLOR_OK): display_color = C.COLOR_CHANGED task_result = self._process_result_output(result, msg) self._display.display(f" {task_result}", display_color) - elif self.get_option('display_ok_hosts'): + elif self.get_option("display_ok_hosts"): task_result = self._process_result_output(result, msg) self._display.display(f" {task_result}", display_color) @@ -171,17 +172,17 @@ def v2_runner_on_unreachable(self, result): display_color = C.COLOR_UNREACHABLE task_result = self._process_result_output(result, msg) - self._display.display(f" {task_result}", display_color, stderr=self.get_option('display_failed_stderr')) + self._display.display(f" {task_result}", display_color, stderr=self.get_option("display_failed_stderr")) def v2_on_file_diff(self, result): - if result._task.loop and 'results' in result._result: - for res in result._result['results']: - if 'diff' in res and res['diff'] and res.get('changed', False): - diff = self._get_diff(res['diff']) + if result._task.loop and "results" in result._result: + for res in result._result["results"]: + if "diff" in res and res["diff"] and res.get("changed", False): + diff = 
self._get_diff(res["diff"]) if diff: self._display.display(diff) - elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False): - diff = self._get_diff(result._result['diff']) + elif "diff" in result._result and result._result["diff"] and result._result.get("changed", False): + diff = self._get_diff(result._result["diff"]) if diff: self._display.display(diff) @@ -197,30 +198,30 @@ def v2_playbook_on_stats(self, stats): f" {hostcolor(h, t)} : {colorize('ok', t['ok'], C.COLOR_OK)} {colorize('changed', t['changed'], C.COLOR_CHANGED)} " f"{colorize('unreachable', t['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', t['failures'], C.COLOR_ERROR)} " f"{colorize('rescued', t['rescued'], C.COLOR_OK)} {colorize('ignored', t['ignored'], C.COLOR_WARN)}", - screen_only=True + screen_only=True, ) self._display.display( f" {hostcolor(h, t, False)} : {colorize('ok', t['ok'], None)} {colorize('changed', t['changed'], None)} " f"{colorize('unreachable', t['unreachable'], None)} {colorize('failed', t['failures'], None)} {colorize('rescued', t['rescued'], None)} " f"{colorize('ignored', t['ignored'], None)}", - log_only=True + log_only=True, ) - if stats.custom and self.get_option('show_custom_stats'): + if stats.custom and self.get_option("show_custom_stats"): self._display.banner("CUSTOM STATS: ") # per host # TODO: come up with 'pretty format' for k in sorted(stats.custom.keys()): - if k == '_run': + if k == "_run": continue - stat_val = self._dump_results(stats.custom[k], indent=1).replace('\n', '') - self._display.display(f'\t{k}: {stat_val}') + stat_val = self._dump_results(stats.custom[k], indent=1).replace("\n", "") + self._display.display(f"\t{k}: {stat_val}") # print per run custom stats - if '_run' in stats.custom: + if "_run" in stats.custom: self._display.display("", screen_only=True) - stat_val_run = self._dump_results(stats.custom['_run'], indent=1).replace('\n', '') - self._display.display(f'\tRUN: {stat_val_run}') + stat_val_run = self._dump_results(stats.custom["_run"], indent=1).replace("\n", "") + self._display.display(f"\tRUN: {stat_val_run}") self._display.display("", screen_only=True) def v2_playbook_on_no_hosts_matched(self): @@ -230,21 +231,24 @@ def v2_playbook_on_no_hosts_remaining(self): self._display.display(" Ran out of hosts!", color=C.COLOR_ERROR) def v2_playbook_on_start(self, playbook): - if context.CLIARGS['check'] and self.get_option('check_mode_markers'): + if context.CLIARGS["check"] and self.get_option("check_mode_markers"): self._display.display(f"Executing playbook {basename(playbook._file_name)} in check mode") else: self._display.display(f"Executing playbook {basename(playbook._file_name)}") # show CLI arguments if self._display.verbosity > 3: - if context.CLIARGS.get('args'): - self._display.display(f"Positional arguments: {' '.join(context.CLIARGS['args'])}", - color=C.COLOR_VERBOSE, screen_only=True) - - for argument in (a for a in context.CLIARGS if a != 'args'): + if context.CLIARGS.get("args"): + self._display.display( + f"Positional arguments: {' '.join(context.CLIARGS['args'])}", + color=C.COLOR_VERBOSE, + screen_only=True, + ) + + for argument in (a for a in context.CLIARGS if a != "args"): val = context.CLIARGS[argument] if val: - self._display.vvvv(f'{argument}: {val}') + self._display.vvvv(f"{argument}: {val}") def v2_runner_retry(self, result): msg = f" Retrying... 
({result._result['attempts']} of {result._result['retries']})" diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py index 6c556e435fe..d55a69c73b1 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -87,16 +87,16 @@ class Connection(ConnectionBase): - """ Local chroot based connections """ + """Local chroot based connections""" - transport = 'community.general.chroot' + transport = "community.general.chroot" has_pipelining = True # su currently has an undiagnosed issue with calculating the file # checksums (so copy, for instance, doesn't work right) # Have to look into that before re-enabling this has_tty = False - default_user = 'root' + default_user = "root" def __init__(self, play_context, new_stdin, *args, **kwargs): super().__init__(play_context, new_stdin, *args, **kwargs) @@ -107,7 +107,7 @@ def __init__(self, play_context, new_stdin, *args, **kwargs): if not os.path.isdir(self.chroot): raise AnsibleError(f"{self.chroot} is not a directory") - chrootsh = os.path.join(self.chroot, 'bin/sh') + chrootsh = os.path.join(self.chroot, "bin/sh") # Want to check for a usable bourne shell inside the chroot. # is_executable() == True is sufficient. For symlinks it # gets really complicated really fast. So we punt on finding that @@ -116,17 +116,18 @@ def __init__(self, play_context, new_stdin, *args, **kwargs): raise AnsibleError(f"{self.chroot} does not look like a chrootable dir (/bin/sh missing)") def _connect(self): - """ connect to the chroot """ - if not self.get_option('disable_root_check') and os.geteuid() != 0: + """connect to the chroot""" + if not self.get_option("disable_root_check") and os.geteuid() != 0: raise AnsibleError( "chroot connection requires running as root. " - "You can override this check with the `disable_root_check` option.") + "You can override this check with the `disable_root_check` option." + ) - if os.path.isabs(self.get_option('chroot_exe')): - self.chroot_cmd = self.get_option('chroot_exe') + if os.path.isabs(self.get_option("chroot_exe")): + self.chroot_cmd = self.get_option("chroot_exe") else: try: - self.chroot_cmd = get_bin_path(self.get_option('chroot_exe')) + self.chroot_cmd = get_bin_path(self.get_option("chroot_exe")) except ValueError as e: raise AnsibleError(str(e)) @@ -136,25 +137,24 @@ def _connect(self): self._connected = True def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): - """ run a command on the chroot. This is only needed for implementing + """run a command on the chroot. This is only needed for implementing put_file() get_file() so that we don't have to read the whole file into memory. compared to exec_command() it looses some niceties like being able to return the process's exit code immediately. 
""" - executable = self.get_option('executable') - local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] + executable = self.get_option("executable") + local_cmd = [self.chroot_cmd, self.chroot, executable, "-c", cmd] display.vvv(f"EXEC {local_cmd}", host=self.chroot) - local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] - p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + local_cmd = [to_bytes(i, errors="surrogate_or_strict") for i in local_cmd] + p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return p def exec_command(self, cmd, in_data=None, sudoable=False): - """ run a command on the chroot """ + """run a command on the chroot""" super().exec_command(cmd, in_data=in_data, sudoable=sudoable) p = self._buffered_exec_command(cmd) @@ -164,33 +164,33 @@ def exec_command(self, cmd, in_data=None, sudoable=False): @staticmethod def _prefix_login_path(remote_path): - """ Make sure that we put files into a standard path + """Make sure that we put files into a standard path - If a path is relative, then we need to choose where to put it. - ssh chooses $HOME but we aren't guaranteed that a home dir will - exist in any given chroot. So for now we're choosing "/" instead. - This also happens to be the former default. + If a path is relative, then we need to choose where to put it. + ssh chooses $HOME but we aren't guaranteed that a home dir will + exist in any given chroot. So for now we're choosing "/" instead. + This also happens to be the former default. - Can revisit using $HOME instead if it is a problem + Can revisit using $HOME instead if it is a problem """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) return os.path.normpath(remote_path) def put_file(self, in_path, out_path): - """ transfer a file from local to chroot """ + """transfer a file from local to chroot""" super().put_file(in_path, out_path) display.vvv(f"PUT {in_path} TO {out_path}", host=self.chroot) out_path = shlex_quote(self._prefix_login_path(out_path)) try: - with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: + with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file: if not os.fstat(in_file.fileno()).st_size: - count = ' count=0' + count = " count=0" else: - count = '' + count = "" try: - p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file) + p = self._buffered_exec_command(f"dd of={out_path} bs={BUFSIZE}{count}", stdin=in_file) except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") try: @@ -204,17 +204,17 @@ def put_file(self, in_path, out_path): raise AnsibleError(f"file or module does not exist at: {in_path}") def fetch_file(self, in_path, out_path): - """ fetch a file from chroot to local """ + """fetch a file from chroot to local""" super().fetch_file(in_path, out_path) display.vvv(f"FETCH {in_path} TO {out_path}", host=self.chroot) in_path = shlex_quote(self._prefix_login_path(in_path)) try: - p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}') + p = self._buffered_exec_command(f"dd if={in_path} bs={BUFSIZE}") except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") - with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file: + with open(to_bytes(out_path, errors="surrogate_or_strict"), "wb+") as out_file: try: chunk = 
p.stdout.read(BUFSIZE) while chunk: @@ -228,6 +228,6 @@ def fetch_file(self, in_path, out_path): raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") def close(self): - """ terminate the connection; nothing to do here """ + """terminate the connection; nothing to do here""" super().close() self._connected = False diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index 86d050c1db0..3af5720216a 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -29,6 +29,7 @@ HAVE_FUNC = False try: import func.overlord.client as fc + HAVE_FUNC = True except ImportError: pass @@ -45,7 +46,7 @@ class Connection(ConnectionBase): - """ Func-based connections """ + """Func-based connections""" has_pipelining = False @@ -64,7 +65,7 @@ def connect(self, port=None): return self def exec_command(self, cmd, in_data=None, sudoable=True): - """ run a command on the remote minion """ + """run a command on the remote minion""" if in_data: raise AnsibleError("Internal Error: this module does not support optimized module pipelining") @@ -82,16 +83,16 @@ def _normalize_path(path, prefix): return os.path.join(prefix, normpath[1:]) def put_file(self, in_path, out_path): - """ transfer a file from local to remote """ + """transfer a file from local to remote""" - out_path = self._normalize_path(out_path, '/') + out_path = self._normalize_path(out_path, "/") display.vvv(f"PUT {in_path} TO {out_path}", host=self.host) self.client.local.copyfile.send(in_path, out_path) def fetch_file(self, in_path, out_path): - """ fetch a file from remote to local """ + """fetch a file from remote to local""" - in_path = self._normalize_path(in_path, '/') + in_path = self._normalize_path(in_path, "/") display.vvv(f"FETCH {in_path} TO {out_path}", host=self.host) # need to use a tmp dir due to difference of semantic for getfile # ( who take a # directory as destination) and fetch_file, who @@ -102,5 +103,5 @@ def fetch_file(self, in_path, out_path): shutil.rmtree(tmpdir) def close(self): - """ terminate the connection; nothing to do here """ + """terminate the connection; nothing to do here""" pass diff --git a/plugins/connection/incus.py b/plugins/connection/incus.py index 4de826dbc9e..c902de20dca 100644 --- a/plugins/connection/incus.py +++ b/plugins/connection/incus.py @@ -84,7 +84,7 @@ class Connection(ConnectionBase): - """ Incus based connections """ + """Incus based connections""" transport = "incus" has_pipelining = True @@ -98,12 +98,13 @@ def __init__(self, play_context, new_stdin, *args, **kwargs): raise AnsibleError("incus command not found in PATH") def _connect(self): - """connect to Incus (nothing to do here) """ + """connect to Incus (nothing to do here)""" super()._connect() if not self._connected: - self._display.vvv(f"ESTABLISH Incus CONNECTION FOR USER: {self.get_option('remote_user')}", - host=self._instance()) + self._display.vvv( + f"ESTABLISH Incus CONNECTION FOR USER: {self.get_option('remote_user')}", host=self._instance() + ) self._connected = True def _build_command(self, cmd) -> list[str]: @@ -111,10 +112,12 @@ def _build_command(self, cmd) -> list[str]: exec_cmd: list[str] = [ self._incus_cmd, - "--project", self.get_option("project"), + "--project", + self.get_option("project"), "exec", f"{self.get_option('remote')}:{self._instance()}", - "--"] + "--", + ] if self.get_option("remote_user") != "root": self._display.vvv( @@ -122,9 +125,7 @@ def _build_command(self, cmd) -> list[str]: trying to run 'incus exec' with become method: 
{self.get_option('incus_become_method')}", host=self._instance(), ) - exec_cmd.extend( - [self.get_option("incus_become_method"), self.get_option("remote_user"), "-c"] - ) + exec_cmd.extend([self.get_option("incus_become_method"), self.get_option("remote_user"), "-c"]) exec_cmd.extend([self.get_option("executable"), "-c", cmd]) @@ -133,20 +134,19 @@ def _build_command(self, cmd) -> list[str]: def _instance(self): # Return only the leading part of the FQDN as the instance name # as Incus instance names cannot be a FQDN. - return self.get_option('remote_addr').split(".")[0] + return self.get_option("remote_addr").split(".")[0] def exec_command(self, cmd, in_data=None, sudoable=True): - """ execute a command on the Incus host """ + """execute a command on the Incus host""" super().exec_command(cmd, in_data=in_data, sudoable=sudoable) - self._display.vvv(f"EXEC {cmd}", - host=self._instance()) + self._display.vvv(f"EXEC {cmd}", host=self._instance()) local_cmd = self._build_command(cmd) self._display.vvvvv(f"EXEC {local_cmd}", host=self._instance()) - local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] - in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru') + local_cmd = [to_bytes(i, errors="surrogate_or_strict") for i in local_cmd] + in_data = to_bytes(in_data, errors="surrogate_or_strict", nonstring="passthru") process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate(in_data) @@ -154,32 +154,22 @@ def exec_command(self, cmd, in_data=None, sudoable=True): stdout = to_text(stdout) stderr = to_text(stderr) - if stderr.startswith("Error: ") and stderr.rstrip().endswith( - ": Instance is not running" - ): + if stderr.startswith("Error: ") and stderr.rstrip().endswith(": Instance is not running"): raise AnsibleConnectionFailure( f"instance not running: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" ) - if stderr.startswith("Error: ") and stderr.rstrip().endswith( - ": Instance not found" - ): + if stderr.startswith("Error: ") and stderr.rstrip().endswith(": Instance not found"): raise AnsibleConnectionFailure( f"instance not found: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" ) - if ( - stderr.startswith("Error: ") - and ": User does not have permission " in stderr - ): + if stderr.startswith("Error: ") and ": User does not have permission " in stderr: raise AnsibleConnectionFailure( f"instance access denied: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" ) - if ( - stderr.startswith("Error: ") - and ": User does not have entitlement " in stderr - ): + if stderr.startswith("Error: ") and ": User does not have entitlement " in stderr: raise AnsibleConnectionFailure( f"instance access denied: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" ) @@ -191,28 +181,23 @@ def _get_remote_uid_gid(self) -> tuple[int, int]: rc, uid_out, err = self.exec_command("/bin/id -u") if rc != 0: - raise AnsibleError( - f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}" - ) + raise AnsibleError(f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}") uid = uid_out.strip() rc, gid_out, err = self.exec_command("/bin/id -g") if rc != 0: - raise AnsibleError( - f"Failed to get remote gid for user {self.get_option('remote_user')}: {err}" - ) + raise AnsibleError(f"Failed to get remote gid for user 
{self.get_option('remote_user')}: {err}") gid = gid_out.strip() return int(uid), int(gid) def put_file(self, in_path, out_path): - """ put a file from local to Incus """ + """put a file from local to Incus""" super().put_file(in_path, out_path) - self._display.vvv(f"PUT {in_path} TO {out_path}", - host=self._instance()) + self._display.vvv(f"PUT {in_path} TO {out_path}", host=self._instance()) - if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')): + if not os.path.isfile(to_bytes(in_path, errors="surrogate_or_strict")): raise AnsibleFileNotFound(f"input path is not a file: {in_path}") if self.get_option("remote_user") != "root": @@ -245,30 +230,33 @@ def put_file(self, in_path, out_path): self._display.vvvvv(f"PUT {local_cmd}", host=self._instance()) - local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + local_cmd = [to_bytes(i, errors="surrogate_or_strict") for i in local_cmd] call(local_cmd) def fetch_file(self, in_path, out_path): - """ fetch a file from Incus to local """ + """fetch a file from Incus to local""" super().fetch_file(in_path, out_path) - self._display.vvv(f"FETCH {in_path} TO {out_path}", - host=self._instance()) + self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self._instance()) local_cmd = [ self._incus_cmd, - "--project", self.get_option("project"), - "file", "pull", "--quiet", + "--project", + self.get_option("project"), + "file", + "pull", + "--quiet", f"{self.get_option('remote')}:{self._instance()}/{in_path}", - out_path] + out_path, + ] - local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + local_cmd = [to_bytes(i, errors="surrogate_or_strict") for i in local_cmd] call(local_cmd) def close(self): - """ close the connection (nothing to do here) """ + """close the connection (nothing to do here)""" super().close() self._connected = False diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py index e355b3be8ff..200797eb2b8 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -42,31 +42,33 @@ class Connection(Jail): - """ Local iocage based connections """ + """Local iocage based connections""" - transport = 'community.general.iocage' + transport = "community.general.iocage" def __init__(self, play_context, new_stdin, *args, **kwargs): self.ioc_jail = play_context.remote_addr - self.iocage_cmd = Jail._search_executable('iocage') + self.iocage_cmd = Jail._search_executable("iocage") jail_uuid = self.get_jail_uuid() - kwargs[Jail.modified_jailname_key] = f'ioc-{jail_uuid}' + kwargs[Jail.modified_jailname_key] = f"ioc-{jail_uuid}" display.vvv( f"Jail {self.ioc_jail} has been translated to {kwargs[Jail.modified_jailname_key]}", - host=kwargs[Jail.modified_jailname_key] + host=kwargs[Jail.modified_jailname_key], ) super().__init__(play_context, new_stdin, *args, **kwargs) def get_jail_uuid(self): - p = subprocess.Popen([self.iocage_cmd, 'get', 'host_hostuuid', self.ioc_jail], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + p = subprocess.Popen( + [self.iocage_cmd, "get", "host_hostuuid", self.ioc_jail], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) stdout, stderr = p.communicate() @@ -82,4 +84,4 @@ def get_jail_uuid(self): if p.returncode != 0: raise AnsibleError(f"iocage returned an error: {stdout}") - return stdout.strip('\n') + return stdout.strip("\n") diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index 59e91e48c84..2eb6c0da185 100644 --- 
a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -49,11 +49,11 @@ class Connection(ConnectionBase): - """ Local BSD Jail based connections """ + """Local BSD Jail based connections""" - modified_jailname_key = 'conn_jail_name' + modified_jailname_key = "conn_jail_name" - transport = 'community.general.jail' + transport = "community.general.jail" # Pipelining may work. Someone needs to test by setting this to True and # having pipelining=True in their ansible.cfg has_pipelining = True @@ -69,8 +69,8 @@ def __init__(self, play_context, new_stdin, *args, **kwargs): if os.geteuid() != 0: raise AnsibleError("jail connection requires running as root") - self.jls_cmd = self._search_executable('jls') - self.jexec_cmd = self._search_executable('jexec') + self.jls_cmd = self._search_executable("jls") + self.jexec_cmd = self._search_executable("jexec") if self.jail not in self.list_jails(): raise AnsibleError(f"incorrect jail name {self.jail}") @@ -83,23 +83,23 @@ def _search_executable(executable): raise AnsibleError(f"{executable} command not found in PATH") def list_jails(self): - p = subprocess.Popen([self.jls_cmd, '-q', 'name'], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p = subprocess.Popen( + [self.jls_cmd, "-q", "name"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) stdout, stderr = p.communicate() - return to_text(stdout, errors='surrogate_or_strict').split() + return to_text(stdout, errors="surrogate_or_strict").split() def _connect(self): - """ connect to the jail; nothing to do here """ + """connect to the jail; nothing to do here""" super()._connect() if not self._connected: display.vvv(f"ESTABLISH JAIL CONNECTION FOR USER: {self._play_context.remote_user}", host=self.jail) self._connected = True def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): - """ run a command on the jail. This is only needed for implementing + """run a command on the jail. This is only needed for implementing put_file() get_file() so that we don't have to read the whole file into memory. 
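The docstring in the hunk above names the pattern that both jail.py and chroot.py rely on: put_file() streams the local file into a dd process spawned inside the target through _buffered_exec_command(), so the file is never read into memory, and fetch_file() does the reverse by reading `dd if=...` output in fixed-size chunks. A minimal standalone sketch of the put side, assuming a FreeBSD host where jexec is available; the jail name, shell path, and buffer size are illustrative placeholders rather than the plugin's actual values:

import os
import subprocess
from shlex import quote

BUFSIZE = 65536  # illustrative; the plugins define their own BUFSIZE
JAIL = "examplejail"  # hypothetical jail name
JEXEC = "/usr/sbin/jexec"  # assumed location; the plugin resolves it from PATH


def put_file_sketch(in_path, out_path):
    # Root relative destinations at "/", as _prefix_login_path() does: a jail
    # gives no guarantee that $HOME exists.
    out_path = quote(os.path.normpath(os.path.join(os.path.sep, out_path)))
    with open(in_path, "rb") as in_file:
        # Mirror the plugin: ask dd for count=0 when the source file is empty.
        count = " count=0" if not os.fstat(in_file.fileno()).st_size else ""
        p = subprocess.Popen(
            [JEXEC, JAIL, "/bin/sh", "-c", f"dd of={out_path} bs={BUFSIZE}{count}"],
            stdin=in_file,  # dd reads straight from the file descriptor
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            raise RuntimeError(f"failed to transfer {in_path}: {stdout!r} {stderr!r}")

chroot.py builds the same command shape with `chroot <root> <shell> -c` in place of `jexec <jail>`.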
@@ -108,24 +108,23 @@ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): """ local_cmd = [self.jexec_cmd] - set_env = '' + set_env = "" if self._play_context.remote_user is not None: - local_cmd += ['-U', self._play_context.remote_user] + local_cmd += ["-U", self._play_context.remote_user] # update HOME since -U does not update the jail environment set_env = f"HOME=~{self._play_context.remote_user} " - local_cmd += [self.jail, self._play_context.executable, '-c', set_env + cmd] + local_cmd += [self.jail, self._play_context.executable, "-c", set_env + cmd] display.vvv(f"EXEC {local_cmd}", host=self.jail) - local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] - p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + local_cmd = [to_bytes(i, errors="surrogate_or_strict") for i in local_cmd] + p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return p def exec_command(self, cmd, in_data=None, sudoable=False): - """ run a command on the jail """ + """run a command on the jail""" super().exec_command(cmd, in_data=in_data, sudoable=sudoable) p = self._buffered_exec_command(cmd) @@ -135,33 +134,33 @@ def exec_command(self, cmd, in_data=None, sudoable=False): @staticmethod def _prefix_login_path(remote_path): - """ Make sure that we put files into a standard path + """Make sure that we put files into a standard path - If a path is relative, then we need to choose where to put it. - ssh chooses $HOME but we aren't guaranteed that a home dir will - exist in any given chroot. So for now we're choosing "/" instead. - This also happens to be the former default. + If a path is relative, then we need to choose where to put it. + ssh chooses $HOME but we aren't guaranteed that a home dir will + exist in any given chroot. So for now we're choosing "/" instead. + This also happens to be the former default. 
- Can revisit using $HOME instead if it is a problem + Can revisit using $HOME instead if it is a problem """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) return os.path.normpath(remote_path) def put_file(self, in_path, out_path): - """ transfer a file from local to jail """ + """transfer a file from local to jail""" super().put_file(in_path, out_path) display.vvv(f"PUT {in_path} TO {out_path}", host=self.jail) out_path = shlex_quote(self._prefix_login_path(out_path)) try: - with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: + with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file: if not os.fstat(in_file.fileno()).st_size: - count = ' count=0' + count = " count=0" else: - count = '' + count = "" try: - p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file) + p = self._buffered_exec_command(f"dd of={out_path} bs={BUFSIZE}{count}", stdin=in_file) except OSError: raise AnsibleError("jail connection requires dd command in the jail") try: @@ -170,22 +169,24 @@ def put_file(self, in_path, out_path): traceback.print_exc() raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") if p.returncode != 0: - raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}") + raise AnsibleError( + f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}" + ) except IOError: raise AnsibleError(f"file or module does not exist at: {in_path}") def fetch_file(self, in_path, out_path): - """ fetch a file from jail to local """ + """fetch a file from jail to local""" super().fetch_file(in_path, out_path) display.vvv(f"FETCH {in_path} TO {out_path}", host=self.jail) in_path = shlex_quote(self._prefix_login_path(in_path)) try: - p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}') + p = self._buffered_exec_command(f"dd if={in_path} bs={BUFSIZE}") except OSError: raise AnsibleError("jail connection requires dd command in the jail") - with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file: + with open(to_bytes(out_path, errors="surrogate_or_strict"), "wb+") as out_file: try: chunk = p.stdout.read(BUFSIZE) while chunk: @@ -196,9 +197,11 @@ def fetch_file(self, in_path, out_path): raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}") + raise AnsibleError( + f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}" + ) def close(self): - """ terminate the connection; nothing to do here """ + """terminate the connection; nothing to do here""" super().close() self._connected = False diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index e88dbb75b69..411a44e0038 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -41,6 +41,7 @@ HAS_LIBLXC = False try: import lxc as _lxc + HAS_LIBLXC = True except ImportError: pass @@ -51,11 +52,11 @@ class Connection(ConnectionBase): - """ Local lxc based connections """ + """Local lxc based connections""" - transport = 'community.general.lxc' + transport = "community.general.lxc" has_pipelining = True - default_user = 'root' + default_user = "root" def __init__(self, play_context, new_stdin, *args, **kwargs): super().__init__(play_context, new_stdin, *args, 
**kwargs) @@ -64,14 +65,14 @@ def __init__(self, play_context, new_stdin, *args, **kwargs): self.container = None def _connect(self): - """ connect to the lxc; nothing to do here """ + """connect to the lxc; nothing to do here""" super()._connect() if not HAS_LIBLXC: msg = "lxc python bindings are not installed" raise errors.AnsibleError(msg) - container_name = self.get_option('remote_addr') + container_name = self.get_option("remote_addr") if self.container and self.container_name == container_name: return @@ -98,7 +99,7 @@ def _communicate(pid, in_data, stdin, stdout, stderr): continue raise for fd in ready_writes: - in_data = in_data[os.write(fd, in_data):] + in_data = in_data[os.write(fd, in_data) :] if len(in_data) == 0: write_fds.remove(fd) for fd in ready_reads: @@ -117,12 +118,12 @@ def _set_nonblocking(self, fd): return fd def exec_command(self, cmd, in_data=None, sudoable=False): - """ run a command on the chroot """ + """run a command on the chroot""" super().exec_command(cmd, in_data=in_data, sudoable=sudoable) # python2-lxc needs bytes. python3-lxc needs text. - executable = to_native(self.get_option('executable'), errors='surrogate_or_strict') - local_cmd = [executable, '-c', to_native(cmd, errors='surrogate_or_strict')] + executable = to_native(self.get_option("executable"), errors="surrogate_or_strict") + local_cmd = [executable, "-c", to_native(cmd, errors="surrogate_or_strict")] read_stdout, write_stdout = None, None read_stderr, write_stderr = None, None @@ -133,14 +134,14 @@ def exec_command(self, cmd, in_data=None, sudoable=False): read_stderr, write_stderr = os.pipe() kwargs = { - 'stdout': self._set_nonblocking(write_stdout), - 'stderr': self._set_nonblocking(write_stderr), - 'env_policy': _lxc.LXC_ATTACH_CLEAR_ENV + "stdout": self._set_nonblocking(write_stdout), + "stderr": self._set_nonblocking(write_stderr), + "env_policy": _lxc.LXC_ATTACH_CLEAR_ENV, } if in_data: read_stdin, write_stdin = os.pipe() - kwargs['stdin'] = self._set_nonblocking(read_stdin) + kwargs["stdin"] = self._set_nonblocking(read_stdin) self._display.vvv(f"EXEC {local_cmd}", host=self.container_name) pid = self.container.attach(_lxc.attach_run_command, local_cmd, **kwargs) @@ -153,28 +154,19 @@ def exec_command(self, cmd, in_data=None, sudoable=False): if read_stdin: read_stdin = os.close(read_stdin) - return self._communicate(pid, - in_data, - write_stdin, - read_stdout, - read_stderr) + return self._communicate(pid, in_data, write_stdin, read_stdout, read_stderr) finally: - fds = [read_stdout, - write_stdout, - read_stderr, - write_stderr, - read_stdin, - write_stdin] + fds = [read_stdout, write_stdout, read_stderr, write_stderr, read_stdin, write_stdin] for fd in fds: if fd: os.close(fd) def put_file(self, in_path, out_path): - ''' transfer a file from local to lxc ''' + """transfer a file from local to lxc""" super().put_file(in_path, out_path) self._display.vvv(f"PUT {in_path} TO {out_path}", host=self.container_name) - in_path = to_bytes(in_path, errors='surrogate_or_strict') - out_path = to_bytes(out_path, errors='surrogate_or_strict') + in_path = to_bytes(in_path, errors="surrogate_or_strict") + out_path = to_bytes(out_path, errors="surrogate_or_strict") if not os.path.exists(in_path): msg = f"file or module does not exist: {in_path}" @@ -185,9 +177,11 @@ def put_file(self, in_path, out_path): traceback.print_exc() raise errors.AnsibleError(f"failed to open input file to {in_path}") try: + def write_file(args): - with open(out_path, 'wb+') as dst_file: + with open(out_path, "wb+") as 
dst_file: shutil.copyfileobj(src_file, dst_file) + try: self.container.attach_wait(write_file, None) except IOError: @@ -198,11 +192,11 @@ def write_file(args): src_file.close() def fetch_file(self, in_path, out_path): - ''' fetch a file from lxc to local ''' + """fetch a file from lxc to local""" super().fetch_file(in_path, out_path) self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self.container_name) - in_path = to_bytes(in_path, errors='surrogate_or_strict') - out_path = to_bytes(out_path, errors='surrogate_or_strict') + in_path = to_bytes(in_path, errors="surrogate_or_strict") + out_path = to_bytes(out_path, errors="surrogate_or_strict") try: dst_file = open(out_path, "wb") @@ -211,14 +205,16 @@ def fetch_file(self, in_path, out_path): msg = f"failed to open output file {out_path}" raise errors.AnsibleError(msg) try: + def write_file(args): try: - with open(in_path, 'rb') as src_file: + with open(in_path, "rb") as src_file: shutil.copyfileobj(src_file, dst_file) finally: # this is needed in the lxc child process # to flush internal python buffers dst_file.close() + try: self.container.attach_wait(write_file, None) except IOError: @@ -229,6 +225,6 @@ def write_file(args): dst_file.close() def close(self): - ''' terminate the connection; nothing to do here ''' + """terminate the connection; nothing to do here""" super().close() self._connected = False diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py index f7f5deeb286..07aed32eca6 100644 --- a/plugins/connection/lxd.py +++ b/plugins/connection/lxd.py @@ -83,9 +83,9 @@ class Connection(ConnectionBase): - """ lxd based connections """ + """lxd based connections""" - transport = 'community.general.lxd' + transport = "community.general.lxd" has_pipelining = True def __init__(self, play_context, new_stdin, *args, **kwargs): @@ -97,11 +97,11 @@ def __init__(self, play_context, new_stdin, *args, **kwargs): raise AnsibleError("lxc command not found in PATH") def _host(self): - """ translate remote_addr to lxd (short) hostname """ + """translate remote_addr to lxd (short) hostname""" return self.get_option("remote_addr").split(".", 1)[0] def _connect(self): - """connect to lxd (nothing to do here) """ + """connect to lxd (nothing to do here)""" super()._connect() if not self._connected: @@ -124,16 +124,14 @@ def _build_command(self, cmd) -> list[str]: trying to run 'lxc exec' with become method: {self.get_option('lxd_become_method')}", host=self._host(), ) - exec_cmd.extend( - [self.get_option("lxd_become_method"), self.get_option("remote_user"), "-c"] - ) + exec_cmd.extend([self.get_option("lxd_become_method"), self.get_option("remote_user"), "-c"]) exec_cmd.extend([self.get_option("executable"), "-c", cmd]) return exec_cmd def exec_command(self, cmd, in_data=None, sudoable=True): - """ execute a command on the lxd host """ + """execute a command on the lxd host""" super().exec_command(cmd, in_data=in_data, sudoable=sudoable) self._display.vvv(f"EXEC {cmd}", host=self._host()) @@ -141,8 +139,8 @@ def exec_command(self, cmd, in_data=None, sudoable=True): local_cmd = self._build_command(cmd) self._display.vvvvv(f"EXEC {local_cmd}", host=self._host()) - local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] - in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru') + local_cmd = [to_bytes(i, errors="surrogate_or_strict") for i in local_cmd] + in_data = to_bytes(in_data, errors="surrogate_or_strict", nonstring="passthru") process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, 
stderr=PIPE) stdout, stderr = process.communicate(in_data) @@ -165,27 +163,23 @@ def _get_remote_uid_gid(self) -> tuple[int, int]: rc, uid_out, err = self.exec_command("/bin/id -u") if rc != 0: - raise AnsibleError( - f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}" - ) + raise AnsibleError(f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}") uid = uid_out.strip() rc, gid_out, err = self.exec_command("/bin/id -g") if rc != 0: - raise AnsibleError( - f"Failed to get remote gid for user {self.get_option('remote_user')}: {err}" - ) + raise AnsibleError(f"Failed to get remote gid for user {self.get_option('remote_user')}: {err}") gid = gid_out.strip() return int(uid), int(gid) def put_file(self, in_path, out_path): - """ put a file from local to lxd """ + """put a file from local to lxd""" super().put_file(in_path, out_path) self._display.vvv(f"PUT {in_path} TO {out_path}", host=self._host()) - if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')): + if not os.path.isfile(to_bytes(in_path, errors="surrogate_or_strict")): raise AnsibleFileNotFound(f"input path is not a file: {in_path}") local_cmd = [self._lxc_cmd] @@ -218,13 +212,13 @@ def put_file(self, in_path, out_path): self._display.vvvvv(f"PUT {local_cmd}", host=self._host()) - local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + local_cmd = [to_bytes(i, errors="surrogate_or_strict") for i in local_cmd] process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) process.communicate() def fetch_file(self, in_path, out_path): - """ fetch a file from lxd to local """ + """fetch a file from lxd to local""" super().fetch_file(in_path, out_path) self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self._host()) @@ -232,19 +226,15 @@ def fetch_file(self, in_path, out_path): local_cmd = [self._lxc_cmd] if self.get_option("project"): local_cmd.extend(["--project", self.get_option("project")]) - local_cmd.extend([ - "file", "pull", - f"{self.get_option('remote')}:{self._host()}/{in_path}", - out_path - ]) + local_cmd.extend(["file", "pull", f"{self.get_option('remote')}:{self._host()}/{in_path}", out_path]) - local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + local_cmd = [to_bytes(i, errors="surrogate_or_strict") for i in local_cmd] process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) process.communicate() def close(self): - """ close the connection (nothing to do here) """ + """close the connection (nothing to do here)""" super().close() self._connected = False diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index 3b815c339b9..84560713493 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -53,7 +53,7 @@ class Connection(ConnectionBase): """This is a connection plugin for qubes: it uses qubes-run-vm binary to interact with the containers.""" # String used to identify this Connection class from other classes - transport = 'community.general.qubes' + transport = "community.general.qubes" has_pipelining = True def __init__(self, play_context, new_stdin, *args, **kwargs): @@ -88,16 +88,17 @@ def _qubes(self, cmd=None, in_data=None, shell="qubes.VMShell"): local_cmd.append(shell) - local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + local_cmd = [to_bytes(i, errors="surrogate_or_strict") for i in local_cmd] display.vvvv("Local cmd: ", local_cmd) display.vvv(f"RUN {local_cmd}", host=self._remote_vmname) - p = subprocess.Popen(local_cmd, 
shell=False, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p = subprocess.Popen( + local_cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) # Here we are writing the actual command to the remote bash - p.stdin.write(to_bytes(cmd, errors='surrogate_or_strict')) + p.stdin.write(to_bytes(cmd, errors="surrogate_or_strict")) stdout, stderr = p.communicate(input=in_data) return p.returncode, stdout, stderr @@ -108,7 +109,7 @@ def _connect(self): @ensure_connect # type: ignore # TODO: for some reason, the type infos for ensure_connect suck... def exec_command(self, cmd, in_data=None, sudoable=False): - """Run specified command in a running QubesVM """ + """Run specified command in a running QubesVM""" super().exec_command(cmd, in_data=in_data, sudoable=sudoable) display.vvvv(f"CMD IS: {cmd}") @@ -119,24 +120,24 @@ def exec_command(self, cmd, in_data=None, sudoable=False): return rc, stdout, stderr def put_file(self, in_path, out_path): - """ Place a local file located in 'in_path' inside VM at 'out_path' """ + """Place a local file located in 'in_path' inside VM at 'out_path'""" super().put_file(in_path, out_path) display.vvv(f"PUT {in_path} TO {out_path}", host=self._remote_vmname) with open(in_path, "rb") as fobj: source_data = fobj.read() - retcode, dummy, dummy = self._qubes(f'cat > "{out_path}\"\n', source_data, "qubes.VMRootShell") + retcode, dummy, dummy = self._qubes(f'cat > "{out_path}"\n', source_data, "qubes.VMRootShell") # if qubes.VMRootShell service not supported, fallback to qubes.VMShell and # hope it will have appropriate permissions if retcode == 127: - retcode, dummy, dummy = self._qubes(f'cat > "{out_path}\"\n', source_data) + retcode, dummy, dummy = self._qubes(f'cat > "{out_path}"\n', source_data) if retcode != 0: - raise AnsibleConnectionFailure(f'Failed to put_file to {out_path}') + raise AnsibleConnectionFailure(f"Failed to put_file to {out_path}") def fetch_file(self, in_path, out_path): - """Obtain file specified via 'in_path' from the container and place it at 'out_path' """ + """Obtain file specified via 'in_path' from the container and place it at 'out_path'""" super().fetch_file(in_path, out_path) display.vvv(f"FETCH {in_path} TO {out_path}", host=self._remote_vmname) @@ -146,9 +147,9 @@ def fetch_file(self, in_path, out_path): p = subprocess.Popen(cmd_args_list, shell=False, stdout=fobj) p.communicate() if p.returncode != 0: - raise AnsibleConnectionFailure(f'Failed to fetch file to {out_path}') + raise AnsibleConnectionFailure(f"Failed to fetch file to {out_path}") def close(self): - """ Closing the connection """ + """Closing the connection""" super().close() self._connected = False diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py index 69dd67bda80..5e67c5b1aa5 100644 --- a/plugins/connection/saltstack.py +++ b/plugins/connection/saltstack.py @@ -25,18 +25,19 @@ HAVE_SALTSTACK = False try: import salt.client as sc + HAVE_SALTSTACK = True except ImportError: pass class Connection(ConnectionBase): - """ Salt-based connections """ + """Salt-based connections""" has_pipelining = False # while the name of the product is salt, naming that module salt cause # trouble with module import - transport = 'community.general.saltstack' + transport = "community.general.saltstack" def __init__(self, play_context, new_stdin, *args, **kwargs): super().__init__(play_context, new_stdin, *args, **kwargs) @@ -51,7 +52,7 @@ def _connect(self): return self def exec_command(self, cmd, 
in_data=None, sudoable=False): - """ run a command on the remote minion """ + """run a command on the remote minion""" super().exec_command(cmd, in_data=in_data, sudoable=sudoable) if in_data: @@ -59,12 +60,14 @@ def exec_command(self, cmd, in_data=None, sudoable=False): self._display.vvv(f"EXEC {cmd}", host=self.host) # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077 - res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', f"true;{cmd}"]) + res = self.client.cmd(self.host, "cmd.exec_code_all", ["bash", f"true;{cmd}"]) if self.host not in res: - raise errors.AnsibleError(f"Minion {self.host} didn't answer, check if salt-minion is running and the name is correct") + raise errors.AnsibleError( + f"Minion {self.host} didn't answer, check if salt-minion is running and the name is correct" + ) p = res[self.host] - return p['retcode'], p['stdout'], p['stderr'] + return p["retcode"], p["stdout"], p["stderr"] @staticmethod def _normalize_path(path, prefix): @@ -74,27 +77,27 @@ def _normalize_path(path, prefix): return os.path.join(prefix, normpath[1:]) def put_file(self, in_path, out_path): - """ transfer a file from local to remote """ + """transfer a file from local to remote""" super().put_file(in_path, out_path) - out_path = self._normalize_path(out_path, '/') + out_path = self._normalize_path(out_path, "/") self._display.vvv(f"PUT {in_path} TO {out_path}", host=self.host) - with open(in_path, 'rb') as in_fh: + with open(in_path, "rb") as in_fh: content = in_fh.read() - self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path]) + self.client.cmd(self.host, "hashutil.base64_decodefile", [base64.b64encode(content), out_path]) # TODO test it def fetch_file(self, in_path, out_path): - """ fetch a file from remote to local """ + """fetch a file from remote to local""" super().fetch_file(in_path, out_path) - in_path = self._normalize_path(in_path, '/') + in_path = self._normalize_path(in_path, "/") self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self.host) - content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host] - open(out_path, 'wb').write(content) + content = self.client.cmd(self.host, "cp.get_file_str", [in_path])[self.host] + open(out_path, "wb").write(content) def close(self): - """ terminate the connection; nothing to do here """ + """terminate the connection; nothing to do here""" pass diff --git a/plugins/connection/wsl.py b/plugins/connection/wsl.py index 9c2c0e94513..c6318caba4b 100644 --- a/plugins/connection/wsl.py +++ b/plugins/connection/wsl.py @@ -336,6 +336,7 @@ try: import paramiko from paramiko import MissingHostKeyPolicy + PARAMIKO_IMPORT_ERR = None except ImportError: PARAMIKO_IMPORT_ERR = traceback.format_exc() @@ -369,24 +370,22 @@ def __init__(self, connection: Connection) -> None: self._options = connection._options def missing_host_key(self, client: paramiko.SSHClient, hostname: str, key: paramiko.PKey) -> None: - - if all((self.connection.get_option('host_key_checking'), not self.connection.get_option('host_key_auto_add'))): - + if all((self.connection.get_option("host_key_checking"), not self.connection.get_option("host_key_auto_add"))): fingerprint = hexlify(key.get_fingerprint()) ktype = key.get_name() - if self.connection.get_option('use_persistent_connections') or self.connection.force_persistence: + if self.connection.get_option("use_persistent_connections") or self.connection.force_persistence: # don't print the prompt string since the user cannot respond # 
to the question anyway raise AnsibleError(authenticity_msg(hostname, ktype, fingerprint)[1:92]) inp = to_text( display.prompt_until(authenticity_msg(hostname, ktype, fingerprint), private=False), - errors='surrogate_or_strict' + errors="surrogate_or_strict", ) - if inp.lower() not in ['yes', 'y', '']: - raise AnsibleError('host connection rejected by user') + if inp.lower() not in ["yes", "y", ""]: + raise AnsibleError("host connection rejected by user") key._added_by_ansible_this_time = True # type: ignore @@ -398,88 +397,96 @@ def missing_host_key(self, client: paramiko.SSHClient, hostname: str, key: param class Connection(ConnectionBase): - """ SSH based connections (paramiko) to WSL """ + """SSH based connections (paramiko) to WSL""" - transport = 'community.general.wsl' + transport = "community.general.wsl" _log_channel: str | None = None - def __init__(self, play_context: PlayContext, new_stdin: io.TextIOWrapper | None = None, *args: t.Any, **kwargs: t.Any): + def __init__( + self, play_context: PlayContext, new_stdin: io.TextIOWrapper | None = None, *args: t.Any, **kwargs: t.Any + ): super().__init__(play_context, new_stdin, *args, **kwargs) def _set_log_channel(self, name: str) -> None: - """ Mimic paramiko.SSHClient.set_log_channel """ + """Mimic paramiko.SSHClient.set_log_channel""" self._log_channel = name def _parse_proxy_command(self, port: int = 22) -> dict[str, t.Any]: - proxy_command = self.get_option('proxy_command') or None + proxy_command = self.get_option("proxy_command") or None sock_kwarg = {} if proxy_command: replacers: t.Dict[str, str] = { - '%h': self.get_option('remote_addr'), - '%p': str(port), - '%r': self.get_option('remote_user') + "%h": self.get_option("remote_addr"), + "%p": str(port), + "%r": self.get_option("remote_user"), } for find, replace in replacers.items(): proxy_command = proxy_command.replace(find, replace) try: - sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)} - display.vvv(f'CONFIGURE PROXY COMMAND FOR CONNECTION: {proxy_command}', host=self.get_option('remote_addr')) + sock_kwarg = {"sock": paramiko.ProxyCommand(proxy_command)} + display.vvv( + f"CONFIGURE PROXY COMMAND FOR CONNECTION: {proxy_command}", host=self.get_option("remote_addr") + ) except AttributeError: - display.warning('Paramiko ProxyCommand support unavailable. ' - 'Please upgrade to Paramiko 1.9.0 or newer. ' - 'Not using configured ProxyCommand') + display.warning( + "Paramiko ProxyCommand support unavailable. " + "Please upgrade to Paramiko 1.9.0 or newer. 
" + "Not using configured ProxyCommand" + ) return sock_kwarg def _connect(self) -> Connection: - """ activates the connection object """ + """activates the connection object""" if PARAMIKO_IMPORT_ERR is not None: - raise AnsibleError(f'paramiko is not installed: {to_native(PARAMIKO_IMPORT_ERR)}') + raise AnsibleError(f"paramiko is not installed: {to_native(PARAMIKO_IMPORT_ERR)}") - port = self.get_option('port') - display.vvv(f'ESTABLISH PARAMIKO SSH CONNECTION FOR USER: {self.get_option("remote_user")} on PORT {to_text(port)} TO {self.get_option("remote_addr")}', - host=self.get_option('remote_addr')) + port = self.get_option("port") + display.vvv( + f"ESTABLISH PARAMIKO SSH CONNECTION FOR USER: {self.get_option('remote_user')} on PORT {to_text(port)} TO {self.get_option('remote_addr')}", + host=self.get_option("remote_addr"), + ) ssh = paramiko.SSHClient() # Set pubkey and hostkey algorithms to disable, the only manipulation allowed currently # is keeping or omitting rsa-sha2 algorithms # default_keys: t.Tuple[str] = () - paramiko_preferred_pubkeys = getattr(paramiko.Transport, '_preferred_pubkeys', ()) - paramiko_preferred_hostkeys = getattr(paramiko.Transport, '_preferred_keys', ()) - use_rsa_sha2_algorithms = self.get_option('use_rsa_sha2_algorithms') + paramiko_preferred_pubkeys = getattr(paramiko.Transport, "_preferred_pubkeys", ()) + paramiko_preferred_hostkeys = getattr(paramiko.Transport, "_preferred_keys", ()) + use_rsa_sha2_algorithms = self.get_option("use_rsa_sha2_algorithms") disabled_algorithms: t.Dict[str, t.Iterable[str]] = {} if not use_rsa_sha2_algorithms: if paramiko_preferred_pubkeys: - disabled_algorithms['pubkeys'] = tuple(a for a in paramiko_preferred_pubkeys if 'rsa-sha2' in a) + disabled_algorithms["pubkeys"] = tuple(a for a in paramiko_preferred_pubkeys if "rsa-sha2" in a) if paramiko_preferred_hostkeys: - disabled_algorithms['keys'] = tuple(a for a in paramiko_preferred_hostkeys if 'rsa-sha2' in a) + disabled_algorithms["keys"] = tuple(a for a in paramiko_preferred_hostkeys if "rsa-sha2" in a) # override paramiko's default logger name if self._log_channel is not None: ssh.set_log_channel(self._log_channel) - self.keyfile = os.path.expanduser(self.get_option('user_known_hosts_file')) + self.keyfile = os.path.expanduser(self.get_option("user_known_hosts_file")) - if self.get_option('host_key_checking'): - for ssh_known_hosts in ('/etc/ssh/ssh_known_hosts', '/etc/openssh/ssh_known_hosts', self.keyfile): + if self.get_option("host_key_checking"): + for ssh_known_hosts in ("/etc/ssh/ssh_known_hosts", "/etc/openssh/ssh_known_hosts", self.keyfile): try: ssh.load_system_host_keys(ssh_known_hosts) break except IOError: pass # file was not found, but not required to function except paramiko.hostkeys.InvalidHostKey as e: - raise AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}') + raise AnsibleConnectionFailure(f"Invalid host key: {to_text(e.line)}") try: ssh.load_system_host_keys() except paramiko.hostkeys.InvalidHostKey as e: - raise AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}') + raise AnsibleConnectionFailure(f"Invalid host key: {to_text(e.line)}") ssh_connect_kwargs = self._parse_proxy_command(port) ssh.set_missing_host_key_policy(MyAddPolicy(self)) - conn_password = self.get_option('password') + conn_password = self.get_option("password") allow_agent = True if conn_password is not None: @@ -487,42 +494,42 @@ def _connect(self) -> Connection: try: key_filename = None - if self.get_option('private_key_file'): - key_filename = 
os.path.expanduser(self.get_option('private_key_file')) + if self.get_option("private_key_file"): + key_filename = os.path.expanduser(self.get_option("private_key_file")) # paramiko 2.2 introduced auth_timeout parameter - if LooseVersion(paramiko.__version__) >= LooseVersion('2.2.0'): - ssh_connect_kwargs['auth_timeout'] = self.get_option('timeout') + if LooseVersion(paramiko.__version__) >= LooseVersion("2.2.0"): + ssh_connect_kwargs["auth_timeout"] = self.get_option("timeout") # paramiko 1.15 introduced banner timeout parameter - if LooseVersion(paramiko.__version__) >= LooseVersion('1.15.0'): - ssh_connect_kwargs['banner_timeout'] = self.get_option('banner_timeout') + if LooseVersion(paramiko.__version__) >= LooseVersion("1.15.0"): + ssh_connect_kwargs["banner_timeout"] = self.get_option("banner_timeout") ssh.connect( - self.get_option('remote_addr').lower(), - username=self.get_option('remote_user'), + self.get_option("remote_addr").lower(), + username=self.get_option("remote_user"), allow_agent=allow_agent, - look_for_keys=self.get_option('look_for_keys'), + look_for_keys=self.get_option("look_for_keys"), key_filename=key_filename, password=conn_password, - timeout=self.get_option('timeout'), + timeout=self.get_option("timeout"), port=port, disabled_algorithms=disabled_algorithms, **ssh_connect_kwargs, ) except paramiko.ssh_exception.BadHostKeyException as e: - raise AnsibleConnectionFailure(f'host key mismatch for {to_text(e.hostname)}') + raise AnsibleConnectionFailure(f"host key mismatch for {to_text(e.hostname)}") except paramiko.ssh_exception.AuthenticationException as e: - msg = f'Failed to authenticate: {e}' + msg = f"Failed to authenticate: {e}" raise AnsibleAuthenticationFailure(msg) except Exception as e: msg = to_text(e) - if 'PID check failed' in msg: - raise AnsibleError('paramiko version issue, please upgrade paramiko on the machine running ansible') - elif 'Private key file is encrypted' in msg: + if "PID check failed" in msg: + raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible") + elif "Private key file is encrypted" in msg: msg = ( - f'ssh {self.get_option("remote_user")}@{self.get_options("remote_addr")}:{port} : ' - f'{msg}\nTo connect as a different user, use -u <username>.' + f"ssh {self.get_option('remote_user')}@{self.get_option('remote_addr')}:{port} : " + f"{msg}\nTo connect as a different user, use -u <username>." ) raise AnsibleConnectionFailure(msg) else: @@ -534,7 +541,7 @@ def _connect(self) -> Connection: def _any_keys_added(self) -> bool: for hostname, keys in self.ssh._host_keys.items(): # type: ignore[attr-defined] # TODO: figure out what _host_keys is! for keytype, key in keys.items(): - added_this_time = getattr(key, '_added_by_ansible_this_time', False) + added_this_time = getattr(key, "_added_by_ansible_this_time", False) if added_this_time: return True return False @@ -548,42 +555,42 @@ def _save_ssh_host_keys(self, filename: str) -> None: if not self._any_keys_added(): return - path = os.path.expanduser('~/.ssh') + path = os.path.expanduser("~/.ssh") makedirs_safe(path) - with open(filename, 'w') as f: + with open(filename, "w") as f: for hostname, keys in self.ssh._host_keys.items(): # type: ignore[attr-defined] # TODO: figure out what _host_keys is!
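For illustration, a minimal sketch of the version-gating pattern the hunk above applies to optional paramiko keyword arguments; the LooseVersion import path and the 30-second values are assumptions for the sketch, not taken from the plugin:

from ansible.module_utils.compat.version import LooseVersion  # assumed import path
import paramiko

ssh_connect_kwargs = {}
# auth_timeout appeared in paramiko 2.2 and banner_timeout in 1.15; only pass
# keyword arguments the installed paramiko actually supports.
if LooseVersion(paramiko.__version__) >= LooseVersion("2.2.0"):
    ssh_connect_kwargs["auth_timeout"] = 30  # hypothetical timeout value
if LooseVersion(paramiko.__version__) >= LooseVersion("1.15.0"):
    ssh_connect_kwargs["banner_timeout"] = 30  # hypothetical timeout value
# ssh.connect(host, **ssh_connect_kwargs) then receives only supported kwargs.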
for keytype, key in keys.items(): # was f.write - added_this_time = getattr(key, '_added_by_ansible_this_time', False) + added_this_time = getattr(key, "_added_by_ansible_this_time", False) if not added_this_time: - f.write(f'{hostname} {keytype} {key.get_base64()}\n') + f.write(f"{hostname} {keytype} {key.get_base64()}\n") for hostname, keys in self.ssh._host_keys.items(): # type: ignore[attr-defined] # TODO: figure out what _host_keys is! for keytype, key in keys.items(): - added_this_time = getattr(key, '_added_by_ansible_this_time', False) + added_this_time = getattr(key, "_added_by_ansible_this_time", False) if added_this_time: - f.write(f'{hostname} {keytype} {key.get_base64()}\n') + f.write(f"{hostname} {keytype} {key.get_base64()}\n") def _build_wsl_command(self, cmd: str) -> str: - wsl_distribution = self.get_option('wsl_distribution') - become = self.get_option('become') - become_user = self.get_option('become_user') + wsl_distribution = self.get_option("wsl_distribution") + become = self.get_option("become") + become_user = self.get_option("become_user") if become and become_user: wsl_user = become_user else: - wsl_user = self.get_option('wsl_user') - args = ['wsl.exe', '--distribution', wsl_distribution] + wsl_user = self.get_option("wsl_user") + args = ["wsl.exe", "--distribution", wsl_distribution] if wsl_user: - args.extend(['--user', wsl_user]) - args.extend(['--']) + args.extend(["--user", wsl_user]) + args.extend(["--"]) args.extend(shlex.split(cmd)) - if os.getenv('_ANSIBLE_TEST_WSL_CONNECTION_PLUGIN_Waeri5tepheeSha2fae8'): + if os.getenv("_ANSIBLE_TEST_WSL_CONNECTION_PLUGIN_Waeri5tepheeSha2fae8"): return shlex.join(args) - return list2cmdline(args) # see https://github.com/python/cpython/blob/3.11/Lib/subprocess.py#L576 + return list2cmdline(args) # see https://github.com/python/cpython/blob/3.11/Lib/subprocess.py#L576 def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]: - """ run a command on inside a WSL distribution """ + """run a command on inside a WSL distribution""" cmd = self._build_wsl_command(cmd) @@ -599,18 +606,18 @@ def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = chan = transport.open_session() except Exception as e: text_e = to_text(e) - msg = 'Failed to open session' + msg = "Failed to open session" if text_e: - msg += f': {text_e}' + msg += f": {text_e}" raise AnsibleConnectionFailure(to_native(msg)) - display.vvv(f'EXEC {cmd}', host=self.get_option('remote_addr')) + display.vvv(f"EXEC {cmd}", host=self.get_option("remote_addr")) - cmd = to_bytes(cmd, errors='surrogate_or_strict') + cmd = to_bytes(cmd, errors="surrogate_or_strict") - no_prompt_out = b'' - no_prompt_err = b'' - become_output = b'' + no_prompt_out = b"" + no_prompt_err = b"" + become_output = b"" try: chan.exec_command(cmd) @@ -618,14 +625,14 @@ def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = password_prompt = False become_success = False while not (become_success or password_prompt): - display.debug('Waiting for Privilege Escalation input') + display.debug("Waiting for Privilege Escalation input") chunk = chan.recv(bufsize) - display.debug(f'chunk is: {to_text(chunk)}') + display.debug(f"chunk is: {to_text(chunk)}") if not chunk: - if b'unknown user' in become_output: - n_become_user = to_native(self.become.get_option('become_user')) - raise AnsibleError(f'user {n_become_user} does not exist') + if b"unknown user" in become_output: + n_become_user = 
to_native(self.become.get_option("become_user")) + raise AnsibleError(f"user {n_become_user} does not exist") else: break # raise AnsibleError('ssh connection closed waiting for password prompt') @@ -643,80 +650,78 @@ def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = if password_prompt: if self.become: - become_pass = self.become.get_option('become_pass') - chan.sendall(to_bytes(f"{become_pass}\n", errors='surrogate_or_strict')) + become_pass = self.become.get_option("become_pass") + chan.sendall(to_bytes(f"{become_pass}\n", errors="surrogate_or_strict")) else: - raise AnsibleError('A password is required but none was supplied') + raise AnsibleError("A password is required but none was supplied") else: no_prompt_out += become_output no_prompt_err += become_output if in_data: for i in range(0, len(in_data), bufsize): - chan.send(in_data[i:i + bufsize]) + chan.send(in_data[i : i + bufsize]) chan.shutdown_write() - elif in_data == b'': + elif in_data == b"": chan.shutdown_write() except socket.timeout: - raise AnsibleError(f'ssh timed out waiting for privilege escalation.\n{to_text(become_output)}') + raise AnsibleError(f"ssh timed out waiting for privilege escalation.\n{to_text(become_output)}") - stdout = b''.join(chan.makefile('rb', bufsize)) - stderr = b''.join(chan.makefile_stderr('rb', bufsize)) + stdout = b"".join(chan.makefile("rb", bufsize)) + stderr = b"".join(chan.makefile_stderr("rb", bufsize)) returncode = chan.recv_exit_status() # NB the full english error message is: # 'wsl.exe' is not recognized as an internal or external command, # operable program or batch file. - if "'wsl.exe' is not recognized" in stderr.decode('utf-8'): - raise AnsibleError( - f'wsl.exe not found in path of host: {to_text(self.get_option("remote_addr"))}') + if "'wsl.exe' is not recognized" in stderr.decode("utf-8"): + raise AnsibleError(f"wsl.exe not found in path of host: {to_text(self.get_option('remote_addr'))}") return (returncode, no_prompt_out + stdout, no_prompt_out + stderr) def put_file(self, in_path: str, out_path: str) -> None: - """ transfer a file from local to remote """ + """transfer a file from local to remote""" - display.vvv(f'PUT {in_path} TO {out_path}', host=self.get_option('remote_addr')) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.get_option("remote_addr")) try: - with open(in_path, 'rb') as f: + with open(in_path, "rb") as f: data = f.read() returncode, stdout, stderr = self.exec_command( f"{self._shell.executable} -c {self._shell.quote(f'cat > {out_path}')}", in_data=data, - sudoable=False) + sudoable=False, + ) if returncode != 0: - if 'cat: not found' in stderr.decode('utf-8'): + if "cat: not found" in stderr.decode("utf-8"): raise AnsibleError( - f'cat not found in path of WSL distribution: {to_text(self.get_option("wsl_distribution"))}') - raise AnsibleError( - f'{to_text(stdout)}\n{to_text(stderr)}') + f"cat not found in path of WSL distribution: {to_text(self.get_option('wsl_distribution'))}" + ) + raise AnsibleError(f"{to_text(stdout)}\n{to_text(stderr)}") except Exception as e: - raise AnsibleError( - f'error occurred while putting file from {in_path} to {out_path}!\n{to_text(e)}') + raise AnsibleError(f"error occurred while putting file from {in_path} to {out_path}!\n{to_text(e)}") def fetch_file(self, in_path: str, out_path: str) -> None: - """ save a remote file to the specified path """ + """save a remote file to the specified path""" - display.vvv(f'FETCH {in_path} TO {out_path}', host=self.get_option('remote_addr')) + 
display.vvv(f"FETCH {in_path} TO {out_path}", host=self.get_option("remote_addr")) try: returncode, stdout, stderr = self.exec_command( - f"{self._shell.executable} -c {self._shell.quote(f'cat {in_path}')}", - sudoable=False) + f"{self._shell.executable} -c {self._shell.quote(f'cat {in_path}')}", sudoable=False + ) if returncode != 0: - if 'cat: not found' in stderr.decode('utf-8'): + if "cat: not found" in stderr.decode("utf-8"): raise AnsibleError( - f'cat not found in path of WSL distribution: {to_text(self.get_option("wsl_distribution"))}') - raise AnsibleError( - f'{to_text(stdout)}\n{to_text(stderr)}') - with open(out_path, 'wb') as f: + f"cat not found in path of WSL distribution: {to_text(self.get_option('wsl_distribution'))}" + ) + raise AnsibleError(f"{to_text(stdout)}\n{to_text(stderr)}") + with open(out_path, "wb") as f: f.write(stdout) except Exception as e: - raise AnsibleError( - f'error occurred while fetching file from {in_path} to {out_path}!\n{to_text(e)}') + raise AnsibleError(f"error occurred while fetching file from {in_path} to {out_path}!\n{to_text(e)}") def reset(self) -> None: - """ reset the connection """ + """reset the connection""" if not self._connected: return @@ -724,9 +729,9 @@ def reset(self) -> None: self._connect() def close(self) -> None: - """ terminate the connection """ + """terminate the connection""" - if self.get_option('host_key_checking') and self.get_option('record_host_keys') and self._any_keys_added(): + if self.get_option("host_key_checking") and self.get_option("record_host_keys") and self._any_keys_added(): # add any new SSH host keys -- warning -- this could be slow # (This doesn't acquire the connection lock because it needs # to exclude only other known_hosts writers, not connections @@ -736,7 +741,7 @@ def close(self) -> None: makedirs_safe(dirname) tmp_keyfile_name = None try: - with FileLock().lock_file(lockfile, dirname, self.get_option('lock_file_timeout')): + with FileLock().lock_file(lockfile, dirname, self.get_option("lock_file_timeout")): # just in case any were added recently self.ssh.load_system_host_keys() @@ -769,14 +774,14 @@ def close(self) -> None: os.rename(tmp_keyfile_name, self.keyfile) except LockTimeout: raise AnsibleError( - f'writing lock file for {self.keyfile} ran in to the timeout of {self.get_option("lock_file_timeout")}s') + f"writing lock file for {self.keyfile} ran into the timeout of {self.get_option('lock_file_timeout')}s" + ) except paramiko.hostkeys.InvalidHostKey as e: - raise AnsibleConnectionFailure(f'Invalid host key: {e.line}') + raise AnsibleConnectionFailure(f"Invalid host key: {e.line}") except Exception as e: # unable to save keys, including scenario when key was invalid # and caught earlier - raise AnsibleError( - f'error occurred while writing SSH host keys!\n{to_text(e)}') + raise AnsibleError(f"error occurred while writing SSH host keys!\n{to_text(e)}") finally: if tmp_keyfile_name is not None: pathlib.Path(tmp_keyfile_name).unlink(missing_ok=True) diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py index 5f6fb154794..46501765e3b 100644 --- a/plugins/connection/zone.py +++ b/plugins/connection/zone.py @@ -42,9 +42,9 @@ class Connection(ConnectionBase): - """ Local zone based connections """ + """Local zone based connections""" - transport = 'community.general.zone' + transport = "community.general.zone" has_pipelining = True has_tty = False @@ -56,8 +56,8 @@ def __init__(self, play_context, new_stdin, *args, **kwargs): if os.geteuid() != 0: raise AnsibleError("zone
connection requires running as root") - self.zoneadm_cmd = to_bytes(self._search_executable('zoneadm')) - self.zlogin_cmd = to_bytes(self._search_executable('zlogin')) + self.zoneadm_cmd = to_bytes(self._search_executable("zoneadm")) + self.zlogin_cmd = to_bytes(self._search_executable("zlogin")) if self.zone not in self.list_zones(): raise AnsibleError(f"incorrect zone name {self.zone}") @@ -70,15 +70,15 @@ def _search_executable(executable): raise AnsibleError(f"{executable} command not found in PATH") def list_zones(self): - process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process = subprocess.Popen( + [self.zoneadm_cmd, "list", "-ip"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) zones = [] for line in process.stdout.readlines(): # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared - s = line.split(':') - if s[1] != 'global': + s = line.split(":") + if s[1] != "global": zones.append(s[1]) return zones @@ -86,23 +86,26 @@ def list_zones(self): def get_zone_path(self): # solaris10vm# zoneadm -z cswbuild list -p # -:cswbuild:installed:/zones/cswbuild:479f3c4b-d0c6-e97b-cd04-fd58f2c0238e:native:shared - process = subprocess.Popen([self.zoneadm_cmd, '-z', to_bytes(self.zone), 'list', '-p'], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process = subprocess.Popen( + [self.zoneadm_cmd, "-z", to_bytes(self.zone), "list", "-p"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) # stdout, stderr = p.communicate() - path = process.stdout.readlines()[0].split(':')[3] + path = process.stdout.readlines()[0].split(":")[3] return f"{path}/root" def _connect(self): - """ connect to the zone; nothing to do here """ + """connect to the zone; nothing to do here""" super()._connect() if not self._connected: display.vvv("THIS IS A LOCAL ZONE DIR", host=self.zone) self._connected = True def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): - """ run a command on the zone. This is only needed for implementing + """run a command on the zone. This is only needed for implementing put_file() get_file() so that we don't have to read the whole file into memory. @@ -116,13 +119,12 @@ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): local_cmd = map(to_bytes, local_cmd) display.vvv(f"EXEC {local_cmd}", host=self.zone) - p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return p def exec_command(self, cmd, in_data=None, sudoable=False): - """ run a command on the zone """ + """run a command on the zone""" super().exec_command(cmd, in_data=in_data, sudoable=sudoable) p = self._buffered_exec_command(cmd) @@ -131,33 +133,33 @@ def exec_command(self, cmd, in_data=None, sudoable=False): return p.returncode, stdout, stderr def _prefix_login_path(self, remote_path): - """ Make sure that we put files into a standard path + """Make sure that we put files into a standard path - If a path is relative, then we need to choose where to put it. - ssh chooses $HOME but we aren't guaranteed that a home dir will - exist in any given chroot. So for now we're choosing "/" instead. - This also happens to be the former default. + If a path is relative, then we need to choose where to put it. 
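For illustration, a minimal sketch of the colon-delimited parsing that list_zones performs above, reusing the sample line from the hunk's own comment:

# Parse one line of `zoneadm list -ip` output; field 1 is the zone name.
line = "1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared"
fields = line.split(":")
zones = []
if fields[1] != "global":  # the global zone is never a managed target
    zones.append(fields[1])
assert zones == ["work"]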
+ ssh chooses $HOME but we aren't guaranteed that a home dir will + exist in any given chroot. So for now we're choosing "/" instead. + This also happens to be the former default. - Can revisit using $HOME instead if it is a problem + Can revisit using $HOME instead if it is a problem """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) return os.path.normpath(remote_path) def put_file(self, in_path, out_path): - """ transfer a file from local to zone """ + """transfer a file from local to zone""" super().put_file(in_path, out_path) display.vvv(f"PUT {in_path} TO {out_path}", host=self.zone) out_path = shlex_quote(self._prefix_login_path(out_path)) try: - with open(in_path, 'rb') as in_file: + with open(in_path, "rb") as in_file: if not os.fstat(in_file.fileno()).st_size: - count = ' count=0' + count = " count=0" else: - count = '' + count = "" try: - p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file) + p = self._buffered_exec_command(f"dd of={out_path} bs={BUFSIZE}{count}", stdin=in_file) except OSError: raise AnsibleError("jail connection requires dd command in the jail") try: @@ -171,17 +173,17 @@ def put_file(self, in_path, out_path): raise AnsibleError(f"file or module does not exist at: {in_path}") def fetch_file(self, in_path, out_path): - """ fetch a file from zone to local """ + """fetch a file from zone to local""" super().fetch_file(in_path, out_path) display.vvv(f"FETCH {in_path} TO {out_path}", host=self.zone) in_path = shlex_quote(self._prefix_login_path(in_path)) try: - p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}') + p = self._buffered_exec_command(f"dd if={in_path} bs={BUFSIZE}") except OSError: raise AnsibleError("zone connection requires dd command in the zone") - with open(out_path, 'wb+') as out_file: + with open(out_path, "wb+") as out_file: try: chunk = p.stdout.read(BUFSIZE) while chunk: @@ -195,6 +197,6 @@ def fetch_file(self, in_path, out_path): raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") def close(self): - """ terminate the connection; nothing to do here """ + """terminate the connection; nothing to do here""" super().close() self._connected = False diff --git a/plugins/doc_fragments/alicloud.py b/plugins/doc_fragments/alicloud.py index 87ed982cd7a..204a40ac4cd 100644 --- a/plugins/doc_fragments/alicloud.py +++ b/plugins/doc_fragments/alicloud.py @@ -1,4 +1,3 @@ - # Copyright (c) 2017-present Alibaba Group Holding Limited. 
He Guimin # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,7 +6,6 @@ class ModuleDocFragment: - # Alicloud only documentation fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/attributes.py b/plugins/doc_fragments/attributes.py index 56af8d75c89..2a864f031e3 100644 --- a/plugins/doc_fragments/attributes.py +++ b/plugins/doc_fragments/attributes.py @@ -1,4 +1,3 @@ - # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,7 +6,6 @@ class ModuleDocFragment: - # Standard documentation fragment DOCUMENTATION = r""" options: {} @@ -27,7 +25,7 @@ class ModuleDocFragment: """ # Should be used together with the standard fragment - INFO_MODULE = r''' + INFO_MODULE = r""" options: {} attributes: check_mode: @@ -38,7 +36,7 @@ class ModuleDocFragment: support: N/A details: - This action does not modify state. -''' +""" CONN = r""" options: {} @@ -59,7 +57,7 @@ class ModuleDocFragment: """ # Should be used together with the standard fragment and the FACTS fragment - FACTS_MODULE = r''' + FACTS_MODULE = r""" options: {} attributes: check_mode: @@ -72,7 +70,7 @@ class ModuleDocFragment: - This action does not modify state. facts: support: full -''' +""" FILES = r""" options: {} diff --git a/plugins/doc_fragments/auth_basic.py b/plugins/doc_fragments/auth_basic.py index 4af17e96a8e..cf95b625dad 100644 --- a/plugins/doc_fragments/auth_basic.py +++ b/plugins/doc_fragments/auth_basic.py @@ -6,7 +6,6 @@ class ModuleDocFragment: - # Standard files documentation fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/bitbucket.py b/plugins/doc_fragments/bitbucket.py index a27ee13dc9b..0a0c6551f19 100644 --- a/plugins/doc_fragments/bitbucket.py +++ b/plugins/doc_fragments/bitbucket.py @@ -1,4 +1,3 @@ - # Copyright (c) 2019, Evgeniy Krysanov # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,7 +6,6 @@ class ModuleDocFragment: - # Standard documentation fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/dimensiondata.py b/plugins/doc_fragments/dimensiondata.py index 895c31ecc6c..91ece276191 100644 --- a/plugins/doc_fragments/dimensiondata.py +++ b/plugins/doc_fragments/dimensiondata.py @@ -16,7 +16,6 @@ class ModuleDocFragment: - # Dimension Data doc fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/dimensiondata_wait.py b/plugins/doc_fragments/dimensiondata_wait.py index 25bca02d687..933c7e9fbf8 100644 --- a/plugins/doc_fragments/dimensiondata_wait.py +++ b/plugins/doc_fragments/dimensiondata_wait.py @@ -16,7 +16,6 @@ class ModuleDocFragment: - # Dimension Data ("wait-for-completion" parameters) doc fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/emc.py b/plugins/doc_fragments/emc.py index 5af0e6eb973..a491a29742b 100644 --- a/plugins/doc_fragments/emc.py +++ b/plugins/doc_fragments/emc.py @@ -1,4 +1,3 @@ - # Copyright (c) 2018, Luca Lorenzetto (@remix_tj) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,9 +6,8 @@ class ModuleDocFragment: - # Documentation fragment for VNX (emc_vnx) - EMC_VNX = r''' + EMC_VNX = r""" options: sp_address: 
description: @@ -31,4 +29,4 @@ class ModuleDocFragment: - storops (0.5.10 or greater). Install using C(pip install storops). notes: - The modules prefixed with C(emc_vnx) are built to support the EMC VNX storage platform. -''' +""" diff --git a/plugins/doc_fragments/gitlab.py b/plugins/doc_fragments/gitlab.py index b78b6ac2c6b..15658a39368 100644 --- a/plugins/doc_fragments/gitlab.py +++ b/plugins/doc_fragments/gitlab.py @@ -6,7 +6,6 @@ class ModuleDocFragment: - # Standard files documentation fragment DOCUMENTATION = r""" requirements: diff --git a/plugins/doc_fragments/hpe3par.py b/plugins/doc_fragments/hpe3par.py index da415098497..f508e035cda 100644 --- a/plugins/doc_fragments/hpe3par.py +++ b/plugins/doc_fragments/hpe3par.py @@ -6,7 +6,6 @@ class ModuleDocFragment: - # HPE 3PAR doc fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/hwc.py b/plugins/doc_fragments/hwc.py index 2415c8d0d98..e89869c8ba8 100644 --- a/plugins/doc_fragments/hwc.py +++ b/plugins/doc_fragments/hwc.py @@ -6,7 +6,6 @@ class ModuleDocFragment: - # HWC doc fragment. DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/ibm_storage.py b/plugins/doc_fragments/ibm_storage.py index 950e8f443f3..05169c8979d 100644 --- a/plugins/doc_fragments/ibm_storage.py +++ b/plugins/doc_fragments/ibm_storage.py @@ -1,4 +1,3 @@ - # Copyright (c) 2018, IBM CORPORATION # Author(s): Tzur Eliyahu # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -8,7 +7,6 @@ class ModuleDocFragment: - # ibm_storage documentation fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/influxdb.py b/plugins/doc_fragments/influxdb.py index 7fe50c29dc6..db48fc99238 100644 --- a/plugins/doc_fragments/influxdb.py +++ b/plugins/doc_fragments/influxdb.py @@ -1,4 +1,3 @@ - # Copyright (c) 2017, Ansible Project # Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/ipa.py b/plugins/doc_fragments/ipa.py index d5ace8d9212..9ff7d5cfba2 100644 --- a/plugins/doc_fragments/ipa.py +++ b/plugins/doc_fragments/ipa.py @@ -1,4 +1,3 @@ - # Copyright (c) 2017-18, Ansible Project # Copyright (c) 2017-18, Abhijeet Kasurde (akasurde@redhat.com) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/keycloak.py b/plugins/doc_fragments/keycloak.py index dd598f8cd9c..e64e3ea8fea 100644 --- a/plugins/doc_fragments/keycloak.py +++ b/plugins/doc_fragments/keycloak.py @@ -1,4 +1,3 @@ - # Copyright (c) 2017, Eike Frost # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,7 +6,6 @@ class ModuleDocFragment: - # Standard documentation fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/ldap.py b/plugins/doc_fragments/ldap.py index abaa92246c1..b666c12c181 100644 --- a/plugins/doc_fragments/ldap.py +++ b/plugins/doc_fragments/ldap.py @@ -1,4 +1,3 @@ - # Copyright (c) 2016, Peter Sagerson # Copyright (c) 2016, Jiri Tyr # Copyright (c) 2017-2018 Keller Fuchs (@KellerFuchs) diff --git a/plugins/doc_fragments/lxca_common.py b/plugins/doc_fragments/lxca_common.py index f542e283fd9..0ae60db0f1b 100644 --- a/plugins/doc_fragments/lxca_common.py +++ b/plugins/doc_fragments/lxca_common.py @@ -1,4 +1,3 @@ 
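For context, a brief sketch of how these doc fragments are consumed: a module's DOCUMENTATION opts in with extends_documentation_fragment, addressing a non-default attribute such as EMC_VNX above by its lowercased name. The module name below is hypothetical:

DOCUMENTATION = r"""
module: my_vnx_module  # hypothetical module for illustration
short_description: Manage a resource on an EMC VNX array
extends_documentation_fragment:
  - community.general.emc.emc_vnx
options: {}
"""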
- # Copyright (C) 2017 Lenovo, Inc. # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause diff --git a/plugins/doc_fragments/manageiq.py b/plugins/doc_fragments/manageiq.py index ecbd9de4e22..d88237ae0ea 100644 --- a/plugins/doc_fragments/manageiq.py +++ b/plugins/doc_fragments/manageiq.py @@ -1,4 +1,3 @@ - # Copyright (c) 2017, Daniel Korn # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,7 +6,6 @@ class ModuleDocFragment: - # Standard ManageIQ documentation fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/nomad.py b/plugins/doc_fragments/nomad.py index 170e1104552..02ae34eb127 100644 --- a/plugins/doc_fragments/nomad.py +++ b/plugins/doc_fragments/nomad.py @@ -1,4 +1,3 @@ - # Copyright (c) 2020 FERREIRA Christophe # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,7 +6,6 @@ class ModuleDocFragment: - # Standard files documentation fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/onepassword.py b/plugins/doc_fragments/onepassword.py index c57f648945a..ad2777da327 100644 --- a/plugins/doc_fragments/onepassword.py +++ b/plugins/doc_fragments/onepassword.py @@ -1,4 +1,3 @@ - # Copyright (c) 2023, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later diff --git a/plugins/doc_fragments/oneview.py b/plugins/doc_fragments/oneview.py index 62bbe7dd1a2..b14a38ce0b6 100644 --- a/plugins/doc_fragments/oneview.py +++ b/plugins/doc_fragments/oneview.py @@ -7,7 +7,6 @@ class ModuleDocFragment: - # OneView doc fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/online.py b/plugins/doc_fragments/online.py index a093be1f44b..a9cbafe7b9e 100644 --- a/plugins/doc_fragments/online.py +++ b/plugins/doc_fragments/online.py @@ -6,7 +6,6 @@ class ModuleDocFragment: - # Standard documentation fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/opennebula.py b/plugins/doc_fragments/opennebula.py index 525e0345563..a35cf1aaf79 100644 --- a/plugins/doc_fragments/opennebula.py +++ b/plugins/doc_fragments/opennebula.py @@ -1,4 +1,3 @@ - # Copyright (c) 2018, www.privaz.io Valletech AB # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later diff --git a/plugins/doc_fragments/openswitch.py b/plugins/doc_fragments/openswitch.py index c2f7269fee5..de78d26d769 100644 --- a/plugins/doc_fragments/openswitch.py +++ b/plugins/doc_fragments/openswitch.py @@ -1,4 +1,3 @@ - # Copyright (c) 2015, Peter Sprygada # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,7 +6,6 @@ class ModuleDocFragment: - # Standard files documentation fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/pipx.py b/plugins/doc_fragments/pipx.py index 9da39651809..0edcf421010 100644 --- a/plugins/doc_fragments/pipx.py +++ b/plugins/doc_fragments/pipx.py @@ -1,4 +1,3 @@ - # Copyright (c) 2024, Alexei Znamensky # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later diff --git a/plugins/doc_fragments/pritunl.py b/plugins/doc_fragments/pritunl.py index 24ab62c7994..5e7ada1e9fb 100644 --- a/plugins/doc_fragments/pritunl.py +++ b/plugins/doc_fragments/pritunl.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Florian Dambrine # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,7 +6,6 @@ class ModuleDocFragment: - DOCUMENTATION = r""" options: pritunl_url: diff --git a/plugins/doc_fragments/redfish.py b/plugins/doc_fragments/redfish.py index 25fe748e4ad..de26ffa2a66 100644 --- a/plugins/doc_fragments/redfish.py +++ b/plugins/doc_fragments/redfish.py @@ -1,4 +1,3 @@ - # Copyright (c) 2025 Ansible community # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,7 +6,6 @@ class ModuleDocFragment: - # Use together with the community.general.redfish module utils' REDFISH_COMMON_ARGUMENT_SPEC DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/redis.py b/plugins/doc_fragments/redis.py index 6784c867083..ded71a7bc3e 100644 --- a/plugins/doc_fragments/redis.py +++ b/plugins/doc_fragments/redis.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Andreas Botzner # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later diff --git a/plugins/doc_fragments/rundeck.py b/plugins/doc_fragments/rundeck.py index 8642a49ffcc..a9591289964 100644 --- a/plugins/doc_fragments/rundeck.py +++ b/plugins/doc_fragments/rundeck.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Phillipe Smith # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,7 +6,6 @@ class ModuleDocFragment: - # Standard files documentation fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/scaleway.py b/plugins/doc_fragments/scaleway.py index bfef59edaea..1db61fbf020 100644 --- a/plugins/doc_fragments/scaleway.py +++ b/plugins/doc_fragments/scaleway.py @@ -1,4 +1,3 @@ - # Copyright (c) 2018, Yanis Guenane # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,7 +6,6 @@ class ModuleDocFragment: - # Standard documentation fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/scaleway_waitable_resource.py b/plugins/doc_fragments/scaleway_waitable_resource.py index 009a300f70e..52061450369 100644 --- a/plugins/doc_fragments/scaleway_waitable_resource.py +++ b/plugins/doc_fragments/scaleway_waitable_resource.py @@ -1,4 +1,3 @@ - # Copyright (c) 2022, Guillaume MARTINEZ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,7 +6,6 @@ class ModuleDocFragment: - # Standard documentation fragment DOCUMENTATION = r""" options: diff --git a/plugins/doc_fragments/utm.py b/plugins/doc_fragments/utm.py index fa52dc87a84..e9c2c4dcde9 100644 --- a/plugins/doc_fragments/utm.py +++ b/plugins/doc_fragments/utm.py @@ -1,4 +1,3 @@ - # Copyright (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/plugins/doc_fragments/vexata.py b/plugins/doc_fragments/vexata.py index 7d07183bb7e..4c5cd6e5437 100644 --- a/plugins/doc_fragments/vexata.py +++ b/plugins/doc_fragments/vexata.py @@ -7,9 +7,8 @@ class ModuleDocFragment: - # Documentation fragment for Vexata VX100 series - VX100 = r''' + VX100 = r""" options: array: description: @@ -41,4 +40,4 @@ class ModuleDocFragment: - vexatapi >= 0.0.1 - E(VEXATA_USER) and E(VEXATA_PASSWORD) environment variables must be set if user and password arguments are not passed to the module directly. -''' +"""
diff --git a/plugins/doc_fragments/xenserver.py b/plugins/doc_fragments/xenserver.py index 89c6b0548cd..f1abe0be960 100644 --- a/plugins/doc_fragments/xenserver.py +++ b/plugins/doc_fragments/xenserver.py @@ -1,4 +1,3 @@ - # Copyright (c) 2018, Bojan Vitnik # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/plugins/filter/accumulate.py b/plugins/filter/accumulate.py index bd2f5ae932f..8b4b70015b1 100644 --- a/plugins/filter/accumulate.py +++ b/plugins/filter/accumulate.py @@ -49,14 +49,13 @@ def list_accumulate(sequence): if not isinstance(sequence, Sequence): - raise AnsibleFilterError(f'Invalid value type ({type(sequence)}) for accumulate ({sequence!r})') + raise AnsibleFilterError(f"Invalid value type ({type(sequence)}) for accumulate ({sequence!r})") return accumulate(sequence) class FilterModule: - def filters(self): return { - 'accumulate': list_accumulate, + "accumulate": list_accumulate, }
diff --git a/plugins/filter/counter.py b/plugins/filter/counter.py index 7830baa5b55..e903859ead7 100644 --- a/plugins/filter/counter.py +++ b/plugins/filter/counter.py @@ -40,9 +40,11 @@ def counter(sequence): - ''' Count elements in a sequence. Returns dict with count result. ''' + """Count elements in a sequence. Returns dict with count result.""" if not isinstance(sequence, Sequence): - raise AnsibleFilterError(f'Argument for community.general.counter must be a sequence (string or list). {sequence} is {type(sequence)}') + raise AnsibleFilterError( + f"Argument for community.general.counter must be a sequence (string or list). {sequence} is {type(sequence)}" + ) try: result = dict(Counter(sequence)) @@ -54,11 +56,11 @@ def counter(sequence): class FilterModule: - ''' Ansible counter jinja2 filters ''' + """Ansible counter jinja2 filters""" def filters(self): filters = { - 'counter': counter, + "counter": counter, } return filters
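For reference, the counter filter in the hunk above is an ordinary Python function, so its behavior can be checked outside of a playbook. A minimal sketch (the import path assumes an installed community.general collection; the sample inputs are invented):

    from ansible_collections.community.general.plugins.filter.counter import counter

    # A string is a Sequence, so each character is counted individually.
    print(counter("abca"))           # {'a': 2, 'b': 1, 'c': 1}
    # Lists count repeated elements the same way.
    print(counter(["x", "y", "x"]))  # {'x': 2, 'y': 1}

In a task the equivalent expression would be {{ 'abca' | community.general.counter }}.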
diff --git a/plugins/filter/crc32.py b/plugins/filter/crc32.py index 11a6e774959..76922d8562f 100644 --- a/plugins/filter/crc32.py +++ b/plugins/filter/crc32.py @@ -9,6 +9,7 @@ try: from zlib import crc32 + HAS_ZLIB = True except ImportError: HAS_ZLIB = False @@ -45,17 +46,17 @@ def crc32s(value): if not is_string(value): - raise AnsibleFilterError(f'Invalid value type ({type(value)}) for crc32 ({value!r})') + raise AnsibleFilterError(f"Invalid value type ({type(value)}) for crc32 ({value!r})") if not HAS_ZLIB: - raise AnsibleFilterError('Failed to import zlib module') + raise AnsibleFilterError("Failed to import zlib module") - data = to_bytes(value, errors='surrogate_or_strict') - return f"{crc32(data) & 0xffffffff:x}" + data = to_bytes(value, errors="surrogate_or_strict") + return f"{crc32(data) & 0xFFFFFFFF:x}" class FilterModule: def filters(self): return { - 'crc32': crc32s, + "crc32": crc32s, }
diff --git a/plugins/filter/dict.py b/plugins/filter/dict.py index 193871d5854..fd1f6703a7e 100644 --- a/plugins/filter/dict.py +++ b/plugins/filter/dict.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Felix Fontein # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -61,17 +60,17 @@ def dict_filter(sequence): - '''Convert a list of tuples to a dictionary. + """Convert a list of tuples to a dictionary. Example: ``[[1, 2], ['a', 'b']] | community.general.dict`` results in ``{1: 2, 'a': 'b'}`` - ''' + """ return dict(sequence) class FilterModule: - '''Ansible jinja2 filters''' + """Ansible jinja2 filters""" def filters(self): return { - 'dict': dict_filter, + "dict": dict_filter, }
diff --git a/plugins/filter/dict_kv.py b/plugins/filter/dict_kv.py index a8f418d38f4..1b6501714d8 100644 --- a/plugins/filter/dict_kv.py +++ b/plugins/filter/dict_kv.py @@ -38,7 +38,7 @@ def dict_kv(value, key): - '''Return a dictionary with a single key-value pair + """Return a dictionary with a single key-value pair Example: @@ -89,14 +89,12 @@ def dict_kv(value, key): } ] } - ''' + """ return {key: value} class FilterModule: - ''' Query filter ''' + """Query filter""" def filters(self): - return { - 'dict_kv': dict_kv - } + return {"dict_kv": dict_kv}
diff --git a/plugins/filter/from_csv.py b/plugins/filter/from_csv.py index b57a5a07a4a..01329ec6dd9 100644 --- a/plugins/filter/from_csv.py +++ b/plugins/filter/from_csv.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Andrew Pantuso (@ajpantuso) # Copyright (c) 2018, Dag Wieers (@dagwieers) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -80,13 +79,16 @@ from ansible.errors import AnsibleFilterError -from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError, - DialectNotAvailableError, - CustomDialectFailureError) - +from ansible_collections.community.general.plugins.module_utils.csv import ( + initialize_dialect, + read_csv, + CSVError, + DialectNotAvailableError, + CustomDialectFailureError, +) -def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitialspace=None, strict=None): +def from_csv(data, dialect="excel", 
fieldnames=None, delimiter=None, skipinitialspace=None, strict=None): dialect_params = { "delimiter": delimiter, "skipinitialspace": skipinitialspace, @@ -112,8 +114,5 @@ def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitial class FilterModule: - def filters(self): - return { - 'from_csv': from_csv - } + return {"from_csv": from_csv} diff --git a/plugins/filter/from_ini.py b/plugins/filter/from_ini.py index 52112d56d4d..12dc3e8d9a4 100644 --- a/plugins/filter/from_ini.py +++ b/plugins/filter/from_ini.py @@ -1,4 +1,3 @@ - # Copyright (c) 2023, Steffen Scheib # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -51,7 +50,7 @@ class IniParser(ConfigParser): - ''' Implements a configparser which is able to return a dict ''' + """Implements a configparser which is able to return a dict""" def __init__(self): super().__init__(interpolation=None) @@ -61,35 +60,32 @@ def as_dict(self): d = dict(self._sections) for k in d: d[k] = dict(self._defaults, **d[k]) - d[k].pop('__name__', None) + d[k].pop("__name__", None) if self._defaults: - d['DEFAULT'] = dict(self._defaults) + d["DEFAULT"] = dict(self._defaults) return d def from_ini(obj): - ''' Read the given string as INI file and return a dict ''' + """Read the given string as INI file and return a dict""" if not isinstance(obj, str): - raise AnsibleFilterError(f'from_ini requires a str, got {type(obj)}') + raise AnsibleFilterError(f"from_ini requires a str, got {type(obj)}") parser = IniParser() try: parser.read_file(StringIO(obj)) except Exception as ex: - raise AnsibleFilterError(f'from_ini failed to parse given string: {ex}', orig_exc=ex) + raise AnsibleFilterError(f"from_ini failed to parse given string: {ex}", orig_exc=ex) return parser.as_dict() class FilterModule: - ''' Query filter ''' + """Query filter""" def filters(self): - - return { - 'from_ini': from_ini - } + return {"from_ini": from_ini} diff --git a/plugins/filter/groupby_as_dict.py b/plugins/filter/groupby_as_dict.py index fc174da0d95..f73fd52b749 100644 --- a/plugins/filter/groupby_as_dict.py +++ b/plugins/filter/groupby_as_dict.py @@ -57,33 +57,33 @@ def groupby_as_dict(sequence, attribute): - ''' + """ Given a sequence of dictionaries and an attribute name, returns a dictionary mapping the value of this attribute to the dictionary. If multiple dictionaries in the sequence have the same value for this attribute, the filter will fail. 
- ''' + """ if not isinstance(sequence, Sequence): - raise AnsibleFilterError('Input is not a sequence') + raise AnsibleFilterError("Input is not a sequence") result = dict() for list_index, element in enumerate(sequence): if not isinstance(element, Mapping): - raise AnsibleFilterError(f'Sequence element #{list_index} is not a mapping') + raise AnsibleFilterError(f"Sequence element #{list_index} is not a mapping") if attribute not in element: - raise AnsibleFilterError(f'Attribute not contained in element #{list_index} of sequence') + raise AnsibleFilterError(f"Attribute not contained in element #{list_index} of sequence") result_index = element[attribute] if result_index in result: - raise AnsibleFilterError(f'Multiple sequence entries have attribute value {result_index!r}') + raise AnsibleFilterError(f"Multiple sequence entries have attribute value {result_index!r}") result[result_index] = element return result class FilterModule: - ''' Ansible list filters ''' + """Ansible list filters""" def filters(self): return { - 'groupby_as_dict': groupby_as_dict, + "groupby_as_dict": groupby_as_dict, }
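To make the groupby_as_dict contract above concrete, here is a minimal sketch calling the filter function directly (the import path assumes an installed community.general collection; the data is invented):

    from ansible_collections.community.general.plugins.filter.groupby_as_dict import groupby_as_dict

    processes = [
        {"name": "sshd", "pid": 101},
        {"name": "cron", "pid": 102},
    ]
    # Each mapping is keyed by the value of the requested attribute;
    # a duplicated attribute value raises AnsibleFilterError.
    print(groupby_as_dict(processes, "name"))
    # {'sshd': {'name': 'sshd', 'pid': 101}, 'cron': {'name': 'cron', 'pid': 102}}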
diff --git a/plugins/filter/hashids.py b/plugins/filter/hashids.py index 0f76505b2aa..d67b8579562 100644 --- a/plugins/filter/hashids.py +++ b/plugins/filter/hashids.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Andrew Pantuso (@ajpantuso) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -20,6 +19,7 @@ try: from hashids import Hashids + HAS_HASHIDS = True except ImportError: HAS_HASHIDS = False @@ -35,27 +35,21 @@ def initialize_hashids(**kwargs): return Hashids(**params) except TypeError as e: raise AnsibleFilterError( - "The provided parameters %s are invalid: %s" % ( - ', '.join(["%s=%s" % (k, v) for k, v in params.items()]), - to_native(e) - ) + "The provided parameters %s are invalid: %s" + % (", ".join(["%s=%s" % (k, v) for k, v in params.items()]), to_native(e)) ) def hashids_encode(nums, salt=None, alphabet=None, min_length=None): """Generates a YouTube-like hash from a sequence of ints - :nums: Sequence of one or more ints to hash - :salt: String to use as salt when hashing - :alphabet: String of 16 or more unique characters to produce a hash - :min_length: Minimum length of hash produced + :nums: Sequence of one or more ints to hash + :salt: String to use as salt when hashing + :alphabet: String of 16 or more unique characters to produce a hash + :min_length: Minimum length of hash produced """ - hashids = initialize_hashids( - salt=salt, - alphabet=alphabet, - min_length=min_length - ) + hashids = initialize_hashids(salt=salt, alphabet=alphabet, min_length=min_length) # Handles the case where a single int is not encapsulated in a list or tuple. # User convenience seems preferable to strict typing in this case @@ -74,25 +68,20 @@ def hashids_encode(nums, salt=None, alphabet=None, min_length=None): def hashids_decode(hashid, salt=None, alphabet=None, min_length=None): """Decodes a YouTube-like hash to a sequence of ints - :hashid: Hash string to decode - :salt: String to use as salt when hashing - :alphabet: String of 16 or more unique characters to produce a hash - :min_length: Minimum length of hash produced + :hashid: Hash string to decode + :salt: String to use as salt when hashing + :alphabet: String of 16 or more unique characters to produce a hash + :min_length: Minimum length of hash produced """ - hashids = initialize_hashids( - salt=salt, - alphabet=alphabet, - min_length=min_length - ) + hashids = initialize_hashids(salt=salt, alphabet=alphabet, min_length=min_length) nums = hashids.decode(hashid) return list(nums) class FilterModule: - def filters(self): return { - 'hashids_encode': hashids_encode, - 'hashids_decode': hashids_decode, + "hashids_encode": hashids_encode, + "hashids_decode": hashids_decode, }
diff --git a/plugins/filter/jc.py b/plugins/filter/jc.py index d4edb16e4bf..05022baf149 100644 --- a/plugins/filter/jc.py +++ b/plugins/filter/jc.py @@ -79,6 +79,7 @@ try: import jc + HAS_LIB = True except ImportError: HAS_LIB = False @@ -133,26 +134,28 @@ def jc_filter(data, parser, quiet=True, raw=False): """ if not HAS_LIB: - raise AnsibleError('You need to install "jc" as a Python library on the Ansible controller prior to running jc filter') + raise AnsibleError( + 'You need to install "jc" as a Python library on the Ansible controller prior to running jc filter' + ) try: # new API (jc v1.18.0 and higher) allows use of plugin parsers - if hasattr(jc, 'parse'): + if hasattr(jc, "parse"): return jc.parse(parser, data, quiet=quiet, raw=raw) # old API (jc v1.17.7 and lower) else: - jc_parser = importlib.import_module(f'jc.parsers.{parser}') + jc_parser = importlib.import_module(f"jc.parsers.{parser}") return jc_parser.parse(data, quiet=quiet, raw=raw) except Exception as e: - raise AnsibleFilterError(f'Error in jc filter plugin: {e}') + raise AnsibleFilterError(f"Error in jc filter plugin: {e}") class FilterModule: - ''' Query filter ''' + """Query filter""" def filters(self): return { - 'jc': jc_filter, + "jc": jc_filter, }
diff --git a/plugins/filter/json_patch.py b/plugins/filter/json_patch.py index 59d6caddb32..66536331537 100644 --- a/plugins/filter/json_patch.py +++ b/plugins/filter/json_patch.py @@ -35,39 +35,28 @@ def check_json_object(self, filter_name: str, object_name: str, inp: Any): try: return loads(inp) except Exception as e: - raise AnsibleFilterError( - f"{filter_name}: could not decode JSON from {object_name}: {e}" - ) from e + raise AnsibleFilterError(f"{filter_name}: could not decode JSON from {object_name}: {e}") from e if not isinstance(inp, (list, dict)): - raise AnsibleFilterError( - f"{filter_name}: {object_name} is not dictionary, list or string" - ) + raise AnsibleFilterError(f"{filter_name}: {object_name} is not dictionary, list or string") return inp def check_patch_arguments(self, filter_name: str, args: dict): - if "op" not in args or not isinstance(args["op"], str): raise AnsibleFilterError(f"{filter_name}: 'op' argument is not a string") if args["op"] not in OPERATIONS_AVAILABLE: - raise AnsibleFilterError( - f"{filter_name}: unsupported 'op' argument: {args['op']}" - ) + raise AnsibleFilterError(f"{filter_name}: unsupported 'op' argument: {args['op']}") if "path" not in args or not 
isinstance(args["path"], str): raise AnsibleFilterError(f"{filter_name}: 'path' argument is not a string") if args["op"] in OPERATIONS_NEEDING_FROM: if "from" not in args: - raise AnsibleFilterError( - f"{filter_name}: 'from' argument missing for '{args['op']}' operation" - ) + raise AnsibleFilterError(f"{filter_name}: 'from' argument missing for '{args['op']}' operation") if not isinstance(args["from"], str): - raise AnsibleFilterError( - f"{filter_name}: 'from' argument is not a string" - ) + raise AnsibleFilterError(f"{filter_name}: 'from' argument is not a string") def json_patch( self, @@ -77,7 +66,6 @@ def json_patch( value: Any = None, **kwargs: dict, ) -> Any: - if not HAS_LIB: raise AnsibleFilterError( "You need to install 'jsonpatch' package prior to running 'json_patch' filter" @@ -88,9 +76,7 @@ def json_patch( fail_test = kwargs.pop("fail_test", False) if kwargs: - raise AnsibleFilterError( - f"json_patch: unexpected keywords arguments: {', '.join(sorted(kwargs))}" - ) + raise AnsibleFilterError(f"json_patch: unexpected keywords arguments: {', '.join(sorted(kwargs))}") if not isinstance(fail_test, bool): raise AnsibleFilterError("json_patch: 'fail_test' argument is not a bool") @@ -109,9 +95,7 @@ def json_patch( result = jsonpatch.apply_patch(inp, [args]) except jsonpatch.JsonPatchTestFailed as e: if fail_test: - raise AnsibleFilterError( - f"json_patch: test operation failed: {e}" - ) from e + raise AnsibleFilterError(f"json_patch: test operation failed: {e}") from e else: pass except Exception as e: @@ -126,16 +110,13 @@ def json_patch_recipe( /, fail_test: bool = False, ) -> Any: - if not HAS_LIB: raise AnsibleFilterError( "You need to install 'jsonpatch' package prior to running 'json_patch_recipe' filter" ) from JSONPATCH_IMPORT_ERROR if not isinstance(operations, list): - raise AnsibleFilterError( - "json_patch_recipe: 'operations' needs to be a list" - ) + raise AnsibleFilterError("json_patch_recipe: 'operations' needs to be a list") if not isinstance(fail_test, bool): raise AnsibleFilterError("json_patch: 'fail_test' argument is not a bool") @@ -150,9 +131,7 @@ def json_patch_recipe( result = jsonpatch.apply_patch(inp, operations) except jsonpatch.JsonPatchTestFailed as e: if fail_test: - raise AnsibleFilterError( - f"json_patch_recipe: test operation failed: {e}" - ) from e + raise AnsibleFilterError(f"json_patch_recipe: test operation failed: {e}") from e else: pass except Exception as e: @@ -165,7 +144,6 @@ def json_diff( inp: Union[str, list, dict, bytes, bytearray], target: Union[str, list, dict, bytes, bytearray], ) -> list: - if not HAS_LIB: raise AnsibleFilterError( "You need to install 'jsonpatch' package prior to running 'json_diff' filter" diff --git a/plugins/filter/json_query.py b/plugins/filter/json_query.py index 320258ed52a..9c98234c58b 100644 --- a/plugins/filter/json_query.py +++ b/plugins/filter/json_query.py @@ -109,44 +109,46 @@ try: import jmespath + HAS_LIB = True except ImportError: HAS_LIB = False def json_query(data, expr): - '''Query data using jmespath query language ( http://jmespath.org ). Example: + """Query data using jmespath query language ( http://jmespath.org ). 
Example: - ansible.builtin.debug: msg="{{ instance | json_query(tagged_instances[*].block_device_mapping.*.volume_id') }}" - ''' + """ if not HAS_LIB: - raise AnsibleError('You need to install "jmespath" prior to running ' - 'json_query filter') + raise AnsibleError('You need to install "jmespath" prior to running json_query filter') # Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence # See issues https://github.com/ansible-collections/community.general/issues/320 # and https://github.com/ansible/ansible/issues/85600. - jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ( - 'AnsibleUnicode', 'AnsibleUnsafeText', '_AnsibleTaggedStr', + jmespath.functions.REVERSE_TYPES_MAP["string"] = jmespath.functions.REVERSE_TYPES_MAP["string"] + ( + "AnsibleUnicode", + "AnsibleUnsafeText", + "_AnsibleTaggedStr", ) - jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ( - 'AnsibleSequence', '_AnsibleLazyTemplateList', + jmespath.functions.REVERSE_TYPES_MAP["array"] = jmespath.functions.REVERSE_TYPES_MAP["array"] + ( + "AnsibleSequence", + "_AnsibleLazyTemplateList", ) - jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ( - 'AnsibleMapping', '_AnsibleLazyTemplateDict', + jmespath.functions.REVERSE_TYPES_MAP["object"] = jmespath.functions.REVERSE_TYPES_MAP["object"] + ( + "AnsibleMapping", + "_AnsibleLazyTemplateDict", ) try: return jmespath.search(expr, data) except jmespath.exceptions.JMESPathError as e: - raise AnsibleFilterError(f'JMESPathError in json_query filter plugin:\n{e}') + raise AnsibleFilterError(f"JMESPathError in json_query filter plugin:\n{e}") except Exception as e: # For older jmespath, we can get ValueError and TypeError without much info. - raise AnsibleFilterError(f'Error in jmespath.search in json_query filter plugin:\n{e}') + raise AnsibleFilterError(f"Error in jmespath.search in json_query filter plugin:\n{e}") class FilterModule: - ''' Query filter ''' + """Query filter""" def filters(self): - return { - 'json_query': json_query - } + return {"json_query": json_query}
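As a sanity check of the json_query behavior shown above, a minimal sketch (this requires the jmespath library on the controller; the data is invented):

    from ansible_collections.community.general.plugins.filter.json_query import json_query

    data = {"instances": [
        {"id": "i-1", "state": "running"},
        {"id": "i-2", "state": "stopped"},
    ]}
    # JMESPath filter projection: ids of all running instances.
    print(json_query(data, "instances[?state=='running'].id"))  # ['i-1']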
diff --git a/plugins/filter/keep_keys.py b/plugins/filter/keep_keys.py index d1a0ab83199..a2170f973ec 100644 --- a/plugins/filter/keep_keys.py +++ b/plugins/filter/keep_keys.py @@ -101,10 +101,11 @@ from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( _keys_filter_params, - _keys_filter_target_str) + _keys_filter_target_str, +) -def keep_keys(data, target=None, matching_parameter='equal'): +def keep_keys(data, target=None, matching_parameter="equal"): """keep specific keys from dictionaries in a list""" # test parameters @@ -112,16 +113,20 @@ def keep_keys(data, target=None, matching_parameter='equal'): # test and transform target tt = _keys_filter_target_str(target, matching_parameter) - if matching_parameter == 'equal': + if matching_parameter == "equal": + def keep_key(key): return key in tt - elif matching_parameter == 'starts_with': + elif matching_parameter == "starts_with": + def keep_key(key): return key.startswith(tt) - elif matching_parameter == 'ends_with': + elif matching_parameter == "ends_with": + def keep_key(key): return key.endswith(tt) - elif matching_parameter == 'regex': + elif matching_parameter == "regex": + def keep_key(key): return tt.match(key) is not None @@ -129,8 +134,7 @@ def keep_key(key): class FilterModule: - def filters(self): return { - 'keep_keys': keep_keys, + "keep_keys": keep_keys, }
diff --git a/plugins/filter/lists.py b/plugins/filter/lists.py index 9f5652078ff..0c6154075f4 100644 --- a/plugins/filter/lists.py +++ b/plugins/filter/lists.py @@ -32,7 +32,7 @@ def flatten_list(lst): result = [] for sublist in lst: if not is_sequence(sublist): - msg = ("All arguments must be lists. %s is %s") + msg = "All arguments must be lists. %s is %s"
%s is %s" raise AnsibleFilterError(msg % (sublist, type(sublist))) if len(sublist) > 0: if all(is_sequence(sub) for sub in sublist): @@ -45,13 +45,11 @@ def flatten_list(lst): def lists_union(*args, **kwargs): lists = args - flatten = kwargs.pop('flatten', False) + flatten = kwargs.pop("flatten", False) if kwargs: # Some unused kwargs remain - raise AnsibleFilterError( - f"lists_union() got unexpected keywords arguments: {', '.join(kwargs.keys())}" - ) + raise AnsibleFilterError(f"lists_union() got unexpected keywords arguments: {', '.join(kwargs.keys())}") if flatten: lists = flatten_list(args) @@ -74,13 +72,11 @@ def do_union(a, b): def lists_intersect(*args, **kwargs): lists = args - flatten = kwargs.pop('flatten', False) + flatten = kwargs.pop("flatten", False) if kwargs: # Some unused kwargs remain - raise AnsibleFilterError( - f"lists_intersect() got unexpected keywords arguments: {', '.join(kwargs.keys())}" - ) + raise AnsibleFilterError(f"lists_intersect() got unexpected keywords arguments: {', '.join(kwargs.keys())}") if flatten: lists = flatten_list(args) @@ -112,13 +108,11 @@ def do_intersect(a, b): def lists_difference(*args, **kwargs): lists = args - flatten = kwargs.pop('flatten', False) + flatten = kwargs.pop("flatten", False) if kwargs: # Some unused kwargs remain - raise AnsibleFilterError( - f"lists_difference() got unexpected keywords arguments: {', '.join(kwargs.keys())}" - ) + raise AnsibleFilterError(f"lists_difference() got unexpected keywords arguments: {', '.join(kwargs.keys())}") if flatten: lists = flatten_list(args) @@ -150,13 +144,11 @@ def do_difference(a, b): def lists_symmetric_difference(*args, **kwargs): lists = args - flatten = kwargs.pop('flatten', False) + flatten = kwargs.pop("flatten", False) if kwargs: # Some unused kwargs remain - raise AnsibleFilterError( - f"lists_difference() got unexpected keywords arguments: {', '.join(kwargs.keys())}" - ) + raise AnsibleFilterError(f"lists_difference() got unexpected keywords arguments: {', '.join(kwargs.keys())}") if flatten: lists = flatten_list(args) @@ -189,12 +181,12 @@ def do_symmetric_difference(a, b): class FilterModule: - ''' Ansible lists jinja2 filters ''' + """Ansible lists jinja2 filters""" def filters(self): return { - 'lists_union': lists_union, - 'lists_intersect': lists_intersect, - 'lists_difference': lists_difference, - 'lists_symmetric_difference': lists_symmetric_difference, + "lists_union": lists_union, + "lists_intersect": lists_intersect, + "lists_difference": lists_difference, + "lists_symmetric_difference": lists_symmetric_difference, } diff --git a/plugins/filter/lists_mergeby.py b/plugins/filter/lists_mergeby.py index 1b49cbefaa7..0ca1d7bc6fc 100644 --- a/plugins/filter/lists_mergeby.py +++ b/plugins/filter/lists_mergeby.py @@ -202,11 +202,11 @@ from operator import itemgetter -def list_mergeby(x, y, index, recursive=False, list_merge='replace'): - '''Merge 2 lists by attribute 'index'. The function 'merge_hash' - from ansible.utils.vars is used. This function is used by the - function lists_mergeby. - ''' +def list_mergeby(x, y, index, recursive=False, list_merge="replace"): + """Merge 2 lists by attribute 'index'. The function 'merge_hash' + from ansible.utils.vars is used. This function is used by the + function lists_mergeby. + """ d = defaultdict(dict) for lst in (x, y): @@ -220,13 +220,13 @@ def list_mergeby(x, y, index, recursive=False, list_merge='replace'): def lists_mergeby(*terms, **kwargs): - '''Merge 2 or more lists by attribute 'index'. 
+ """Merge 2 or more lists by attribute 'index'. To learn details + on how to use the parameters 'recursive' and 'list_merge' see + the filter ansible.builtin.combine. + """ - recursive = kwargs.pop('recursive', False) - list_merge = kwargs.pop('list_merge', 'replace') + recursive = kwargs.pop("recursive", False) + list_merge = kwargs.pop("list_merge", "replace") if kwargs: raise AnsibleFilterError("'recursive' and 'list_merge' are the only valid keyword arguments.") if len(terms) < 2: @@ -236,8 +236,7 @@ def lists_mergeby(*terms, **kwargs): flat_list = [] for sublist in terms[:-1]: if not isinstance(sublist, Sequence): - msg = ("All arguments before the argument index for community.general.lists_mergeby " - "must be lists. %s is %s") + msg = "All arguments before the argument index for community.general.lists_mergeby must be lists. %s is %s" raise AnsibleFilterError(msg % (sublist, type(sublist))) if len(sublist) > 0: if all(isinstance(lst, Sequence) for lst in sublist): @@ -256,8 +255,7 @@ def lists_mergeby(*terms, **kwargs): index = terms[-1] if not isinstance(index, str): - msg = ("First argument after the lists for community.general.lists_mergeby must be string. " - "%s is %s") + msg = "First argument after the lists for community.general.lists_mergeby must be a string. %s is %s" raise AnsibleFilterError(msg % (index, type(index))) high_to_low_prio_list_iterator = reversed(lists) @@ -269,9 +267,9 @@ class FilterModule: - ''' Ansible list filters ''' + """Ansible list filters""" def filters(self): return { - 'lists_mergeby': lists_mergeby, + "lists_mergeby": lists_mergeby, }
diff --git a/plugins/filter/random_mac.py b/plugins/filter/random_mac.py index e5e6201f1cd..36cd275718e 100644 --- a/plugins/filter/random_mac.py +++ b/plugins/filter/random_mac.py @@ -47,29 +47,29 @@ def random_mac(value, seed=None): - ''' takes string prefix, and return it completed with random bytes - to get a complete 6 bytes MAC address ''' + """Take a string prefix and return it completed with random bytes + to form a complete 6-byte MAC address""" if not isinstance(value, str): - raise AnsibleFilterError(f'Invalid value type ({type(value)}) for random_mac ({value})') + raise AnsibleFilterError(f"Invalid value type ({type(value)}) for random_mac ({value})") value = value.lower() - mac_items = value.split(':') + mac_items = value.split(":") if len(mac_items) > 5: - raise AnsibleFilterError(f'Invalid value ({value}) for random_mac: 5 colon(:) separated items max') + raise AnsibleFilterError(f"Invalid value ({value}) for random_mac: 5 colon(:) separated items max") err = "" for mac in mac_items: if not mac: err += ",empty item" continue - if not re.match('[a-f0-9]{2}', mac): + if not re.match("[a-f0-9]{2}", mac): err += f",{mac} not hexa byte" - err = err.strip(',') + err = err.strip(",") if err: - raise AnsibleFilterError(f'Invalid value ({value}) for random_mac: {err}') + raise AnsibleFilterError(f"Invalid value ({value}) for random_mac: {err}") if seed is None: r = SystemRandom() @@ -79,13 +79,14 @@ def random_mac(value, seed=None): v = r.randint(68719476736, 1099511627775) # Select first n chars to complement input prefix remain = 2 * (6 - len(mac_items)) - rnd = f'{v:x}'[:remain] - return value + re.sub(r'(..)', r':\1', rnd) + rnd = f"{v:x}"[:remain] + return value + re.sub(r"(..)", r":\1", rnd) class FilterModule: - ''' Ansible jinja2 filters ''' + 
"""Ansible jinja2 filters""" + def filters(self): return { - 'random_mac': random_mac, + "random_mac": random_mac, } diff --git a/plugins/filter/remove_keys.py b/plugins/filter/remove_keys.py index d498418ec06..743471127f1 100644 --- a/plugins/filter/remove_keys.py +++ b/plugins/filter/remove_keys.py @@ -101,10 +101,11 @@ from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( _keys_filter_params, - _keys_filter_target_str) + _keys_filter_target_str, +) -def remove_keys(data, target=None, matching_parameter='equal'): +def remove_keys(data, target=None, matching_parameter="equal"): """remove specific keys from dictionaries in a list""" # test parameters @@ -112,16 +113,20 @@ def remove_keys(data, target=None, matching_parameter='equal'): # test and transform target tt = _keys_filter_target_str(target, matching_parameter) - if matching_parameter == 'equal': + if matching_parameter == "equal": + def keep_key(key): return key not in tt - elif matching_parameter == 'starts_with': + elif matching_parameter == "starts_with": + def keep_key(key): return not key.startswith(tt) - elif matching_parameter == 'ends_with': + elif matching_parameter == "ends_with": + def keep_key(key): return not key.endswith(tt) - elif matching_parameter == 'regex': + elif matching_parameter == "regex": + def keep_key(key): return tt.match(key) is None @@ -129,8 +134,7 @@ def keep_key(key): class FilterModule: - def filters(self): return { - 'remove_keys': remove_keys, + "remove_keys": remove_keys, } diff --git a/plugins/filter/replace_keys.py b/plugins/filter/replace_keys.py index 887d00ab9fc..c53b31dc914 100644 --- a/plugins/filter/replace_keys.py +++ b/plugins/filter/replace_keys.py @@ -131,10 +131,11 @@ from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( _keys_filter_params, - _keys_filter_target_dict) + _keys_filter_target_dict, +) -def replace_keys(data, target=None, matching_parameter='equal'): +def replace_keys(data, target=None, matching_parameter="equal"): """replace specific keys in a list of dictionaries""" # test parameters @@ -142,25 +143,29 @@ def replace_keys(data, target=None, matching_parameter='equal'): # test and transform target tz = _keys_filter_target_dict(target, matching_parameter) - if matching_parameter == 'equal': + if matching_parameter == "equal": + def replace_key(key): for b, a in tz: if key == b: return a return key - elif matching_parameter == 'starts_with': + elif matching_parameter == "starts_with": + def replace_key(key): for b, a in tz: if key.startswith(b): return a return key - elif matching_parameter == 'ends_with': + elif matching_parameter == "ends_with": + def replace_key(key): for b, a in tz: if key.endswith(b): return a return key - elif matching_parameter == 'regex': + elif matching_parameter == "regex": + def replace_key(key): for b, a in tz: if b.match(key): @@ -171,8 +176,7 @@ def replace_key(key): class FilterModule: - def filters(self): return { - 'replace_keys': replace_keys, + "replace_keys": replace_keys, } diff --git a/plugins/filter/reveal_ansible_type.py b/plugins/filter/reveal_ansible_type.py index 718bc0c750b..8b2c956bffc 100644 --- a/plugins/filter/reveal_ansible_type.py +++ b/plugins/filter/reveal_ansible_type.py @@ -140,8 +140,5 @@ def reveal_ansible_type(data, alias=None): class FilterModule: - def filters(self): - return { - 'reveal_ansible_type': reveal_ansible_type - } + return {"reveal_ansible_type": reveal_ansible_type} diff --git a/plugins/filter/time.py b/plugins/filter/time.py index 
3c1bcf7c884..a29961a1ea3 100644 --- a/plugins/filter/time.py +++ b/plugins/filter/time.py @@ -9,30 +9,30 @@ UNIT_FACTORS = { - 'ms': [], - 's': [1000], - 'm': [1000, 60], - 'h': [1000, 60, 60], - 'd': [1000, 60, 60, 24], - 'w': [1000, 60, 60, 24, 7], - 'mo': [1000, 60, 60, 24, 30], - 'y': [1000, 60, 60, 24, 365], + "ms": [], + "s": [1000], + "m": [1000, 60], + "h": [1000, 60, 60], + "d": [1000, 60, 60, 24], + "w": [1000, 60, 60, 24, 7], + "mo": [1000, 60, 60, 24, 30], + "y": [1000, 60, 60, 24, 365], } UNIT_TO_SHORT_FORM = { - 'millisecond': 'ms', - 'msec': 'ms', - 'msecond': 'ms', - 'sec': 's', - 'second': 's', - 'hour': 'h', - 'min': 'm', - 'minute': 'm', - 'day': 'd', - 'week': 'w', - 'month': 'mo', - 'year': 'y', + "millisecond": "ms", + "msec": "ms", + "msecond": "ms", + "sec": "s", + "second": "s", + "hour": "h", + "min": "m", + "minute": "m", + "day": "d", + "week": "w", + "month": "mo", + "year": "y", } @@ -43,8 +43,8 @@ def multiply(factors): return result -def to_time_unit(human_time, unit='ms', **kwargs): - ''' Return a time unit from a human readable string ''' +def to_time_unit(human_time, unit="ms", **kwargs): + """Return a time unit from a human readable string""" # No need to handle 0 if human_time == "0": @@ -53,35 +53,35 @@ def to_time_unit(human_time, unit='ms', **kwargs): unit_to_short_form = UNIT_TO_SHORT_FORM unit_factors = UNIT_FACTORS - unit = unit_to_short_form.get(unit.rstrip('s'), unit) + unit = unit_to_short_form.get(unit.rstrip("s"), unit) if unit not in unit_factors: - raise AnsibleFilterError(( - f"to_time_unit() can not convert to the following unit: {unit}. Available units (singular or plural):" - f"{', '.join(unit_to_short_form.keys())}. Available short units: {', '.join(unit_factors.keys())}" - )) - - if 'year' in kwargs: - unit_factors['y'] = unit_factors['y'][:-1] + [kwargs.pop('year')] - if 'month' in kwargs: - unit_factors['mo'] = unit_factors['mo'][:-1] + [kwargs.pop('month')] + raise AnsibleFilterError( + ( + f"to_time_unit() can not convert to the following unit: {unit}. Available units (singular or plural):" + f"{', '.join(unit_to_short_form.keys())}. 
Available short units: {', '.join(unit_factors.keys())}" + ) + ) + + if "year" in kwargs: + unit_factors["y"] = unit_factors["y"][:-1] + [kwargs.pop("year")] + if "month" in kwargs: + unit_factors["mo"] = unit_factors["mo"][:-1] + [kwargs.pop("month")] if kwargs: raise AnsibleFilterError(f"to_time_unit() got unknown keyword arguments: {', '.join(kwargs.keys())}") result = 0 for h_time_string in human_time.split(): - res = re.match(r'(-?\d+)(\w+)', h_time_string) + res = re.match(r"(-?\d+)(\w+)", h_time_string) if not res: - raise AnsibleFilterError( - f"to_time_unit() can not interpret following string: {human_time}") + raise AnsibleFilterError(f"to_time_unit() cannot interpret the following string: {human_time}") h_time_int = int(res.group(1)) h_time_unit = res.group(2) - h_time_unit = unit_to_short_form.get(h_time_unit.rstrip('s'), h_time_unit) + h_time_unit = unit_to_short_form.get(h_time_unit.rstrip("s"), h_time_unit) if h_time_unit not in unit_factors: - raise AnsibleFilterError( - f"to_time_unit() can not interpret following string: {human_time}") + raise AnsibleFilterError(f"to_time_unit() cannot interpret the following string: {human_time}") time_in_milliseconds = h_time_int * multiply(unit_factors[h_time_unit]) result += time_in_milliseconds @@ -89,59 +89,59 @@ def to_time_unit(human_time, unit='ms', **kwargs): def to_milliseconds(human_time, **kwargs): - ''' Return milli seconds from a human readable string ''' - return to_time_unit(human_time, 'ms', **kwargs) + """Return milliseconds from a human readable string""" + return to_time_unit(human_time, "ms", **kwargs) def to_seconds(human_time, **kwargs): - ''' Return seconds from a human readable string ''' - return to_time_unit(human_time, 's', **kwargs) + """Return seconds from a human readable string""" + return to_time_unit(human_time, "s", **kwargs) def to_minutes(human_time, **kwargs): - ''' Return minutes from a human readable string ''' - return to_time_unit(human_time, 'm', **kwargs) + """Return minutes from a human readable string""" + return to_time_unit(human_time, "m", **kwargs) def to_hours(human_time, **kwargs): - ''' Return hours from a human readable string ''' - return to_time_unit(human_time, 'h', **kwargs) + """Return hours from a human readable string""" + return to_time_unit(human_time, "h", **kwargs) def to_days(human_time, **kwargs): - ''' Return days from a human readable string ''' - return to_time_unit(human_time, 'd', **kwargs) + """Return days from a human readable string""" + return to_time_unit(human_time, "d", **kwargs) def to_weeks(human_time, **kwargs): - ''' Return weeks from a human readable string ''' - return to_time_unit(human_time, 'w', **kwargs) + """Return weeks from a human readable string""" + return to_time_unit(human_time, "w", **kwargs) def to_months(human_time, **kwargs): - ''' Return months from a human readable string ''' - return to_time_unit(human_time, 'mo', **kwargs) + """Return months from a human readable string""" + return to_time_unit(human_time, "mo", **kwargs) def to_years(human_time, **kwargs): - ''' Return years from a human readable string ''' - return to_time_unit(human_time, 'y', **kwargs) + """Return years from a human readable string""" + return to_time_unit(human_time, "y", **kwargs) class FilterModule: - ''' Ansible time jinja2 filters ''' + """Ansible time jinja2 filters""" def filters(self): filters = { - 'to_time_unit': to_time_unit, - 'to_milliseconds': to_milliseconds, - 'to_seconds': to_seconds, - 'to_minutes': to_minutes, - 'to_hours': to_hours, - 'to_days': to_days, - 'to_weeks': to_weeks, - 'to_months': to_months, - 'to_years': to_years, + "to_time_unit": to_time_unit, + "to_milliseconds": to_milliseconds, + "to_seconds": to_seconds, + "to_minutes": to_minutes, + "to_hours": to_hours, + "to_days": to_days, + "to_weeks": to_weeks, + "to_months": to_months, + "to_years": to_years, } return filters
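For orientation, every helper above funnels into to_time_unit, which parses space-separated "<integer><unit>" tokens into milliseconds and then divides by the factor of the requested target unit. A minimal sketch (the import path assumes an installed community.general collection; since the code uses true division, results should come back as floats):

    from ansible_collections.community.general.plugins.filter.time import to_minutes, to_seconds

    print(to_seconds("1h"))      # 3600.0  (1000*60*60 ms, divided by 1000)
    print(to_minutes("2h 30m"))  # 150.0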
diff --git a/plugins/filter/to_ini.py b/plugins/filter/to_ini.py index 718dc3b886e..d48532336a6 100644 --- a/plugins/filter/to_ini.py +++ b/plugins/filter/to_ini.py @@ -1,4 +1,3 @@ - # Copyright (c) 2023, Steffen Scheib # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -55,7 +54,7 @@ class IniParser(ConfigParser): - ''' Implements a configparser which sets the correct optionxform ''' + """Implements a configparser which sets the correct optionxform""" def __init__(self): super().__init__(interpolation=None) @@ -63,23 +62,21 @@ def __init__(self): def to_ini(obj): - ''' Read the given dict and return an INI formatted string ''' + """Read the given dict and return an INI formatted string""" if not isinstance(obj, Mapping): - raise AnsibleFilterError(f'to_ini requires a dict, got {type(obj)}') + raise AnsibleFilterError(f"to_ini requires a dict, got {type(obj)}") ini_parser = IniParser() try: ini_parser.read_dict(obj) except Exception as ex: - raise AnsibleFilterError('to_ini failed to parse given dict:' - f'{ex}', orig_exc=ex) + raise AnsibleFilterError(f"to_ini failed to parse given dict: {ex}", orig_exc=ex) # catching empty dicts if obj == dict(): - raise AnsibleFilterError('to_ini received an empty dict. ' - 'An empty dict cannot be converted.') + raise AnsibleFilterError("to_ini received an empty dict. 
An empty dict cannot be converted.") config = StringIO() ini_parser.write(config) @@ -87,14 +84,11 @@ def to_ini(obj): # config.getvalue() returns two \n at the end # with the below insanity, we remove the very last character of # the resulting string - return ''.join(config.getvalue().rsplit(config.getvalue()[-1], 1)) + return "".join(config.getvalue().rsplit(config.getvalue()[-1], 1)) class FilterModule: - ''' Query filter ''' + """Query filter""" def filters(self): - - return { - 'to_ini': to_ini - } + return {"to_ini": to_ini} diff --git a/plugins/filter/to_prettytable.py b/plugins/filter/to_prettytable.py index 154944a4524..02b902c4f76 100644 --- a/plugins/filter/to_prettytable.py +++ b/plugins/filter/to_prettytable.py @@ -110,6 +110,7 @@ try: import prettytable + HAS_PRETTYTABLE = True except ImportError: HAS_PRETTYTABLE = False @@ -125,6 +126,7 @@ class TypeValidationError(AnsibleFilterError): obj: The object with incorrect type expected: Description of expected type """ + def __init__(self, obj, expected): type_name = "string" if isinstance(obj, str) else type(obj).__name__ super().__init__(f"Expected {expected}, got a {type_name}") @@ -142,10 +144,7 @@ def _validate_list_param(param, param_name, ensure_strings=True): AnsibleFilterError: If validation fails """ # Map parameter names to their original error message format - error_messages = { - "column_order": "a list of column names", - "header_names": "a list of header names" - } + error_messages = {"column_order": "a list of column names", "header_names": "a list of header names"} # Use the specific error message if available, otherwise use a generic one error_msg = error_messages.get(param_name, f"a list for {param_name}") @@ -182,9 +181,9 @@ def _match_key(item_dict, lookup_key): # Try boolean conversion for 'true'/'false' strings if isinstance(lookup_key, str): - if lookup_key.lower() == 'true' and True in item_dict: + if lookup_key.lower() == "true" and True in item_dict: return True - if lookup_key.lower() == 'false' and False in item_dict: + if lookup_key.lower() == "false" and False in item_dict: return False # Try numeric conversion for string numbers @@ -258,9 +257,7 @@ def to_prettytable(data, *args, **kwargs): String containing the ASCII table """ if not HAS_PRETTYTABLE: - raise AnsibleFilterError( - 'You need to install "prettytable" Python module to use this filter' - ) + raise AnsibleFilterError('You need to install "prettytable" Python module to use this filter') # === Input validation === # Validate list type @@ -278,7 +275,7 @@ def to_prettytable(data, *args, **kwargs): # === Process column order === # Handle both positional and keyword column_order - column_order = kwargs.pop('column_order', None) + column_order = kwargs.pop("column_order", None) # Check for conflict between args and column_order if args and column_order is not None: @@ -295,7 +292,8 @@ def to_prettytable(data, *args, **kwargs): # Validate column_order doesn't exceed the number of fields (skip if data is empty) if data and len(column_order) > max_fields: raise AnsibleFilterError( - f"'column_order' has more elements ({len(column_order)}) than available fields in data ({max_fields})") + f"'column_order' has more elements ({len(column_order)}) than available fields in data ({max_fields})" + ) # === Process headers === # Determine field names and ensure they are strings @@ -306,24 +304,26 @@ def to_prettytable(data, *args, **kwargs): field_names = [to_text(k) for k in sample_dict] # Process custom headers - header_names = kwargs.pop('header_names', 
None) + header_names = kwargs.pop("header_names", None) if header_names is not None: _validate_list_param(header_names, "header_names") # Validate header_names doesn't exceed the number of fields (skip if data is empty) if data and len(header_names) > max_fields: raise AnsibleFilterError( - f"'header_names' has more elements ({len(header_names)}) than available fields in data ({max_fields})") + f"'header_names' has more elements ({len(header_names)}) than available fields in data ({max_fields})" + ) # Validate that column_order and header_names have the same size if both provided if column_order is not None and len(column_order) != len(header_names): raise AnsibleFilterError( f"'column_order' and 'header_names' must have the same number of elements. " - f"Got {len(column_order)} columns and {len(header_names)} headers.") + f"Got {len(column_order)} columns and {len(header_names)} headers." + ) # === Process alignments === # Get column alignments and validate - column_alignments = kwargs.pop('column_alignments', {}) + column_alignments = kwargs.pop("column_alignments", {}) valid_alignments = {"left", "center", "right", "l", "c", "r"} # Validate column_alignments is a dictionary @@ -344,12 +344,14 @@ def to_prettytable(data, *args, **kwargs): if value.lower() not in valid_alignments: raise AnsibleFilterError( f"Invalid alignment '{value}' in 'column_alignments'. " - f"Valid alignments are: {', '.join(sorted(valid_alignments))}") + f"Valid alignments are: {', '.join(sorted(valid_alignments))}" + ) # Validate column_alignments doesn't have more keys than fields (skip if data is empty) if data and len(column_alignments) > max_fields: raise AnsibleFilterError( - f"'column_alignments' has more elements ({len(column_alignments)}) than available fields in data ({max_fields})") + f"'column_alignments' has more elements ({len(column_alignments)}) than available fields in data ({max_fields})" + ) # Check for unknown parameters if kwargs: @@ -404,6 +406,4 @@ class FilterModule: """Ansible core jinja2 filters.""" def filters(self): - return { - 'to_prettytable': to_prettytable - } + return {"to_prettytable": to_prettytable} diff --git a/plugins/filter/to_yaml.py b/plugins/filter/to_yaml.py index 82e3f5cfa00..1fef077c643 100644 --- a/plugins/filter/to_yaml.py +++ b/plugins/filter/to_yaml.py @@ -8,16 +8,19 @@ from collections.abc import Mapping, Set from yaml import dump + try: from yaml.cyaml import CSafeDumper as SafeDumper except ImportError: from yaml import SafeDumper # type: ignore from ansible.module_utils.common.collections import is_sequence + try: # This is ansible-core 2.19+ from ansible.utils.vars import transform_to_native_types from ansible.parsing.vault import VaultHelper, VaultLib + HAS_TRANSFORM_TO_NATIVE_TYPES = True except ImportError: HAS_TRANSFORM_TO_NATIVE_TYPES = False @@ -36,7 +39,9 @@ def _to_native_types_compat(value: t.Any, *, redact_value: str | None) -> t.Any: # But that's fine, since this code path isn't taken on ansible-core 2.19+ anyway. 
if isinstance(value, Mapping): return { - _to_native_types_compat(key, redact_value=redact_value): _to_native_types_compat(val, redact_value=redact_value) + _to_native_types_compat(key, redact_value=redact_value): _to_native_types_compat( + val, redact_value=redact_value + ) for key, val in value.items() } if isinstance(value, Set): @@ -80,11 +85,15 @@ def remove_all_tags(value: t.Any, *, redact_sensitive_values: bool = False) -> t return _to_native_types_compat( # type: ignore[unreachable] value, - redact_value="" if redact_sensitive_values else None, # same string as in ansible-core 2.19 by transform_to_native_types() + redact_value="" + if redact_sensitive_values + else None, # same string as in ansible-core 2.19 by transform_to_native_types() ) -def to_yaml(value: t.Any, *, redact_sensitive_values: bool = False, default_flow_style: bool | None = None, **kwargs) -> str: +def to_yaml( + value: t.Any, *, redact_sensitive_values: bool = False, default_flow_style: bool | None = None, **kwargs +) -> str: """Serialize input as terse flow-style YAML.""" return dump( remove_all_tags(value, redact_sensitive_values=redact_sensitive_values), @@ -95,7 +104,9 @@ def to_yaml(value: t.Any, *, redact_sensitive_values: bool = False, default_flow ) -def to_nice_yaml(value: t.Any, *, redact_sensitive_values: bool = False, indent: int = 2, default_flow_style: bool = False, **kwargs) -> str: +def to_nice_yaml( + value: t.Any, *, redact_sensitive_values: bool = False, indent: int = 2, default_flow_style: bool = False, **kwargs +) -> str: """Serialize input as verbose multi-line YAML.""" return to_yaml( value, @@ -109,6 +120,6 @@ def to_nice_yaml(value: t.Any, *, redact_sensitive_values: bool = False, indent: class FilterModule: def filters(self): return { - 'to_yaml': to_yaml, - 'to_nice_yaml': to_nice_yaml, + "to_yaml": to_yaml, + "to_nice_yaml": to_nice_yaml, }
diff --git a/plugins/filter/unicode_normalize.py b/plugins/filter/unicode_normalize.py index 410ae763351..ca5abfe2f5e 100644 --- a/plugins/filter/unicode_normalize.py +++ b/plugins/filter/unicode_normalize.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Andrew Pantuso (@ajpantuso) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -55,7 +54,7 @@ from ansible.errors import AnsibleFilterTypeError as AnsibleTypeError # type: ignore -def unicode_normalize(data, form='NFC'): +def unicode_normalize(data, form="NFC"): """Applies normalization to 'unicode' strings. Args: @@ -70,7 +69,7 @@ def unicode_normalize(data, form='NFC'): if not isinstance(data, str): raise AnsibleTypeError(f"{type(data)} is not a valid input type") - if form not in ('NFC', 'NFD', 'NFKC', 'NFKD'): + if form not in ("NFC", "NFD", "NFKC", "NFKD"): raise AnsibleFilterError(f"{form!r} is not a valid form") return normalize(form, data) @@ -79,5 +78,5 @@ def unicode_normalize(data, form='NFC'): class FilterModule: def filters(self): return { - 'unicode_normalize': unicode_normalize, + "unicode_normalize": unicode_normalize, }
diff --git a/plugins/filter/version_sort.py b/plugins/filter/version_sort.py index fc3f5e01768..64c3a3d5735 100644 --- a/plugins/filter/version_sort.py +++ b/plugins/filter/version_sort.py @@ -37,14 +37,12 @@ def version_sort(value, reverse=False): - '''Sort a list according to loose versions so that e.g. 2.9 is smaller than 2.10''' + """Sort a list according to loose versions so that e.g. 2.9 is smaller than 2.10""" return sorted(value, key=LooseVersion, reverse=reverse) class FilterModule: - ''' Version sort filter ''' + """Version sort filter""" def filters(self): - return { - 'version_sort': version_sort - } + return {"version_sort": version_sort}
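The version_sort filter above leans entirely on LooseVersion as the sort key, which is what makes "2.10" sort after "2.9" where a plain string sort would not. A quick sketch (the import path assumes an installed community.general collection):

    from ansible_collections.community.general.plugins.filter.version_sort import version_sort

    print(version_sort(["2.10", "2.9", "2.1"]))  # ['2.1', '2.9', '2.10']
    print(sorted(["2.10", "2.9", "2.1"]))        # plain sort: ['2.1', '2.10', '2.9']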
diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py index 06cd3493fc7..45e01aecbb5 100644 --- a/plugins/inventory/cobbler.py +++ b/plugins/inventory/cobbler.py @@ -140,12 +140,13 @@ # xmlrpc try: import xmlrpc.client as xmlrpc_client + HAS_XMLRPC_CLIENT = True except ImportError: HAS_XMLRPC_CLIENT = False -class TimeoutTransport (xmlrpc_client.SafeTransport): +class TimeoutTransport(xmlrpc_client.SafeTransport): def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): super().__init__() self._timeout = timeout @@ -158,21 +159,21 @@ def make_connection(self, host): class InventoryModule(BaseInventoryPlugin, Cacheable): - ''' Host inventory parser for ansible using cobbler as source. ''' + """Host inventory parser for ansible using cobbler as source.""" - NAME = 'community.general.cobbler' + NAME = "community.general.cobbler" def __init__(self): super().__init__() self.cache_key = None if not HAS_XMLRPC_CLIENT: - raise AnsibleError('Could not import xmlrpc client library') + raise AnsibleError("Could not import xmlrpc client library") def verify_file(self, path): valid = False if super().verify_file(path): - if path.endswith(('cobbler.yaml', 'cobbler.yml')): + if path.endswith(("cobbler.yaml", "cobbler.yml")): valid = True else: self.display.vvv('Skipping due to inventory source not ending in "cobbler.yaml" nor "cobbler.yml"') return valid @@ -183,14 +184,14 @@ def _init_cache(self): self._cache[self.cache_key] = {} def _reload_cache(self): - if self.get_option('cache_fallback'): - self.display.vvv('Cannot connect to server, loading cache\n') - self._options['cache_timeout'] = 0 + if self.get_option("cache_fallback"): + self.display.vvv("Cannot connect to server, loading cache\n") + self._options["cache_timeout"] = 0 self.load_cache_plugin() self._cache.get(self.cache_key, {}) def _get_profiles(self): - if not self.use_cache or 'profiles' not in self._cache.get(self.cache_key, {}): + if not self.use_cache or "profiles" not in self._cache.get(self.cache_key, {}): try: if self.token is not None: data = self.cobbler.get_profiles(self.token) @@ -200,12 +201,12 @@ def _get_profiles(self): self._reload_cache() else: self._init_cache() - self._cache[self.cache_key]['profiles'] = data + self._cache[self.cache_key]["profiles"] = data - return self._cache[self.cache_key]['profiles'] + return self._cache[self.cache_key]["profiles"] def _get_systems(self): - if not self.use_cache or 'systems' not in self._cache.get(self.cache_key, {}): + if not self.use_cache or "systems" not in self._cache.get(self.cache_key, {}): try: if self.token is not None: data = self.cobbler.get_systems(self.token) @@ -217,19 +218,21 @@ def _get_systems(self): for i, host in enumerate(data): self.display.vvvv(f"Gathering all facts for {host['name']}\n") if self.token is not None: - data[i] = self.cobbler.get_system_as_rendered(host['name'], self.token) + data[i] = self.cobbler.get_system_as_rendered(host["name"], self.token) else: - data[i] = self.cobbler.get_system_as_rendered(host['name']) + data[i] = self.cobbler.get_system_as_rendered(host["name"]) except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError): self._reload_cache() else: self._init_cache() - self._cache[self.cache_key]['systems'] = data + self._cache[self.cache_key]["systems"] = 
data - return self._cache[self.cache_key]['systems'] + return self._cache[self.cache_key]["systems"] def _add_safe_group_name(self, group, child=None): - group_name = self.inventory.add_group(to_safe_group_name(f"{self.get_option('group_prefix')}{group.lower().replace(' ', '')}")) + group_name = self.inventory.add_group( + to_safe_group_name(f"{self.get_option('group_prefix')}{group.lower().replace(' ', '')}") + ) if child is not None: self.inventory.add_child(group_name, child) return group_name @@ -241,101 +244,103 @@ def _exclude_profile(self, profile): return profile in self.exclude_profiles def parse(self, inventory, loader, path, cache=True): - super().parse(inventory, loader, path) # read config from file, this sets 'options' self._read_config_data(path) # get connection host - self.cobbler_url = self.get_option('url') - self.display.vvvv(f'Connecting to {self.cobbler_url}\n') - - if 'connection_timeout' in self._options: - self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True, - transport=TimeoutTransport(timeout=self.get_option('connection_timeout'))) + self.cobbler_url = self.get_option("url") + self.display.vvvv(f"Connecting to {self.cobbler_url}\n") + + if "connection_timeout" in self._options: + self.cobbler = xmlrpc_client.Server( + self.cobbler_url, + allow_none=True, + transport=TimeoutTransport(timeout=self.get_option("connection_timeout")), + ) else: self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True) self.token = None - if self.get_option('user') is not None: - self.token = self.cobbler.login(str(self.get_option('user')), str(self.get_option('password'))) + if self.get_option("user") is not None: + self.token = self.cobbler.login(str(self.get_option("user")), str(self.get_option("password"))) self.cache_key = self.get_cache_key(path) - self.use_cache = cache and self.get_option('cache') + self.use_cache = cache and self.get_option("cache") - self.exclude_mgmt_classes = self.get_option('exclude_mgmt_classes') - self.include_mgmt_classes = self.get_option('include_mgmt_classes') - self.exclude_profiles = self.get_option('exclude_profiles') - self.include_profiles = self.get_option('include_profiles') - self.group_by = self.get_option('group_by') - self.inventory_hostname = self.get_option('inventory_hostname') - self.facts_level = self.get_option('facts_level') + self.exclude_mgmt_classes = self.get_option("exclude_mgmt_classes") + self.include_mgmt_classes = self.get_option("include_mgmt_classes") + self.exclude_profiles = self.get_option("exclude_profiles") + self.include_profiles = self.get_option("include_profiles") + self.group_by = self.get_option("group_by") + self.inventory_hostname = self.get_option("inventory_hostname") + self.facts_level = self.get_option("facts_level") for profile in self._get_profiles(): - if profile['parent']: + if profile["parent"]: self.display.vvvv(f"Processing profile {profile['name']} with parent {profile['parent']}\n") - if not self._exclude_profile(profile['parent']): - parent_group_name = self._add_safe_group_name(profile['parent']) - self.display.vvvv(f'Added profile parent group {parent_group_name}\n') - if not self._exclude_profile(profile['name']): - group_name = self._add_safe_group_name(profile['name']) - self.display.vvvv(f'Added profile group {group_name}\n') + if not self._exclude_profile(profile["parent"]): + parent_group_name = self._add_safe_group_name(profile["parent"]) + self.display.vvvv(f"Added profile parent group {parent_group_name}\n") + if not 
self._exclude_profile(profile["name"]): + group_name = self._add_safe_group_name(profile["name"]) + self.display.vvvv(f"Added profile group {group_name}\n") self.inventory.add_child(parent_group_name, group_name) else: self.display.vvvv(f"Processing profile {profile['name']} without parent\n") # Create a hierarchy of profile names - profile_elements = profile['name'].split('-') + profile_elements = profile["name"].split("-") i = 0 while i < len(profile_elements) - 1: - profile_group = '-'.join(profile_elements[0:i + 1]) - profile_group_child = '-'.join(profile_elements[0:i + 2]) + profile_group = "-".join(profile_elements[0 : i + 1]) + profile_group_child = "-".join(profile_elements[0 : i + 2]) if self._exclude_profile(profile_group): - self.display.vvvv(f'Excluding profile {profile_group}\n') + self.display.vvvv(f"Excluding profile {profile_group}\n") break group_name = self._add_safe_group_name(profile_group) - self.display.vvvv(f'Added profile group {group_name}\n') + self.display.vvvv(f"Added profile group {group_name}\n") child_group_name = self._add_safe_group_name(profile_group_child) - self.display.vvvv(f'Added profile child group {child_group_name} to {group_name}\n') + self.display.vvvv(f"Added profile child group {child_group_name} to {group_name}\n") self.inventory.add_child(group_name, child_group_name) i = i + 1 # Add default group for this inventory if specified - self.group = to_safe_group_name(self.get_option('group')) - if self.group is not None and self.group != '': + self.group = to_safe_group_name(self.get_option("group")) + if self.group is not None and self.group != "": self.inventory.add_group(self.group) - self.display.vvvv(f'Added site group {self.group}\n') + self.display.vvvv(f"Added site group {self.group}\n") ip_addresses = {} ipv6_addresses = {} for host in self._get_systems(): # Get the FQDN for the host and add it to the right groups - if self.inventory_hostname == 'system': - hostname = make_unsafe(host['name']) # None + if self.inventory_hostname == "system": + hostname = make_unsafe(host["name"]) # None else: - hostname = make_unsafe(host['hostname']) # None - interfaces = host['interfaces'] + hostname = make_unsafe(host["hostname"]) # None + interfaces = host["interfaces"] - if set(host['mgmt_classes']) & set(self.include_mgmt_classes): + if set(host["mgmt_classes"]) & set(self.include_mgmt_classes): self.display.vvvv(f"Including host {host['name']} in mgmt_classes {host['mgmt_classes']}\n") else: - if self._exclude_profile(host['profile']): + if self._exclude_profile(host["profile"]): self.display.vvvv(f"Excluding host {host['name']} in profile {host['profile']}\n") continue - if set(host['mgmt_classes']) & set(self.exclude_mgmt_classes): + if set(host["mgmt_classes"]) & set(self.exclude_mgmt_classes): self.display.vvvv(f"Excluding host {host['name']} in mgmt_classes {host['mgmt_classes']}\n") continue # hostname is often empty for non-static IP hosts - if hostname == '': + if hostname == "": for iname, ivalue in interfaces.items(): - if ivalue['management'] or not ivalue['static']: - this_dns_name = ivalue.get('dns_name', None) + if ivalue["management"] or not ivalue["static"]: + this_dns_name = ivalue.get("dns_name", None) if this_dns_name is not None and this_dns_name != "": hostname = make_unsafe(this_dns_name) - self.display.vvvv(f'Set hostname to {hostname} from {iname}\n') + self.display.vvvv(f"Set hostname to {hostname} from {iname}\n") - if hostname == '': + if hostname == "": self.display.vvvv(f"Cannot determine hostname for host 
{host['name']}, skipping\n") continue @@ -343,21 +348,21 @@ def parse(self, inventory, loader, path, cache=True): self.display.vvvv(f"Added host {host['name']} hostname {hostname}\n") # Add host to profile group - if host['profile'] != '': - group_name = self._add_safe_group_name(host['profile'], child=hostname) - self.display.vvvv(f'Added host {hostname} to profile group {group_name}\n') + if host["profile"] != "": + group_name = self._add_safe_group_name(host["profile"], child=hostname) + self.display.vvvv(f"Added host {hostname} to profile group {group_name}\n") else: - self.display.warning(f'Host {hostname} has an empty profile\n') + self.display.warning(f"Host {hostname} has an empty profile\n") # Add host to groups specified by group_by fields for group_by in self.group_by: - if host[group_by] == '<>' or host[group_by] == '': + if host[group_by] == "<>" or host[group_by] == "": groups = [] else: groups = [host[group_by]] if isinstance(host[group_by], str) else host[group_by] for group in groups: group_name = self._add_safe_group_name(group, child=hostname) - self.display.vvvv(f'Added host {hostname} to group_by {group_by} group {group_name}\n') + self.display.vvvv(f"Added host {hostname} to group_by {group_by} group {group_name}\n") # Add to group for this inventory if self.group is not None: @@ -370,45 +375,45 @@ def parse(self, inventory, loader, path, cache=True): ipv6_address_first = None for iname, ivalue in interfaces.items(): # Set to first interface or management interface if defined or hostname matches dns_name - if ivalue['ip_address'] != "": + if ivalue["ip_address"] != "": if ip_address_first is None: - ip_address_first = ivalue['ip_address'] - if ivalue['management']: - ip_address = ivalue['ip_address'] - elif ivalue['dns_name'] == hostname and ip_address is None: - ip_address = ivalue['ip_address'] - if ivalue['ipv6_address'] != "": + ip_address_first = ivalue["ip_address"] + if ivalue["management"]: + ip_address = ivalue["ip_address"] + elif ivalue["dns_name"] == hostname and ip_address is None: + ip_address = ivalue["ip_address"] + if ivalue["ipv6_address"] != "": if ipv6_address_first is None: - ipv6_address_first = ivalue['ipv6_address'] - if ivalue['management']: - ipv6_address = ivalue['ipv6_address'] - elif ivalue['dns_name'] == hostname and ipv6_address is None: - ipv6_address = ivalue['ipv6_address'] + ipv6_address_first = ivalue["ipv6_address"] + if ivalue["management"]: + ipv6_address = ivalue["ipv6_address"] + elif ivalue["dns_name"] == hostname and ipv6_address is None: + ipv6_address = ivalue["ipv6_address"] # Collect all interface name mappings for adding to group vars - if self.get_option('want_ip_addresses'): - if ivalue['dns_name'] != "": - if ivalue['ip_address'] != "": - ip_addresses[ivalue['dns_name']] = ivalue['ip_address'] - if ivalue['ipv6_address'] != "": - ip_addresses[ivalue['dns_name']] = ivalue['ipv6_address'] + if self.get_option("want_ip_addresses"): + if ivalue["dns_name"] != "": + if ivalue["ip_address"] != "": + ip_addresses[ivalue["dns_name"]] = ivalue["ip_address"] + if ivalue["ipv6_address"] != "": + ip_addresses[ivalue["dns_name"]] = ivalue["ipv6_address"] # Add ip_address to host if defined, use first if no management or matched dns_name if ip_address is None and ip_address_first is not None: ip_address = ip_address_first if ip_address is not None: - self.inventory.set_variable(hostname, 'cobbler_ipv4_address', make_unsafe(ip_address)) + self.inventory.set_variable(hostname, "cobbler_ipv4_address", make_unsafe(ip_address)) if 
ipv6_address is None and ipv6_address_first is not None: ipv6_address = ipv6_address_first if ipv6_address is not None: - self.inventory.set_variable(hostname, 'cobbler_ipv6_address', make_unsafe(ipv6_address)) + self.inventory.set_variable(hostname, "cobbler_ipv6_address", make_unsafe(ipv6_address)) - if self.get_option('want_facts'): + if self.get_option("want_facts"): try: - self.inventory.set_variable(hostname, 'cobbler', make_unsafe(host)) + self.inventory.set_variable(hostname, "cobbler", make_unsafe(host)) except ValueError as e: self.display.warning(f"Could not set host info for {hostname}: {e}") - if self.get_option('want_ip_addresses'): - self.inventory.set_variable(self.group, 'cobbler_ipv4_addresses', make_unsafe(ip_addresses)) - self.inventory.set_variable(self.group, 'cobbler_ipv6_addresses', make_unsafe(ipv6_addresses)) + if self.get_option("want_ip_addresses"): + self.inventory.set_variable(self.group, "cobbler_ipv4_addresses", make_unsafe(ip_addresses)) + self.inventory.set_variable(self.group, "cobbler_ipv6_addresses", make_unsafe(ipv6_addresses)) diff --git a/plugins/inventory/gitlab_runners.py b/plugins/inventory/gitlab_runners.py index b482968c5f6..514229708d3 100644 --- a/plugins/inventory/gitlab_runners.py +++ b/plugins/inventory/gitlab_runners.py @@ -87,53 +87,54 @@ try: import gitlab + HAS_GITLAB = True except ImportError: HAS_GITLAB = False class InventoryModule(BaseInventoryPlugin, Constructable): - ''' Host inventory parser for ansible using GitLab API as source. ''' + """Host inventory parser for ansible using GitLab API as source.""" - NAME = 'community.general.gitlab_runners' + NAME = "community.general.gitlab_runners" def _populate(self): - gl = gitlab.Gitlab(self.get_option('server_url'), private_token=self.get_option('api_token')) - self.inventory.add_group('gitlab_runners') + gl = gitlab.Gitlab(self.get_option("server_url"), private_token=self.get_option("api_token")) + self.inventory.add_group("gitlab_runners") try: - if self.get_option('filter'): - runners = gl.runners.all(scope=self.get_option('filter')) + if self.get_option("filter"): + runners = gl.runners.all(scope=self.get_option("filter")) else: runners = gl.runners.all() for runner in runners: - host = make_unsafe(str(runner['id'])) - ip_address = runner['ip_address'] - host_attrs = make_unsafe(vars(gl.runners.get(runner['id']))['_attrs']) - self.inventory.add_host(host, group='gitlab_runners') - self.inventory.set_variable(host, 'ansible_host', make_unsafe(ip_address)) - if self.get_option('verbose_output', True): - self.inventory.set_variable(host, 'gitlab_runner_attributes', host_attrs) + host = make_unsafe(str(runner["id"])) + ip_address = runner["ip_address"] + host_attrs = make_unsafe(vars(gl.runners.get(runner["id"]))["_attrs"]) + self.inventory.add_host(host, group="gitlab_runners") + self.inventory.set_variable(host, "ansible_host", make_unsafe(ip_address)) + if self.get_option("verbose_output", True): + self.inventory.set_variable(host, "gitlab_runner_attributes", host_attrs) # Use constructed if applicable - strict = self.get_option('strict') + strict = self.get_option("strict") # Composed variables - self._set_composite_vars(self.get_option('compose'), host_attrs, host, strict=strict) + self._set_composite_vars(self.get_option("compose"), host_attrs, host, strict=strict) # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group - self._add_host_to_composed_groups(self.get_option('groups'), host_attrs, host, strict=strict) + 
self._add_host_to_composed_groups(self.get_option("groups"), host_attrs, host, strict=strict) # Create groups based on variable values and add the corresponding hosts to it - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_attrs, host, strict=strict) + self._add_host_to_keyed_groups(self.get_option("keyed_groups"), host_attrs, host, strict=strict) except Exception as e: - raise AnsibleParserError(f'Unable to fetch hosts from GitLab API, this was the original exception: {e}') + raise AnsibleParserError(f"Unable to fetch hosts from GitLab API, this was the original exception: {e}") def verify_file(self, path): """Return the possibly of a file being consumable by this plugin.""" - return ( - super().verify_file(path) and - path.endswith(("gitlab_runners.yaml", "gitlab_runners.yml"))) + return super().verify_file(path) and path.endswith(("gitlab_runners.yaml", "gitlab_runners.yml")) def parse(self, inventory, loader, path, cache=True): if not HAS_GITLAB: - raise AnsibleError('The GitLab runners dynamic inventory plugin requires python-gitlab: https://python-gitlab.readthedocs.io/en/stable/') + raise AnsibleError( + "The GitLab runners dynamic inventory plugin requires python-gitlab: https://python-gitlab.readthedocs.io/en/stable/" + ) super().parse(inventory, loader, path, cache) self._read_config_data(path) self._populate() diff --git a/plugins/inventory/icinga2.py b/plugins/inventory/icinga2.py index 658087b1079..1849c6222ea 100644 --- a/plugins/inventory/icinga2.py +++ b/plugins/inventory/icinga2.py @@ -104,12 +104,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable): - ''' Host inventory parser for ansible using Icinga2 as source. ''' + """Host inventory parser for ansible using Icinga2 as source.""" - NAME = 'community.general.icinga2' + NAME = "community.general.icinga2" def __init__(self): - super().__init__() # from config @@ -127,7 +126,7 @@ def __init__(self): def verify_file(self, path): valid = False if super().verify_file(path): - if path.endswith(('icinga2.yaml', 'icinga2.yml')): + if path.endswith(("icinga2.yaml", "icinga2.yml")): valid = True else: self.display.vvv('Skipping due to inventory source not ending in "icinga2.yaml" nor "icinga2.yml"') @@ -135,28 +134,28 @@ def verify_file(self, path): def _api_connect(self): self.headers = { - 'User-Agent': "ansible-icinga2-inv", - 'Accept': "application/json", + "User-Agent": "ansible-icinga2-inv", + "Accept": "application/json", } api_status_url = f"{self.icinga2_url}/status" request_args = { - 'headers': self.headers, - 'url_username': self.icinga2_user, - 'url_password': self.icinga2_password, - 'validate_certs': self.ssl_verify + "headers": self.headers, + "url_username": self.icinga2_user, + "url_password": self.icinga2_password, + "validate_certs": self.ssl_verify, } open_url(api_status_url, **request_args) def _post_request(self, request_url, data=None): self.display.vvv(f"Requested URL: {request_url}") request_args = { - 'headers': self.headers, - 'url_username': self.icinga2_user, - 'url_password': self.icinga2_password, - 'validate_certs': self.ssl_verify + "headers": self.headers, + "url_username": self.icinga2_user, + "url_password": self.icinga2_password, + "validate_certs": self.ssl_verify, } if data is not None: - request_args['data'] = json.dumps(data) + request_args["data"] = json.dumps(data) self.display.vvv(f"Request Args: {request_args}") try: response = open_url(request_url, **request_args) @@ -166,51 +165,60 @@ def _post_request(self, request_url, data=None): 
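Aside: the `request_args` dict these icinga2 hunks keep reshaping maps one-to-one onto keyword arguments of `open_url` from `ansible.module_utils.urls`, which handles basic auth and TLS verification directly. A minimal stand-alone sketch of the connectivity probe `_api_connect` performs, with a hypothetical endpoint and credentials (not taken from this patch):

    from ansible.module_utils.urls import open_url

    # Hypothetical values standing in for the plugin's url/user/password options.
    response = open_url(
        "https://icinga.example.com:5665/v1/status",
        headers={"User-Agent": "ansible-icinga2-inv", "Accept": "application/json"},
        url_username="ansible",
        url_password="secret",
        validate_certs=True,
    )
    # open_url raises on HTTP errors, so reaching this line means the API answered.
    print(response.status)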
self.display.vvv(f"Error returned: {error_body}") except Exception: error_body = {"status": None} - if e.code == 404 and error_body.get('status') == "No objects found.": + if e.code == 404 and error_body.get("status") == "No objects found.": raise AnsibleParserError("Host filter returned no data. Please confirm your host_filter value is valid") raise AnsibleParserError(f"Unexpected data returned: {e} -- {error_body}") response_body = response.read() - json_data = json.loads(response_body.decode('utf-8')) + json_data = json.loads(response_body.decode("utf-8")) self.display.vvv(f"Returned Data: {json.dumps(json_data, indent=4, sort_keys=True)}") if 200 <= response.status <= 299: return json_data - if response.status == 404 and json_data['status'] == "No objects found.": - raise AnsibleParserError( - f"API returned no data -- Response: {response.status} - {json_data['status']}") + if response.status == 404 and json_data["status"] == "No objects found.": + raise AnsibleParserError(f"API returned no data -- Response: {response.status} - {json_data['status']}") if response.status == 401: raise AnsibleParserError( - f"API was unable to complete query -- Response: {response.status} - {json_data['status']}") + f"API was unable to complete query -- Response: {response.status} - {json_data['status']}" + ) if response.status == 500: - raise AnsibleParserError( - f"API Response - {json_data['status']} - {json_data['errors']}") - raise AnsibleParserError( - f"Unexpected data returned - {json_data['status']} - {json_data['errors']}") + raise AnsibleParserError(f"API Response - {json_data['status']} - {json_data['errors']}") + raise AnsibleParserError(f"Unexpected data returned - {json_data['status']} - {json_data['errors']}") def _query_hosts(self, hosts=None, attrs=None, joins=None, host_filter=None): query_hosts_url = f"{self.icinga2_url}/objects/hosts" - self.headers['X-HTTP-Method-Override'] = 'GET' + self.headers["X-HTTP-Method-Override"] = "GET" data_dict = dict() if hosts: - data_dict['hosts'] = hosts + data_dict["hosts"] = hosts if attrs is not None: - data_dict['attrs'] = attrs + data_dict["attrs"] = attrs if joins is not None: - data_dict['joins'] = joins + data_dict["joins"] = joins if host_filter is not None: - data_dict['filter'] = host_filter.replace("\\\"", "\"") + data_dict["filter"] = host_filter.replace('\\"', '"') self.display.vvv(host_filter) host_dict = self._post_request(query_hosts_url, data_dict) - return host_dict['results'] + return host_dict["results"] def get_inventory_from_icinga(self): - """Query for all hosts """ + """Query for all hosts""" self.display.vvv("Querying Icinga2 for inventory") query_args = { - "attrs": ["address", "address6", "name", "display_name", "state_type", "state", "templates", "groups", "vars", "zone"], + "attrs": [ + "address", + "address6", + "name", + "display_name", + "state_type", + "state", + "templates", + "groups", + "vars", + "zone", + ], } if self.host_filter is not None: - query_args['host_filter'] = self.host_filter + query_args["host_filter"] = self.host_filter # Icinga2 API Call results_json = self._query_hosts(**query_args) # Manipulate returned API data to Ansible inventory spec @@ -218,10 +226,10 @@ def get_inventory_from_icinga(self): return ansible_inv def _apply_constructable(self, name, variables): - strict = self.get_option('strict') - self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict) - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict) - 
self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict) + strict = self.get_option("strict") + self._add_host_to_composed_groups(self.get_option("groups"), variables, name, strict=strict) + self._add_host_to_keyed_groups(self.get_option("keyed_groups"), variables, name, strict=strict) + self._set_composite_vars(self.get_option("compose"), variables, name, strict=strict) def _populate(self): groups = self._to_json(self.get_inventory_from_icinga()) @@ -235,58 +243,55 @@ def _convert_inv(self, json_data): """Convert Icinga2 API data to JSON format for Ansible""" groups_dict = {"_meta": {"hostvars": {}}} for entry in json_data: - host_attrs = make_unsafe(entry['attrs']) + host_attrs = make_unsafe(entry["attrs"]) if self.inventory_attr == "name": - host_name = make_unsafe(entry.get('name')) + host_name = make_unsafe(entry.get("name")) if self.inventory_attr == "address": # When looking for address for inventory, if missing fallback to object name - if host_attrs.get('address', '') != '': - host_name = make_unsafe(host_attrs.get('address')) + if host_attrs.get("address", "") != "": + host_name = make_unsafe(host_attrs.get("address")) else: - host_name = make_unsafe(entry.get('name')) + host_name = make_unsafe(entry.get("name")) if self.inventory_attr == "display_name": - host_name = host_attrs.get('display_name') - if host_attrs['state'] == 0: - host_attrs['state'] = 'on' + host_name = host_attrs.get("display_name") + if host_attrs["state"] == 0: + host_attrs["state"] = "on" else: - host_attrs['state'] = 'off' + host_attrs["state"] = "off" self.inventory.add_host(host_name) if self.group_by_hostgroups: - host_groups = host_attrs.get('groups') + host_groups = host_attrs.get("groups") for group in host_groups: if group not in self.inventory.groups.keys(): self.inventory.add_group(group) self.inventory.add_child(group, host_name) # If the address attribute is populated, override ansible_host with the value - if host_attrs.get('address') != '': - self.inventory.set_variable(host_name, 'ansible_host', host_attrs.get('address')) - self.inventory.set_variable(host_name, 'hostname', make_unsafe(entry.get('name'))) - self.inventory.set_variable(host_name, 'display_name', host_attrs.get('display_name')) - self.inventory.set_variable(host_name, 'state', - host_attrs['state']) - self.inventory.set_variable(host_name, 'state_type', - host_attrs['state_type']) + if host_attrs.get("address") != "": + self.inventory.set_variable(host_name, "ansible_host", host_attrs.get("address")) + self.inventory.set_variable(host_name, "hostname", make_unsafe(entry.get("name"))) + self.inventory.set_variable(host_name, "display_name", host_attrs.get("display_name")) + self.inventory.set_variable(host_name, "state", host_attrs["state"]) + self.inventory.set_variable(host_name, "state_type", host_attrs["state_type"]) # Adds all attributes to a variable 'icinga2_attributes' construct_vars = dict(self.inventory.get_host(host_name).get_vars()) - construct_vars['icinga2_attributes'] = host_attrs + construct_vars["icinga2_attributes"] = host_attrs self._apply_constructable(host_name, construct_vars) return groups_dict def parse(self, inventory, loader, path, cache=True): - super().parse(inventory, loader, path) # read config from file, this sets 'options' self._read_config_data(path) # Store the options from the YAML file - self.icinga2_url = self.get_option('url') - self.icinga2_user = self.get_option('user') - self.icinga2_password = self.get_option('password') - self.ssl_verify = 
self.get_option('validate_certs') - self.host_filter = self.get_option('host_filter') - self.inventory_attr = self.get_option('inventory_attr') - self.group_by_hostgroups = self.get_option('group_by_hostgroups') + self.icinga2_url = self.get_option("url") + self.icinga2_user = self.get_option("user") + self.icinga2_password = self.get_option("password") + self.ssl_verify = self.get_option("validate_certs") + self.host_filter = self.get_option("host_filter") + self.inventory_attr = self.get_option("inventory_attr") + self.group_by_hostgroups = self.get_option("group_by_hostgroups") if self.templar.is_template(self.icinga2_url): self.icinga2_url = self.templar.template(variable=self.icinga2_url) diff --git a/plugins/inventory/incus.py b/plugins/inventory/incus.py index a9e3c572cdb..04eb34a7f06 100644 --- a/plugins/inventory/incus.py +++ b/plugins/inventory/incus.py @@ -106,9 +106,7 @@ def verify_file(self, path): if path.endswith(("incus.yaml", "incus.yml")): valid = True else: - self.display.vvv( - 'Skipping due to inventory source not ending in "incus.yaml" nor "incus.yml"' - ) + self.display.vvv('Skipping due to inventory source not ending in "incus.yaml" nor "incus.yml"') return valid @@ -148,10 +146,7 @@ def populate(self): if project_name: projects = [project_name] else: - projects = [ - entry["name"] - for entry in self._run_incus("project", "list", f"{remote_name}:") - ] + projects = [entry["name"] for entry in self._run_incus("project", "list", f"{remote_name}:")] # Get a list of instances. for project in projects: @@ -214,17 +209,11 @@ def _add_host(self, hostname, host_vars): strict = self.get_option("strict") # Add variables created by the user's Jinja2 expressions to the host - self._set_composite_vars( - self.get_option("compose"), host_vars, hostname, strict=True - ) + self._set_composite_vars(self.get_option("compose"), host_vars, hostname, strict=True) # Create user-defined groups using variables and Jinja2 conditionals - self._add_host_to_composed_groups( - self.get_option("groups"), host_vars, hostname, strict=strict - ) - self._add_host_to_keyed_groups( - self.get_option("keyed_groups"), host_vars, hostname, strict=strict - ) + self._add_host_to_composed_groups(self.get_option("groups"), host_vars, hostname, strict=strict) + self._add_host_to_keyed_groups(self.get_option("keyed_groups"), host_vars, hostname, strict=strict) def _run_incus(self, *args): local_cmd = ["incus"] + list(args) + ["--format=json"] diff --git a/plugins/inventory/iocage.py b/plugins/inventory/iocage.py index 4e981583ae1..ae8a85db4a2 100644 --- a/plugins/inventory/iocage.py +++ b/plugins/inventory/iocage.py @@ -1,4 +1,3 @@ - # Copyright (c) 2024 Vladimir Botka # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -184,34 +183,34 @@ def _parse_ip4(ip4): - ''' Return dictionary iocage_ip4_dict. default = {ip4: [], msg: ''}. - If item matches ifc|IP or ifc|CIDR parse ifc, ip, and mask. - Otherwise, append item to msg. - ''' + """Return dictionary iocage_ip4_dict. default = {ip4: [], msg: ''}. + If item matches ifc|IP or ifc|CIDR parse ifc, ip, and mask. + Otherwise, append item to msg. 
+ """ iocage_ip4_dict = {} - iocage_ip4_dict['ip4'] = [] - iocage_ip4_dict['msg'] = '' + iocage_ip4_dict["ip4"] = [] + iocage_ip4_dict["msg"] = "" - items = ip4.split(',') + items = ip4.split(",") for item in items: - if re.match('^\\w+\\|(?:\\d{1,3}\\.){3}\\d{1,3}.*$', item): - i = re.split('\\||/', item) + if re.match("^\\w+\\|(?:\\d{1,3}\\.){3}\\d{1,3}.*$", item): + i = re.split("\\||/", item) if len(i) == 3: - iocage_ip4_dict['ip4'].append({'ifc': i[0], 'ip': i[1], 'mask': i[2]}) + iocage_ip4_dict["ip4"].append({"ifc": i[0], "ip": i[1], "mask": i[2]}) else: - iocage_ip4_dict['ip4'].append({'ifc': i[0], 'ip': i[1], 'mask': '-'}) + iocage_ip4_dict["ip4"].append({"ifc": i[0], "ip": i[1], "mask": "-"}) else: - iocage_ip4_dict['msg'] += item + iocage_ip4_dict["msg"] += item return iocage_ip4_dict class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - ''' Host inventory parser for ansible using iocage as source. ''' + """Host inventory parser for ansible using iocage as source.""" - NAME = 'community.general.iocage' - IOCAGE = '/usr/local/bin/iocage' + NAME = "community.general.iocage" + IOCAGE = "/usr/local/bin/iocage" def __init__(self): super().__init__() @@ -219,7 +218,7 @@ def __init__(self): def verify_file(self, path): valid = False if super().verify_file(path): - if path.endswith(('iocage.yaml', 'iocage.yml')): + if path.endswith(("iocage.yaml", "iocage.yml")): valid = True else: self.display.vvv('Skipping due to inventory source not ending in "iocage.yaml" nor "iocage.yml"') @@ -231,7 +230,7 @@ def parse(self, inventory, loader, path, cache=True): self._read_config_data(path) cache_key = self.get_cache_key(path) - user_cache_setting = self.get_option('cache') + user_cache_setting = self.get_option("cache") attempt_to_read_cache = user_cache_setting and cache cache_needs_update = user_cache_setting and not cache @@ -248,52 +247,52 @@ def parse(self, inventory, loader, path, cache=True): self.populate(results) def get_inventory(self, path): - host = self.get_option('host') - sudo = self.get_option('sudo') - sudo_preserve_env = self.get_option('sudo_preserve_env') - env = self.get_option('env') - get_properties = self.get_option('get_properties') - hooks_results = self.get_option('hooks_results') - inventory_hostname_tag = self.get_option('inventory_hostname_tag') - inventory_hostname_required = self.get_option('inventory_hostname_required') + host = self.get_option("host") + sudo = self.get_option("sudo") + sudo_preserve_env = self.get_option("sudo_preserve_env") + env = self.get_option("env") + get_properties = self.get_option("get_properties") + hooks_results = self.get_option("hooks_results") + inventory_hostname_tag = self.get_option("inventory_hostname_tag") + inventory_hostname_required = self.get_option("inventory_hostname_required") cmd = [] my_env = os.environ.copy() - if host == 'localhost': + if host == "localhost": my_env.update({str(k): str(v) for k, v in env.items()}) else: - user = self.get_option('user') + user = self.get_option("user") cmd.append("ssh") cmd.append(f"{user}@{host}") cmd.extend([f"{k}={v}" for k, v in env.items()]) cmd_list = cmd.copy() if sudo: - cmd_list.append('sudo') + cmd_list.append("sudo") if sudo_preserve_env: - cmd_list.append('--preserve-env') + cmd_list.append("--preserve-env") cmd_list.append(self.IOCAGE) - cmd_list.append('list') - cmd_list.append('--long') + cmd_list.append("list") + cmd_list.append("--long") try: p = Popen(cmd_list, stdout=PIPE, stderr=PIPE, env=my_env) stdout, stderr = p.communicate() if p.returncode != 
0: - raise AnsibleError(f'Failed to run cmd={cmd_list}, rc={p.returncode}, stderr={to_native(stderr)}') + raise AnsibleError(f"Failed to run cmd={cmd_list}, rc={p.returncode}, stderr={to_native(stderr)}") try: - t_stdout = to_text(stdout, errors='surrogate_or_strict') + t_stdout = to_text(stdout, errors="surrogate_or_strict") except UnicodeError as e: - raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + raise AnsibleError(f"Invalid (non unicode) input returned: {e}") from e except Exception as e: - raise AnsibleParserError(f'Failed to parse {to_native(path)}: {e}') from e + raise AnsibleParserError(f"Failed to parse {to_native(path)}: {e}") from e - results = {'_meta': {'hostvars': {}}} + results = {"_meta": {"hostvars": {}}} self.get_jails(t_stdout, results) if get_properties: - for hostname, host_vars in results['_meta']['hostvars'].items(): + for hostname, host_vars in results["_meta"]["hostvars"].items(): cmd_get_properties = cmd.copy() cmd_get_properties.append(self.IOCAGE) cmd_get_properties.append("get") @@ -304,76 +303,78 @@ def get_inventory(self, path): stdout, stderr = p.communicate() if p.returncode != 0: raise AnsibleError( - f'Failed to run cmd={cmd_get_properties}, rc={p.returncode}, stderr={to_native(stderr)}') + f"Failed to run cmd={cmd_get_properties}, rc={p.returncode}, stderr={to_native(stderr)}" + ) try: - t_stdout = to_text(stdout, errors='surrogate_or_strict') + t_stdout = to_text(stdout, errors="surrogate_or_strict") except UnicodeError as e: - raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + raise AnsibleError(f"Invalid (non unicode) input returned: {e}") from e except Exception as e: - raise AnsibleError(f'Failed to get properties: {e}') from e + raise AnsibleError(f"Failed to get properties: {e}") from e self.get_properties(t_stdout, results, hostname) if hooks_results: cmd_get_pool = cmd.copy() cmd_get_pool.append(self.IOCAGE) - cmd_get_pool.append('get') - cmd_get_pool.append('--pool') + cmd_get_pool.append("get") + cmd_get_pool.append("--pool") try: p = Popen(cmd_get_pool, stdout=PIPE, stderr=PIPE, env=my_env) stdout, stderr = p.communicate() if p.returncode != 0: raise AnsibleError( - f'Failed to run cmd={cmd_get_pool}, rc={p.returncode}, stderr={to_native(stderr)}') + f"Failed to run cmd={cmd_get_pool}, rc={p.returncode}, stderr={to_native(stderr)}" + ) try: - iocage_pool = to_text(stdout, errors='surrogate_or_strict').strip() + iocage_pool = to_text(stdout, errors="surrogate_or_strict").strip() except UnicodeError as e: - raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + raise AnsibleError(f"Invalid (non unicode) input returned: {e}") from e except Exception as e: - raise AnsibleError(f'Failed to get pool: {e}') from e + raise AnsibleError(f"Failed to get pool: {e}") from e - for hostname, host_vars in results['_meta']['hostvars'].items(): + for hostname, host_vars in results["_meta"]["hostvars"].items(): iocage_hooks = [] for hook in hooks_results: path = f"/{iocage_pool}/iocage/jails/{hostname}/root{hook}" cmd_cat_hook = cmd.copy() - cmd_cat_hook.append('cat') + cmd_cat_hook.append("cat") cmd_cat_hook.append(path) try: p = Popen(cmd_cat_hook, stdout=PIPE, stderr=PIPE, env=my_env) stdout, stderr = p.communicate() if p.returncode != 0: - iocage_hooks.append('-') + iocage_hooks.append("-") continue try: - iocage_hook = to_text(stdout, errors='surrogate_or_strict').strip() + iocage_hook = to_text(stdout, errors="surrogate_or_strict").strip() except UnicodeError as e: - raise 
AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + raise AnsibleError(f"Invalid (non unicode) input returned: {e}") from e except Exception: - iocage_hooks.append('-') + iocage_hooks.append("-") else: iocage_hooks.append(iocage_hook) - results['_meta']['hostvars'][hostname]['iocage_hooks'] = iocage_hooks + results["_meta"]["hostvars"][hostname]["iocage_hooks"] = iocage_hooks # Optionally, get the jails names from the properties notes. # Requires the notes format "t1=v1 t2=v2 ..." if inventory_hostname_tag: if not get_properties: - raise AnsibleError('Jail properties are needed to use inventory_hostname_tag. Enable get_properties') + raise AnsibleError("Jail properties are needed to use inventory_hostname_tag. Enable get_properties") update = {} - for hostname, host_vars in results['_meta']['hostvars'].items(): - tags = dict(tag.split('=', 1) for tag in host_vars['iocage_properties']['notes'].split() if '=' in tag) + for hostname, host_vars in results["_meta"]["hostvars"].items(): + tags = dict(tag.split("=", 1) for tag in host_vars["iocage_properties"]["notes"].split() if "=" in tag) if inventory_hostname_tag in tags: update[hostname] = tags[inventory_hostname_tag] elif inventory_hostname_required: - raise AnsibleError(f'Mandatory tag {inventory_hostname_tag!r} is missing in the properties notes.') + raise AnsibleError(f"Mandatory tag {inventory_hostname_tag!r} is missing in the properties notes.") for hostname, alias in update.items(): - results['_meta']['hostvars'][alias] = results['_meta']['hostvars'].pop(hostname) + results["_meta"]["hostvars"][alias] = results["_meta"]["hostvars"].pop(hostname) return results @@ -381,38 +382,38 @@ def get_jails(self, t_stdout, results): lines = t_stdout.splitlines() if len(lines) < 5: return - indices = [i for i, val in enumerate(lines[1]) if val == '|'] + indices = [i for i, val in enumerate(lines[1]) if val == "|"] for line in lines[3::2]: - jail = [line[i + 1:j].strip() for i, j in zip(indices[:-1], indices[1:])] + jail = [line[i + 1 : j].strip() for i, j in zip(indices[:-1], indices[1:])] iocage_name = jail[1] iocage_ip4_dict = _parse_ip4(jail[6]) - if iocage_ip4_dict['ip4']: - iocage_ip4 = ','.join([d['ip'] for d in iocage_ip4_dict['ip4']]) + if iocage_ip4_dict["ip4"]: + iocage_ip4 = ",".join([d["ip"] for d in iocage_ip4_dict["ip4"]]) else: - iocage_ip4 = '-' - results['_meta']['hostvars'][iocage_name] = {} - results['_meta']['hostvars'][iocage_name]['iocage_jid'] = jail[0] - results['_meta']['hostvars'][iocage_name]['iocage_boot'] = jail[2] - results['_meta']['hostvars'][iocage_name]['iocage_state'] = jail[3] - results['_meta']['hostvars'][iocage_name]['iocage_type'] = jail[4] - results['_meta']['hostvars'][iocage_name]['iocage_release'] = jail[5] - results['_meta']['hostvars'][iocage_name]['iocage_ip4_dict'] = iocage_ip4_dict - results['_meta']['hostvars'][iocage_name]['iocage_ip4'] = iocage_ip4 - results['_meta']['hostvars'][iocage_name]['iocage_ip6'] = jail[7] - results['_meta']['hostvars'][iocage_name]['iocage_template'] = jail[8] - results['_meta']['hostvars'][iocage_name]['iocage_basejail'] = jail[9] + iocage_ip4 = "-" + results["_meta"]["hostvars"][iocage_name] = {} + results["_meta"]["hostvars"][iocage_name]["iocage_jid"] = jail[0] + results["_meta"]["hostvars"][iocage_name]["iocage_boot"] = jail[2] + results["_meta"]["hostvars"][iocage_name]["iocage_state"] = jail[3] + results["_meta"]["hostvars"][iocage_name]["iocage_type"] = jail[4] + results["_meta"]["hostvars"][iocage_name]["iocage_release"] = jail[5] + 
results["_meta"]["hostvars"][iocage_name]["iocage_ip4_dict"] = iocage_ip4_dict + results["_meta"]["hostvars"][iocage_name]["iocage_ip4"] = iocage_ip4 + results["_meta"]["hostvars"][iocage_name]["iocage_ip6"] = jail[7] + results["_meta"]["hostvars"][iocage_name]["iocage_template"] = jail[8] + results["_meta"]["hostvars"][iocage_name]["iocage_basejail"] = jail[9] def get_properties(self, t_stdout, results, hostname): - properties = dict(x.split(':', 1) for x in t_stdout.splitlines()) - results['_meta']['hostvars'][hostname]['iocage_properties'] = properties + properties = dict(x.split(":", 1) for x in t_stdout.splitlines()) + results["_meta"]["hostvars"][hostname]["iocage_properties"] = properties def populate(self, results): - strict = self.get_option('strict') + strict = self.get_option("strict") - for hostname, host_vars in results['_meta']['hostvars'].items(): - self.inventory.add_host(hostname, group='all') + for hostname, host_vars in results["_meta"]["hostvars"].items(): + self.inventory.add_host(hostname, group="all") for var, value in host_vars.items(): self.inventory.set_variable(hostname, var, value) - self._set_composite_vars(self.get_option('compose'), host_vars, hostname, strict=True) - self._add_host_to_composed_groups(self.get_option('groups'), host_vars, hostname, strict=strict) - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_vars, hostname, strict=strict) + self._set_composite_vars(self.get_option("compose"), host_vars, hostname, strict=True) + self._add_host_to_composed_groups(self.get_option("groups"), host_vars, hostname, strict=strict) + self._add_host_to_keyed_groups(self.get_option("keyed_groups"), host_vars, hostname, strict=strict) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 520d536b667..1bc7203e137 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -134,27 +134,24 @@ from linode_api4 import LinodeClient from linode_api4.objects.linode import Instance from linode_api4.errors import ApiError as LinodeApiError + HAS_LINODE = True except ImportError: HAS_LINODE = False class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - - NAME = 'community.general.linode' + NAME = "community.general.linode" def _build_client(self, loader): """Build the Linode client.""" - access_token = self.get_option('access_token') + access_token = self.get_option("access_token") if self.templar.is_template(access_token): access_token = self.templar.template(variable=access_token) if access_token is None: - raise AnsibleError(( - 'Could not retrieve Linode access token ' - 'from plugin configuration sources' - )) + raise AnsibleError(("Could not retrieve Linode access token from plugin configuration sources")) self.client = LinodeClient(access_token) @@ -163,7 +160,7 @@ def _get_instances_inventory(self): try: self.instances = self.client.linode.instances() except LinodeApiError as exception: - raise AnsibleError(f'Linode client raised: {exception}') + raise AnsibleError(f"Linode client raised: {exception}") def _add_groups(self): """Add Linode instance groups to the dynamic inventory.""" @@ -174,26 +171,17 @@ def _add_groups(self): def _filter_by_config(self): """Filter instances by user specified configuration.""" - regions = self.get_option('regions') + regions = self.get_option("regions") if regions: - self.instances = [ - instance for instance in self.instances - if instance.region.id in regions - ] + self.instances = [instance for instance in self.instances if instance.region.id in regions] - 
types = self.get_option('types') + types = self.get_option("types") if types: - self.instances = [ - instance for instance in self.instances - if instance.type.id in types - ] + self.instances = [instance for instance in self.instances if instance.type.id in types] - tags = self.get_option('tags') + tags = self.get_option("tags") if tags: - self.instances = [ - instance for instance in self.instances - if any(tag in instance.tags for tag in tags) - ] + self.instances = [instance for instance in self.instances if any(tag in instance.tags for tag in tags)] def _add_instances_to_groups(self): """Add instance names to their dynamic inventory groups.""" @@ -202,28 +190,22 @@ def _add_instances_to_groups(self): def _add_hostvars_for_instances(self): """Add hostvars for instances in the dynamic inventory.""" - ip_style = self.get_option('ip_style') + ip_style = self.get_option("ip_style") for instance in self.instances: hostvars = instance._raw_json hostname = make_unsafe(instance.label) for hostvar_key in hostvars: - if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']: + if ip_style == "api" and hostvar_key in ["ipv4", "ipv6"]: continue - self.inventory.set_variable( - hostname, - hostvar_key, - make_unsafe(hostvars[hostvar_key]) - ) - if ip_style == 'api': + self.inventory.set_variable(hostname, hostvar_key, make_unsafe(hostvars[hostvar_key])) + if ip_style == "api": ips = instance.ips.ipv4.public + instance.ips.ipv4.private ips += [instance.ips.ipv6.slaac, instance.ips.ipv6.link_local] ips += instance.ips.ipv6.pools for ip_type in set(ip.type for ip in ips): self.inventory.set_variable( - hostname, - ip_type, - make_unsafe(self._ip_data([ip for ip in ips if ip.type == ip_type])) + hostname, ip_type, make_unsafe(self._ip_data([ip for ip in ips if ip.type == ip_type])) ) def _ip_data(self, ip_list): @@ -231,13 +213,13 @@ def _ip_data(self, ip_list): for ip in list(ip_list): data.append( { - 'address': ip.address, - 'subnet_mask': ip.subnet_mask, - 'gateway': ip.gateway, - 'public': ip.public, - 'prefix': ip.prefix, - 'rdns': ip.rdns, - 'type': ip.type + "address": ip.address, + "subnet_mask": ip.subnet_mask, + "gateway": ip.gateway, + "public": ip.public, + "prefix": ip.prefix, + "rdns": ip.rdns, + "type": ip.type, } ) return data @@ -246,7 +228,7 @@ def _cacheable_inventory(self): return [i._raw_json for i in self.instances] def populate(self): - strict = self.get_option('strict') + strict = self.get_option("strict") self._filter_by_config() @@ -256,21 +238,9 @@ def populate(self): for instance in self.instances: hostname = make_unsafe(instance.label) variables = self.inventory.get_host(hostname).get_vars() - self._add_host_to_composed_groups( - self.get_option('groups'), - variables, - hostname, - strict=strict) - self._add_host_to_keyed_groups( - self.get_option('keyed_groups'), - variables, - hostname, - strict=strict) - self._set_composite_vars( - self.get_option('compose'), - variables, - hostname, - strict=strict) + self._add_host_to_composed_groups(self.get_option("groups"), variables, hostname, strict=strict) + self._add_host_to_keyed_groups(self.get_option("keyed_groups"), variables, hostname, strict=strict) + self._set_composite_vars(self.get_option("compose"), variables, hostname, strict=strict) def verify_file(self, path): """Verify the Linode configuration file. 
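For context on the `ip_style: api` branch above: the plugin groups a flat list of addresses by their `type` and emits one hostvar per type. A minimal sketch of that shaping with hypothetical sample data (only the fields `_ip_data()` copies are shown; real entries come from `instance.ips`):

    # Hypothetical flat address list standing in for instance.ips.
    ips = [
        {"type": "ipv4", "address": "203.0.113.10", "public": True},
        {"type": "ipv4", "address": "192.168.128.5", "public": False},
        {"type": "slaac", "address": "2001:db8::1", "public": True},
    ]

    by_type = {}
    for ip in ips:
        by_type.setdefault(ip["type"], []).append(ip)

    # The plugin then makes one inventory.set_variable(hostname, ip_type, ...) call per key.
    print(sorted(by_type))  # ['ipv4', 'slaac']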
@@ -299,14 +269,14 @@ def parse(self, inventory, loader, path, cache=True): self.instances = None if not HAS_LINODE: - raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.') + raise AnsibleError("the Linode dynamic inventory plugin requires linode_api4.") self._read_config_data(path) cache_key = self.get_cache_key(path) if cache: - cache = self.get_option('cache') + cache = self.get_option("cache") update_cache = False if cache: diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index 94622d4b94f..aee5d43ab1f 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -190,9 +190,9 @@ class InventoryModule(BaseInventoryPlugin): DEBUG = 4 - NAME = 'community.general.lxd' - SNAP_SOCKET_URL = 'unix:/var/snap/lxd/common/lxd/unix.socket' - SOCKET_URL = 'unix:/var/lib/lxd/unix.socket' + NAME = "community.general.lxd" + SNAP_SOCKET_URL = "unix:/var/snap/lxd/common/lxd/unix.socket" + SOCKET_URL = "unix:/var/lib/lxd/unix.socket" @staticmethod def load_json_data(path): @@ -210,10 +210,10 @@ def load_json_data(path): Returns: dict(json_data): json data""" try: - with open(path, 'r') as json_file: + with open(path, "r") as json_file: return json.load(json_file) except (IOError, json.decoder.JSONDecodeError) as err: - raise AnsibleParserError(f'Could not load the test data from {to_native(path)}: {err}') + raise AnsibleParserError(f"Could not load the test data from {to_native(path)}: {err}") def save_json_data(self, path, file_name=None): """save data as json @@ -233,17 +233,17 @@ def save_json_data(self, path, file_name=None): if file_name: path.append(file_name) else: - prefix = 'lxd_data-' - time_stamp = time.strftime('%Y%m%d-%H%M%S') - suffix = '.atd' + prefix = "lxd_data-" + time_stamp = time.strftime("%Y%m%d-%H%M%S") + suffix = ".atd" path.append(prefix + time_stamp + suffix) try: cwd = os.path.abspath(os.path.dirname(__file__)) - with open(os.path.abspath(os.path.join(cwd, *path)), 'w') as json_file: + with open(os.path.abspath(os.path.join(cwd, *path)), "w") as json_file: json.dump(self.data, json_file) except IOError as err: - raise AnsibleParserError(f'Could not save data: {err}') + raise AnsibleParserError(f"Could not save data: {err}") def verify_file(self, path): """Check the config @@ -260,7 +260,7 @@ def verify_file(self, path): bool(valid): is valid""" valid = False if super().verify_file(path): - if path.endswith(('lxd.yaml', 'lxd.yml')): + if path.endswith(("lxd.yaml", "lxd.yml")): valid = True else: self.display.vvv('Inventory source not ending in "lxd.yaml" or "lxd.yml"') @@ -282,8 +282,8 @@ def validate_url(url): bool""" if not isinstance(url, str): return False - if not url.startswith(('unix:', 'https:')): - raise AnsibleError(f'URL is malformed: {url}') + if not url.startswith(("unix:", "https:")): + raise AnsibleError(f"URL is malformed: {url}") return True def _connect_to_socket(self): @@ -300,15 +300,17 @@ def _connect_to_socket(self): Returns: None""" error_storage = {} - url_list = [self.get_option('url'), self.SNAP_SOCKET_URL, self.SOCKET_URL] + url_list = [self.get_option("url"), self.SNAP_SOCKET_URL, self.SOCKET_URL] urls = (url for url in url_list if self.validate_url(url)) for url in urls: try: - socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug, self.server_cert, self.server_check_hostname) + socket_connection = LXDClient( + url, self.client_key, self.client_cert, self.debug, self.server_cert, self.server_check_hostname + ) return socket_connection except LXDClientException as err: 
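The lxd plugin's `_connect_to_socket` (reformatted below) tries each candidate URL in order, records per-URL failures, and raises only after all candidates fail. A self-contained sketch of that loop shape, where `connect()` is a hypothetical stand-in for `LXDClient(...)` that merely probes the unix socket:

    import socket

    CANDIDATE_URLS = [
        "unix:/var/snap/lxd/common/lxd/unix.socket",  # snap-packaged LXD
        "unix:/var/lib/lxd/unix.socket",              # classic package
    ]

    def connect(url):
        # Hypothetical stand-in for LXDClient(url, ...): just open the unix socket.
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.connect(url[len("unix:"):])
        return s

    errors = {}
    for url in CANDIDATE_URLS:
        try:
            client = connect(url)
            break
        except OSError as err:
            errors[url] = err
    else:
        # Mirrors the plugin's raise once every candidate URL has failed.
        raise RuntimeError(f"No connection to the socket: {errors}")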
error_storage[url] = err - raise AnsibleError(f'No connection to the socket: {error_storage}') + raise AnsibleError(f"No connection to the socket: {error_storage}") def _get_networks(self): """Get Networknames @@ -330,8 +332,8 @@ def _get_networks(self): # 'error_code': 0, # 'error': '', # 'metadata': ['/1.0/networks/lxdbr0']} - network_configs = self.socket.do('GET', '/1.0/networks') - return [m.split('/')[3] for m in network_configs['metadata']] + network_configs = self.socket.do("GET", "/1.0/networks") + return [m.split("/")[3] for m in network_configs["metadata"]] def _get_instances(self): """Get instancenames @@ -355,16 +357,16 @@ def _get_instances(self): # "status_code": 200, # "type": "sync" # } - url = '/1.0/instances' + url = "/1.0/instances" if self.project: url = f"{url}?{urlencode(dict(project=self.project))}" - instances = self.socket.do('GET', url) + instances = self.socket.do("GET", url) if self.project: - return [m.split('/')[3].split('?')[0] for m in instances['metadata']] + return [m.split("/")[3].split("?")[0] for m in instances["metadata"]] - return [m.split('/')[3] for m in instances['metadata']] + return [m.split("/")[3] for m in instances["metadata"]] def _get_config(self, branch, name): """Get inventory of instance @@ -384,11 +386,18 @@ def _get_config(self, branch, name): dict(config): Config of the instance""" config = {} if isinstance(branch, (tuple, list)): - config[name] = {branch[1]: self.socket.do( - 'GET', f'/1.0/{to_native(branch[0])}/{to_native(name)}/{to_native(branch[1])}?{urlencode(dict(project=self.project))}')} + config[name] = { + branch[1]: self.socket.do( + "GET", + f"/1.0/{to_native(branch[0])}/{to_native(name)}/{to_native(branch[1])}?{urlencode(dict(project=self.project))}", + ) + } else: - config[name] = {branch: self.socket.do( - 'GET', f'/1.0/{to_native(branch)}/{to_native(name)}?{urlencode(dict(project=self.project))}')} + config[name] = { + branch: self.socket.do( + "GET", f"/1.0/{to_native(branch)}/{to_native(name)}?{urlencode(dict(project=self.project))}" + ) + } return config def get_instance_data(self, names): @@ -406,11 +415,11 @@ def get_instance_data(self, names): None""" # tuple(('instances','metadata/templates')) to get section in branch # e.g. /1.0/instances//metadata/templates - branches = ['instances', ('instances', 'state')] + branches = ["instances", ("instances", "state")] instance_config = {} for branch in branches: for name in names: - instance_config['instances'] = self._get_config(branch, name) + instance_config["instances"] = self._get_config(branch, name) self.data = dict_merge(instance_config, self.data) def get_network_data(self, names): @@ -428,14 +437,14 @@ def get_network_data(self, names): None""" # tuple(('instances','metadata/templates')) to get section in branch # e.g. 
/1.0/instances//metadata/templates - branches = [('networks', 'state')] + branches = [("networks", "state")] network_config = {} for branch in branches: for name in names: try: - network_config['networks'] = self._get_config(branch, name) + network_config["networks"] = self._get_config(branch, name) except LXDClientException: - network_config['networks'] = {name: None} + network_config["networks"] = {name: None} self.data = dict_merge(network_config, self.data) def extract_network_information_from_instance_config(self, instance_name): @@ -451,20 +460,26 @@ def extract_network_information_from_instance_config(self, instance_name): None Returns: dict(network_configuration): network config""" - instance_network_interfaces = self._get_data_entry(f'instances/{instance_name}/state/metadata/network') + instance_network_interfaces = self._get_data_entry(f"instances/{instance_name}/state/metadata/network") network_configuration = None if instance_network_interfaces: network_configuration = {} - gen_interface_names = [interface_name for interface_name in instance_network_interfaces if interface_name != 'lo'] + gen_interface_names = [ + interface_name for interface_name in instance_network_interfaces if interface_name != "lo" + ] for interface_name in gen_interface_names: - gen_address = [address for address in instance_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link'] + gen_address = [ + address + for address in instance_network_interfaces[interface_name]["addresses"] + if address.get("scope") != "link" + ] network_configuration[interface_name] = [] for address in gen_address: address_set = {} - address_set['family'] = address.get('family') - address_set['address'] = address.get('address') - address_set['netmask'] = address.get('netmask') - address_set['combined'] = f"{address.get('address')}/{address.get('netmask')}" + address_set["family"] = address.get("family") + address_set["address"] = address.get("address") + address_set["netmask"] = address.get("netmask") + address_set["combined"] = f"{address.get('address')}/{address.get('netmask')}" network_configuration[interface_name].append(address_set) return network_configuration @@ -481,11 +496,15 @@ def get_prefered_instance_network_interface(self, instance_name): None Returns: str(prefered_interface): None or interface name""" - instance_network_interfaces = self._get_data_entry(f'inventory/{instance_name}/network_interfaces') + instance_network_interfaces = self._get_data_entry(f"inventory/{instance_name}/network_interfaces") prefered_interface = None # init if instance_network_interfaces: # instance have network interfaces # generator if interfaces which start with the desired pattern - net_generator = [interface for interface in instance_network_interfaces if interface.startswith(self.prefered_instance_network_interface)] + net_generator = [ + interface + for interface in instance_network_interfaces + if interface.startswith(self.prefered_instance_network_interface) + ] selected_interfaces = [] # init for interface in net_generator: selected_interfaces.append(interface) @@ -508,9 +527,11 @@ def get_instance_vlans(self, instance_name): None""" # get network device configuration and store {network: vlan_id} network_vlans = {} - for network in self._get_data_entry('networks'): - if self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)): - network_vlans[network] = self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)) + for network in 
self._get_data_entry("networks"): + if self._get_data_entry("state/metadata/vlan/vid", data=self.data["networks"].get(network)): + network_vlans[network] = self._get_data_entry( + "state/metadata/vlan/vid", data=self.data["networks"].get(network) + ) # get networkdevices of instance and return # e.g. @@ -518,14 +539,14 @@ def get_instance_vlans(self, instance_name): # "network":"lxdbr0", # "type":"nic"}, vlan_ids = {} - devices = self._get_data_entry(f'instances/{to_native(instance_name)}/instances/metadata/expanded_devices') + devices = self._get_data_entry(f"instances/{to_native(instance_name)}/instances/metadata/expanded_devices") for device in devices: - if 'network' in devices[device]: - if devices[device]['network'] in network_vlans: - vlan_ids[devices[device].get('network')] = network_vlans[devices[device].get('network')] + if "network" in devices[device]: + if devices[device]["network"] in network_vlans: + vlan_ids[devices[device].get("network")] = network_vlans[devices[device].get("network")] return vlan_ids if vlan_ids else None - def _get_data_entry(self, path, data=None, delimiter='/'): + def _get_data_entry(self, path, data=None, delimiter="/"): """Helper to get data Helper to get data from self.data by a path like 'path/to/target' @@ -571,7 +592,7 @@ def _set_data_entry(self, instance_name, key, value, path=None): Returns: None""" if not path: - path = self.data['inventory'] + path = self.data["inventory"] if instance_name not in path: path[instance_name] = {} @@ -597,29 +618,53 @@ def extract_information_from_instance_configs(self): Returns: None""" # create branch "inventory" - if 'inventory' not in self.data: - self.data['inventory'] = {} - - for instance_name in self.data['instances']: - self._set_data_entry(instance_name, 'os', self._get_data_entry( - f'instances/{instance_name}/instances/metadata/config/image.os')) - self._set_data_entry(instance_name, 'release', self._get_data_entry( - f'instances/{instance_name}/instances/metadata/config/image.release')) - self._set_data_entry(instance_name, 'version', self._get_data_entry( - f'instances/{instance_name}/instances/metadata/config/image.version')) - self._set_data_entry(instance_name, 'profile', self._get_data_entry( - f'instances/{instance_name}/instances/metadata/profiles')) - self._set_data_entry(instance_name, 'location', self._get_data_entry( - f'instances/{instance_name}/instances/metadata/location')) - self._set_data_entry(instance_name, 'state', self._get_data_entry( - f'instances/{instance_name}/instances/metadata/config/volatile.last_state.power')) - self._set_data_entry(instance_name, 'type', self._get_data_entry( - f'instances/{instance_name}/instances/metadata/type')) - self._set_data_entry(instance_name, 'network_interfaces', self.extract_network_information_from_instance_config(instance_name)) - self._set_data_entry(instance_name, 'preferred_interface', self.get_prefered_instance_network_interface(instance_name)) - self._set_data_entry(instance_name, 'vlan_ids', self.get_instance_vlans(instance_name)) - self._set_data_entry(instance_name, 'project', self._get_data_entry( - f'instances/{instance_name}/instances/metadata/project')) + if "inventory" not in self.data: + self.data["inventory"] = {} + + for instance_name in self.data["instances"]: + self._set_data_entry( + instance_name, + "os", + self._get_data_entry(f"instances/{instance_name}/instances/metadata/config/image.os"), + ) + self._set_data_entry( + instance_name, + "release", + 
self._get_data_entry(f"instances/{instance_name}/instances/metadata/config/image.release"), + ) + self._set_data_entry( + instance_name, + "version", + self._get_data_entry(f"instances/{instance_name}/instances/metadata/config/image.version"), + ) + self._set_data_entry( + instance_name, "profile", self._get_data_entry(f"instances/{instance_name}/instances/metadata/profiles") + ) + self._set_data_entry( + instance_name, + "location", + self._get_data_entry(f"instances/{instance_name}/instances/metadata/location"), + ) + self._set_data_entry( + instance_name, + "state", + self._get_data_entry(f"instances/{instance_name}/instances/metadata/config/volatile.last_state.power"), + ) + self._set_data_entry( + instance_name, "type", self._get_data_entry(f"instances/{instance_name}/instances/metadata/type") + ) + self._set_data_entry( + instance_name, + "network_interfaces", + self.extract_network_information_from_instance_config(instance_name), + ) + self._set_data_entry( + instance_name, "preferred_interface", self.get_prefered_instance_network_interface(instance_name) + ) + self._set_data_entry(instance_name, "vlan_ids", self.get_instance_vlans(instance_name)) + self._set_data_entry( + instance_name, "project", self._get_data_entry(f"instances/{instance_name}/instances/metadata/project") + ) def build_inventory_network(self, instance_name): """Add the network interfaces of the instance to the inventory @@ -653,30 +698,30 @@ def interface_selection(instance_name): None Returns: dict(interface_name: ip)""" - prefered_interface = self._get_data_entry(f'inventory/{instance_name}/preferred_interface') # name or None + prefered_interface = self._get_data_entry(f"inventory/{instance_name}/preferred_interface") # name or None prefered_instance_network_family = self.prefered_instance_network_family - ip_address = '' + ip_address = "" if prefered_interface: - interface = self._get_data_entry(f'inventory/{instance_name}/network_interfaces/{prefered_interface}') + interface = self._get_data_entry(f"inventory/{instance_name}/network_interfaces/{prefered_interface}") for config in interface: - if config['family'] == prefered_instance_network_family: - ip_address = config['address'] + if config["family"] == prefered_instance_network_family: + ip_address = config["address"] break else: - interfaces = self._get_data_entry(f'inventory/{instance_name}/network_interfaces') + interfaces = self._get_data_entry(f"inventory/{instance_name}/network_interfaces") for interface in interfaces.values(): for config in interface: - if config['family'] == prefered_instance_network_family: - ip_address = config['address'] + if config["family"] == prefered_instance_network_family: + ip_address = config["address"] break return ip_address - if self._get_data_entry(f'inventory/{instance_name}/network_interfaces'): # instance have network interfaces - self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh') - self.inventory.set_variable(instance_name, 'ansible_host', make_unsafe(interface_selection(instance_name))) + if self._get_data_entry(f"inventory/{instance_name}/network_interfaces"): # instance have network interfaces + self.inventory.set_variable(instance_name, "ansible_connection", "ssh") + self.inventory.set_variable(instance_name, "ansible_host", make_unsafe(interface_selection(instance_name))) else: - self.inventory.set_variable(instance_name, 'ansible_connection', 'local') + self.inventory.set_variable(instance_name, "ansible_connection", "local") def build_inventory_hosts(self): """Build host-part dynamic 
inventory @@ -692,8 +737,8 @@ def build_inventory_hosts(self): None Returns: None""" - for instance_name in self.data['inventory']: - instance_state = str(self._get_data_entry(f'inventory/{instance_name}/state') or "STOPPED").lower() + for instance_name in self.data["inventory"]: + instance_state = str(self._get_data_entry(f"inventory/{instance_name}/state") or "STOPPED").lower() # Only consider instances that match the "state" filter, if self.state is not None if self.filter: @@ -705,34 +750,47 @@ def build_inventory_hosts(self): # add network information self.build_inventory_network(instance_name) # add os - v = self._get_data_entry(f'inventory/{instance_name}/os') + v = self._get_data_entry(f"inventory/{instance_name}/os") if v: - self.inventory.set_variable(instance_name, 'ansible_lxd_os', make_unsafe(v.lower())) + self.inventory.set_variable(instance_name, "ansible_lxd_os", make_unsafe(v.lower())) # add release - v = self._get_data_entry(f'inventory/{instance_name}/release') + v = self._get_data_entry(f"inventory/{instance_name}/release") if v: - self.inventory.set_variable( - instance_name, 'ansible_lxd_release', make_unsafe(v.lower())) + self.inventory.set_variable(instance_name, "ansible_lxd_release", make_unsafe(v.lower())) # add profile self.inventory.set_variable( - instance_name, 'ansible_lxd_profile', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/profile'))) + instance_name, + "ansible_lxd_profile", + make_unsafe(self._get_data_entry(f"inventory/{instance_name}/profile")), + ) # add state - self.inventory.set_variable( - instance_name, 'ansible_lxd_state', make_unsafe(instance_state)) + self.inventory.set_variable(instance_name, "ansible_lxd_state", make_unsafe(instance_state)) # add type self.inventory.set_variable( - instance_name, 'ansible_lxd_type', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/type'))) + instance_name, "ansible_lxd_type", make_unsafe(self._get_data_entry(f"inventory/{instance_name}/type")) + ) # add location information - if self._get_data_entry(f'inventory/{instance_name}/location') != "none": # wrong type by lxd 'none' != 'None' + if ( + self._get_data_entry(f"inventory/{instance_name}/location") != "none" + ): # wrong type by lxd 'none' != 'None' self.inventory.set_variable( - instance_name, 'ansible_lxd_location', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/location'))) + instance_name, + "ansible_lxd_location", + make_unsafe(self._get_data_entry(f"inventory/{instance_name}/location")), + ) # add VLAN_ID information - if self._get_data_entry(f'inventory/{instance_name}/vlan_ids'): + if self._get_data_entry(f"inventory/{instance_name}/vlan_ids"): self.inventory.set_variable( - instance_name, 'ansible_lxd_vlan_ids', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/vlan_ids'))) + instance_name, + "ansible_lxd_vlan_ids", + make_unsafe(self._get_data_entry(f"inventory/{instance_name}/vlan_ids")), + ) # add project self.inventory.set_variable( - instance_name, 'ansible_lxd_project', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/project'))) + instance_name, + "ansible_lxd_project", + make_unsafe(self._get_data_entry(f"inventory/{instance_name}/project")), + ) def build_inventory_groups_location(self, group_name): """create group by attribute: location @@ -750,7 +808,7 @@ def build_inventory_groups_location(self, group_name): self.inventory.add_group(group_name) for instance_name in self.inventory.hosts: - if 'ansible_lxd_location' in self.inventory.get_host(instance_name).get_vars(): + 
if "ansible_lxd_location" in self.inventory.get_host(instance_name).get_vars(): self.inventory.add_child(group_name, instance_name) def build_inventory_groups_pattern(self, group_name): @@ -768,7 +826,7 @@ def build_inventory_groups_pattern(self, group_name): if group_name not in self.inventory.groups: self.inventory.add_group(group_name) - regex_pattern = self.groupby[group_name].get('attribute') + regex_pattern = self.groupby[group_name].get("attribute") for instance_name in self.inventory.hosts: result = re.search(regex_pattern, instance_name) @@ -791,17 +849,18 @@ def build_inventory_groups_network_range(self, group_name): self.inventory.add_group(group_name) try: - network = ipaddress.ip_network(to_text(self.groupby[group_name].get('attribute'))) + network = ipaddress.ip_network(to_text(self.groupby[group_name].get("attribute"))) except ValueError as err: raise AnsibleParserError( - f"Error while parsing network range {self.groupby[group_name].get('attribute')}: {err}") + f"Error while parsing network range {self.groupby[group_name].get('attribute')}: {err}" + ) for instance_name in self.inventory.hosts: - if self.data['inventory'][instance_name].get('network_interfaces') is not None: - for interface in self.data['inventory'][instance_name].get('network_interfaces'): - for interface_family in self.data['inventory'][instance_name].get('network_interfaces')[interface]: + if self.data["inventory"][instance_name].get("network_interfaces") is not None: + for interface in self.data["inventory"][instance_name].get("network_interfaces"): + for interface_family in self.data["inventory"][instance_name].get("network_interfaces")[interface]: try: - address = ipaddress.ip_address(to_text(interface_family['address'])) + address = ipaddress.ip_address(to_text(interface_family["address"])) if address.version == network.version and address in network: self.inventory.add_child(group_name, instance_name) except ValueError: @@ -824,10 +883,14 @@ def build_inventory_groups_project(self, group_name): self.inventory.add_group(group_name) gen_instances = [ - instance_name for instance_name in self.inventory.hosts - if 'ansible_lxd_project' in self.inventory.get_host(instance_name).get_vars()] + instance_name + for instance_name in self.inventory.hosts + if "ansible_lxd_project" in self.inventory.get_host(instance_name).get_vars() + ] for instance_name in gen_instances: - if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_project'): + if self.groupby[group_name].get("attribute").lower() == self.inventory.get_host( + instance_name + ).get_vars().get("ansible_lxd_project"): self.inventory.add_child(group_name, instance_name) def build_inventory_groups_os(self, group_name): @@ -846,10 +909,14 @@ def build_inventory_groups_os(self, group_name): self.inventory.add_group(group_name) gen_instances = [ - instance_name for instance_name in self.inventory.hosts - if 'ansible_lxd_os' in self.inventory.get_host(instance_name).get_vars()] + instance_name + for instance_name in self.inventory.hosts + if "ansible_lxd_os" in self.inventory.get_host(instance_name).get_vars() + ] for instance_name in gen_instances: - if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_os'): + if self.groupby[group_name].get("attribute").lower() == self.inventory.get_host( + instance_name + ).get_vars().get("ansible_lxd_os"): self.inventory.add_child(group_name, instance_name) def 
build_inventory_groups_release(self, group_name): @@ -868,10 +935,14 @@ def build_inventory_groups_release(self, group_name): self.inventory.add_group(group_name) gen_instances = [ - instance_name for instance_name in self.inventory.hosts - if 'ansible_lxd_release' in self.inventory.get_host(instance_name).get_vars()] + instance_name + for instance_name in self.inventory.hosts + if "ansible_lxd_release" in self.inventory.get_host(instance_name).get_vars() + ] for instance_name in gen_instances: - if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_release'): + if self.groupby[group_name].get("attribute").lower() == self.inventory.get_host( + instance_name + ).get_vars().get("ansible_lxd_release"): self.inventory.add_child(group_name, instance_name) def build_inventory_groups_profile(self, group_name): @@ -890,10 +961,14 @@ def build_inventory_groups_profile(self, group_name): self.inventory.add_group(group_name) gen_instances = [ - instance_name for instance_name in self.inventory.hosts.keys() - if 'ansible_lxd_profile' in self.inventory.get_host(instance_name).get_vars().keys()] + instance_name + for instance_name in self.inventory.hosts.keys() + if "ansible_lxd_profile" in self.inventory.get_host(instance_name).get_vars().keys() + ] for instance_name in gen_instances: - if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_profile'): + if self.groupby[group_name].get("attribute").lower() in self.inventory.get_host( + instance_name + ).get_vars().get("ansible_lxd_profile"): self.inventory.add_child(group_name, instance_name) def build_inventory_groups_vlanid(self, group_name): @@ -912,10 +987,15 @@ def build_inventory_groups_vlanid(self, group_name): self.inventory.add_group(group_name) gen_instances = [ - instance_name for instance_name in self.inventory.hosts.keys() - if 'ansible_lxd_vlan_ids' in self.inventory.get_host(instance_name).get_vars().keys()] + instance_name + for instance_name in self.inventory.hosts.keys() + if "ansible_lxd_vlan_ids" in self.inventory.get_host(instance_name).get_vars().keys() + ] for instance_name in gen_instances: - if self.groupby[group_name].get('attribute') in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_vlan_ids').values(): + if ( + self.groupby[group_name].get("attribute") + in self.inventory.get_host(instance_name).get_vars().get("ansible_lxd_vlan_ids").values() + ): self.inventory.add_child(group_name, instance_name) def build_inventory_groups_type(self, group_name): @@ -934,10 +1014,14 @@ def build_inventory_groups_type(self, group_name): self.inventory.add_group(group_name) gen_instances = [ - instance_name for instance_name in self.inventory.hosts - if 'ansible_lxd_type' in self.inventory.get_host(instance_name).get_vars()] + instance_name + for instance_name in self.inventory.hosts + if "ansible_lxd_type" in self.inventory.get_host(instance_name).get_vars() + ] for instance_name in gen_instances: - if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_type'): + if self.groupby[group_name].get("attribute").lower() == self.inventory.get_host( + instance_name + ).get_vars().get("ansible_lxd_type"): self.inventory.add_child(group_name, instance_name) def build_inventory_groups(self): @@ -980,31 +1064,31 @@ def group_type(group_name): None""" # Due to the compatibility with python 2 no use of map - if 
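# --- Editor's aside (not part of the upstream diff): the network_range
# grouping above reduces to a stdlib ipaddress membership test. The version
# check mirrors the plugin and skips v4/v6 mismatches up front. The sample
# network and addresses are made up.
import ipaddress

network = ipaddress.ip_network("10.0.0.0/24")
for candidate in ("10.0.0.17", "192.168.1.4", "fd42::1"):
    address = ipaddress.ip_address(candidate)
    if address.version == network.version and address in network:
        print(f"{candidate} is in {network}")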
self.groupby[group_name].get('type') == 'location': + if self.groupby[group_name].get("type") == "location": self.build_inventory_groups_location(group_name) - elif self.groupby[group_name].get('type') == 'pattern': + elif self.groupby[group_name].get("type") == "pattern": self.build_inventory_groups_pattern(group_name) - elif self.groupby[group_name].get('type') == 'network_range': + elif self.groupby[group_name].get("type") == "network_range": self.build_inventory_groups_network_range(group_name) - elif self.groupby[group_name].get('type') == 'os': + elif self.groupby[group_name].get("type") == "os": self.build_inventory_groups_os(group_name) - elif self.groupby[group_name].get('type') == 'release': + elif self.groupby[group_name].get("type") == "release": self.build_inventory_groups_release(group_name) - elif self.groupby[group_name].get('type') == 'profile': + elif self.groupby[group_name].get("type") == "profile": self.build_inventory_groups_profile(group_name) - elif self.groupby[group_name].get('type') == 'vlanid': + elif self.groupby[group_name].get("type") == "vlanid": self.build_inventory_groups_vlanid(group_name) - elif self.groupby[group_name].get('type') == 'type': + elif self.groupby[group_name].get("type") == "type": self.build_inventory_groups_type(group_name) - elif self.groupby[group_name].get('type') == 'project': + elif self.groupby[group_name].get("type") == "project": self.build_inventory_groups_project(group_name) else: - raise AnsibleParserError(f'Unknown group type: {to_native(group_name)}') + raise AnsibleParserError(f"Unknown group type: {to_native(group_name)}") if self.groupby: for group_name in self.groupby: if not group_name.isalnum(): - raise AnsibleParserError(f'Invalid character(s) in groupname: {to_native(group_name)}') + raise AnsibleParserError(f"Invalid character(s) in groupname: {to_native(group_name)}") group_type(make_unsafe(group_name)) def build_inventory(self): @@ -1039,10 +1123,10 @@ def cleandata(self): None Returns: None""" - iter_keys = list(self.data['instances'].keys()) + iter_keys = list(self.data["instances"].keys()) for instance_name in iter_keys: - if self._get_data_entry(f'instances/{instance_name}/instances/metadata/type') != self.type_filter: - del self.data['instances'][instance_name] + if self._get_data_entry(f"instances/{instance_name}/instances/metadata/type") != self.type_filter: + del self.data["instances"][instance_name] def _populate(self): """Return the hosts and groups @@ -1066,7 +1150,7 @@ def _populate(self): # The first version of the inventory only supported containers. # This will change in the future. # The following function cleans up the data. 
- if self.type_filter != 'both': + if self.type_filter != "both": self.cleandata() self.extract_information_from_instance_configs() @@ -1094,32 +1178,31 @@ def parse(self, inventory, loader, path, cache): Returns: None""" if IPADDRESS_IMPORT_ERROR: - raise AnsibleError('another_library must be installed to use this plugin') from IPADDRESS_IMPORT_ERROR + raise AnsibleError("ipaddress must be installed to use this plugin") from IPADDRESS_IMPORT_ERROR super().parse(inventory, loader, path, cache=False) # Read the inventory YAML file self._read_config_data(path) try: - self.client_key = self.get_option('client_key') - self.client_cert = self.get_option('client_cert') - self.server_cert = self.get_option('server_cert') - self.server_check_hostname = self.get_option('server_check_hostname') - self.project = self.get_option('project') + self.client_key = self.get_option("client_key") + self.client_cert = self.get_option("client_cert") + self.server_cert = self.get_option("server_cert") + self.server_check_hostname = self.get_option("server_check_hostname") + self.project = self.get_option("project") self.debug = self.DEBUG self.data = {} # store for inventory-data - self.groupby = self.get_option('groupby') - self.plugin = self.get_option('plugin') - self.prefered_instance_network_family = self.get_option('prefered_instance_network_family') - self.prefered_instance_network_interface = self.get_option('prefered_instance_network_interface') - self.type_filter = self.get_option('type_filter') - if self.get_option('state').lower() == 'none': # none in config is str() + self.groupby = self.get_option("groupby") + self.plugin = self.get_option("plugin") + self.prefered_instance_network_family = self.get_option("prefered_instance_network_family") + self.prefered_instance_network_interface = self.get_option("prefered_instance_network_interface") + self.type_filter = self.get_option("type_filter") + if self.get_option("state").lower() == "none": # 'none' in the config is a string, not None + self.filter = None else: - self.filter = self.get_option('state').lower() - self.trust_password = self.get_option('trust_password') - self.url = self.get_option('url') + self.filter = self.get_option("state").lower() + self.trust_password = self.get_option("trust_password") + self.url = self.get_option("url") except Exception as err: - raise AnsibleParserError( - f'All correct options required: {err}') + raise AnsibleParserError(f"All correct options required: {err}") # Call our internal helper to populate the dynamic inventory self._populate() diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index d68d0bb3ecb..2b8001828cc 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -138,10 +138,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - - NAME = 'community.general.nmap' - find_host = re.compile(r'^Nmap scan report for ([\w,.,-]+)(?: \(([\w,.,:,\[,\]]+)\))?') - find_port = re.compile(r'^(\d+)/(\w+)\s+(\w+)\s+(\w+)') + NAME = "community.general.nmap" + find_host = re.compile(r"^Nmap scan report for ([\w,.,-]+)(?: \(([\w,.,:,\[,\]]+)\))?") + find_port = re.compile(r"^(\d+)/(\w+)\s+(\w+)\s+(\w+)") def __init__(self): self._nmap = None @@ -149,26 +148,25 @@ def __init__(self): def _populate(self, hosts): # Use constructed if applicable - strict = self.get_option('strict') + strict = self.get_option("strict") for host in hosts: host = make_unsafe(host) - hostname = host['name'] + hostname = host["name"] self.inventory.add_host(hostname) for var, value in host.items():
self.inventory.set_variable(hostname, var, value) # Composed variables - self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict) + self._set_composite_vars(self.get_option("compose"), host, hostname, strict=strict) # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group - self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict) + self._add_host_to_composed_groups(self.get_option("groups"), host, hostname, strict=strict) # Create groups based on variable values and add the corresponding hosts to it - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict) + self._add_host_to_keyed_groups(self.get_option("keyed_groups"), host, hostname, strict=strict) def verify_file(self, path): - valid = False if super().verify_file(path): file_name, ext = os.path.splitext(path) @@ -179,11 +177,10 @@ def verify_file(self, path): return valid def parse(self, inventory, loader, path, cache=True): - try: - self._nmap = get_bin_path('nmap') + self._nmap = get_bin_path("nmap") except ValueError as e: - raise AnsibleParserError(f'nmap inventory plugin requires the nmap cli tool to work: {e}') + raise AnsibleParserError(f"nmap inventory plugin requires the nmap cli tool to work: {e}") super().parse(inventory, loader, path, cache=cache) @@ -193,7 +190,7 @@ def parse(self, inventory, loader, path, cache=True): # cache may be True or False at this point to indicate if the inventory is being refreshed # get the user's cache option too to see if we should save the cache if it is changing - user_cache_setting = self.get_option('cache') + user_cache_setting = self.get_option("cache") # read if the user has caching enabled and the cache isn't being refreshed attempt_to_read_cache = user_cache_setting and cache @@ -211,53 +208,53 @@ def parse(self, inventory, loader, path, cache=True): # setup command cmd = [self._nmap] - if self.get_option('sudo'): - cmd.insert(0, 'sudo') + if self.get_option("sudo"): + cmd.insert(0, "sudo") - if self.get_option('port'): - cmd.append('-p') - cmd.append(self.get_option('port')) + if self.get_option("port"): + cmd.append("-p") + cmd.append(self.get_option("port")) - if not self.get_option('ports'): - cmd.append('-sP') + if not self.get_option("ports"): + cmd.append("-sP") - if self.get_option('ipv4') and not self.get_option('ipv6'): - cmd.append('-4') - elif self.get_option('ipv6') and not self.get_option('ipv4'): - cmd.append('-6') - elif not self.get_option('ipv6') and not self.get_option('ipv4'): - raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin') + if self.get_option("ipv4") and not self.get_option("ipv6"): + cmd.append("-4") + elif self.get_option("ipv6") and not self.get_option("ipv4"): + cmd.append("-6") + elif not self.get_option("ipv6") and not self.get_option("ipv4"): + raise AnsibleParserError("One of ipv4 or ipv6 must be enabled for this plugin") - if self.get_option('exclude'): - cmd.append('--exclude') - cmd.append(','.join(self.get_option('exclude'))) + if self.get_option("exclude"): + cmd.append("--exclude") + cmd.append(",".join(self.get_option("exclude"))) - if self.get_option('dns_resolve'): - cmd.append('-n') + if self.get_option("dns_resolve"): + cmd.append("-n") - if self.get_option('dns_servers'): - cmd.append('--dns-servers') - cmd.append(','.join(self.get_option('dns_servers'))) + if self.get_option("dns_servers"): + cmd.append("--dns-servers") + 
cmd.append(",".join(self.get_option("dns_servers"))) - if self.get_option('udp_scan'): - cmd.append('-sU') + if self.get_option("udp_scan"): + cmd.append("-sU") - if self.get_option('icmp_timestamp'): - cmd.append('-PP') + if self.get_option("icmp_timestamp"): + cmd.append("-PP") - if self.get_option('open'): - cmd.append('--open') + if self.get_option("open"): + cmd.append("--open") - if not self.get_option('use_arp_ping'): - cmd.append('--disable-arp-ping') + if not self.get_option("use_arp_ping"): + cmd.append("--disable-arp-ping") - cmd.append(self.get_option('address')) + cmd.append(self.get_option("address")) try: # execute p = Popen(cmd, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleParserError(f'Failed to run nmap, rc={p.returncode}: {to_native(stderr)}') + raise AnsibleParserError(f"Failed to run nmap, rc={p.returncode}: {to_native(stderr)}") # parse results host = None @@ -266,18 +263,18 @@ def parse(self, inventory, loader, path, cache=True): results = [] try: - t_stdout = to_text(stdout, errors='surrogate_or_strict') + t_stdout = to_text(stdout, errors="surrogate_or_strict") except UnicodeError as e: - raise AnsibleParserError(f'Invalid (non unicode) input returned: {e}') + raise AnsibleParserError(f"Invalid (non unicode) input returned: {e}") for line in t_stdout.splitlines(): hits = self.find_host.match(line) if hits: if host is not None and ports: - results[-1]['ports'] = ports + results[-1]["ports"] = ports # if dns only shows arpa, just use ip instead as hostname - if hits.group(1).endswith('.in-addr.arpa'): + if hits.group(1).endswith(".in-addr.arpa"): host = hits.group(2) else: host = hits.group(1) @@ -291,22 +288,26 @@ def parse(self, inventory, loader, path, cache=True): if host is not None: # update inventory results.append(dict()) - results[-1]['name'] = host - results[-1]['ip'] = ip + results[-1]["name"] = host + results[-1]["ip"] = ip ports = [] continue host_ports = self.find_port.match(line) if host is not None and host_ports: - ports.append({'port': host_ports.group(1), - 'protocol': host_ports.group(2), - 'state': host_ports.group(3), - 'service': host_ports.group(4)}) + ports.append( + { + "port": host_ports.group(1), + "protocol": host_ports.group(2), + "state": host_ports.group(3), + "service": host_ports.group(4), + } + ) continue # if any leftovers if host and ports: - results[-1]['ports'] = ports + results[-1]["ports"] = ports except Exception as e: raise AnsibleParserError(f"failed to parse {to_native(path)}: {e} ") diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index b88dd31494a..21b344dcc68 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -73,7 +73,7 @@ class InventoryModule(BaseInventoryPlugin): - NAME = 'community.general.online' + NAME = "community.general.online" API_ENDPOINT = "https://api.online.net" def extract_public_ipv4(self, host_infos): @@ -140,7 +140,7 @@ def _fetch_information(self, url): return None try: - raw_data = to_text(response.read(), errors='surrogate_or_strict') + raw_data = to_text(response.read(), errors="surrogate_or_strict") except UnicodeError: raise AnsibleError("Incorrect encoding of fetched payload from Online servers") @@ -168,26 +168,33 @@ def _fill_host_variables(self, hostname, host_infos): "last_reboot", "anti_ddos", "hardware_watch", - "support" + "support", ) for attribute in targeted_attributes: self.inventory.set_variable(hostname, attribute, make_unsafe(host_infos[attribute])) if 
self.extract_public_ipv4(host_infos=host_infos): - self.inventory.set_variable(hostname, "public_ipv4", make_unsafe(self.extract_public_ipv4(host_infos=host_infos))) - self.inventory.set_variable(hostname, "ansible_host", make_unsafe(self.extract_public_ipv4(host_infos=host_infos))) + self.inventory.set_variable( + hostname, "public_ipv4", make_unsafe(self.extract_public_ipv4(host_infos=host_infos)) + ) + self.inventory.set_variable( + hostname, "ansible_host", make_unsafe(self.extract_public_ipv4(host_infos=host_infos)) + ) if self.extract_private_ipv4(host_infos=host_infos): - self.inventory.set_variable(hostname, "public_ipv4", make_unsafe(self.extract_private_ipv4(host_infos=host_infos))) + self.inventory.set_variable( + hostname, "private_ipv4", make_unsafe(self.extract_private_ipv4(host_infos=host_infos)) + ) if self.extract_os_name(host_infos=host_infos): self.inventory.set_variable(hostname, "os_name", make_unsafe(self.extract_os_name(host_infos=host_infos))) if self.extract_os_version(host_infos=host_infos): - self.inventory.set_variable(hostname, "os_version", make_unsafe(self.extract_os_name(host_infos=host_infos))) + self.inventory.set_variable( + hostname, "os_version", make_unsafe(self.extract_os_version(host_infos=host_infos)) + ) def _filter_host(self, host_infos, hostname_preferences): - for pref in hostname_preferences: if self.extractors[pref](host_infos): return self.extractors[pref](host_infos) @@ -195,9 +202,7 @@ def _filter_host(self, host_infos, hostname_preferences): return None def do_server_inventory(self, host_infos, hostname_preferences, group_preferences): - - hostname = self._filter_host(host_infos=host_infos, - hostname_preferences=hostname_preferences) + hostname = self._filter_host(host_infos=host_infos, hostname_preferences=hostname_preferences) # No suitable hostname was found in the attributes and the host won't be in the inventory if not hostname: @@ -239,13 +244,13 @@ def parse(self, inventory, loader, path, cache=True): self.group_extractors = { "location": self.extract_location, "offer": self.extract_offer, - "rpn": self.extract_rpn + "rpn": self.extract_rpn, } self.headers = { - 'Authorization': f"Bearer {token}", - 'User-Agent': f"ansible {ansible_version} Python {python_version.split(' ', 1)[0]}", - 'Content-type': 'application/json' + "Authorization": f"Bearer {token}", + "User-Agent": f"ansible {ansible_version} Python {python_version.split(' ', 1)[0]}", + "Content-type": "application/json", } servers_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/server") @@ -257,13 +262,14 @@ def parse(self, inventory, loader, path, cache=True): self.rpn_lookup_cache = self.extract_rpn_lookup_cache(rpn_list) for server_api_path in servers_api_path: - server_url = urljoin(InventoryModule.API_ENDPOINT, server_api_path) raw_server_info = self._fetch_information(url=server_url) if raw_server_info is None: continue - self.do_server_inventory(host_infos=raw_server_info, - hostname_preferences=hostname_preferences, - group_preferences=group_preferences) + self.do_server_inventory( + host_infos=raw_server_info, + hostname_preferences=hostname_preferences, + group_preferences=group_preferences, + ) diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py index 18007d9775b..cc4e802c3df 100644 --- a/plugins/inventory/opennebula.py +++ b/plugins/inventory/opennebula.py @@ -100,20 +100,20 @@ class InventoryModule(BaseInventoryPlugin, Constructable): - NAME = 'community.general.opennebula' + NAME = "community.general.opennebula" def
verify_file(self, path): valid = False if super().verify_file(path): - if path.endswith(('opennebula.yaml', 'opennebula.yml')): + if path.endswith(("opennebula.yaml", "opennebula.yml")): valid = True return valid def _get_connection_info(self): - url = self.get_option('api_url') - username = self.get_option('api_username') - password = self.get_option('api_password') - authfile = self.get_option('api_authfile') + url = self.get_option("api_url") + username = self.get_option("api_username") + password = self.get_option("api_password") + authfile = self.get_option("api_authfile") if not username and not password: if authfile is None: @@ -127,31 +127,31 @@ def _get_connection_info(self): except Exception: raise AnsibleError(f"Error occurs when reading ONE_AUTH file at '{authfile}'") - auth_params = namedtuple('auth', ('url', 'username', 'password')) + auth_params = namedtuple("auth", ("url", "username", "password")) return auth_params(url=url, username=username, password=password) def _get_vm_ipv4(self, vm): - nic = vm.TEMPLATE.get('NIC') + nic = vm.TEMPLATE.get("NIC") if isinstance(nic, dict): nic = [nic] for net in nic: - if net.get('IP'): - return net['IP'] + if net.get("IP"): + return net["IP"] return False def _get_vm_ipv6(self, vm): - nic = vm.TEMPLATE.get('NIC') + nic = vm.TEMPLATE.get("NIC") if isinstance(nic, dict): nic = [nic] for net in nic: - if net.get('IP6_GLOBAL'): - return net['IP6_GLOBAL'] + if net.get("IP6_GLOBAL"): + return net["IP6_GLOBAL"] return False @@ -159,7 +159,7 @@ def _get_vm_pool(self): auth = self._get_connection_info() if not (auth.username and auth.password): - raise AnsibleError('API Credentials missing. Check OpenNebula inventory file.') + raise AnsibleError("API Credentials missing. Check OpenNebula inventory file.") else: one_client = pyone.OneServer(auth.url, session=f"{auth.username}:{auth.password}") @@ -181,72 +181,74 @@ def _retrieve_servers(self, label_filter=None): server = vm.USER_TEMPLATE labels = [] - if vm.USER_TEMPLATE.get('LABELS'): - labels = [s for s in vm.USER_TEMPLATE.get('LABELS') if s == ',' or s == '-' or s.isalnum() or s.isspace()] - labels = ''.join(labels) - labels = labels.replace(' ', '_') - labels = labels.replace('-', '_') - labels = labels.split(',') + if vm.USER_TEMPLATE.get("LABELS"): + labels = [ + s for s in vm.USER_TEMPLATE.get("LABELS") if s == "," or s == "-" or s.isalnum() or s.isspace() + ] + labels = "".join(labels) + labels = labels.replace(" ", "_") + labels = labels.replace("-", "_") + labels = labels.split(",") # filter by label if label_filter is not None: if label_filter not in labels: continue - server['name'] = vm.NAME - server['id'] = vm.ID - if hasattr(vm.HISTORY_RECORDS, 'HISTORY') and vm.HISTORY_RECORDS.HISTORY: - server['host'] = vm.HISTORY_RECORDS.HISTORY[-1].HOSTNAME - server['LABELS'] = labels - server['v4_first_ip'] = self._get_vm_ipv4(vm) - server['v6_first_ip'] = self._get_vm_ipv6(vm) + server["name"] = vm.NAME + server["id"] = vm.ID + if hasattr(vm.HISTORY_RECORDS, "HISTORY") and vm.HISTORY_RECORDS.HISTORY: + server["host"] = vm.HISTORY_RECORDS.HISTORY[-1].HOSTNAME + server["LABELS"] = labels + server["v4_first_ip"] = self._get_vm_ipv4(vm) + server["v6_first_ip"] = self._get_vm_ipv6(vm) result.append(server) return result def _populate(self): - hostname_preference = self.get_option('hostname') - group_by_labels = self.get_option('group_by_labels') - strict = self.get_option('strict') + hostname_preference = self.get_option("hostname") + group_by_labels = self.get_option("group_by_labels") + strict = 
self.get_option("strict") # Add a top group 'one' - self.inventory.add_group(group='all') + self.inventory.add_group(group="all") - filter_by_label = self.get_option('filter_by_label') + filter_by_label = self.get_option("filter_by_label") servers = self._retrieve_servers(filter_by_label) for server in servers: server = make_unsafe(server) - hostname = server['name'] + hostname = server["name"] # check for labels - if group_by_labels and server['LABELS']: - for label in server['LABELS']: + if group_by_labels and server["LABELS"]: + for label in server["LABELS"]: self.inventory.add_group(group=label) self.inventory.add_host(host=hostname, group=label) - self.inventory.add_host(host=hostname, group='all') + self.inventory.add_host(host=hostname, group="all") for attribute, value in server.items(): self.inventory.set_variable(hostname, attribute, value) - if hostname_preference != 'name': - self.inventory.set_variable(hostname, 'ansible_host', server[hostname_preference]) + if hostname_preference != "name": + self.inventory.set_variable(hostname, "ansible_host", server[hostname_preference]) - if server.get('SSH_PORT'): - self.inventory.set_variable(hostname, 'ansible_port', server['SSH_PORT']) + if server.get("SSH_PORT"): + self.inventory.set_variable(hostname, "ansible_port", server["SSH_PORT"]) # handle construcable implementation: get composed variables if any - self._set_composite_vars(self.get_option('compose'), server, hostname, strict=strict) + self._set_composite_vars(self.get_option("compose"), server, hostname, strict=strict) # groups based on jinja conditionals get added to specific groups - self._add_host_to_composed_groups(self.get_option('groups'), server, hostname, strict=strict) + self._add_host_to_composed_groups(self.get_option("groups"), server, hostname, strict=strict) # groups based on variables associated with them in the inventory - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), server, hostname, strict=strict) + self._add_host_to_keyed_groups(self.get_option("keyed_groups"), server, hostname, strict=strict) def parse(self, inventory, loader, path, cache=True): if not HAS_PYONE: - raise AnsibleError('OpenNebula Inventory plugin requires pyone to work!') + raise AnsibleError("OpenNebula Inventory plugin requires pyone to work!") super().parse(inventory, loader, path) self._read_config_data(path=path) diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index 6653a42681d..3410828f763 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -141,9 +141,7 @@ def _fetch_information(token, url): paginated_url = url while True: try: - response = open_url(paginated_url, - headers={'X-Auth-Token': token, - 'Content-type': 'application/json'}) + response = open_url(paginated_url, headers={"X-Auth-Token": token, "Content-type": "application/json"}) except Exception as e: raise AnsibleError(f"Error while fetching {url}: {e}") try: @@ -156,13 +154,13 @@ def _fetch_information(token, url): except KeyError: raise AnsibleError("Incorrect format from the Scaleway API response") - link = response.headers['Link'] + link = response.headers["Link"] if not link: return results relations = parse_pagination_link(link) - if 'next' not in relations: + if "next" not in relations: return results - paginated_url = urllib_parse.urljoin(paginated_url, relations['next']) + paginated_url = urllib_parse.urljoin(paginated_url, relations["next"]) def _build_server_url(api_endpoint): @@ -223,12 +221,12 @@ def extract_zone(server_info): 
"private_ipv4": extract_private_ipv4, "public_ipv6": extract_public_ipv6, "hostname": extract_hostname, - "id": extract_server_id + "id": extract_server_id, } class InventoryModule(BaseInventoryPlugin, Constructable): - NAME = 'community.general.scaleway' + NAME = "community.general.scaleway" def _fill_host_variables(self, host, server_info): targeted_attributes = ( @@ -275,7 +273,6 @@ def match_groups(self, server_info, tags): return matching_tags.union((server_zone,)) def _filter_host(self, host_infos, hostname_preferences): - for pref in hostname_preferences: if extractors[pref](host_infos): return extractors[pref](host_infos) @@ -290,9 +287,7 @@ def do_zone_inventory(self, zone, token, tags, hostname_preferences): raw_zone_hosts_infos = make_unsafe(_fetch_information(url=url, token=token)) for host_infos in raw_zone_hosts_infos: - - hostname = self._filter_host(host_infos=host_infos, - hostname_preferences=hostname_preferences) + hostname = self._filter_host(host_infos=host_infos, hostname_preferences=hostname_preferences) # No suitable hostname were found in the attributes and the host won't be in the inventory if not hostname: @@ -306,38 +301,38 @@ def do_zone_inventory(self, zone, token, tags, hostname_preferences): self._fill_host_variables(host=hostname, server_info=host_infos) # Composed variables - self._set_composite_vars(self.get_option('variables'), host_infos, hostname, strict=False) + self._set_composite_vars(self.get_option("variables"), host_infos, hostname, strict=False) def get_oauth_token(self): - oauth_token = self.get_option('oauth_token') + oauth_token = self.get_option("oauth_token") - if 'SCW_CONFIG_PATH' in os.environ: - scw_config_path = os.getenv('SCW_CONFIG_PATH') - elif 'XDG_CONFIG_HOME' in os.environ: - scw_config_path = os.path.join(os.getenv('XDG_CONFIG_HOME'), 'scw', 'config.yaml') + if "SCW_CONFIG_PATH" in os.environ: + scw_config_path = os.getenv("SCW_CONFIG_PATH") + elif "XDG_CONFIG_HOME" in os.environ: + scw_config_path = os.path.join(os.getenv("XDG_CONFIG_HOME"), "scw", "config.yaml") else: - scw_config_path = os.path.join(os.path.expanduser('~'), '.config', 'scw', 'config.yaml') + scw_config_path = os.path.join(os.path.expanduser("~"), ".config", "scw", "config.yaml") if not oauth_token and os.path.exists(scw_config_path): with open(scw_config_path) as fh: scw_config = yaml.safe_load(fh) - ansible_profile = self.get_option('scw_profile') + ansible_profile = self.get_option("scw_profile") if ansible_profile: active_profile = ansible_profile else: - active_profile = scw_config.get('active_profile', 'default') + active_profile = scw_config.get("active_profile", "default") - if active_profile == 'default': - oauth_token = scw_config.get('secret_key') + if active_profile == "default": + oauth_token = scw_config.get("secret_key") else: - oauth_token = scw_config['profiles'][active_profile].get('secret_key') + oauth_token = scw_config["profiles"][active_profile].get("secret_key") return oauth_token def parse(self, inventory, loader, path, cache=True): if YAML_IMPORT_ERROR: - raise AnsibleError('PyYAML is probably missing') from YAML_IMPORT_ERROR + raise AnsibleError("PyYAML is probably missing") from YAML_IMPORT_ERROR super().parse(inventory, loader, path) self._read_config_data(path=path) @@ -345,8 +340,12 @@ def parse(self, inventory, loader, path, cache=True): tags = self.get_option("tags") token = self.get_oauth_token() if not token: - raise AnsibleError("'oauth_token' value is null, you must configure it either in inventory, envvars or scaleway-cli 
config.") + raise AnsibleError( + "'oauth_token' value is null, you must configure it either in inventory, envvars or scaleway-cli config." + ) hostname_preference = self.get_option("hostnames") for zone in self._get_zones(config_zones): - self.do_zone_inventory(zone=make_unsafe(zone), token=token, tags=tags, hostname_preferences=hostname_preference) + self.do_zone_inventory( + zone=make_unsafe(zone), token=token, tags=tags, hostname_preferences=hostname_preference + ) diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index 041ba290771..065188bf389 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -85,9 +85,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - ''' Host inventory parser for ansible using local virtualbox. ''' + """Host inventory parser for ansible using local virtualbox.""" - NAME = 'community.general.virtualbox' + NAME = "community.general.virtualbox" VBOX = "VBoxManage" def __init__(self): @@ -97,56 +97,58 @@ def __init__(self): def _query_vbox_data(self, host, property_path): ret = None try: - cmd = [self._vbox_path, b'guestproperty', b'get', - to_bytes(host, errors='surrogate_or_strict'), - to_bytes(property_path, errors='surrogate_or_strict')] + cmd = [ + self._vbox_path, + b"guestproperty", + b"get", + to_bytes(host, errors="surrogate_or_strict"), + to_bytes(property_path, errors="surrogate_or_strict"), + ] x = Popen(cmd, stdout=PIPE) - ipinfo = to_text(x.stdout.read(), errors='surrogate_or_strict') - if 'Value' in ipinfo: - a, ip = ipinfo.split(':', 1) + ipinfo = to_text(x.stdout.read(), errors="surrogate_or_strict") + if "Value" in ipinfo: + a, ip = ipinfo.split(":", 1) ret = ip.strip() except Exception: pass return ret def _set_variables(self, hostvars): - # set vars in inventory from hostvars for host in hostvars: - - query = self.get_option('query') + query = self.get_option("query") # create vars from vbox properties if query and isinstance(query, MutableMapping): for varname in query: hostvars[host][varname] = self._query_vbox_data(host, query[varname]) - strict = self.get_option('strict') + strict = self.get_option("strict") # create composite vars - self._set_composite_vars(self.get_option('compose'), hostvars[host], host, strict=strict) + self._set_composite_vars(self.get_option("compose"), hostvars[host], host, strict=strict) # actually update inventory for key in hostvars[host]: self.inventory.set_variable(host, key, hostvars[host][key]) # constructed groups based on conditionals - self._add_host_to_composed_groups(self.get_option('groups'), hostvars[host], host, strict=strict) + self._add_host_to_composed_groups(self.get_option("groups"), hostvars[host], host, strict=strict) # constructed keyed_groups - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars[host], host, strict=strict) + self._add_host_to_keyed_groups(self.get_option("keyed_groups"), hostvars[host], host, strict=strict) def _populate_from_cache(self, source_data): source_data = make_unsafe(source_data) - hostvars = source_data.pop('_meta', {}).get('hostvars', {}) + hostvars = source_data.pop("_meta", {}).get("hostvars", {}) for group in source_data: - if group == 'all': + if group == "all": continue else: group = self.inventory.add_group(group) - hosts = source_data[group].get('hosts', []) + hosts = source_data[group].get("hosts", []) for host in hosts: self._populate_host_vars([host], hostvars.get(host, {}), group) - self.inventory.add_child('all', group) + 
self.inventory.add_child("all", group) if not source_data: for host in hostvars: self.inventory.add_host(host) @@ -157,32 +159,32 @@ def _populate_from_source(self, source_data, using_current_cache=False): self._populate_from_cache(source_data) return source_data - cacheable_results = {'_meta': {'hostvars': {}}} + cacheable_results = {"_meta": {"hostvars": {}}} hostvars = {} - prevkey = pref_k = '' + prevkey = pref_k = "" current_host = None # needed to possibly set ansible_host - netinfo = self.get_option('network_info_path') + netinfo = self.get_option("network_info_path") for line in source_data: line = to_text(line) - if ':' not in line: + if ":" not in line: continue try: - k, v = line.split(':', 1) + k, v = line.split(":", 1) except Exception: # skip non splitable continue - if k.strip() == '': + if k.strip() == "": # skip empty continue v = v.strip() # found host - if k.startswith('Name') and ',' not in v: # some setting strings appear in Name + if k.startswith("Name") and "," not in v: # some setting strings appear in Name current_host = make_unsafe(v) if current_host not in hostvars: hostvars[current_host] = {} @@ -191,11 +193,11 @@ def _populate_from_source(self, source_data, using_current_cache=False): # try to get network info netdata = self._query_vbox_data(current_host, netinfo) if netdata: - self.inventory.set_variable(current_host, 'ansible_host', make_unsafe(netdata)) + self.inventory.set_variable(current_host, "ansible_host", make_unsafe(netdata)) # found groups - elif k == 'Groups': - if self.get_option('enable_advanced_group_parsing'): + elif k == "Groups": + if self.get_option("enable_advanced_group_parsing"): self._handle_vboxmanage_group_string(v, current_host, cacheable_results) else: self._handle_group_string(v, current_host, cacheable_results) @@ -204,7 +206,7 @@ def _populate_from_source(self, source_data, using_current_cache=False): else: # found vars, accumulate in hostvars for clean inventory set pref_k = make_unsafe(f"vbox_{k.strip().replace(' ', '_')}") - leading_spaces = len(k) - len(k.lstrip(' ')) + leading_spaces = len(k) - len(k.lstrip(" ")) if 0 < leading_spaces <= 2: if prevkey not in hostvars[current_host] or not isinstance(hostvars[current_host][prevkey], dict): hostvars[current_host][prevkey] = {} @@ -212,26 +214,26 @@ def _populate_from_source(self, source_data, using_current_cache=False): elif leading_spaces > 2: continue else: - if v != '': + if v != "": hostvars[current_host][pref_k] = make_unsafe(v) if self._ungrouped_host(current_host, cacheable_results): - if 'ungrouped' not in cacheable_results: - cacheable_results['ungrouped'] = {'hosts': []} - cacheable_results['ungrouped']['hosts'].append(current_host) + if "ungrouped" not in cacheable_results: + cacheable_results["ungrouped"] = {"hosts": []} + cacheable_results["ungrouped"]["hosts"].append(current_host) prevkey = pref_k self._set_variables(hostvars) for host in hostvars: h = self.inventory.get_host(host) - cacheable_results['_meta']['hostvars'][h.name] = h.vars + cacheable_results["_meta"]["hostvars"][h.name] = h.vars return cacheable_results def _ungrouped_host(self, host, inventory): def find_host(host, inventory): for k, v in inventory.items(): - if k == '_meta': + if k == "_meta": continue if isinstance(v, dict): yield self._ungrouped_host(host, v) @@ -242,20 +244,20 @@ def find_host(host, inventory): return all(find_host(host, inventory)) def _handle_group_string(self, vboxmanage_group, current_host, cacheable_results): - '''Handles parsing the VM's Group assignment from VBoxManage 
according to this inventory's initial implementation.''' + """Handles parsing the VM's Group assignment from VBoxManage according to this inventory's initial implementation.""" # The original implementation of this inventory plugin treated `/` as # a delimiter to split and use as Ansible Groups. - for group in vboxmanage_group.split('/'): + for group in vboxmanage_group.split("/"): if group: group = make_unsafe(group) group = self.inventory.add_group(group) self.inventory.add_child(group, current_host) if group not in cacheable_results: - cacheable_results[group] = {'hosts': []} - cacheable_results[group]['hosts'].append(current_host) + cacheable_results[group] = {"hosts": []} + cacheable_results[group]["hosts"].append(current_host) def _handle_vboxmanage_group_string(self, vboxmanage_group, current_host, cacheable_results): - '''Handles parsing the VM's Group assignment from VBoxManage according to VirtualBox documentation.''' + """Handles parsing the VM's Group assignment from VBoxManage according to VirtualBox documentation.""" # Per the VirtualBox documentation, a VM can be part of many groups, # and it is possible to have nested groups. # Many groups are separated by commas ",", and nested groups use @@ -264,7 +266,7 @@ def _handle_vboxmanage_group_string(self, vboxmanage_group, current_host, cachea # Multi groups: VBoxManage modifyvm "vm01" --groups "/TestGroup,/TestGroup2" # Nested groups: VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2" - for group in vboxmanage_group.split(','): + for group in vboxmanage_group.split(","): if not group: # We could get an empty element due to how split works, and # possible assignments from VirtualBox. e.g. ,/Group1 @@ -277,13 +279,13 @@ def _handle_vboxmanage_group_string(self, vboxmanage_group, current_host, cachea continue parent_group = None - for subgroup in group.split('/'): + for subgroup in group.split("/"): if not subgroup: # Similarly to above, we could get an empty element. # e.g. //Group1 continue - if subgroup == '/': + if subgroup == "/": # "root" group.
# Consider the host to be unassigned continue @@ -294,21 +296,19 @@ def _handle_vboxmanage_group_string(self, vboxmanage_group, current_host, cachea self.inventory.add_child(parent_group, subgroup) self.inventory.add_child(subgroup, current_host) if subgroup not in cacheable_results: - cacheable_results[subgroup] = {'hosts': []} - cacheable_results[subgroup]['hosts'].append(current_host) + cacheable_results[subgroup] = {"hosts": []} + cacheable_results[subgroup]["hosts"].append(current_host) parent_group = subgroup def verify_file(self, path): - valid = False if super().verify_file(path): - if path.endswith(('virtualbox.yaml', 'virtualbox.yml', 'vbox.yaml', 'vbox.yml')): + if path.endswith(("virtualbox.yaml", "virtualbox.yml", "vbox.yaml", "vbox.yml")): valid = True return valid def parse(self, inventory, loader, path, cache=True): - try: self._vbox_path = get_bin_path(self.VBOX) except ValueError as e: @@ -325,7 +325,7 @@ def parse(self, inventory, loader, path, cache=True): source_data = None if cache: - cache = self.get_option('cache') + cache = self.get_option("cache") update_cache = False if cache: @@ -335,18 +335,20 @@ def parse(self, inventory, loader, path, cache=True): update_cache = True if not source_data: - b_pwfile = to_bytes(self.get_option('settings_password_file'), errors='surrogate_or_strict', nonstring='passthru') - running = self.get_option('running_only') + b_pwfile = to_bytes( + self.get_option("settings_password_file"), errors="surrogate_or_strict", nonstring="passthru" + ) + running = self.get_option("running_only") # start getting data - cmd = [self._vbox_path, b'list', b'-l'] + cmd = [self._vbox_path, b"list", b"-l"] if running: - cmd.append(b'runningvms') + cmd.append(b"runningvms") else: - cmd.append(b'vms') + cmd.append(b"vms") if b_pwfile and os.path.exists(b_pwfile): - cmd.append(b'--settingspwfile') + cmd.append(b"--settingspwfile") cmd.append(b_pwfile) try: diff --git a/plugins/inventory/xen_orchestra.py b/plugins/inventory/xen_orchestra.py index 810c5c22349..c58c25fa095 100644 --- a/plugins/inventory/xen_orchestra.py +++ b/plugins/inventory/xen_orchestra.py @@ -110,32 +110,31 @@ import websocket from websocket import create_connection - if LooseVersion(websocket.__version__) <= LooseVersion('1.0.0'): + if LooseVersion(websocket.__version__) <= LooseVersion("1.0.0"): raise ImportError except ImportError as e: HAS_WEBSOCKET = False -HALTED = 'Halted' -PAUSED = 'Paused' -RUNNING = 'Running' -SUSPENDED = 'Suspended' +HALTED = "Halted" +PAUSED = "Paused" +RUNNING = "Running" +SUSPENDED = "Suspended" POWER_STATES = [RUNNING, HALTED, SUSPENDED, PAUSED] -HOST_GROUP = 'xo_hosts' -POOL_GROUP = 'xo_pools' +HOST_GROUP = "xo_hosts" +POOL_GROUP = "xo_pools" def clean_group_name(label): - return label.lower().replace(' ', '-').replace('-', '_') + return label.lower().replace(" ", "-").replace("-", "_") class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - ''' Host inventory parser for ansible using XenOrchestra as source. 
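# --- Editor's aside (not part of the upstream diff): the VBoxManage group
# handling above, reduced to the split logic alone. Comma separates multiple
# group memberships; "/" nests. expand_groups is an illustrative helper, not
# plugin code; it returns (parent, child) pairs instead of touching inventory.
def expand_groups(vboxmanage_group):
    chains = []
    for group in vboxmanage_group.split(","):
        parent_group = None
        for subgroup in group.split("/"):
            if not subgroup:
                continue  # empty element from a leading "/" or ",/Group1"
            chains.append((parent_group, subgroup))
            parent_group = subgroup
    return chains

# expand_groups("/TestGroup,/TestGroup2") -> [(None, 'TestGroup'), (None, 'TestGroup2')]
# expand_groups("/TestGroup/TestGroup2")  -> [(None, 'TestGroup'), ('TestGroup', 'TestGroup2')]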
''' + """Host inventory parser for ansible using XenOrchestra as source.""" - NAME = 'community.general.xen_orchestra' + NAME = "community.general.xen_orchestra" def __init__(self): - super().__init__() # from config @@ -150,13 +149,12 @@ def pointer(self): return self.counter def create_connection(self, xoa_api_host): - validate_certs = self.get_option('validate_certs') - use_ssl = self.get_option('use_ssl') - proto = 'wss' if use_ssl else 'ws' + validate_certs = self.get_option("validate_certs") + use_ssl = self.get_option("use_ssl") + proto = "wss" if use_ssl else "ws" - sslopt = None if validate_certs else {'cert_reqs': ssl.CERT_NONE} - self.conn = create_connection( - f'{proto}://{xoa_api_host}/api/', sslopt=sslopt) + sslopt = None if validate_certs else {"cert_reqs": ssl.CERT_NONE} + self.conn = create_connection(f"{proto}://{xoa_api_host}/api/", sslopt=sslopt) CALL_TIMEOUT = 100 """Number of 1/10ths of a second to wait before method call times out.""" @@ -164,74 +162,67 @@ def create_connection(self, xoa_api_host): def call(self, method, params): """Calls a method on the XO server with the provided parameters.""" id = self.pointer - self.conn.send(json.dumps({ - 'id': id, - 'jsonrpc': '2.0', - 'method': method, - 'params': params - })) + self.conn.send(json.dumps({"id": id, "jsonrpc": "2.0", "method": method, "params": params})) waited = 0 while waited < self.CALL_TIMEOUT: response = json.loads(self.conn.recv()) - if 'id' in response and response['id'] == id: + if "id" in response and response["id"] == id: return response else: sleep(0.1) waited += 1 - raise AnsibleError(f'Method call {method} timed out after {self.CALL_TIMEOUT / 10} seconds.') + raise AnsibleError(f"Method call {method} timed out after {self.CALL_TIMEOUT / 10} seconds.") def login(self, user, password): - result = self.call('session.signIn', { - 'username': user, 'password': password - }) + result = self.call("session.signIn", {"username": user, "password": password}) - if 'error' in result: + if "error" in result: raise AnsibleError(f"Could not connect: {result['error']}") def get_object(self, name): - answer = self.call('xo.getAllObjects', {'filter': {'type': name}}) + answer = self.call("xo.getAllObjects", {"filter": {"type": name}}) - if 'error' in answer: + if "error" in answer: raise AnsibleError(f"Could not request: {answer['error']}") - return answer['result'] + return answer["result"] def _get_objects(self): self.create_connection(self.xoa_api_host) self.login(self.xoa_user, self.xoa_password) return { - 'vms': self.get_object('VM'), - 'pools': self.get_object('pool'), - 'hosts': self.get_object('host'), + "vms": self.get_object("VM"), + "pools": self.get_object("pool"), + "hosts": self.get_object("host"), } def _apply_constructable(self, name, variables): - strict = self.get_option('strict') - self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict) - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict) - self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict) + strict = self.get_option("strict") + self._add_host_to_composed_groups(self.get_option("groups"), variables, name, strict=strict) + self._add_host_to_keyed_groups(self.get_option("keyed_groups"), variables, name, strict=strict) + self._set_composite_vars(self.get_option("compose"), variables, name, strict=strict) def _add_vms(self, vms, hosts, pools): vm_name_list = [] for uuid, vm in vms.items(): - if self.vm_entry_name_type == 
'name_label': - if vm['name_label'] not in vm_name_list: - entry_name = vm['name_label'] - vm_name_list.append(vm['name_label']) + if self.vm_entry_name_type == "name_label": + if vm["name_label"] not in vm_name_list: + entry_name = vm["name_label"] + vm_name_list.append(vm["name_label"]) else: - vm_duplicate_count = vm_name_list.count(vm['name_label']) + vm_duplicate_count = vm_name_list.count(vm["name_label"]) entry_name = f"{vm['name_label']}_{vm_duplicate_count}" - vm_name_list.append(vm['name_label']) + vm_name_list.append(vm["name_label"]) else: entry_name = uuid - group = 'with_ip' - ip = vm.get('mainIpAddress') - power_state = vm['power_state'].lower() - pool_name = self._pool_group_name_for_uuid(pools, vm['$poolId']) - host_name = self._host_group_name_for_uuid(hosts, vm['$container']) + group = "with_ip" + ip = vm.get("mainIpAddress") + power_state = vm["power_state"].lower() + pool_name = self._pool_group_name_for_uuid(pools, vm["$poolId"]) + host_name = self._host_group_name_for_uuid(hosts, vm["$container"]) self.inventory.add_host(entry_name) @@ -248,67 +239,58 @@ def _add_vms(self, vms, hosts, pools): # Grouping VMs with an IP together if ip is None: - group = 'without_ip' + group = "without_ip" self.inventory.add_group(group) self.inventory.add_child(group, entry_name) # Adding meta - self.inventory.set_variable(entry_name, 'uuid', uuid) - self.inventory.set_variable(entry_name, 'ip', ip) - self.inventory.set_variable(entry_name, 'ansible_host', ip) - self.inventory.set_variable(entry_name, 'power_state', power_state) - self.inventory.set_variable( - entry_name, 'name_label', vm['name_label']) - self.inventory.set_variable(entry_name, 'type', vm['type']) - self.inventory.set_variable( - entry_name, 'cpus', vm['CPUs']['number']) - self.inventory.set_variable(entry_name, 'tags', vm['tags']) - self.inventory.set_variable( - entry_name, 'memory', vm['memory']['size']) - self.inventory.set_variable( - entry_name, 'has_ip', group == 'with_ip') - self.inventory.set_variable( - entry_name, 'is_managed', vm.get('managementAgentDetected', False)) - self.inventory.set_variable( - entry_name, 'os_version', vm['os_version']) + self.inventory.set_variable(entry_name, "uuid", uuid) + self.inventory.set_variable(entry_name, "ip", ip) + self.inventory.set_variable(entry_name, "ansible_host", ip) + self.inventory.set_variable(entry_name, "power_state", power_state) + self.inventory.set_variable(entry_name, "name_label", vm["name_label"]) + self.inventory.set_variable(entry_name, "type", vm["type"]) + self.inventory.set_variable(entry_name, "cpus", vm["CPUs"]["number"]) + self.inventory.set_variable(entry_name, "tags", vm["tags"]) + self.inventory.set_variable(entry_name, "memory", vm["memory"]["size"]) + self.inventory.set_variable(entry_name, "has_ip", group == "with_ip") + self.inventory.set_variable(entry_name, "is_managed", vm.get("managementAgentDetected", False)) + self.inventory.set_variable(entry_name, "os_version", vm["os_version"]) self._apply_constructable(entry_name, self.inventory.get_host(entry_name).get_vars()) def _add_hosts(self, hosts, pools): host_name_list = [] for host in hosts.values(): - if self.host_entry_name_type == 'name_label': - if host['name_label'] not in host_name_list: - entry_name = host['name_label'] - host_name_list.append(host['name_label']) + if self.host_entry_name_type == "name_label": + if host["name_label"] not in host_name_list: + entry_name = host["name_label"] + host_name_list.append(host["name_label"]) else: - host_duplicate_count = 
host_name_list.count(host['name_label']) + host_duplicate_count = host_name_list.count(host["name_label"]) entry_name = f"{host['name_label']}_{host_duplicate_count}" - host_name_list.append(host['name_label']) + host_name_list.append(host["name_label"]) else: - entry_name = host['uuid'] + entry_name = host["uuid"] group_name = f"xo_host_{clean_group_name(host['name_label'])}" - pool_name = self._pool_group_name_for_uuid(pools, host['$poolId']) + pool_name = self._pool_group_name_for_uuid(pools, host["$poolId"]) self.inventory.add_group(group_name) self.inventory.add_host(entry_name) self.inventory.add_child(HOST_GROUP, entry_name) self.inventory.add_child(pool_name, entry_name) - self.inventory.set_variable(entry_name, 'enabled', host['enabled']) - self.inventory.set_variable( - entry_name, 'hostname', host['hostname']) - self.inventory.set_variable(entry_name, 'memory', host['memory']) - self.inventory.set_variable(entry_name, 'address', host['address']) - self.inventory.set_variable(entry_name, 'cpus', host['cpus']) - self.inventory.set_variable(entry_name, 'type', 'host') - self.inventory.set_variable(entry_name, 'tags', host['tags']) - self.inventory.set_variable(entry_name, 'version', host['version']) - self.inventory.set_variable( - entry_name, 'power_state', host['power_state'].lower()) - self.inventory.set_variable( - entry_name, 'product_brand', host['productBrand']) + self.inventory.set_variable(entry_name, "enabled", host["enabled"]) + self.inventory.set_variable(entry_name, "hostname", host["hostname"]) + self.inventory.set_variable(entry_name, "memory", host["memory"]) + self.inventory.set_variable(entry_name, "address", host["address"]) + self.inventory.set_variable(entry_name, "cpus", host["cpus"]) + self.inventory.set_variable(entry_name, "type", "host") + self.inventory.set_variable(entry_name, "tags", host["tags"]) + self.inventory.set_variable(entry_name, "version", host["version"]) + self.inventory.set_variable(entry_name, "power_state", host["power_state"].lower()) + self.inventory.set_variable(entry_name, "product_brand", host["productBrand"]) for pool in pools.values(): group_name = f"xo_pool_{clean_group_name(pool['name_label'])}" @@ -340,25 +322,27 @@ def _populate(self, objects): for group in POWER_STATES: self.inventory.add_group(group.lower()) - self._add_pools(objects['pools']) - self._add_hosts(objects['hosts'], objects['pools']) - self._add_vms(objects['vms'], objects['hosts'], objects['pools']) + self._add_pools(objects["pools"]) + self._add_hosts(objects["hosts"], objects["pools"]) + self._add_vms(objects["vms"], objects["hosts"], objects["pools"]) def verify_file(self, path): - valid = False if super().verify_file(path): - if path.endswith(('xen_orchestra.yaml', 'xen_orchestra.yml')): + if path.endswith(("xen_orchestra.yaml", "xen_orchestra.yml")): valid = True else: self.display.vvv( - 'Skipping due to inventory source not ending in "xen_orchestra.yaml" nor "xen_orchestra.yml"') + 'Skipping due to inventory source not ending in "xen_orchestra.yaml" nor "xen_orchestra.yml"' + ) return valid def parse(self, inventory, loader, path, cache=True): if not HAS_WEBSOCKET: - raise AnsibleError('This plugin requires websocket-client 1.0.0 or higher: ' - 'https://github.com/websocket-client/websocket-client.') + raise AnsibleError( + "This plugin requires websocket-client 1.0.0 or higher: " + "https://github.com/websocket-client/websocket-client." 
+ ) super().parse(inventory, loader, path) @@ -366,24 +350,24 @@ def parse(self, inventory, loader, path, cache=True): self._read_config_data(path) self.inventory = inventory - self.protocol = 'wss' - self.xoa_api_host = self.get_option('api_host') - self.xoa_user = self.get_option('user') - self.xoa_password = self.get_option('password') + self.protocol = "wss" + self.xoa_api_host = self.get_option("api_host") + self.xoa_user = self.get_option("user") + self.xoa_password = self.get_option("password") self.cache_key = self.get_cache_key(path) - self.use_cache = cache and self.get_option('cache') + self.use_cache = cache and self.get_option("cache") - self.validate_certs = self.get_option('validate_certs') - if not self.get_option('use_ssl'): - self.protocol = 'ws' + self.validate_certs = self.get_option("validate_certs") + if not self.get_option("use_ssl"): + self.protocol = "ws" - self.vm_entry_name_type = 'uuid' - if not self.get_option('use_vm_uuid'): - self.vm_entry_name_type = 'name_label' + self.vm_entry_name_type = "uuid" + if not self.get_option("use_vm_uuid"): + self.vm_entry_name_type = "name_label" - self.host_entry_name_type = 'uuid' - if not self.get_option('use_host_uuid'): - self.host_entry_name_type = 'name_label' + self.host_entry_name_type = "uuid" + if not self.get_option("use_host_uuid"): + self.host_entry_name_type = "name_label" objects = self._get_objects() self._populate(make_unsafe(objects)) diff --git a/plugins/lookup/binary_file.py b/plugins/lookup/binary_file.py index 3236ade3e40..e13653f6a0a 100644 --- a/plugins/lookup/binary_file.py +++ b/plugins/lookup/binary_file.py @@ -84,7 +84,6 @@ class LookupModule(LookupBase): - def run(self, terms, variables=None, **kwargs): self.set_options(var_options=variables, direct=kwargs) not_exist = self.get_option("not_exist") diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py index 24608ec10bc..388e8428f37 100644 --- a/plugins/lookup/bitwarden.py +++ b/plugins/lookup/bitwarden.py @@ -131,8 +131,7 @@ class BitwardenException(AnsibleError): class Bitwarden: - - def __init__(self, path='bw'): + def __init__(self, path="bw"): self._cli_path = path self._session = None @@ -150,54 +149,56 @@ def session(self, value): @property def unlocked(self): - out, err = self._run(['status'], stdin="") + out, err = self._run(["status"], stdin="") decoded = AnsibleJSONDecoder().raw_decode(out)[0] - return decoded['status'] == 'unlocked' + return decoded["status"] == "unlocked" def _run(self, args, stdin=None, expected_rc=0): if self.session: - args += ['--session', self.session] + args += ["--session", self.session] p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE) out, err = p.communicate(to_bytes(stdin)) rc = p.wait() if rc != expected_rc: - if len(args) > 2 and args[0] == 'get' and args[1] == 'item' and b'Not found.' in err: - return 'null', '' + if len(args) > 2 and args[0] == "get" and args[1] == "item" and b"Not found." in err: + return "null", "" raise BitwardenException(err) - return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict') + return to_text(out, errors="surrogate_or_strict"), to_text(err, errors="surrogate_or_strict") def _get_matches(self, search_value, search_field, collection_id=None, organization_id=None): - """Return matching records whose search_field is equal to key. 
- """ + """Return matching records whose search_field is equal to key.""" # Prepare set of params for Bitwarden CLI - if search_field == 'id': - params = ['get', 'item', search_value] + if search_field == "id": + params = ["get", "item", search_value] else: - params = ['list', 'items'] + params = ["list", "items"] if search_value: - params.extend(['--search', search_value]) + params.extend(["--search", search_value]) if collection_id: - params.extend(['--collectionid', collection_id]) + params.extend(["--collectionid", collection_id]) if organization_id: - params.extend(['--organizationid', organization_id]) + params.extend(["--organizationid", organization_id]) out, err = self._run(params) # This includes things that matched in different fields. initial_matches = AnsibleJSONDecoder().raw_decode(out)[0] - if search_field == 'id': + if search_field == "id": if initial_matches is None: initial_matches = [] else: initial_matches = [initial_matches] # Filter to only include results from the right field, if a search is requested by value or field - return [item for item in initial_matches - if not search_value or not search_field or item.get(search_field) == search_value] + return [ + item + for item in initial_matches + if not search_value or not search_field or item.get(search_field) == search_value + ] def get_field(self, field, search_value, search_field="name", collection_id=None, organization_id=None): """Return a list of the specified field for records whose search_field match search_value @@ -211,17 +212,17 @@ def get_field(self, field, search_value, search_field="name", collection_id=None field_matches = [] for match in matches: # if there are no custom fields, then `match` has no key 'fields' - if 'fields' in match: + if "fields" in match: custom_field_found = False - for custom_field in match['fields']: - if field == custom_field['name']: - field_matches.append(custom_field['value']) + for custom_field in match["fields"]: + if field == custom_field["name"]: + field_matches.append(custom_field["value"]) custom_field_found = True break if custom_field_found: continue - if 'login' in match and field in match['login']: - field_matches.append(match['login'][field]) + if "login" in match and field in match["login"]: + field_matches.append(match["login"][field]) continue if field in match: field_matches.append(match[field]) @@ -236,10 +237,10 @@ def get_collection_ids(self, collection_name: str, organization_id=None) -> list """Return matching IDs of collections whose name is equal to collection_name.""" # Prepare set of params for Bitwarden CLI - params = ['list', 'collections', '--search', collection_name] + params = ["list", "collections", "--search", collection_name] if organization_id: - params.extend(['--organizationid', organization_id]) + params.extend(["--organizationid", organization_id]) out, err = self._run(params) @@ -247,21 +248,19 @@ def get_collection_ids(self, collection_name: str, organization_id=None) -> list initial_matches = AnsibleJSONDecoder().raw_decode(out)[0] # type: ignore[operator] # Filter to only return the ID of a collections with exactly matching name - return [item['id'] for item in initial_matches - if str(item.get('name')).lower() == collection_name.lower()] + return [item["id"] for item in initial_matches if str(item.get("name")).lower() == collection_name.lower()] class LookupModule(LookupBase): - def run(self, terms=None, variables=None, **kwargs): self.set_options(var_options=variables, direct=kwargs) - field = self.get_option('field') - search_field = 
self.get_option('search') - collection_id = self.get_option('collection_id') - collection_name = self.get_option('collection_name') - organization_id = self.get_option('organization_id') - result_count = self.get_option('result_count') - _bitwarden.session = self.get_option('bw_session') + field = self.get_option("field") + search_field = self.get_option("search") + collection_id = self.get_option("collection_id") + collection_name = self.get_option("collection_name") + organization_id = self.get_option("organization_id") + result_count = self.get_option("result_count") + _bitwarden.session = self.get_option("bw_session") if not _bitwarden.unlocked: raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.") @@ -287,7 +286,8 @@ def run(self, terms=None, variables=None, **kwargs): for result in results: if result_count is not None and len(result) != result_count: raise BitwardenException( - f"Number of results doesn't match result_count! ({len(result)} != {result_count})") + f"Number of results doesn't match result_count! ({len(result)} != {result_count})" + ) return results diff --git a/plugins/lookup/bitwarden_secrets_manager.py b/plugins/lookup/bitwarden_secrets_manager.py index 173467047af..b8077c8b38f 100644 --- a/plugins/lookup/bitwarden_secrets_manager.py +++ b/plugins/lookup/bitwarden_secrets_manager.py @@ -83,7 +83,7 @@ class BitwardenSecretsManagerException(AnsibleLookupError): class BitwardenSecretsManager: - def __init__(self, path='bws'): + def __init__(self, path="bws"): self._cli_path = path self._max_retries = 3 self._retry_delay = 1 @@ -100,7 +100,7 @@ def _run_with_retry(self, args, stdin=None, retries=0): raise BitwardenSecretsManagerException("Max retries exceeded. Unable to retrieve secret.") if "Too many requests" in err: - delay = self._retry_delay * (2 ** retries) + delay = self._retry_delay * (2**retries) sleep(delay) return self._run_with_retry(args, stdin, retries + 1) else: @@ -112,36 +112,31 @@ def _run(self, args, stdin=None): p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE) out, err = p.communicate(stdin) rc = p.wait() - return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict'), rc + return to_text(out, errors="surrogate_or_strict"), to_text(err, errors="surrogate_or_strict"), rc def get_bws_version(self): - """Get the version of the Bitwarden Secrets Manager CLI. - """ - out, err, rc = self._run(['--version']) + """Get the version of the Bitwarden Secrets Manager CLI.""" + out, err, rc = self._run(["--version"]) if rc != 0: raise BitwardenSecretsManagerException(to_text(err)) # strip the prefix and grab the last segment, the version number return out.split()[-1] def get_secret(self, secret_id, bws_access_token): - """Get and return the secret with the given secret_id. - """ + """Get and return the secret with the given secret_id.""" # Prepare set of params for Bitwarden Secrets Manager CLI # Color output was not always disabled correctly with the default 'auto' setting so explicitly disable it. 
- params = [ - '--color', 'no', - '--access-token', bws_access_token - ] + params = ["--color", "no", "--access-token", bws_access_token] # bws version 0.3.0 introduced a breaking change in the command line syntax: # pre-0.3.0: verb noun # 0.3.0 and later: noun verb bws_version = self.get_bws_version() - if LooseVersion(bws_version) < LooseVersion('0.3.0'): - params.extend(['get', 'secret', secret_id]) + if LooseVersion(bws_version) < LooseVersion("0.3.0"): + params.extend(["get", "secret", secret_id]) else: - params.extend(['secret', 'get', secret_id]) + params.extend(["secret", "get", secret_id]) out, err, rc = self._run_with_retry(params) if rc != 0: @@ -153,7 +148,7 @@ def get_secret(self, secret_id, bws_access_token): class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): self.set_options(var_options=variables, direct=kwargs) - bws_access_token = self.get_option('bws_access_token') + bws_access_token = self.get_option("bws_access_token") return [_bitwarden_secrets_manager.get_secret(term, bws_access_token) for term in terms] diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py index 9382ee08098..2d448d8ea7c 100644 --- a/plugins/lookup/chef_databag.py +++ b/plugins/lookup/chef_databag.py @@ -47,6 +47,7 @@ try: import chef + HAS_CHEF = True except ImportError as missing_module: HAS_CHEF = False @@ -56,8 +57,8 @@ class LookupModule(LookupBase): """ Chef data bag lookup module """ - def __init__(self, loader=None, templar=None, **kwargs): + def __init__(self, loader=None, templar=None, **kwargs): super().__init__(loader, templar, **kwargs) # setup vars for data bag name and data bag item @@ -77,18 +78,14 @@ def parse_kv_args(self, args): parsed = str(arg_raw) setattr(self, arg, parsed) except ValueError: - raise AnsibleError( - f"can't parse arg {arg}={arg_raw} as string" - ) + raise AnsibleError(f"can't parse arg {arg}={arg_raw} as string") if args: - raise AnsibleError( - f"unrecognized arguments to with_sequence: {list(args.keys())!r}" - ) + raise AnsibleError(f"unrecognized arguments to with_sequence: {list(args.keys())!r}") def run(self, terms, variables=None, **kwargs): # Ensure pychef has been loaded if not HAS_CHEF: - raise AnsibleError('PyChef needed for lookup plugin, try `pip install pychef`') + raise AnsibleError("PyChef needed for lookup plugin, try `pip install pychef`") for term in terms: self.parse_kv_args(parse_kv(term)) @@ -96,7 +93,7 @@ def run(self, terms, variables=None, **kwargs): api_object = chef.autoconfigure() if not isinstance(api_object, chef.api.ChefAPI): - raise AnsibleError('Unable to connect to Chef Server API.') + raise AnsibleError("Unable to connect to Chef Server API.") data_bag_object = chef.DataBag(self.name) diff --git a/plugins/lookup/collection_version.py b/plugins/lookup/collection_version.py index 7a9eaf10bd2..17745973fc3 100644 --- a/plugins/lookup/collection_version.py +++ b/plugins/lookup/collection_version.py @@ -66,35 +66,35 @@ from ansible.plugins.lookup import LookupBase -FQCN_RE = re.compile(r'^[A-Za-z0-9_]+\.[A-Za-z0-9_]+$') +FQCN_RE = re.compile(r"^[A-Za-z0-9_]+\.[A-Za-z0-9_]+$") def load_collection_meta_manifest(manifest_path): - with open(manifest_path, 'rb') as f: + with open(manifest_path, "rb") as f: meta = json.load(f) return { - 'version': meta['collection_info']['version'], + "version": meta["collection_info"]["version"], } -def load_collection_meta_galaxy(galaxy_path, no_version='*'): - with open(galaxy_path, 'rb') as f: +def load_collection_meta_galaxy(galaxy_path, 
no_version="*"): + with open(galaxy_path, "rb") as f: meta = yaml.safe_load(f) return { - 'version': meta.get('version') or no_version, + "version": meta.get("version") or no_version, } -def load_collection_meta(collection_pkg, no_version='*'): +def load_collection_meta(collection_pkg, no_version="*"): path = os.path.dirname(collection_pkg.__file__) # Try to load MANIFEST.json - manifest_path = os.path.join(path, 'MANIFEST.json') + manifest_path = os.path.join(path, "MANIFEST.json") if os.path.exists(manifest_path): return load_collection_meta_manifest(manifest_path) # Try to load galaxy.yml - galaxy_path = os.path.join(path, 'galaxy.yml') + galaxy_path = os.path.join(path, "galaxy.yml") if os.path.exists(galaxy_path): return load_collection_meta_galaxy(galaxy_path, no_version=no_version) @@ -105,15 +105,15 @@ class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): result = [] self.set_options(var_options=variables, direct=kwargs) - not_found = self.get_option('result_not_found') - no_version = self.get_option('result_no_version') + not_found = self.get_option("result_not_found") + no_version = self.get_option("result_no_version") for term in terms: if not FQCN_RE.match(term): raise AnsibleLookupError(f'"{term}" is not a FQCN') try: - collection_pkg = import_module(f'ansible_collections.{term}') + collection_pkg = import_module(f"ansible_collections.{term}") except ImportError: # Collection not found result.append(not_found) @@ -122,8 +122,8 @@ def run(self, terms, variables=None, **kwargs): try: data = load_collection_meta(collection_pkg, no_version=no_version) except Exception as exc: - raise AnsibleLookupError(f'Error while loading metadata for {term}: {exc}') + raise AnsibleLookupError(f"Error while loading metadata for {term}: {exc}") - result.append(data.get('version', no_version)) + result.append(data.get("version", no_version)) return result diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index c9cc3c63995..18fca107e58 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -125,20 +125,19 @@ class LookupModule(LookupBase): - def run(self, terms, variables=None, **kwargs): - if not HAS_CONSUL: raise AnsibleError( - 'py-consul is required for consul_kv lookup. see https://github.com/criteo/py-consul?tab=readme-ov-file#installation') + "py-consul is required for consul_kv lookup. 
see https://github.com/criteo/py-consul?tab=readme-ov-file#installation" + ) # get options self.set_options(direct=kwargs) - scheme = self.get_option('scheme') - host = self.get_option('host') - port = self.get_option('port') - url = self.get_option('url') + scheme = self.get_option("scheme") + host = self.get_option("host") + port = self.get_option("port") + url = self.get_option("url") if url is not None: u = urlparse(url) if u.scheme: @@ -147,8 +146,8 @@ def run(self, terms, variables=None, **kwargs): if u.port is not None: port = u.port - validate_certs = self.get_option('validate_certs') - client_cert = self.get_option('client_cert') + validate_certs = self.get_option("validate_certs") + client_cert = self.get_option("client_cert") values = [] try: @@ -156,40 +155,41 @@ def run(self, terms, variables=None, **kwargs): params = self.parse_params(term) consul_api = consul.Consul(host=host, port=port, scheme=scheme, verify=validate_certs, cert=client_cert) - results = consul_api.kv.get(params['key'], - token=params['token'], - index=params['index'], - recurse=params['recurse'], - dc=params['datacenter']) + results = consul_api.kv.get( + params["key"], + token=params["token"], + index=params["index"], + recurse=params["recurse"], + dc=params["datacenter"], + ) if results[1]: # responds with a single or list of result maps if isinstance(results[1], list): for r in results[1]: - values.append(to_text(r['Value'])) + values.append(to_text(r["Value"])) else: - values.append(to_text(results[1]['Value'])) + values.append(to_text(results[1]["Value"])) except Exception as e: - raise AnsibleError( - f"Error locating '{term}' in kv store. Error was {e}") + raise AnsibleError(f"Error locating '{term}' in kv store. Error was {e}") return values def parse_params(self, term): - params = term.split(' ') + params = term.split(" ") paramvals = { - 'key': params[0], - 'token': self.get_option('token'), - 'recurse': self.get_option('recurse'), - 'index': self.get_option('index'), - 'datacenter': self.get_option('datacenter') + "key": params[0], + "token": self.get_option("token"), + "recurse": self.get_option("recurse"), + "index": self.get_option("index"), + "datacenter": self.get_option("datacenter"), } # parameters specified? 
try: for param in params[1:]: if param and len(param) > 0: - name, value = param.split('=') + name, value = param.split("=") if name not in paramvals: raise AnsibleAssertionError(f"{name} not a valid consul lookup parameter") paramvals[name] = value diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py index 01e6a1a8fe9..6d9490cb890 100644 --- a/plugins/lookup/credstash.py +++ b/plugins/lookup/credstash.py @@ -98,6 +98,7 @@ try: import credstash + CREDSTASH_INSTALLED = True except ImportError: CREDSTASH_INSTALLED = False @@ -106,28 +107,38 @@ class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): if not CREDSTASH_INSTALLED: - raise AnsibleError('The credstash lookup plugin requires credstash to be installed.') + raise AnsibleError("The credstash lookup plugin requires credstash to be installed.") self.set_options(var_options=variables, direct=kwargs) - version = self.get_option('version') - region = self.get_option('region') - table = self.get_option('table') - profile_name = self.get_option('profile_name') - aws_access_key_id = self.get_option('aws_access_key_id') - aws_secret_access_key = self.get_option('aws_secret_access_key') - aws_session_token = self.get_option('aws_session_token') + version = self.get_option("version") + region = self.get_option("region") + table = self.get_option("table") + profile_name = self.get_option("profile_name") + aws_access_key_id = self.get_option("aws_access_key_id") + aws_secret_access_key = self.get_option("aws_secret_access_key") + aws_session_token = self.get_option("aws_session_token") context = { - k: v for k, v in kwargs.items() - if k not in ('version', 'region', 'table', 'profile_name', 'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token') + k: v + for k, v in kwargs.items() + if k + not in ( + "version", + "region", + "table", + "profile_name", + "aws_access_key_id", + "aws_secret_access_key", + "aws_session_token", + ) } kwargs_pass = { - 'profile_name': profile_name, - 'aws_access_key_id': aws_access_key_id, - 'aws_secret_access_key': aws_secret_access_key, - 'aws_session_token': aws_session_token, + "profile_name": profile_name, + "aws_access_key_id": aws_access_key_id, + "aws_secret_access_key": aws_secret_access_key, + "aws_session_token": aws_session_token, } ret = [] @@ -135,8 +146,8 @@ def run(self, terms, variables=None, **kwargs): try: ret.append(credstash.getSecret(term, version, region, table, context=context, **kwargs_pass)) except credstash.ItemNotFound: - raise AnsibleError(f'Key {term} not found') + raise AnsibleError(f"Key {term} not found") except Exception as e: - raise AnsibleError(f'Encountered exception while fetching {term}: {e}') + raise AnsibleError(f"Encountered exception while fetching {term}: {e}") return ret diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py index 955ba4a89ae..660abcdd1cb 100644 --- a/plugins/lookup/cyberarkpassword.py +++ b/plugins/lookup/cyberarkpassword.py @@ -89,13 +89,11 @@ display = Display() -CLIPASSWORDSDK_CMD = os.getenv('AIM_CLIPASSWORDSDK_CMD', '/opt/CARKaim/sdk/clipasswordsdk') +CLIPASSWORDSDK_CMD = os.getenv("AIM_CLIPASSWORDSDK_CMD", "/opt/CARKaim/sdk/clipasswordsdk") class CyberarkPassword: - def __init__(self, appid=None, query=None, output=None, **kwargs): - self.appid = appid self.query = query self.output = output @@ -104,7 +102,7 @@ def __init__(self, appid=None, query=None, output=None, **kwargs): # FailRequestOnPasswordChange, Queryformat, Reason, etc. 
self.extra_parms = [] for key, value in kwargs.items(): - self.extra_parms.append('-p') + self.extra_parms.append("-p") self.extra_parms.append(f"{key}={value}") if self.appid is None: @@ -123,17 +121,21 @@ def __init__(self, appid=None, query=None, output=None, **kwargs): self.b_delimiter = b"@#@" # Known delimiter to split output results def get(self): - result_dict = {} try: all_parms = [ CLIPASSWORDSDK_CMD, - 'GetPassword', - '-p', f'AppDescs.AppID={self.appid}', - '-p', f'Query={self.query}', - '-o', self.output, - '-d', self.b_delimiter] + "GetPassword", + "-p", + f"AppDescs.AppID={self.appid}", + "-p", + f"Query={self.query}", + "-o", + self.output, + "-d", + self.b_delimiter, + ] all_parms.extend(self.extra_parms) b_credential = b"" @@ -146,7 +148,7 @@ def get(self): if tmp_error: raise AnsibleError(f"ERROR => {tmp_error} ") - if b_credential and b_credential.endswith(b'\n'): + if b_credential and b_credential.endswith(b"\n"): b_credential = b_credential[:-1] output_names = self.output.split(",") @@ -164,13 +166,14 @@ def get(self): except subprocess.CalledProcessError as e: raise AnsibleError(e.output) except OSError as e: - raise AnsibleError(f"ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=({e.errno}) => {e.strerror} ") + raise AnsibleError( + f"ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=({e.errno}) => {e.strerror} " + ) return [result_dict] class LookupModule(LookupBase): - """ USAGE: diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py index 89502e95188..3dafe38abd7 100644 --- a/plugins/lookup/dependent.py +++ b/plugins/lookup/dependent.py @@ -125,6 +125,7 @@ try: from ansible.template import trust_as_template as _trust_as_template + HAS_DATATAGGING = True except ImportError: HAS_DATATAGGING = False @@ -145,7 +146,7 @@ def __evaluate(self, expression, templar, variables): """ templar.available_variables = variables or {} quoted_expression = "{0}{1}{2}".format("{{", expression, "}}") - if hasattr(templar, 'evaluate_expression'): + if hasattr(templar, "evaluate_expression"): # This is available since the Data Tagging PR has been merged return templar.evaluate_expression(_make_safe(expression)) return templar.template(quoted_expression) @@ -169,12 +170,11 @@ def __process(self, result, terms, index, current, templar, variables): if expression is not None: # Evaluate expression in current context vars = variables.copy() - vars['item'] = current.copy() + vars["item"] = current.copy() try: values = self.__evaluate(expression, templar, variables=vars) except Exception as e: - raise AnsibleLookupError( - f'Caught "{e}" while evaluating {key!r} with item == {current!r}') + raise AnsibleLookupError(f'Caught "{e}" while evaluating {key!r} with item == {current!r}') if isinstance(values, Mapping): for idx, val in sorted(values.items()): @@ -186,7 +186,8 @@ def __process(self, result, terms, index, current, templar, variables): self.__process(result, terms, index + 1, current, templar, variables) else: raise AnsibleLookupError( - f'Did not obtain dictionary or list while evaluating {key!r} with item == {current!r}, but {type(values)}') + f"Did not obtain dictionary or list while evaluating {key!r} with item == {current!r}, but {type(values)}" + ) def run(self, terms, variables=None, **kwargs): """Generate list.""" @@ -202,15 +203,14 @@ def run(self, terms, variables=None, **kwargs): vars_so_far = set() for index, term in enumerate(terms): if not isinstance(term, Mapping): - raise AnsibleLookupError( - 
f'Parameter {index} must be a dictionary, got {type(term)}') + raise AnsibleLookupError(f"Parameter {index} must be a dictionary, got {type(term)}") if len(term) != 1: raise AnsibleLookupError( - f'Parameter {index} must be a one-element dictionary, got {len(term)} elements') + f"Parameter {index} must be a one-element dictionary, got {len(term)} elements" + ) k, v = list(term.items())[0] if k in vars_so_far: - raise AnsibleLookupError( - f'The variable {k!r} appears more than once') + raise AnsibleLookupError(f"The variable {k!r} appears more than once") vars_so_far.add(k) if isinstance(v, str): data.append((k, v, None)) @@ -218,6 +218,7 @@ def run(self, terms, variables=None, **kwargs): data.append((k, None, v)) else: raise AnsibleLookupError( - f'Parameter {k!r} (index {index}) must have a value of type string, dictionary or list, got type {type(v)}') + f"Parameter {k!r} (index {index}) must have a value of type string, dictionary or list, got type {type(v)}" + ) self.__process(result, data, 0, {}, templar, variables) return result diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index b36f02d7d4c..b89609931e8 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -252,8 +252,30 @@ import dns.resolver import dns.reversename import dns.rdataclass - from dns.rdatatype import (A, AAAA, CAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC, - MX, NAPTR, NS, NSEC3PARAM, PTR, RP, SOA, SPF, SRV, SSHFP, TLSA, TXT) + from dns.rdatatype import ( + A, + AAAA, + CAA, + CNAME, + DNAME, + DNSKEY, + DS, + HINFO, + LOC, + MX, + NAPTR, + NS, + NSEC3PARAM, + PTR, + RP, + SOA, + SPF, + SRV, + SSHFP, + TLSA, + TXT, + ) + HAVE_DNS = True except ImportError: HAVE_DNS = False @@ -263,35 +285,35 @@ def make_rdata_dict(rdata): - ''' While the 'dig' lookup plugin supports anything which dnspython supports - out of the box, the following supported_types list describes which - DNS query types we can convert to a dict. + """While the 'dig' lookup plugin supports anything which dnspython supports + out of the box, the following supported_types list describes which + DNS query types we can convert to a dict. - Note: adding support for RRSIG is hard work. :) - ''' + Note: adding support for RRSIG is hard work. 
:) + """ supported_types = { - A: ['address'], - AAAA: ['address'], - CAA: ['flags', 'tag', 'value'], - CNAME: ['target'], - DNAME: ['target'], - DNSKEY: ['flags', 'algorithm', 'protocol', 'key'], - DS: ['algorithm', 'digest_type', 'key_tag', 'digest'], - HINFO: ['cpu', 'os'], - LOC: ['latitude', 'longitude', 'altitude', 'size', 'horizontal_precision', 'vertical_precision'], - MX: ['preference', 'exchange'], - NAPTR: ['order', 'preference', 'flags', 'service', 'regexp', 'replacement'], - NS: ['target'], - NSEC3PARAM: ['algorithm', 'flags', 'iterations', 'salt'], - PTR: ['target'], - RP: ['mbox', 'txt'], + A: ["address"], + AAAA: ["address"], + CAA: ["flags", "tag", "value"], + CNAME: ["target"], + DNAME: ["target"], + DNSKEY: ["flags", "algorithm", "protocol", "key"], + DS: ["algorithm", "digest_type", "key_tag", "digest"], + HINFO: ["cpu", "os"], + LOC: ["latitude", "longitude", "altitude", "size", "horizontal_precision", "vertical_precision"], + MX: ["preference", "exchange"], + NAPTR: ["order", "preference", "flags", "service", "regexp", "replacement"], + NS: ["target"], + NSEC3PARAM: ["algorithm", "flags", "iterations", "salt"], + PTR: ["target"], + RP: ["mbox", "txt"], # RRSIG: ['type_covered', 'algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'key_tag', 'signer', 'signature'], - SOA: ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'], - SPF: ['strings'], - SRV: ['priority', 'weight', 'port', 'target'], - SSHFP: ['algorithm', 'fp_type', 'fingerprint'], - TLSA: ['usage', 'selector', 'mtype', 'cert'], - TXT: ['strings'], + SOA: ["mname", "rname", "serial", "refresh", "retry", "expire", "minimum"], + SPF: ["strings"], + SRV: ["priority", "weight", "port", "target"], + SSHFP: ["algorithm", "fp_type", "fingerprint"], + TLSA: ["usage", "selector", "mtype", "cert"], + TXT: ["strings"], } rd = {} @@ -304,18 +326,18 @@ def make_rdata_dict(rdata): if isinstance(val, dns.name.Name): val = dns.name.Name.to_text(val) - if rdata.rdtype == DS and f == 'digest': - val = dns.rdata._hexify(rdata.digest).replace(' ', '') - if rdata.rdtype == DNSKEY and f == 'algorithm': + if rdata.rdtype == DS and f == "digest": + val = dns.rdata._hexify(rdata.digest).replace(" ", "") + if rdata.rdtype == DNSKEY and f == "algorithm": val = int(val) - if rdata.rdtype == DNSKEY and f == 'key': - val = dns.rdata._base64ify(rdata.key).replace(' ', '') - if rdata.rdtype == NSEC3PARAM and f == 'salt': - val = dns.rdata._hexify(rdata.salt).replace(' ', '') - if rdata.rdtype == SSHFP and f == 'fingerprint': - val = dns.rdata._hexify(rdata.fingerprint).replace(' ', '') - if rdata.rdtype == TLSA and f == 'cert': - val = dns.rdata._hexify(rdata.cert).replace(' ', '') + if rdata.rdtype == DNSKEY and f == "key": + val = dns.rdata._base64ify(rdata.key).replace(" ", "") + if rdata.rdtype == NSEC3PARAM and f == "salt": + val = dns.rdata._hexify(rdata.salt).replace(" ", "") + if rdata.rdtype == SSHFP and f == "fingerprint": + val = dns.rdata._hexify(rdata.fingerprint).replace(" ", "") + if rdata.rdtype == TLSA and f == "cert": + val = dns.rdata._hexify(rdata.cert).replace(" ", "") rd[f] = val @@ -327,11 +349,10 @@ def make_rdata_dict(rdata): # # -------------------------------------------------------------- -class LookupModule(LookupBase): +class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): - - ''' + """ terms contains a string with things to `dig' for. 
We support the following formats: example.com # A record @@ -344,7 +365,7 @@ def run(self, terms, variables=None, **kwargs): ^^^ can be comma-sep list of names/addresses ... flat=0 # returns a dict; default is 1 == string - ''' + """ if HAVE_DNS is False: raise AnsibleError("The dig lookup requires the python 'dnspython' library and it is not installed") @@ -357,21 +378,21 @@ def run(self, terms, variables=None, **kwargs): domains = [] nameservers = [] - qtype = self.get_option('qtype') - flat = self.get_option('flat') - fail_on_error = self.get_option('fail_on_error') - real_empty = self.get_option('real_empty') - tcp = self.get_option('tcp') - port = self.get_option('port') + qtype = self.get_option("qtype") + flat = self.get_option("flat") + fail_on_error = self.get_option("fail_on_error") + real_empty = self.get_option("real_empty") + tcp = self.get_option("tcp") + port = self.get_option("port") try: - rdclass = dns.rdataclass.from_text(self.get_option('class')) + rdclass = dns.rdataclass.from_text(self.get_option("class")) except Exception as e: raise AnsibleError(f"dns lookup illegal CLASS: {e}") - myres.retry_servfail = self.get_option('retry_servfail') + myres.retry_servfail = self.get_option("retry_servfail") for t in terms: - if t.startswith('@'): # e.g. "@10.0.1.2,192.0.2.1" is ok. - nsset = t[1:].split(',') + if t.startswith("@"): # e.g. "@10.0.1.2,192.0.2.1" is ok. + nsset = t[1:].split(",") for ns in nsset: # Check if we have a valid IP address. If so, use that, otherwise # try to resolve name to address using system's resolver. If that @@ -386,35 +407,35 @@ def run(self, terms, variables=None, **kwargs): except Exception as e: raise AnsibleError(f"dns lookup NS: {e}") continue - if '=' in t: + if "=" in t: try: - opt, arg = t.split('=', 1) + opt, arg = t.split("=", 1) except Exception: pass - if opt == 'qtype': + if opt == "qtype": qtype = arg.upper() - elif opt == 'flat': + elif opt == "flat": flat = int(arg) - elif opt == 'class': + elif opt == "class": try: rdclass = dns.rdataclass.from_text(arg) except Exception as e: raise AnsibleError(f"dns lookup illegal CLASS: {e}") - elif opt == 'retry_servfail': + elif opt == "retry_servfail": myres.retry_servfail = boolean(arg) - elif opt == 'fail_on_error': + elif opt == "fail_on_error": fail_on_error = boolean(arg) - elif opt == 'real_empty': + elif opt == "real_empty": real_empty = boolean(arg) - elif opt == 'tcp': + elif opt == "tcp": tcp = boolean(arg) continue - if '/' in t: + if "/" in t: try: - domain, qtype = t.split('/') + domain, qtype = t.split("/") domains.append(domain) except Exception: domains.append(t) @@ -428,7 +449,7 @@ def run(self, terms, variables=None, **kwargs): if len(nameservers) > 0: myres.nameservers = nameservers - if qtype.upper() == 'PTR': + if qtype.upper() == "PTR": reversed_domains = [] for domain in domains: try: @@ -450,7 +471,7 @@ def run(self, terms, variables=None, **kwargs): answers = myres.query(domain, qtype, rdclass=rdclass, tcp=tcp) for rdata in answers: s = rdata.to_text() - if qtype.upper() == 'TXT': + if qtype.upper() == "TXT": s = s[1:-1] # Strip outside quotes on TXT rdata if flat: @@ -458,10 +479,10 @@ def run(self, terms, variables=None, **kwargs): else: try: rd = make_rdata_dict(rdata) - rd['owner'] = answers.canonical_name.to_text() - rd['type'] = dns.rdatatype.to_text(rdata.rdtype) - rd['ttl'] = answers.rrset.ttl - rd['class'] = dns.rdataclass.to_text(rdata.rdclass) + rd["owner"] = answers.canonical_name.to_text() + rd["type"] = dns.rdatatype.to_text(rdata.rdtype) + rd["ttl"] = 
answers.rrset.ttl + rd["class"] = dns.rdataclass.to_text(rdata.rdclass) ret.append(rd) except Exception as err: @@ -473,7 +494,7 @@ def run(self, terms, variables=None, **kwargs): if fail_on_error: raise AnsibleError(f"Lookup failed: {err}") if not real_empty: - ret.append('NXDOMAIN') + ret.append("NXDOMAIN") except (dns.resolver.NoAnswer, dns.resolver.Timeout, dns.resolver.NoNameservers) as err: if fail_on_error: raise AnsibleError(f"Lookup failed: {err}") diff --git a/plugins/lookup/dnstxt.py b/plugins/lookup/dnstxt.py index d83f08bb090..1ad8e07addc 100644 --- a/plugins/lookup/dnstxt.py +++ b/plugins/lookup/dnstxt.py @@ -57,6 +57,7 @@ try: import dns.resolver from dns.exception import DNSException + HAVE_DNS = True except ImportError: pass @@ -73,21 +74,20 @@ class LookupModule(LookupBase): - def run(self, terms, variables=None, **kwargs): self.set_options(var_options=variables, direct=kwargs) if HAVE_DNS is False: raise AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed") - real_empty = self.get_option('real_empty') + real_empty = self.get_option("real_empty") ret = [] for term in terms: domain = term.split()[0] string = [] try: - answers = dns.resolver.query(domain, 'TXT') + answers = dns.resolver.query(domain, "TXT") for rdata in answers: s = rdata.to_text() string.append(s[1:-1]) # Strip outside quotes on TXT rdata @@ -95,18 +95,18 @@ def run(self, terms, variables=None, **kwargs): except dns.resolver.NXDOMAIN: if real_empty: continue - string = 'NXDOMAIN' + string = "NXDOMAIN" except dns.resolver.Timeout: if real_empty: continue - string = '' + string = "" except dns.resolver.NoAnswer: if real_empty: continue - string = '' + string = "" except DNSException as e: raise AnsibleError(f"dns.resolver unhandled exception {e}") - ret.append(''.join(string)) + ret.append("".join(string)) return ret diff --git a/plugins/lookup/dsv.py b/plugins/lookup/dsv.py index 594dd40f4eb..6bb73e2f198 100644 --- a/plugins/lookup/dsv.py +++ b/plugins/lookup/dsv.py @@ -140,7 +140,5 @@ def run(self, terms, variables, **kwargs): display.vvv(f"DevOps Secrets Vault GET /secrets/{path}") result.append(vault.get_secret_json(path)) except SecretsVaultError as error: - raise AnsibleError( - f"DevOps Secrets Vault lookup failure: {error.message}" - ) + raise AnsibleError(f"DevOps Secrets Vault lookup failure: {error.message}") return result diff --git a/plugins/lookup/etcd.py b/plugins/lookup/etcd.py index 65a9d23d2f1..87d479422a0 100644 --- a/plugins/lookup/etcd.py +++ b/plugins/lookup/etcd.py @@ -102,7 +102,7 @@ class Etcd: def __init__(self, url, version, validate_certs): self.url = url self.version = version - self.baseurl = f'{self.url}/{self.version}/keys' + self.baseurl = f"{self.url}/{self.version}/keys" self.validate_certs = validate_certs def _parse_node(self, node): @@ -113,12 +113,12 @@ def _parse_node(self, node): # the function will create a key-value at this level and # undoing the loop. 
path = {} - if node.get('dir', False): - for n in node.get('nodes', []): - path[n['key'].split('/')[-1]] = self._parse_node(n) + if node.get("dir", False): + for n in node.get("nodes", []): + path[n["key"].split("/")[-1]] = self._parse_node(n) else: - path = node['value'] + path = node["value"] return path @@ -135,16 +135,16 @@ def get(self, key): try: # I will not support Version 1 of etcd for folder parsing item = json.loads(data) - if self.version == 'v1': + if self.version == "v1": # When ETCD are working with just v1 - if 'value' in item: - value = item['value'] + if "value" in item: + value = item["value"] else: - if 'node' in item: + if "node" in item: # When a usual result from ETCD - value = self._parse_node(item['node']) + value = self._parse_node(item["node"]) - if 'errorCode' in item: + if "errorCode" in item: # Here return an error when an unknown entry responds value = "ENOENT" except Exception: @@ -154,14 +154,12 @@ def get(self, key): class LookupModule(LookupBase): - def run(self, terms, variables, **kwargs): - self.set_options(var_options=variables, direct=kwargs) - validate_certs = self.get_option('validate_certs') - url = self.get_option('url') - version = self.get_option('version') + validate_certs = self.get_option("validate_certs") + url = self.get_option("url") + version = self.get_option("version") etcd = Etcd(url=url, version=version, validate_certs=validate_certs) diff --git a/plugins/lookup/etcd3.py b/plugins/lookup/etcd3.py index 0312f171274..46873af89ef 100644 --- a/plugins/lookup/etcd3.py +++ b/plugins/lookup/etcd3.py @@ -142,6 +142,7 @@ try: import etcd3 + HAS_ETCD = True except ImportError: HAS_ETCD = False @@ -149,14 +150,14 @@ display = Display() etcd3_cnx_opts = ( - 'host', - 'port', - 'ca_cert', - 'cert_key', - 'cert_cert', - 'timeout', - 'user', - 'password', + "host", + "port", + "ca_cert", + "cert_key", + "cert_cert", + "timeout", + "user", + "password", # 'grpc_options' Etcd3Client() option currently not supported by lookup module (maybe in future ?) ) @@ -166,18 +167,16 @@ def etcd3_client(client_params): etcd = etcd3.client(**client_params) etcd.status() except Exception as exp: - raise AnsibleLookupError(f'Cannot connect to etcd cluster: {exp}') + raise AnsibleLookupError(f"Cannot connect to etcd cluster: {exp}") return etcd class LookupModule(LookupBase): - def run(self, terms, variables, **kwargs): - self.set_options(var_options=variables, direct=kwargs) if not HAS_ETCD: - display.error(missing_required_lib('etcd3')) + display.error(missing_required_lib("etcd3")) return None # create the etcd3 connection parameters dict to pass to etcd3 class @@ -187,21 +186,21 @@ def run(self, terms, variables, **kwargs): # must be mangled a bit to fit in this scheme. 
# so here we use a regex to extract server and port match = re.compile( - r'^(https?://)?(?P(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|([-_\d\w\.]+))(:(?P\d{1,5}))?/?$' - ).match(self.get_option('endpoints')) + r"^(https?://)?(?P(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|([-_\d\w\.]+))(:(?P\d{1,5}))?/?$" + ).match(self.get_option("endpoints")) if match: - if match.group('host'): - client_params['host'] = match.group('host') - if match.group('port'): - client_params['port'] = match.group('port') + if match.group("host"): + client_params["host"] = match.group("host") + if match.group("port"): + client_params["port"] = match.group("port") for opt in etcd3_cnx_opts: if self.get_option(opt): client_params[opt] = self.get_option(opt) cnx_log = dict(client_params) - if 'password' in cnx_log: - cnx_log['password'] = '' + if "password" in cnx_log: + cnx_log["password"] = "" display.verbose(f"etcd3 connection parameters: {cnx_log}") # connect to etcd3 server @@ -210,18 +209,18 @@ def run(self, terms, variables, **kwargs): ret = [] # we can pass many keys to lookup for term in terms: - if self.get_option('prefix'): + if self.get_option("prefix"): try: for val, meta in etcd.get_prefix(term): if val and meta: - ret.append({'key': to_native(meta.key), 'value': to_native(val)}) + ret.append({"key": to_native(meta.key), "value": to_native(val)}) except Exception as exp: - display.warning(f'Caught except during etcd3.get_prefix: {exp}') + display.warning(f"Caught except during etcd3.get_prefix: {exp}") else: try: val, meta = etcd.get(term) if val and meta: - ret.append({'key': to_native(meta.key), 'value': to_native(val)}) + ret.append({"key": to_native(meta.key), "value": to_native(val)}) except Exception as exp: - display.warning(f'Caught except during etcd3.get: {exp}') + display.warning(f"Caught except during etcd3.get: {exp}") return ret diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py index 75ef55253f4..5dda5583d4b 100644 --- a/plugins/lookup/filetree.py +++ b/plugins/lookup/filetree.py @@ -133,6 +133,7 @@ HAVE_SELINUX = False try: import selinux + HAVE_SELINUX = True except ImportError: pass @@ -157,78 +158,76 @@ def selinux_context(path): if ret[0] != -1: # Limit split to 4 because the selevel, the last in the list, # may contain ':' characters - context = ret[1].split(':', 3) + context = ret[1].split(":", 3) return context def file_props(root, path): - ''' Returns dictionary with file properties, or return None on failure ''' + """Returns dictionary with file properties, or return None on failure""" abspath = os.path.join(root, path) try: st = os.lstat(abspath) except OSError as e: - display.warning(f'filetree: Error using stat() on path {abspath} ({e})') + display.warning(f"filetree: Error using stat() on path {abspath} ({e})") return None ret = dict(root=root, path=path) if stat.S_ISLNK(st.st_mode): - ret['state'] = 'link' - ret['src'] = os.readlink(abspath) + ret["state"] = "link" + ret["src"] = os.readlink(abspath) elif stat.S_ISDIR(st.st_mode): - ret['state'] = 'directory' + ret["state"] = "directory" elif stat.S_ISREG(st.st_mode): - ret['state'] = 'file' - ret['src'] = abspath + ret["state"] = "file" + ret["src"] = abspath else: - display.warning(f'filetree: Error file type of {abspath} is not supported') + display.warning(f"filetree: Error file type of {abspath} is not supported") return None - ret['uid'] = st.st_uid - ret['gid'] = st.st_gid + ret["uid"] = st.st_uid + ret["gid"] = st.st_gid try: - ret['owner'] = pwd.getpwuid(st.st_uid).pw_name + ret["owner"] = 
pwd.getpwuid(st.st_uid).pw_name except KeyError: - ret['owner'] = st.st_uid + ret["owner"] = st.st_uid try: - ret['group'] = to_text(grp.getgrgid(st.st_gid).gr_name) + ret["group"] = to_text(grp.getgrgid(st.st_gid).gr_name) except KeyError: - ret['group'] = st.st_gid - ret['mode'] = f'0{stat.S_IMODE(st.st_mode):03o}' - ret['size'] = st.st_size - ret['mtime'] = st.st_mtime - ret['ctime'] = st.st_ctime + ret["group"] = st.st_gid + ret["mode"] = f"0{stat.S_IMODE(st.st_mode):03o}" + ret["size"] = st.st_size + ret["mtime"] = st.st_mtime + ret["ctime"] = st.st_ctime if HAVE_SELINUX and selinux.is_selinux_enabled() == 1: context = selinux_context(abspath) - ret['seuser'] = context[0] - ret['serole'] = context[1] - ret['setype'] = context[2] - ret['selevel'] = context[3] + ret["seuser"] = context[0] + ret["serole"] = context[1] + ret["setype"] = context[2] + ret["selevel"] = context[3] return ret class LookupModule(LookupBase): - def run(self, terms, variables=None, **kwargs): self.set_options(var_options=variables, direct=kwargs) basedir = self.get_basedir(variables) # Regular expression for exclude - exclude = self.get_option('exclude') + exclude = self.get_option("exclude") exclude_pattern = re.compile(exclude) if exclude else None ret = [] for term in terms: term_file = os.path.basename(term) - dwimmed_path = self._loader.path_dwim_relative(basedir, 'files', os.path.dirname(term)) + dwimmed_path = self._loader.path_dwim_relative(basedir, "files", os.path.dirname(term)) path = os.path.join(dwimmed_path, term_file) display.debug(f"Walking '{path}'") for root, dirs, files in os.walk(path, topdown=True): - # Filter files and directories using a regular expression if exclude_pattern is not None: dirs[:] = [d for d in dirs if not exclude_pattern.match(d)] @@ -238,7 +237,7 @@ def run(self, terms, variables=None, **kwargs): relpath = os.path.relpath(os.path.join(root, entry), path) # Skip if relpath was already processed (from another root) - if relpath not in [entry['path'] for entry in ret]: + if relpath not in [entry["path"] for entry in ret]: props = file_props(path, relpath) if props is not None: display.debug(f" found '{os.path.join(path, relpath)}'") diff --git a/plugins/lookup/flattened.py b/plugins/lookup/flattened.py index 0ed92afa278..801758a4503 100644 --- a/plugins/lookup/flattened.py +++ b/plugins/lookup/flattened.py @@ -40,7 +40,6 @@ class LookupModule(LookupBase): - def _check_list_of_one_list(self, term): # make sure term is not a list of one (list of one..) 
item # return the final non list item if so @@ -53,12 +52,11 @@ def _check_list_of_one_list(self, term): return term def _do_flatten(self, terms, variables): - ret = [] for term in terms: term = self._check_list_of_one_list(term) - if term == 'None' or term == 'null': + if term == "None" or term == "null": # ignore undefined items break diff --git a/plugins/lookup/github_app_access_token.py b/plugins/lookup/github_app_access_token.py index dd0d9c18701..d8b7e64f5e4 100644 --- a/plugins/lookup/github_app_access_token.py +++ b/plugins/lookup/github_app_access_token.py @@ -75,18 +75,21 @@ try: import jwt + HAS_JWT = True except ImportError: HAS_JWT = False HAS_PYTHON_JWT = False # vs pyjwt -if HAS_JWT and hasattr(jwt, 'JWT'): +if HAS_JWT and hasattr(jwt, "JWT"): HAS_PYTHON_JWT = True from jwt import jwk_from_pem, JWT # type: ignore[attr-defined] + jwt_instance = JWT() try: from cryptography.hazmat.primitives import serialization + HAS_CRYPTOGRAPHY = True except ImportError: HAS_CRYPTOGRAPHY = False @@ -105,13 +108,12 @@ class PythonJWT: - @staticmethod def read_key(path, private_key=None): try: if private_key: - return jwk_from_pem(private_key.encode('utf-8')) - with open(path, 'rb') as pem_file: + return jwk_from_pem(private_key.encode("utf-8")) + with open(path, "rb") as pem_file: return jwk_from_pem(pem_file.read()) except Exception as e: raise AnsibleError(f"Error while parsing key file: {e}") @@ -120,12 +122,12 @@ def read_key(path, private_key=None): def encode_jwt(app_id, jwk, exp=600): now = int(time.time()) payload = { - 'iat': now, - 'exp': now + exp, - 'iss': app_id, + "iat": now, + "exp": now + exp, + "iss": app_id, } try: - return jwt_instance.encode(payload, jwk, alg='RS256') + return jwt_instance.encode(payload, jwk, alg="RS256") except Exception as e: raise AnsibleError(f"Error while encoding jwt: {e}") @@ -135,9 +137,9 @@ def read_key(path, private_key=None): return PythonJWT.read_key(path, private_key) try: if private_key: - key_bytes = private_key.encode('utf-8') + key_bytes = private_key.encode("utf-8") else: - with open(path, 'rb') as pem_file: + with open(path, "rb") as pem_file: key_bytes = pem_file.read() return serialization.load_pem_private_key(key_bytes, password=None) except Exception as e: @@ -149,26 +151,26 @@ def encode_jwt(app_id, private_key_obj, exp=600): return PythonJWT.encode_jwt(app_id, private_key_obj) now = int(time.time()) payload = { - 'iat': now, - 'exp': now + exp, - 'iss': app_id, + "iat": now, + "exp": now + exp, + "iss": app_id, } try: - return jwt.encode(payload, private_key_obj, algorithm='RS256') + return jwt.encode(payload, private_key_obj, algorithm="RS256") except Exception as e: raise AnsibleError(f"Error while encoding jwt: {e}") def post_request(generated_jwt, installation_id, api_base): - base = api_base.rstrip('/') + base = api_base.rstrip("/") github_url = f"{base}/app/installations/{installation_id}/access_tokens" headers = { - "Authorization": f'Bearer {generated_jwt}', + "Authorization": f"Bearer {generated_jwt}", "Accept": "application/vnd.github.v3+json", } try: - response = open_url(github_url, headers=headers, method='POST') + response = open_url(github_url, headers=headers, method="POST") except HTTPError as e: try: error_body = json.loads(e.read().decode()) @@ -182,10 +184,10 @@ def post_request(generated_jwt, installation_id, api_base): raise AnsibleError(f"Unexpected data returned: {e} -- {error_body}") response_body = response.read() try: - json_data = json.loads(response_body.decode('utf-8')) + json_data = 
json.loads(response_body.decode("utf-8")) except json.decoder.JSONDecodeError as e: raise AnsibleError(f"Error while dencoding JSON respone from github: {e}") - return json_data.get('token') + return json_data.get("token") def get_token(key_path, app_id, installation_id, private_key, github_url, expiry=600): @@ -197,12 +199,12 @@ def get_token(key_path, app_id, installation_id, private_key, github_url, expiry class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): if not HAS_JWT: - raise AnsibleError('Python jwt library is required. ' - 'Please install using "pip install pyjwt"') + raise AnsibleError('Python jwt library is required. Please install using "pip install pyjwt"') if not HAS_PYTHON_JWT and not HAS_CRYPTOGRAPHY: - raise AnsibleError('Python cryptography library is required. ' - 'Please install using "pip install cryptography"') + raise AnsibleError( + 'Python cryptography library is required. Please install using "pip install cryptography"' + ) self.set_options(var_options=variables, direct=kwargs) @@ -212,12 +214,12 @@ def run(self, terms, variables=None, **kwargs): raise AnsibleOptionsError("key_path and private_key are mutually exclusive") t = get_token( - self.get_option('key_path'), - self.get_option('app_id'), - self.get_option('installation_id'), - self.get_option('private_key'), - self.get_option('github_url'), - self.get_option('token_expiry'), + self.get_option("key_path"), + self.get_option("app_id"), + self.get_option("installation_id"), + self.get_option("private_key"), + self.get_option("github_url"), + self.get_option("token_expiry"), ) return [t] diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index ce7e78e8514..f6837baa1e3 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -79,7 +79,7 @@ def __init__(self, hiera_cfg, hiera_bin): def get(self, hiera_key): pargs = [self.hiera_bin] - pargs.extend(['-c', self.hiera_cfg]) + pargs.extend(["-c", self.hiera_cfg]) pargs.extend(hiera_key) @@ -92,6 +92,6 @@ class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): self.set_options(var_options=variables, direct=kwargs) - hiera = Hiera(self.get_option('config_file'), self.get_option('executable')) + hiera = Hiera(self.get_option("config_file"), self.get_option("executable")) ret = [hiera.get(terms)] return ret diff --git a/plugins/lookup/keyring.py b/plugins/lookup/keyring.py index 73fca84e6fa..923b00831b1 100644 --- a/plugins/lookup/keyring.py +++ b/plugins/lookup/keyring.py @@ -52,7 +52,6 @@ class LookupModule(LookupBase): - def run(self, terms, variables=None, **kwargs): if not HAS_KEYRING: raise AnsibleError("Can't LOOKUP(keyring): missing required python library 'keyring'") diff --git a/plugins/lookup/lastpass.py b/plugins/lookup/lastpass.py index f55346ff13d..18b13bb05b3 100644 --- a/plugins/lookup/lastpass.py +++ b/plugins/lookup/lastpass.py @@ -51,8 +51,7 @@ class LPassException(AnsibleError): class LPass: - - def __init__(self, path='lpass'): + def __init__(self, path="lpass"): self._cli_path = path @property @@ -70,7 +69,7 @@ def _run(self, args, stdin=None, expected_rc=0): rc = p.wait() if rc != expected_rc: raise LPassException(err) - return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict') + return to_text(out, errors="surrogate_or_strict"), to_text(err, errors="surrogate_or_strict") def _build_args(self, command, args=None): if args is None: @@ -80,7 +79,7 @@ def _build_args(self, command, args=None): return args def get_field(self, key, field): - 
if field in ['username', 'password', 'url', 'notes', 'id', 'name']: + if field in ["username", "password", "url", "notes", "id", "name"]: out, err = self._run(self._build_args("show", [f"--{field}", key])) else: out, err = self._run(self._build_args("show", [f"--field={field}", key])) @@ -88,10 +87,9 @@ def get_field(self, key, field): class LookupModule(LookupBase): - def run(self, terms, variables=None, **kwargs): self.set_options(var_options=variables, direct=kwargs) - field = self.get_option('field') + field = self.get_option("field") lp = LPass() diff --git a/plugins/lookup/lmdb_kv.py b/plugins/lookup/lmdb_kv.py index f9b0d9482f9..236588a9657 100644 --- a/plugins/lookup/lmdb_kv.py +++ b/plugins/lookup/lmdb_kv.py @@ -70,9 +70,8 @@ class LookupModule(LookupBase): - def run(self, terms, variables=None, **kwargs): - ''' + """ terms contain any number of keys to be retrieved. If terms is None, all keys from the database are returned with their values, and if term ends in an asterisk, we @@ -83,13 +82,13 @@ def run(self, terms, variables=None, **kwargs): vars: - lmdb_kv_db: "jp.mdb" - ''' + """ if HAVE_LMDB is False: raise AnsibleError("Can't LOOKUP(lmdb_kv): this module requires lmdb to be installed") self.set_options(var_options=variables, direct=kwargs) - db = self.get_option('db') + db = self.get_option("db") try: env = lmdb.open(str(db), readonly=True) @@ -107,7 +106,7 @@ def run(self, terms, variables=None, **kwargs): else: for term in terms: with env.begin() as txn: - if term.endswith('*'): + if term.endswith("*"): cursor = txn.cursor() prefix = term[:-1] # strip asterisk cursor.set_range(to_text(term).encode()) diff --git a/plugins/lookup/merge_variables.py b/plugins/lookup/merge_variables.py index 5c1686b499b..7be8cbf83c5 100644 --- a/plugins/lookup/merge_variables.py +++ b/plugins/lookup/merge_variables.py @@ -136,9 +136,9 @@ class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): self.set_options(direct=kwargs) initial_value = self.get_option("initial_value", None) - self._override = self.get_option('override', 'error') - self._pattern_type = self.get_option('pattern_type', 'regex') - self._groups = self.get_option('groups', None) + self._override = self.get_option("override", "error") + self._pattern_type = self.get_option("pattern_type", "regex") + self._groups = self.get_option("groups", None) ret = [] for term in terms: @@ -159,7 +159,7 @@ def run(self, terms, variables=None, **kwargs): return ret def _is_host_in_allowed_groups(self, host_groups): - if 'all' in self._groups: + if "all" in self._groups: return True group_intersection = [host_group_name for host_group_name in host_groups if host_group_name in self._groups] @@ -191,7 +191,9 @@ def _merge_vars(self, search_pattern, initial_value, variables): result = initial_value for var_name in var_merge_names: - temp_templar = self._templar.copy_with_new_env(available_variables=variables) # tmp. switch renderer to context of current variables + temp_templar = self._templar.copy_with_new_env( + available_variables=variables + ) # tmp. 
switch renderer to context of current variables var_value = temp_templar.template(variables[var_name]) # Render jinja2 templates var_type = _verify_and_get_type(var_value) diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py index 773b1d273c2..9fc41427241 100644 --- a/plugins/lookup/onepassword.py +++ b/plugins/lookup/onepassword.py @@ -299,11 +299,13 @@ def assert_logged_in(self): def full_signin(self): if self.connect_host or self.connect_token: raise AnsibleLookupError( - "1Password Connect is not available with 1Password CLI version 1. Please use version 2 or later.") + "1Password Connect is not available with 1Password CLI version 1. Please use version 2 or later." + ) if self.service_account_token: raise AnsibleLookupError( - "1Password CLI version 1 does not support Service Accounts. Please use version 2 or later.") + "1Password CLI version 1 does not support Service Accounts. Please use version 2 or later." + ) required_params = [ "subdomain", @@ -338,7 +340,7 @@ def get_raw(self, item_id, vault=None, token=None): return self._run(args) def signin(self): - self._check_required_params(['master_password']) + self._check_required_params(["master_password"]) args = ["signin", "--raw"] if self.subdomain: @@ -351,6 +353,7 @@ class OnePassCLIv2(OnePassCLIBase): """ CLIv2 Syntax Reference: https://developer.1password.com/docs/cli/upgrade#step-2-update-your-scripts """ + supports_version = "2" def _parse_field(self, data_json, field_name, section_title=None): @@ -533,9 +536,13 @@ def full_signin(self): self._check_required_params(required_params) args = [ - "account", "add", "--raw", - "--address", f"{self.subdomain}.{self.domain}", - "--email", to_bytes(self.username), + "account", + "add", + "--raw", + "--address", + f"{self.subdomain}.{self.domain}", + "--email", + to_bytes(self.username), "--signin", ] @@ -574,7 +581,7 @@ def get_raw(self, item_id, vault=None, token=None): return self._add_parameters_and_run(args, vault=vault, token=token) def signin(self): - self._check_required_params(['master_password']) + self._check_required_params(["master_password"]) args = ["signin", "--raw"] if self.subdomain: @@ -584,8 +591,19 @@ def signin(self): class OnePass: - def __init__(self, subdomain=None, domain="1password.com", username=None, secret_key=None, master_password=None, - service_account_token=None, account_id=None, connect_host=None, connect_token=None, cli_class=None): + def __init__( + self, + subdomain=None, + domain="1password.com", + username=None, + secret_key=None, + master_password=None, + service_account_token=None, + account_id=None, + connect_host=None, + connect_token=None, + cli_class=None, + ): self.subdomain = subdomain self.domain = domain self.username = username @@ -607,15 +625,33 @@ def __init__(self, subdomain=None, domain="1password.com", username=None, secret def _get_cli_class(self, cli_class=None): if cli_class is not None: - return cli_class(self.subdomain, self.domain, self.username, self.secret_key, self.master_password, self.service_account_token, - self.account_id, self.connect_host, self.connect_token) + return cli_class( + self.subdomain, + self.domain, + self.username, + self.secret_key, + self.master_password, + self.service_account_token, + self.account_id, + self.connect_host, + self.connect_token, + ) version = OnePassCLIBase.get_current_version() for cls in OnePassCLIBase.__subclasses__(): if cls.supports_version == version.split(".")[0]: try: - return cls(self.subdomain, self.domain, self.username, self.secret_key, 
self.master_password, self.service_account_token, - self.account_id, self.connect_host, self.connect_token) + return cls( + self.subdomain, + self.domain, + self.username, + self.secret_key, + self.master_password, + self.service_account_token, + self.account_id, + self.connect_host, + self.connect_token, + ) except TypeError as e: raise AnsibleLookupError(e) @@ -666,7 +702,6 @@ def get_field(self, item_id, field, section=None, vault=None): class LookupModule(LookupBase): - def run(self, terms, variables=None, **kwargs): self.set_options(var_options=variables, direct=kwargs) diff --git a/plugins/lookup/onepassword_raw.py b/plugins/lookup/onepassword_raw.py index b75be3d630c..7639cb76b43 100644 --- a/plugins/lookup/onepassword_raw.py +++ b/plugins/lookup/onepassword_raw.py @@ -59,7 +59,6 @@ class LookupModule(LookupBase): - def run(self, terms, variables=None, **kwargs): self.set_options(var_options=variables, direct=kwargs) diff --git a/plugins/lookup/onepassword_ssh_key.py b/plugins/lookup/onepassword_ssh_key.py index 35e3034e043..2828dd8aa8b 100644 --- a/plugins/lookup/onepassword_ssh_key.py +++ b/plugins/lookup/onepassword_ssh_key.py @@ -76,11 +76,7 @@ def get_ssh_key(self, out, item_id, ssh_format=False): raise AnsibleLookupError(f"No private key found for item {item_id}.") if ssh_format: - return ( - private_key_field.get("ssh_formats", {}) - .get("openssh", {}) - .get("value", "") - ) + return private_key_field.get("ssh_formats", {}).get("openssh", {}).get("value", "") return private_key_field.get("value", "") def run(self, terms, variables=None, **kwargs): @@ -112,7 +108,4 @@ def run(self, terms, variables=None, **kwargs): ) op.assert_logged_in() - return [ - self.get_ssh_key(op.get_raw(term, vault), term, ssh_format=ssh_format) - for term in terms - ] + return [self.get_ssh_key(op.get_raw(term, vault), term, ssh_format=ssh_format) for term in terms] diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index e0912ad397a..f427f4ab610 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -264,16 +264,16 @@ # http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output # note: contains special logic for calling 'pass', so not a drop-in replacement for check_output def check_output2(*popenargs, **kwargs): - if 'stdout' in kwargs: - raise ValueError('stdout argument not allowed, it will be overridden.') - if 'stderr' in kwargs: - raise ValueError('stderr argument not allowed, it will be overridden.') - if 'input' in kwargs: - if 'stdin' in kwargs: - raise ValueError('stdin and input arguments may not both be used.') - b_inputdata = to_bytes(kwargs['input'], errors='surrogate_or_strict') - del kwargs['input'] - kwargs['stdin'] = subprocess.PIPE + if "stdout" in kwargs: + raise ValueError("stdout argument not allowed, it will be overridden.") + if "stderr" in kwargs: + raise ValueError("stderr argument not allowed, it will be overridden.") + if "input" in kwargs: + if "stdin" in kwargs: + raise ValueError("stdin and input arguments may not both be used.") + b_inputdata = to_bytes(kwargs["input"], errors="surrogate_or_strict") + del kwargs["input"] + kwargs["stdin"] = subprocess.PIPE else: b_inputdata = None process = subprocess.Popen(*popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) @@ -284,24 +284,20 @@ def check_output2(*popenargs, **kwargs): process.wait() raise retcode = process.poll() - if retcode == 0 and (b'encryption failed: Unusable public key' in b_out or - b'encryption failed: 
Unusable public key' in b_err): + if retcode == 0 and ( + b"encryption failed: Unusable public key" in b_out or b"encryption failed: Unusable public key" in b_err + ): retcode = 78 # os.EX_CONFIG if retcode != 0: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] - raise subprocess.CalledProcessError( - retcode, - cmd, - to_native(b_out + b_err, errors='surrogate_or_strict') - ) + raise subprocess.CalledProcessError(retcode, cmd, to_native(b_out + b_err, errors="surrogate_or_strict")) return b_out class LookupModule(LookupBase): def __init__(self, loader=None, templar=None, **kwargs): - super().__init__(loader, templar, **kwargs) self.realpass = None @@ -309,12 +305,11 @@ def is_real_pass(self): if self.realpass is None: try: passoutput = to_text( - check_output2([self.pass_cmd, "--version"], env=self.env), - errors='surrogate_or_strict' + check_output2([self.pass_cmd, "--version"], env=self.env), errors="surrogate_or_strict" ) - self.realpass = 'pass: the standard unix password manager' in passoutput - except (subprocess.CalledProcessError) as e: - raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}') + self.realpass = "pass: the standard unix password manager" in passoutput + except subprocess.CalledProcessError as e: + raise AnsibleError(f"exit code {e.returncode} while running {e.cmd}. Error output: {e.output}") return self.realpass @@ -329,96 +324,96 @@ def parse_params(self, term): # next parse the optional parameters in keyvalue pairs try: for param in params[1:]: - name, value = param.split('=', 1) + name, value = param.split("=", 1) if name not in self.paramvals: - raise AnsibleAssertionError(f'{name} not in paramvals') + raise AnsibleAssertionError(f"{name} not in paramvals") self.paramvals[name] = value except (ValueError, AssertionError) as e: raise AnsibleError(e) # check and convert values try: - for key in ['create', 'returnall', 'overwrite', 'backup', 'nosymbols']: + for key in ["create", "returnall", "overwrite", "backup", "nosymbols"]: if not isinstance(self.paramvals[key], bool): self.paramvals[key] = boolean(self.paramvals[key]) except (ValueError, AssertionError) as e: raise AnsibleError(e) - if self.paramvals['missing'] not in ['error', 'warn', 'create', 'empty']: + if self.paramvals["missing"] not in ["error", "warn", "create", "empty"]: raise AnsibleError(f"{self.paramvals['missing']} is not a valid option for missing") - if not isinstance(self.paramvals['length'], int): - if self.paramvals['length'].isdigit(): - self.paramvals['length'] = int(self.paramvals['length']) + if not isinstance(self.paramvals["length"], int): + if self.paramvals["length"].isdigit(): + self.paramvals["length"] = int(self.paramvals["length"]) else: raise AnsibleError(f"{self.paramvals['length']} is not a correct value for length") - if self.paramvals['create']: - self.paramvals['missing'] = 'create' + if self.paramvals["create"]: + self.paramvals["missing"] = "create" # Collect pass environment variables from the plugin's parameters. 
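
# Aside (not part of the diff): a standalone sketch of the environment setup on
# the next lines, using only the standard library; build_pass_env is a
# hypothetical helper, not part of the plugin.
import os

def build_pass_env(backend="pass", directory=None, umask=None):
    env = os.environ.copy()
    env["LANGUAGE"] = "C"  # check_output2 matches English error strings
    if backend == "gopass":
        env["GOPASS_NO_REMINDER"] = "YES"
    elif directory is not None and os.path.isdir(directory):
        env["PASSWORD_STORE_DIR"] = directory  # point pass at a custom store
    if umask is not None:
        if len(umask) != 3:
            raise ValueError("umask must have a length of 3")
        if int(umask[0]) > 3:
            raise ValueError("umask would leave the password not user readable")
        env["PASSWORD_STORE_UMASK"] = umask
    return env
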
self.env = os.environ.copy() - self.env['LANGUAGE'] = 'C' # make sure to get errors in English as required by check_output2 + self.env["LANGUAGE"] = "C" # make sure to get errors in English as required by check_output2 - if self.backend == 'gopass': - self.env['GOPASS_NO_REMINDER'] = "YES" - elif os.path.isdir(self.paramvals['directory']): + if self.backend == "gopass": + self.env["GOPASS_NO_REMINDER"] = "YES" + elif os.path.isdir(self.paramvals["directory"]): # Set PASSWORD_STORE_DIR - self.env['PASSWORD_STORE_DIR'] = self.paramvals['directory'] + self.env["PASSWORD_STORE_DIR"] = self.paramvals["directory"] elif self.is_real_pass(): raise AnsibleError(f"Passwordstore directory '{self.paramvals['directory']}' does not exist") # Set PASSWORD_STORE_UMASK if umask is set - if self.paramvals.get('umask') is not None: - if len(self.paramvals['umask']) != 3: - raise AnsibleError('Passwordstore umask must have a length of 3.') - elif int(self.paramvals['umask'][0]) > 3: - raise AnsibleError('Passwordstore umask not allowed (password not user readable).') + if self.paramvals.get("umask") is not None: + if len(self.paramvals["umask"]) != 3: + raise AnsibleError("Passwordstore umask must have a length of 3.") + elif int(self.paramvals["umask"][0]) > 3: + raise AnsibleError("Passwordstore umask not allowed (password not user readable).") else: - self.env['PASSWORD_STORE_UMASK'] = self.paramvals['umask'] + self.env["PASSWORD_STORE_UMASK"] = self.paramvals["umask"] def check_pass(self): try: self.passoutput = to_text( - check_output2([self.pass_cmd, 'show'] + - [self.passname], env=self.env), - errors='surrogate_or_strict' + check_output2([self.pass_cmd, "show"] + [self.passname], env=self.env), errors="surrogate_or_strict" ).splitlines() self.password = self.passoutput[0] self.passdict = {} try: - values = yaml.safe_load('\n'.join(self.passoutput[1:])) + values = yaml.safe_load("\n".join(self.passoutput[1:])) for key, item in values.items(): self.passdict[key] = item except (yaml.YAMLError, AttributeError): for line in self.passoutput[1:]: - if ':' in line: - name, value = line.split(':', 1) + if ":" in line: + name, value = line.split(":", 1) self.passdict[name.strip()] = value.strip() - if (self.backend == 'gopass' or - os.path.isfile(os.path.join(self.paramvals['directory'], f"{self.passname}.gpg")) - or not self.is_real_pass()): + if ( + self.backend == "gopass" + or os.path.isfile(os.path.join(self.paramvals["directory"], f"{self.passname}.gpg")) + or not self.is_real_pass() + ): # When using real pass, only accept password as found if there is a .gpg file for it (might be a tree node otherwise) return True - except (subprocess.CalledProcessError) as e: + except subprocess.CalledProcessError as e: # 'not in password store' is the expected error if a password wasn't found - if 'not in the password store' not in e.output: - raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}') + if "not in the password store" not in e.output: + raise AnsibleError(f"exit code {e.returncode} while running {e.cmd}. 
Error output: {e.output}") - if self.paramvals['missing'] == 'error': - raise AnsibleError(f'passwordstore: passname {self.passname} not found and missing=error is set') - elif self.paramvals['missing'] == 'warn': - display.warning(f'passwordstore: passname {self.passname} not found') + if self.paramvals["missing"] == "error": + raise AnsibleError(f"passwordstore: passname {self.passname} not found and missing=error is set") + elif self.paramvals["missing"] == "warn": + display.warning(f"passwordstore: passname {self.passname} not found") return False def get_newpass(self): - if self.paramvals['nosymbols']: + if self.paramvals["nosymbols"]: chars = C.DEFAULT_PASSWORD_CHARS[:62] else: chars = C.DEFAULT_PASSWORD_CHARS - if self.paramvals['userpass']: - newpass = self.paramvals['userpass'] + if self.paramvals["userpass"]: + newpass = self.paramvals["userpass"] else: - newpass = random_password(length=self.paramvals['length'], chars=chars) + newpass = random_password(length=self.paramvals["length"], chars=chars) return newpass def update_password(self): @@ -429,7 +424,6 @@ def update_password(self): subkey = self.paramvals["subkey"] if subkey != "password": - msg_lines = [] subkey_exists = False subkey_line = f"{subkey}: {newpass}" @@ -447,27 +441,25 @@ def update_password(self): msg_lines.insert(2, subkey_line) if self.paramvals["timestamp"] and self.paramvals["backup"] and oldpass and oldpass != newpass: - msg_lines.append( - f"lookup_pass: old subkey '{subkey}' password was {oldpass} (Updated on {datetime})\n" - ) + msg_lines.append(f"lookup_pass: old subkey '{subkey}' password was {oldpass} (Updated on {datetime})\n") msg = os.linesep.join(msg_lines) else: msg = newpass - if self.paramvals['preserve'] or self.paramvals['timestamp']: - msg += '\n' - if self.paramvals['preserve'] and self.passoutput[1:]: - msg += '\n'.join(self.passoutput[1:]) - msg += '\n' - if self.paramvals['timestamp'] and self.paramvals['backup']: + if self.paramvals["preserve"] or self.paramvals["timestamp"]: + msg += "\n" + if self.paramvals["preserve"] and self.passoutput[1:]: + msg += "\n".join(self.passoutput[1:]) + msg += "\n" + if self.paramvals["timestamp"] and self.paramvals["backup"]: msg += f"lookup_pass: old password was {self.password} (Updated on {datetime})\n" try: - check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env) - except (subprocess.CalledProcessError) as e: - raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}') + check_output2([self.pass_cmd, "insert", "-f", "-m", self.passname], input=msg, env=self.env) + except subprocess.CalledProcessError as e: + raise AnsibleError(f"exit code {e.returncode} while running {e.cmd}. Error output: {e.output}") return newpass def generate_password(self): @@ -482,24 +474,24 @@ def generate_password(self): else: msg = newpass - if self.paramvals['timestamp']: + if self.paramvals["timestamp"]: msg += f"\nlookup_pass: First generated by ansible on {datetime}\n" try: - check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env) - except (subprocess.CalledProcessError) as e: - raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}') + check_output2([self.pass_cmd, "insert", "-f", "-m", self.passname], input=msg, env=self.env) + except subprocess.CalledProcessError as e: + raise AnsibleError(f"exit code {e.returncode} while running {e.cmd}. 
Error output: {e.output}") return newpass def get_passresult(self): - if self.paramvals['returnall']: + if self.paramvals["returnall"]: return os.linesep.join(self.passoutput) - if self.paramvals['subkey'] == 'password': + if self.paramvals["subkey"] == "password": return self.password else: - if self.paramvals['subkey'] in self.passdict: - return self.passdict[self.paramvals['subkey']] + if self.paramvals["subkey"] in self.passdict: + return self.passdict[self.paramvals["subkey"]] else: if self.paramvals["missing_subkey"] == "error": raise AnsibleError( @@ -515,10 +507,10 @@ def get_passresult(self): @contextmanager def opt_lock(self, type): - if self.get_option('lock') == type: - tmpdir = os.environ.get('TMPDIR', '/tmp') - user = os.environ.get('USER') - lockfile = os.path.join(tmpdir, f'.{user}.passwordstore.lock') + if self.get_option("lock") == type: + tmpdir = os.environ.get("TMPDIR", "/tmp") + user = os.environ.get("USER") + lockfile = os.path.join(tmpdir, f".{user}.passwordstore.lock") with FileLock().lock_file(lockfile, tmpdir, self.lock_timeout): self.locked = type yield @@ -527,40 +519,40 @@ def opt_lock(self, type): yield def setup(self, variables): - self.backend = self.get_option('backend') + self.backend = self.get_option("backend") self.pass_cmd = self.backend # pass and gopass are commands as well self.locked = None - timeout = self.get_option('locktimeout') - if not re.match('^[0-9]+[smh]$', timeout): + timeout = self.get_option("locktimeout") + if not re.match("^[0-9]+[smh]$", timeout): raise AnsibleError(f"{timeout} is not a correct value for locktimeout") unit_to_seconds = {"s": 1, "m": 60, "h": 3600} self.lock_timeout = int(timeout[:-1]) * unit_to_seconds[timeout[-1]] - directory = self.get_option('directory') + directory = self.get_option("directory") if directory is None: - if self.backend == 'gopass': + if self.backend == "gopass": try: - with open(os.path.expanduser('~/.config/gopass/config.yml')) as f: - directory = yaml.safe_load(f)['path'] + with open(os.path.expanduser("~/.config/gopass/config.yml")) as f: + directory = yaml.safe_load(f)["path"] except (FileNotFoundError, KeyError, yaml.YAMLError): - directory = os.path.expanduser('~/.local/share/gopass/stores/root') + directory = os.path.expanduser("~/.local/share/gopass/stores/root") else: - directory = os.path.expanduser('~/.password-store') + directory = os.path.expanduser("~/.password-store") self.paramvals = { - 'subkey': self.get_option('subkey'), - 'directory': directory, - 'create': self.get_option('create'), - 'returnall': self.get_option('returnall'), - 'overwrite': self.get_option('overwrite'), - 'nosymbols': self.get_option('nosymbols'), - 'userpass': self.get_option('userpass') or '', - 'length': self.get_option('length'), - 'backup': self.get_option('backup'), - 'missing': self.get_option('missing'), - 'umask': self.get_option('umask'), - 'timestamp': self.get_option('timestamp'), - 'preserve': self.get_option('preserve'), + "subkey": self.get_option("subkey"), + "directory": directory, + "create": self.get_option("create"), + "returnall": self.get_option("returnall"), + "overwrite": self.get_option("overwrite"), + "nosymbols": self.get_option("nosymbols"), + "userpass": self.get_option("userpass") or "", + "length": self.get_option("length"), + "backup": self.get_option("backup"), + "missing": self.get_option("missing"), + "umask": self.get_option("umask"), + "timestamp": self.get_option("timestamp"), + "preserve": self.get_option("preserve"), "missing_subkey": 
self.get_option("missing_subkey"), } @@ -570,25 +562,27 @@ def run(self, terms, variables, **kwargs): result = [] for term in terms: - self.parse_params(term) # parse the input into paramvals - with self.opt_lock('readwrite'): - if self.check_pass(): # password file exists - if self.paramvals['overwrite']: # if "overwrite", always update password - with self.opt_lock('write'): + self.parse_params(term) # parse the input into paramvals + with self.opt_lock("readwrite"): + if self.check_pass(): # password file exists + if self.paramvals["overwrite"]: # if "overwrite", always update password + with self.opt_lock("write"): result.append(self.update_password()) elif ( self.paramvals["subkey"] != "password" and not self.passdict.get(self.paramvals["subkey"]) and self.paramvals["missing"] == "create" ): # target is a subkey, this subkey is not in passdict BUT missing == create - with self.opt_lock('write'): + with self.opt_lock("write"): result.append(self.update_password()) else: result.append(self.get_passresult()) else: # password does not exist - if self.paramvals['missing'] == 'create': - with self.opt_lock('write'): - if self.locked == 'write' and self.check_pass(): # lookup password again if under write lock + if self.paramvals["missing"] == "create": + with self.opt_lock("write"): + if ( + self.locked == "write" and self.check_pass() + ): # lookup password again if under write lock result.append(self.get_passresult()) else: result.append(self.generate_password()) diff --git a/plugins/lookup/random_pet.py b/plugins/lookup/random_pet.py index 0ab3ee29d37..cdb06794a7f 100644 --- a/plugins/lookup/random_pet.py +++ b/plugins/lookup/random_pet.py @@ -78,18 +78,15 @@ class LookupModule(LookupBase): - def run(self, terms, variables=None, **kwargs): - if not HAS_PETNAME: - raise AnsibleError('Python petname library is required. ' - 'Please install using "pip install petname"') + raise AnsibleError('Python petname library is required. 
Please install using "pip install petname"') self.set_options(var_options=variables, direct=kwargs) - words = self.get_option('words') - length = self.get_option('length') - prefix = self.get_option('prefix') - separator = self.get_option('separator') + words = self.get_option("words") + length = self.get_option("length") + prefix = self.get_option("prefix") + separator = self.get_option("separator") values = petname.Generate(words=words, separator=separator, letters=length) if prefix: diff --git a/plugins/lookup/random_string.py b/plugins/lookup/random_string.py index 027a587ad87..a00dfc1997e 100644 --- a/plugins/lookup/random_string.py +++ b/plugins/lookup/random_string.py @@ -179,18 +179,12 @@ class LookupModule(LookupBase): @staticmethod def get_random(random_generator, chars, length): if not chars: - raise AnsibleLookupError( - "Available characters cannot be None, please change constraints" - ) + raise AnsibleLookupError("Available characters cannot be None, please change constraints") return "".join(random_generator.choice(chars) for dummy in range(length)) @staticmethod def b64encode(string_value, encoding="utf-8"): - return to_text( - base64.b64encode( - to_bytes(string_value, encoding=encoding, errors="surrogate_or_strict") - ) - ) + return to_text(base64.b64encode(to_bytes(string_value, encoding=encoding, errors="surrogate_or_strict"))) def run(self, terms, variables=None, **kwargs): number_chars = string.digits diff --git a/plugins/lookup/random_words.py b/plugins/lookup/random_words.py index dd06e701f88..cd5558cf58e 100644 --- a/plugins/lookup/random_words.py +++ b/plugins/lookup/random_words.py @@ -92,12 +92,8 @@ class LookupModule(LookupBase): """The random_words Ansible lookup class.""" def run(self, terms, variables=None, **kwargs): - if not HAS_XKCDPASS: - raise AnsibleLookupError( - "Python xkcdpass library is required. " - 'Please install using "pip install xkcdpass"' - ) + raise AnsibleLookupError('Python xkcdpass library is required. 
Please install using "pip install xkcdpass"') self.set_options(var_options=variables, direct=kwargs) method = self.get_option("case") @@ -107,12 +103,8 @@ def run(self, terms, variables=None, **kwargs): numwords = self.get_option("numwords") words = xp.locate_wordfile() - wordlist = xp.generate_wordlist( - max_length=max_length, min_length=min_length, wordfile=words - ) + wordlist = xp.generate_wordlist(max_length=max_length, min_length=min_length, wordfile=words) - values = xp.generate_xkcdpassword( - wordlist, case=method, delimiter=delimiter, numwords=numwords - ) + values = xp.generate_xkcdpassword(wordlist, case=method, delimiter=delimiter, numwords=numwords) return [values] diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py index 0073796a226..f71f398135c 100644 --- a/plugins/lookup/redis.py +++ b/plugins/lookup/redis.py @@ -76,6 +76,7 @@ HAVE_REDIS = False try: import redis + HAVE_REDIS = True except ImportError: pass @@ -86,9 +87,7 @@ class LookupModule(LookupBase): - def run(self, terms, variables, **kwargs): - if not HAVE_REDIS: raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed") @@ -96,9 +95,9 @@ def run(self, terms, variables, **kwargs): self.set_options(direct=kwargs) # setup connection - host = self.get_option('host') - port = self.get_option('port') - socket = self.get_option('socket') + host = self.get_option("host") + port = self.get_option("port") + socket = self.get_option("socket") if socket is None: conn = redis.Redis(host=host, port=port) else: @@ -113,5 +112,5 @@ def run(self, terms, variables, **kwargs): ret.append(to_text(res)) except Exception as e: # connection failed or key not found - raise AnsibleError(f'Encountered exception while fetching {term}: {e}') + raise AnsibleError(f"Encountered exception while fetching {term}: {e}") return ret diff --git a/plugins/lookup/revbitspss.py b/plugins/lookup/revbitspss.py index 549bcd87bd0..8451994e06a 100644 --- a/plugins/lookup/revbitspss.py +++ b/plugins/lookup/revbitspss.py @@ -78,19 +78,18 @@ class LookupModule(LookupBase): - @staticmethod def Client(server_parameters): return SecretServer(**server_parameters) def run(self, terms, variables, **kwargs): if ANOTHER_LIBRARY_IMPORT_ERROR: - raise AnsibleError('revbits_ansible must be installed to use this plugin') from ANOTHER_LIBRARY_IMPORT_ERROR + raise AnsibleError("revbits_ansible must be installed to use this plugin") from ANOTHER_LIBRARY_IMPORT_ERROR self.set_options(var_options=variables, direct=kwargs) secret_server = LookupModule.Client( { - "base_url": self.get_option('base_url'), - "api_key": self.get_option('api_key'), + "base_url": self.get_option("base_url"), + "api_key": self.get_option("api_key"), } ) result = [] diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py index 54d96e91d29..5166daa704c 100644 --- a/plugins/lookup/shelvefile.py +++ b/plugins/lookup/shelvefile.py @@ -46,7 +46,6 @@ class LookupModule(LookupBase): - def read_shelve(self, shelve_filename, key): """ Read the value of "key" from a shelve file @@ -68,19 +67,19 @@ def run(self, terms, variables=None, **kwargs): try: for param in params: - name, value = param.split('=') + name, value = param.split("=") if name not in paramvals: - raise AnsibleAssertionError(f'{name} not in paramvals') + raise AnsibleAssertionError(f"{name} not in paramvals") paramvals[name] = value except (ValueError, AssertionError) as e: # In case "file" or "key" are not present raise AnsibleError(e) - key = paramvals['key'] + key = paramvals["key"] # Search also in 
the role/files directory and in the playbook directory - shelvefile = self.find_file_in_search_path(variables, 'files', paramvals['file']) + shelvefile = self.find_file_in_search_path(variables, "files", paramvals["file"]) if shelvefile: res = self.read_shelve(shelvefile, key) diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index 1d39d1b3a88..6e9236c8aff 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -260,14 +260,26 @@ from ansible.utils.display import Display try: - from delinea.secrets.server import SecretServer, SecretServerError, PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer + from delinea.secrets.server import ( + SecretServer, + SecretServerError, + PasswordGrantAuthorizer, + DomainPasswordGrantAuthorizer, + AccessTokenAuthorizer, + ) HAS_TSS_SDK = True HAS_DELINEA_SS_SDK = True HAS_TSS_AUTHORIZER = True except ImportError: try: - from thycotic.secrets.server import SecretServer, SecretServerError, PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer + from thycotic.secrets.server import ( + SecretServer, + SecretServerError, + PasswordGrantAuthorizer, + DomainPasswordGrantAuthorizer, + AccessTokenAuthorizer, + ) HAS_TSS_SDK = True HAS_DELINEA_SS_SDK = False @@ -312,11 +324,11 @@ def get_secret(self, term, secret_path, fetch_file_attachments, file_download_pa obj = self._client.get_secret_by_path(secret_path, fetch_file_attachments) else: obj = self._client.get_secret(secret_id, fetch_file_attachments) - for i in obj['items']: + for i in obj["items"]: if file_download_path and os.path.isdir(file_download_path): - if i['isFile']: + if i["isFile"]: try: - file_content = i['itemValue'].content + file_content = i["itemValue"].content with open(os.path.join(file_download_path, f"{obj['id']}_{i['slug']}"), "wb") as f: f.write(file_content) except ValueError: @@ -324,7 +336,7 @@ def get_secret(self, term, secret_path, fetch_file_attachments, file_download_pa except AttributeError: display.warning(f"Could not read file content for {i['slug']}") finally: - i['itemValue'] = "*** Not Valid For Display ***" + i["itemValue"] = "*** Not Valid For Display ***" else: raise AnsibleOptionsError("File download path does not exist") return obj @@ -377,9 +389,7 @@ def __init__(self, **server_parameters): super().__init__() authorizer = self._get_authorizer(**server_parameters) - self._client = SecretServer( - server_parameters["base_url"], authorizer, server_parameters["api_path_uri"] - ) + self._client = SecretServer(server_parameters["base_url"], authorizer, server_parameters["api_path_uri"]) @staticmethod def _get_authorizer(**server_parameters): diff --git a/plugins/module_utils/_filelock.py b/plugins/module_utils/_filelock.py index f5d0e276081..ce7f4b62614 100644 --- a/plugins/module_utils/_filelock.py +++ b/plugins/module_utils/_filelock.py @@ -21,20 +21,21 @@ class LockTimeout(Exception): class FileLock: - ''' + """ Currently FileLock is implemented via fcntl.flock on a lock file, however this behaviour may change in the future. 
Avoid mixing lock types fcntl.flock, fcntl.lockf and module_utils.common.file.FileLock as it will certainly cause unwanted and/or unexpected behaviour - ''' + """ + def __init__(self): self.lockfd = None @contextmanager def lock_file(self, path, tmpdir, lock_timeout=None): - ''' + """ Context for lock acquisition - ''' + """ try: self.set_lock(path, tmpdir, lock_timeout) yield @@ -42,7 +43,7 @@ def lock_file(self, path, tmpdir, lock_timeout=None): self.unlock() def set_lock(self, path, tmpdir, lock_timeout=None): - ''' + """ Create a lock file based on path with flock to prevent other processes using given path. Please note that currently file locking only works when it is executed by @@ -55,14 +56,14 @@ def set_lock(self, path, tmpdir, lock_timeout=None): 0 = Do not wait, fail if lock cannot be acquired immediately, Default is None, wait indefinitely until lock is released. :returns: True - ''' - lock_path = os.path.join(tmpdir, f'ansible-{os.path.basename(path)}.lock') + """ + lock_path = os.path.join(tmpdir, f"ansible-{os.path.basename(path)}.lock") l_wait = 0.1 r_exception = IOError if sys.version_info[0] == 3: r_exception = BlockingIOError - self.lockfd = open(lock_path, 'w') + self.lockfd = open(lock_path, "w") if lock_timeout <= 0: fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB) @@ -82,7 +83,7 @@ def set_lock(self, path, tmpdir, lock_timeout=None): continue self.lockfd.close() - raise LockTimeout(f'{lock_timeout} sec') + raise LockTimeout(f"{lock_timeout} sec") fcntl.flock(self.lockfd, fcntl.LOCK_EX) os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD) @@ -90,12 +91,12 @@ def set_lock(self, path, tmpdir, lock_timeout=None): return True def unlock(self): - ''' + """ Make sure lock file is available for everyone and Unlock the file descriptor locked by set_lock :returns: True - ''' + """ if not self.lockfd: return True diff --git a/plugins/module_utils/_mount.py b/plugins/module_utils/_mount.py index 33d191c845b..4d2545573fb 100644 --- a/plugins/module_utils/_mount.py +++ b/plugins/module_utils/_mount.py @@ -32,9 +32,9 @@ def ismount(path): return False if isinstance(path, bytes): - parent = os.path.join(path, b'..') + parent = os.path.join(path, b"..") else: - parent = os.path.join(path, '..') + parent = os.path.join(path, "..") parent = os.path.realpath(parent) try: s2 = os.lstat(parent) @@ -44,9 +44,9 @@ def ismount(path): dev1 = s1.st_dev dev2 = s2.st_dev if dev1 != dev2: - return True # path/.. on a different device as path + return True # path/.. on a different device as path ino1 = s1.st_ino ino2 = s2.st_ino if ino1 == ino2: - return True # path/.. is the same i-node as path + return True # path/.. 
is the same i-node as path return False diff --git a/plugins/module_utils/_stormssh.py b/plugins/module_utils/_stormssh.py index 8c1188d540e..4e71a770198 100644 --- a/plugins/module_utils/_stormssh.py +++ b/plugins/module_utils/_stormssh.py @@ -31,37 +31,44 @@ def parse(self, file_obj): @type file_obj: file """ order = 1 - host = {"host": ['*'], "config": {}, } + host = { + "host": ["*"], + "config": {}, + } for line in file_obj: - line = line.rstrip('\n').lstrip() - if line == '': - self._config.append({ - 'type': 'empty_line', - 'value': line, - 'host': '', - 'order': order, - }) + line = line.rstrip("\n").lstrip() + if line == "": + self._config.append( + { + "type": "empty_line", + "value": line, + "host": "", + "order": order, + } + ) order += 1 continue - if line.startswith('#'): - self._config.append({ - 'type': 'comment', - 'value': line, - 'host': '', - 'order': order, - }) + if line.startswith("#"): + self._config.append( + { + "type": "comment", + "value": line, + "host": "", + "order": order, + } + ) order += 1 continue - if '=' in line: + if "=" in line: # Ensure ProxyCommand gets properly split - if line.lower().strip().startswith('proxycommand'): + if line.lower().strip().startswith("proxycommand"): proxy_re = re.compile(r"^(proxycommand)\s*=*\s*(.*)", re.I) match = proxy_re.match(line) key, value = match.group(1).lower(), match.group(2) else: - key, value = line.split('=', 1) + key, value = line.split("=", 1) key = key.strip().lower() else: # find first whitespace, and split there @@ -69,26 +76,21 @@ def parse(self, file_obj): while (i < len(line)) and not line[i].isspace(): i += 1 if i == len(line): - raise Exception(f'Unparsable line: {line!r}') + raise Exception(f"Unparsable line: {line!r}") key = line[:i].lower() value = line[i:].lstrip() - if key == 'host': + if key == "host": self._config.append(host) value = value.split() - host = { - key: value, - 'config': {}, - 'type': 'entry', - 'order': order - } + host = {key: value, "config": {}, "type": "entry", "order": order} order += 1 - elif key in ['identityfile', 'localforward', 'remoteforward']: - if key in host['config']: - host['config'][key].append(value) + elif key in ["identityfile", "localforward", "remoteforward"]: + if key in host["config"]: + host["config"][key].append(value) else: - host['config'][key] = [value] - elif key not in host['config']: - host['config'].update({key: value}) + host["config"][key] = [value] + elif key not in host["config"]: + host["config"].update({key: value}) self._config.append(host) @@ -108,7 +110,7 @@ def __init__(self, ssh_config_file=None): if not os.path.exists(self.ssh_config_file): if not os.path.exists(os.path.dirname(self.ssh_config_file)): os.makedirs(os.path.dirname(self.ssh_config_file)) - open(self.ssh_config_file, 'w+').close() + open(self.ssh_config_file, "w+").close() os.chmod(self.ssh_config_file, 0o600) self.config_data = [] @@ -131,16 +133,18 @@ def load(self): continue host_item = { - 'host': entry["host"][0], - 'options': entry.get("config"), - 'type': 'entry', - 'order': entry.get("order", 0), + "host": entry["host"][0], + "options": entry.get("config"), + "type": "entry", + "order": entry.get("order", 0), } if len(entry["host"]) > 1: - host_item.update({ - 'host': " ".join(entry["host"]), - }) + host_item.update( + { + "host": " ".join(entry["host"]), + } + ) # minor bug in paramiko.SSHConfig that duplicates # "Host *" entries. 
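
# Aside (not part of the diff): the length check on the next line works around
# the paramiko quirk named in the comment above; a hedged sketch of the same
# idea, assuming entries shaped like the host_item dicts built in this hunk
# (drop_duplicate_wildcards is hypothetical, not part of the module).
def drop_duplicate_wildcards(entries):
    # keep a "Host *" record only when it actually carries options
    return [e for e in entries if e.get("host") != "*" or e.get("options")]
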
if entry.get("config") and len(entry.get("config")) > 0: @@ -149,20 +153,20 @@ def load(self): return self.config_data def add_host(self, host, options): - self.config_data.append({ - 'host': host, - 'options': options, - 'order': self.get_last_index(), - }) + self.config_data.append( + { + "host": host, + "options": options, + "order": self.get_last_index(), + } + ) return self def update_host(self, host, options, use_regex=False): for index, host_entry in enumerate(self.config_data): - if host_entry.get("host") == host or \ - (use_regex and re.match(host, host_entry.get("host"))): - - if 'deleted_fields' in options: + if host_entry.get("host") == host or (use_regex and re.match(host, host_entry.get("host"))): + if "deleted_fields" in options: deleted_fields = options.pop("deleted_fields") for deleted_field in deleted_fields: del self.config_data[index]["options"][deleted_field] @@ -174,7 +178,7 @@ def update_host(self, host, options, use_regex=False): def search_host(self, search_string): results = [] for host_entry in self.config_data: - if host_entry.get("type") != 'entry': + if host_entry.get("type") != "entry": continue if host_entry.get("host") == "*": continue @@ -201,7 +205,7 @@ def delete_host(self, host): found += 1 if found == 0: - raise ValueError('No host found') + raise ValueError("No host found") return self def delete_all_hosts(self): @@ -218,7 +222,7 @@ def dump(self): self.config_data = sorted(self.config_data, key=itemgetter("order")) for host_item in self.config_data: - if host_item.get("type") in ['comment', 'empty_line']: + if host_item.get("type") in ["comment", "empty_line"]: file_content += f"{host_item.get('value')}\n" continue host_item_content = f"Host {host_item.get('host')}\n" @@ -235,7 +239,7 @@ def dump(self): return file_content def write_to_ssh_config(self): - with open(self.ssh_config_file, 'w+') as f: + with open(self.ssh_config_file, "w+") as f: data = self.dump() if data: f.write(data) diff --git a/plugins/module_utils/alicloud_ecs.py b/plugins/module_utils/alicloud_ecs.py index e752b4aa4aa..c21c2261f74 100644 --- a/plugins/module_utils/alicloud_ecs.py +++ b/plugins/module_utils/alicloud_ecs.py @@ -41,13 +41,20 @@ class AnsibleACSError(Exception): def acs_common_argument_spec(): return dict( - alicloud_access_key=dict(aliases=['access_key_id', 'access_key'], no_log=True, - fallback=(env_fallback, ['ALICLOUD_ACCESS_KEY', 'ALICLOUD_ACCESS_KEY_ID'])), - alicloud_secret_key=dict(aliases=['secret_access_key', 'secret_key'], no_log=True, - fallback=(env_fallback, ['ALICLOUD_SECRET_KEY', 'ALICLOUD_SECRET_ACCESS_KEY'])), - alicloud_security_token=dict(aliases=['security_token'], no_log=True, - fallback=(env_fallback, ['ALICLOUD_SECURITY_TOKEN'])), - ecs_role_name=dict(aliases=['role_name'], fallback=(env_fallback, ['ALICLOUD_ECS_ROLE_NAME'])) + alicloud_access_key=dict( + aliases=["access_key_id", "access_key"], + no_log=True, + fallback=(env_fallback, ["ALICLOUD_ACCESS_KEY", "ALICLOUD_ACCESS_KEY_ID"]), + ), + alicloud_secret_key=dict( + aliases=["secret_access_key", "secret_key"], + no_log=True, + fallback=(env_fallback, ["ALICLOUD_SECRET_KEY", "ALICLOUD_SECRET_ACCESS_KEY"]), + ), + alicloud_security_token=dict( + aliases=["security_token"], no_log=True, fallback=(env_fallback, ["ALICLOUD_SECURITY_TOKEN"]) + ), + ecs_role_name=dict(aliases=["role_name"], fallback=(env_fallback, ["ALICLOUD_ECS_ROLE_NAME"])), ) @@ -55,31 +62,38 @@ def ecs_argument_spec(): spec = acs_common_argument_spec() spec.update( dict( - alicloud_region=dict(required=True, 
aliases=['region', 'region_id'], - fallback=(env_fallback, ['ALICLOUD_REGION', 'ALICLOUD_REGION_ID'])), - alicloud_assume_role_arn=dict(fallback=(env_fallback, ['ALICLOUD_ASSUME_ROLE_ARN']), - aliases=['assume_role_arn']), - alicloud_assume_role_session_name=dict(fallback=(env_fallback, ['ALICLOUD_ASSUME_ROLE_SESSION_NAME']), - aliases=['assume_role_session_name']), - alicloud_assume_role_session_expiration=dict(type='int', - fallback=(env_fallback, - ['ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION']), - aliases=['assume_role_session_expiration']), - alicloud_assume_role=dict(type='dict', aliases=['assume_role']), - profile=dict(fallback=(env_fallback, ['ALICLOUD_PROFILE'])), - shared_credentials_file=dict(fallback=(env_fallback, ['ALICLOUD_SHARED_CREDENTIALS_FILE'])) + alicloud_region=dict( + required=True, + aliases=["region", "region_id"], + fallback=(env_fallback, ["ALICLOUD_REGION", "ALICLOUD_REGION_ID"]), + ), + alicloud_assume_role_arn=dict( + fallback=(env_fallback, ["ALICLOUD_ASSUME_ROLE_ARN"]), aliases=["assume_role_arn"] + ), + alicloud_assume_role_session_name=dict( + fallback=(env_fallback, ["ALICLOUD_ASSUME_ROLE_SESSION_NAME"]), aliases=["assume_role_session_name"] + ), + alicloud_assume_role_session_expiration=dict( + type="int", + fallback=(env_fallback, ["ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION"]), + aliases=["assume_role_session_expiration"], + ), + alicloud_assume_role=dict(type="dict", aliases=["assume_role"]), + profile=dict(fallback=(env_fallback, ["ALICLOUD_PROFILE"])), + shared_credentials_file=dict(fallback=(env_fallback, ["ALICLOUD_SHARED_CREDENTIALS_FILE"])), ) ) return spec def get_acs_connection_info(params): - - ecs_params = dict(acs_access_key_id=params.get('alicloud_access_key'), - acs_secret_access_key=params.get('alicloud_secret_key'), - security_token=params.get('alicloud_security_token'), - ecs_role_name=params.get('ecs_role_name'), - user_agent='Ansible-Provider-Alicloud') + ecs_params = dict( + acs_access_key_id=params.get("alicloud_access_key"), + acs_secret_access_key=params.get("alicloud_secret_key"), + security_token=params.get("alicloud_security_token"), + ecs_role_name=params.get("ecs_role_name"), + user_agent="Ansible-Provider-Alicloud", + ) return ecs_params @@ -88,76 +102,98 @@ def connect_to_acs(acs_module, region, **params): if not conn: if region not in [acs_module_region.id for acs_module_region in acs_module.regions()]: raise AnsibleACSError( - f"Region {region} does not seem to be available for acs module {acs_module.__name__}.") + f"Region {region} does not seem to be available for acs module {acs_module.__name__}." + ) else: raise AnsibleACSError( - f"Unknown problem connecting to region {region} for acs module {acs_module.__name__}.") + f"Unknown problem connecting to region {region} for acs module {acs_module.__name__}." 
+ ) return conn def get_assume_role(params): - """ Return new params """ + """Return new params""" sts_params = get_acs_connection_info(params) assume_role = {} - if params.get('assume_role'): - assume_role['alicloud_assume_role_arn'] = params['assume_role'].get('role_arn') - assume_role['alicloud_assume_role_session_name'] = params['assume_role'].get('session_name') - assume_role['alicloud_assume_role_session_expiration'] = params['assume_role'].get('session_expiration') - assume_role['alicloud_assume_role_policy'] = params['assume_role'].get('policy') + if params.get("assume_role"): + assume_role["alicloud_assume_role_arn"] = params["assume_role"].get("role_arn") + assume_role["alicloud_assume_role_session_name"] = params["assume_role"].get("session_name") + assume_role["alicloud_assume_role_session_expiration"] = params["assume_role"].get("session_expiration") + assume_role["alicloud_assume_role_policy"] = params["assume_role"].get("policy") assume_role_params = { - 'role_arn': params.get('alicloud_assume_role_arn') if params.get('alicloud_assume_role_arn') else assume_role.get('alicloud_assume_role_arn'), - 'role_session_name': params.get('alicloud_assume_role_session_name') if params.get('alicloud_assume_role_session_name') - else assume_role.get('alicloud_assume_role_session_name'), - 'duration_seconds': params.get('alicloud_assume_role_session_expiration') if params.get('alicloud_assume_role_session_expiration') - else assume_role.get('alicloud_assume_role_session_expiration', 3600), - 'policy': assume_role.get('alicloud_assume_role_policy', {}) + "role_arn": params.get("alicloud_assume_role_arn") + if params.get("alicloud_assume_role_arn") + else assume_role.get("alicloud_assume_role_arn"), + "role_session_name": params.get("alicloud_assume_role_session_name") + if params.get("alicloud_assume_role_session_name") + else assume_role.get("alicloud_assume_role_session_name"), + "duration_seconds": params.get("alicloud_assume_role_session_expiration") + if params.get("alicloud_assume_role_session_expiration") + else assume_role.get("alicloud_assume_role_session_expiration", 3600), + "policy": assume_role.get("alicloud_assume_role_policy", {}), } try: - sts = connect_to_acs(footmark.sts, params.get('alicloud_region'), **sts_params).assume_role(**assume_role_params).read() - sts_params['acs_access_key_id'], sts_params['acs_secret_access_key'], sts_params['security_token'] \ - = sts['access_key_id'], sts['access_key_secret'], sts['security_token'] + sts = ( + connect_to_acs(footmark.sts, params.get("alicloud_region"), **sts_params) + .assume_role(**assume_role_params) + .read() + ) + sts_params["acs_access_key_id"], sts_params["acs_secret_access_key"], sts_params["security_token"] = ( + sts["access_key_id"], + sts["access_key_secret"], + sts["security_token"], + ) except AnsibleACSError as e: params.fail_json(msg=str(e)) return sts_params def get_profile(params): - if not params['alicloud_access_key'] and not params['ecs_role_name'] and params['profile']: - path = params['shared_credentials_file'] if params['shared_credentials_file'] else f"{os.getenv('HOME')}/.aliyun/config.json" + if not params["alicloud_access_key"] and not params["ecs_role_name"] and params["profile"]: + path = ( + params["shared_credentials_file"] + if params["shared_credentials_file"] + else f"{os.getenv('HOME')}/.aliyun/config.json" + ) auth = {} - with open(path, 'r') as f: - for pro in json.load(f)['profiles']: - if params['profile'] == pro['name']: + with open(path, "r") as f: + for pro in 
json.load(f)["profiles"]: + if params["profile"] == pro["name"]: auth = pro if auth: - if auth['mode'] == 'AK' and auth.get('access_key_id') and auth.get('access_key_secret'): - params['alicloud_access_key'] = auth.get('access_key_id') - params['alicloud_secret_key'] = auth.get('access_key_secret') - params['alicloud_region'] = auth.get('region_id') + if auth["mode"] == "AK" and auth.get("access_key_id") and auth.get("access_key_secret"): + params["alicloud_access_key"] = auth.get("access_key_id") + params["alicloud_secret_key"] = auth.get("access_key_secret") + params["alicloud_region"] = auth.get("region_id") params = get_acs_connection_info(params) - elif auth['mode'] == 'StsToken' and auth.get('access_key_id') and auth.get('access_key_secret') and auth.get('sts_token'): - params['alicloud_access_key'] = auth.get('access_key_id') - params['alicloud_secret_key'] = auth.get('access_key_secret') - params['security_token'] = auth.get('sts_token') - params['alicloud_region'] = auth.get('region_id') + elif ( + auth["mode"] == "StsToken" + and auth.get("access_key_id") + and auth.get("access_key_secret") + and auth.get("sts_token") + ): + params["alicloud_access_key"] = auth.get("access_key_id") + params["alicloud_secret_key"] = auth.get("access_key_secret") + params["security_token"] = auth.get("sts_token") + params["alicloud_region"] = auth.get("region_id") params = get_acs_connection_info(params) - elif auth['mode'] == 'EcsRamRole': - params['ecs_role_name'] = auth.get('ram_role_name') - params['alicloud_region'] = auth.get('region_id') + elif auth["mode"] == "EcsRamRole": + params["ecs_role_name"] = auth.get("ram_role_name") + params["alicloud_region"] = auth.get("region_id") params = get_acs_connection_info(params) - elif auth['mode'] == 'RamRoleArn' and auth.get('ram_role_arn'): - params['alicloud_access_key'] = auth.get('access_key_id') - params['alicloud_secret_key'] = auth.get('access_key_secret') - params['security_token'] = auth.get('sts_token') - params['ecs_role_name'] = auth.get('ram_role_name') - params['alicloud_assume_role_arn'] = auth.get('ram_role_arn') - params['alicloud_assume_role_session_name'] = auth.get('ram_session_name') - params['alicloud_assume_role_session_expiration'] = auth.get('expired_seconds') - params['alicloud_region'] = auth.get('region_id') + elif auth["mode"] == "RamRoleArn" and auth.get("ram_role_arn"): + params["alicloud_access_key"] = auth.get("access_key_id") + params["alicloud_secret_key"] = auth.get("access_key_secret") + params["security_token"] = auth.get("sts_token") + params["ecs_role_name"] = auth.get("ram_role_name") + params["alicloud_assume_role_arn"] = auth.get("ram_role_arn") + params["alicloud_assume_role_session_name"] = auth.get("ram_session_name") + params["alicloud_assume_role_session_expiration"] = auth.get("expired_seconds") + params["alicloud_region"] = auth.get("region_id") params = get_assume_role(params) - elif params.get('alicloud_assume_role_arn') or params.get('assume_role'): + elif params.get("alicloud_assume_role_arn") or params.get("assume_role"): params = get_assume_role(params) else: params = get_acs_connection_info(params) @@ -165,10 +201,10 @@ def get_profile(params): def ecs_connect(module): - """ Return an ecs connection""" + """Return an ecs connection""" ecs_params = get_profile(module.params) # If we have a region specified, connect to its endpoint. 
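
# Aside (not part of the diff): a minimal, self-contained sketch of the profile
# lookup that the get_profile hunk above reformats, assuming the same
# ~/.aliyun/config.json layout with a top-level "profiles" list; load_profile
# is a hypothetical helper, not part of the module.
import json
import os

def load_profile(name, path=None):
    path = path or f"{os.getenv('HOME')}/.aliyun/config.json"
    with open(path) as f:
        profiles = json.load(f)["profiles"]
    # first matching profile wins; its "mode" (AK, StsToken, EcsRamRole,
    # RamRoleArn) decides which credential fields get_profile copies over
    return next((p for p in profiles if p.get("name") == name), {})
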
- region = module.params.get('alicloud_region') + region = module.params.get("alicloud_region") if region: try: ecs = connect_to_acs(footmark.ecs, region, **ecs_params) @@ -179,10 +215,10 @@ def ecs_connect(module): def slb_connect(module): - """ Return an slb connection""" + """Return an slb connection""" slb_params = get_profile(module.params) # If we have a region specified, connect to its endpoint. - region = module.params.get('alicloud_region') + region = module.params.get("alicloud_region") if region: try: slb = connect_to_acs(footmark.slb, region, **slb_params) @@ -193,10 +229,10 @@ def slb_connect(module): def dns_connect(module): - """ Return an dns connection""" + """Return an dns connection""" dns_params = get_profile(module.params) # If we have a region specified, connect to its endpoint. - region = module.params.get('alicloud_region') + region = module.params.get("alicloud_region") if region: try: dns = connect_to_acs(footmark.dns, region, **dns_params) @@ -207,10 +243,10 @@ def dns_connect(module): def vpc_connect(module): - """ Return an vpc connection""" + """Return an vpc connection""" vpc_params = get_profile(module.params) # If we have a region specified, connect to its endpoint. - region = module.params.get('alicloud_region') + region = module.params.get("alicloud_region") if region: try: vpc = connect_to_acs(footmark.vpc, region, **vpc_params) @@ -221,10 +257,10 @@ def vpc_connect(module): def rds_connect(module): - """ Return an rds connection""" + """Return an rds connection""" rds_params = get_profile(module.params) # If we have a region specified, connect to its endpoint. - region = module.params.get('alicloud_region') + region = module.params.get("alicloud_region") if region: try: rds = connect_to_acs(footmark.rds, region, **rds_params) @@ -235,10 +271,10 @@ def rds_connect(module): def ess_connect(module): - """ Return an ess connection""" + """Return an ess connection""" ess_params = get_profile(module.params) # If we have a region specified, connect to its endpoint. - region = module.params.get('alicloud_region') + region = module.params.get("alicloud_region") if region: try: ess = connect_to_acs(footmark.ess, region, **ess_params) @@ -249,10 +285,10 @@ def ess_connect(module): def sts_connect(module): - """ Return an sts connection""" + """Return an sts connection""" sts_params = get_profile(module.params) # If we have a region specified, connect to its endpoint. - region = module.params.get('alicloud_region') + region = module.params.get("alicloud_region") if region: try: sts = connect_to_acs(footmark.sts, region, **sts_params) @@ -263,10 +299,10 @@ def sts_connect(module): def ram_connect(module): - """ Return an ram connection""" + """Return an ram connection""" ram_params = get_profile(module.params) # If we have a region specified, connect to its endpoint. - region = module.params.get('alicloud_region') + region = module.params.get("alicloud_region") if region: try: ram = connect_to_acs(footmark.ram, region, **ram_params) @@ -277,10 +313,10 @@ def ram_connect(module): def market_connect(module): - """ Return an market connection""" + """Return an market connection""" market_params = get_profile(module.params) # If we have a region specified, connect to its endpoint. 
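
# Aside (not part of the diff): every *_connect helper in this file repeats the
# same shape; a hedged generic sketch (service_connect is hypothetical), with
# the AnsibleACSError handling assumed from the pattern, since the except
# branches are elided from these hunks.
def service_connect(module, footmark_service):
    """Return a footmark service connection for the configured region."""
    params = get_profile(module.params)
    region = module.params.get("alicloud_region")
    if not region:
        return None
    try:
        return connect_to_acs(footmark_service, region, **params)
    except AnsibleACSError as e:
        module.fail_json(msg=str(e))
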
- region = module.params.get('alicloud_region')
+ region = module.params.get("alicloud_region")
if region: try: market = connect_to_acs(footmark.market, region, **market_params)
diff --git a/plugins/module_utils/android_sdkmanager.py b/plugins/module_utils/android_sdkmanager.py
index ab086cc66e7..0bdcfb7ac30 100644
--- a/plugins/module_utils/android_sdkmanager.py
+++ b/plugins/module_utils/android_sdkmanager.py
@@ -1,4 +1,3 @@
-
# Copyright (c) 2024, Stanislav Shamilov # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later
@@ -10,18 +9,10 @@ from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
-__state_map = { - "present": "--install", - "absent": "--uninstall" -}
+__state_map = {"present": "--install", "absent": "--uninstall"}
# sdkmanager --help 2>&1 | grep -A 2 -- --channel
-__channel_map = { - "stable": 0, - "beta": 1, - "dev": 2, - "canary": 3 -}
+__channel_map = {"stable": 0, "beta": 1, "dev": 2, "canary": 3}
def __map_channel(channel_name):
@@ -33,18 +24,18 @@ def __map_channel(channel_name): def sdkmanager_runner(module, **kwargs): return CmdRunner( module,
- command='sdkmanager',
+ command="sdkmanager",
arg_formats=dict( state=cmd_runner_fmt.as_map(__state_map), name=cmd_runner_fmt.as_list(), installed=cmd_runner_fmt.as_fixed("--list_installed"),
- list=cmd_runner_fmt.as_fixed('--list'),
+ list=cmd_runner_fmt.as_fixed("--list"),
newer=cmd_runner_fmt.as_fixed("--newer"), sdk_root=cmd_runner_fmt.as_opt_eq_val("--sdk_root"),
- channel=cmd_runner_fmt.as_func(lambda x: [f"--channel={__map_channel(x)}"])
+ channel=cmd_runner_fmt.as_func(lambda x: [f"--channel={__map_channel(x)}"]),
),
force_lang="C.UTF-8", # Without this, sdkmanager binary crashes
- **kwargs
+ **kwargs,
)
@@ -72,43 +63,45 @@ class SdkManagerException(Exception): class AndroidSdkManager:
- _RE_INSTALLED_PACKAGES_HEADER = re.compile(r'^Installed packages:$')
- _RE_UPDATABLE_PACKAGES_HEADER = re.compile(r'^Available Updates:$')
+ _RE_INSTALLED_PACKAGES_HEADER = re.compile(r"^Installed packages:$")
+ _RE_UPDATABLE_PACKAGES_HEADER = re.compile(r"^Available Updates:$")
# Example: ' platform-tools | 27.0.0 | Android SDK Platform-Tools 27 | platform-tools '
- _RE_INSTALLED_PACKAGE = re.compile(r'^\s*(?P<name>\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*.+\s*\|\s*(\S+)\s*$')
+ _RE_INSTALLED_PACKAGE = re.compile(r"^\s*(?P<name>\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*.+\s*\|\s*(\S+)\s*$")
# Example: ' platform-tools | 27.0.0 | 35.0.2'
- _RE_UPDATABLE_PACKAGE = re.compile(r'^\s*(?P<name>\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*[0-9].*\b\s*$')
+ _RE_UPDATABLE_PACKAGE = re.compile(r"^\s*(?P<name>\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*[0-9].*\b\s*$")
- _RE_UNKNOWN_PACKAGE = re.compile(r'^Warning: Failed to find package \'(?P<package>\S+)\'\s*$')
- _RE_ACCEPT_LICENSE = re.compile(r'^The following packages can not be installed since their licenses or those of ' - r'the packages they depend on were not accepted')
+ _RE_UNKNOWN_PACKAGE = re.compile(r"^Warning: Failed to find package \'(?P<package>\S+)\'\s*$")
+ _RE_ACCEPT_LICENSE = re.compile(
+ r"^The following packages can not be installed since their licenses or those of "
+ r"the packages they depend on were not accepted"
+ )
def __init__(self, module): self.runner = sdkmanager_runner(module) def get_installed_packages(self):
- with self.runner('installed sdk_root channel') as ctx:
+ with self.runner("installed sdk_root channel") as ctx:
rc, stdout, stderr = ctx.run() return self._parse_packages(stdout,
self._RE_INSTALLED_PACKAGES_HEADER, self._RE_INSTALLED_PACKAGE) def get_updatable_packages(self): - with self.runner('list newer sdk_root channel') as ctx: + with self.runner("list newer sdk_root channel") as ctx: rc, stdout, stderr = ctx.run() return self._parse_packages(stdout, self._RE_UPDATABLE_PACKAGES_HEADER, self._RE_UPDATABLE_PACKAGE) def apply_packages_changes(self, packages, accept_licenses=False): - """ Install or delete packages, depending on the `module.vars.state` parameter """ + """Install or delete packages, depending on the `module.vars.state` parameter""" if len(packages) == 0: - return 0, '', '' + return 0, "", "" if accept_licenses: - license_prompt_answer = 'y' + license_prompt_answer = "y" else: - license_prompt_answer = 'N' + license_prompt_answer = "N" for package in packages: - with self.runner('state name sdk_root channel', data=license_prompt_answer) as ctx: + with self.runner("state name sdk_root channel", data=license_prompt_answer) as ctx: rc, stdout, stderr = ctx.run(name=package.name) for line in stdout.splitlines(): @@ -118,14 +111,14 @@ def apply_packages_changes(self, packages, accept_licenses=False): if rc != 0: self._try_parse_stderr(stderr) return rc, stdout, stderr - return 0, '', '' + return 0, "", "" def _try_parse_stderr(self, stderr): data = stderr.splitlines() for line in data: unknown_package_regex = self._RE_UNKNOWN_PACKAGE.match(line) if unknown_package_regex: - package = unknown_package_regex.group('package') + package = unknown_package_regex.group("package") raise SdkManagerException(f"Unknown package {package}") @staticmethod @@ -142,5 +135,5 @@ def _parse_packages(stdout, header_regexp, row_regexp): else: p = row_regexp.match(line) if p: - packages.add(Package(p.group('name'))) + packages.add(Package(p.group("name"))) return packages diff --git a/plugins/module_utils/btrfs.py b/plugins/module_utils/btrfs.py index b0470eac13b..a41fbe09fd6 100644 --- a/plugins/module_utils/btrfs.py +++ b/plugins/module_utils/btrfs.py @@ -14,9 +14,9 @@ def normalize_subvolume_path(path): Normalizes btrfs subvolume paths to ensure exactly one leading slash, no trailing slashes and no consecutive slashes. In addition, if the path is prefixed with a leading <FS_TREE>, this value is removed.
""" - fstree_stripped = re.sub(r'^', '', path) - result = re.sub(r'/+$', '', re.sub(r'/+', '/', f"/{fstree_stripped}")) - return result if len(result) > 0 else '/' + fstree_stripped = re.sub(r"^", "", path) + result = re.sub(r"/+$", "", re.sub(r"/+", "/", f"/{fstree_stripped}")) + return result if len(result) > 0 else "/" class BtrfsModuleException(Exception): @@ -24,7 +24,6 @@ class BtrfsModuleException(Exception): class BtrfsCommands: - """ Provides access to a subset of the Btrfs command line """ @@ -40,43 +39,43 @@ def filesystem_show(self): filesystems = [] current = None for line in stdout: - if line.startswith('Label'): + if line.startswith("Label"): current = self.__parse_filesystem(line) filesystems.append(current) - elif line.startswith('devid'): - current['devices'].append(self.__parse_filesystem_device(line)) + elif line.startswith("devid"): + current["devices"].append(self.__parse_filesystem_device(line)) return filesystems def __parse_filesystem(self, line): - label = re.sub(r'\s*uuid:.*$', '', re.sub(r'^Label:\s*', '', line)) - id = re.sub(r'^.*uuid:\s*', '', line) + label = re.sub(r"\s*uuid:.*$", "", re.sub(r"^Label:\s*", "", line)) + id = re.sub(r"^.*uuid:\s*", "", line) filesystem = {} - filesystem['label'] = label.strip("'") if label != 'none' else None - filesystem['uuid'] = id - filesystem['devices'] = [] - filesystem['mountpoints'] = [] - filesystem['subvolumes'] = [] - filesystem['default_subvolid'] = None + filesystem["label"] = label.strip("'") if label != "none" else None + filesystem["uuid"] = id + filesystem["devices"] = [] + filesystem["mountpoints"] = [] + filesystem["subvolumes"] = [] + filesystem["default_subvolid"] = None return filesystem def __parse_filesystem_device(self, line): - return re.sub(r'^.*path\s', '', line) + return re.sub(r"^.*path\s", "", line) def subvolumes_list(self, filesystem_path): command = f"{self.__btrfs} subvolume list -tap {filesystem_path}" result = self.__module.run_command(command, check_rc=True) - stdout = [x.split('\t') for x in result[1].splitlines()] - subvolumes = [{'id': 5, 'parent': None, 'path': '/'}] + stdout = [x.split("\t") for x in result[1].splitlines()] + subvolumes = [{"id": 5, "parent": None, "path": "/"}] if len(stdout) > 2: subvolumes.extend([self.__parse_subvolume_list_record(x) for x in stdout[2:]]) return subvolumes def __parse_subvolume_list_record(self, item): return { - 'id': int(item[0]), - 'parent': int(item[2]), - 'path': normalize_subvolume_path(item[5]), + "id": int(item[0]), + "parent": int(item[2]), + "path": normalize_subvolume_path(item[5]), } def subvolume_get_default(self, filesystem_path): @@ -103,7 +102,6 @@ def subvolume_delete(self, subvolume_path): class BtrfsInfoProvider: - """ Utility providing details of the currently available btrfs filesystems """ @@ -117,15 +115,14 @@ def get_filesystems(self): filesystems = self.__btrfs_api.filesystem_show() mountpoints = self.__find_mountpoints() for filesystem in filesystems: - device_mountpoints = self.__filter_mountpoints_for_devices(mountpoints, filesystem['devices']) - filesystem['mountpoints'] = device_mountpoints + device_mountpoints = self.__filter_mountpoints_for_devices(mountpoints, filesystem["devices"]) + filesystem["mountpoints"] = device_mountpoints if len(device_mountpoints) > 0: - # any path within the filesystem can be used to query metadata - mountpoint = device_mountpoints[0]['mountpoint'] - filesystem['subvolumes'] = self.get_subvolumes(mountpoint) - filesystem['default_subvolid'] = self.get_default_subvolume_id(mountpoint) + 
mountpoint = device_mountpoints[0]["mountpoint"] + filesystem["subvolumes"] = self.get_subvolumes(mountpoint) + filesystem["default_subvolid"] = self.get_default_subvolume_id(mountpoint) return filesystems @@ -140,7 +137,7 @@ def get_default_subvolume_id(self, filesystem_path): return self.__btrfs_api.subvolume_get_default(filesystem_path) def __filter_mountpoints_for_devices(self, mountpoints, devices): - return [m for m in mountpoints if (m['device'] in devices)] + return [m for m in mountpoints if (m["device"] in devices)] def __find_mountpoints(self): command = f"{self.__findmnt_path} -t btrfs -nvP" @@ -154,28 +151,29 @@ def __find_mountpoints(self): return mountpoints def __parse_mountpoint_pairs(self, line): - pattern = re.compile(r'^TARGET="(?P<target>.*)"\s+SOURCE="(?P<source>.*)"\s+FSTYPE="(?P<fstype>.*)"\s+OPTIONS="(?P<options>.*)"\s*$') + pattern = re.compile( r'^TARGET="(?P<target>.*)"\s+SOURCE="(?P<source>.*)"\s+FSTYPE="(?P<fstype>.*)"\s+OPTIONS="(?P<options>.*)"\s*$' ) match = pattern.search(line) if match is not None: groups = match.groupdict() return { - 'mountpoint': groups['target'], - 'device': groups['source'], - 'subvolid': self.__extract_mount_subvolid(groups['options']), + "mountpoint": groups["target"], + "device": groups["source"], + "subvolid": self.__extract_mount_subvolid(groups["options"]), } else: raise BtrfsModuleException(f"Failed to parse findmnt result for line: '{line}'") def __extract_mount_subvolid(self, mount_options): - for option in mount_options.split(','): - if option.startswith('subvolid='): - return int(option[len('subvolid='):]) + for option in mount_options.split(","): + if option.startswith("subvolid="): + return int(option[len("subvolid=") :]) raise BtrfsModuleException(f"Failed to find subvolid for mountpoint in options '{mount_options}'") class BtrfsSubvolume: - """ Wrapper class providing convenience methods for inspection of a btrfs subvolume """ @@ -219,8 +217,8 @@ def get_child_relative_path(self, absolute_child_path): """ path = self.path if absolute_child_path.startswith(path): - relative = absolute_child_path[len(path):] - return re.sub(r'^/*', '', relative) + relative = absolute_child_path[len(path) :] + return re.sub(r"^/*", "", relative) else: raise BtrfsModuleException(f"Path '{absolute_child_path}' doesn't start with '{path}'") @@ -241,19 +239,18 @@ def id(self): @property def name(self): - return self.path.split('/').pop() + return self.path.split("/").pop() @property def path(self): - return self.__info['path'] + return self.__info["path"] @property def parent(self): - return self.__info['parent'] + return self.__info["parent"] class BtrfsFilesystem: - """ Wrapper class providing convenience methods for inspection of a btrfs filesystem """ @@ -262,14 +259,14 @@ def __init__(self, info, provider, module): self.__provider = provider # constant for module execution - self.__uuid = info['uuid'] - self.__label = info['label'] - self.__devices = info['devices'] + self.__uuid = info["uuid"] + self.__label = info["label"] + self.__devices = info["devices"] # refreshable - self.__default_subvolid = info['default_subvolid'] if 'default_subvolid' in info else None - self.__update_mountpoints(info['mountpoints'] if 'mountpoints' in info else []) - self.__update_subvolumes(info['subvolumes'] if 'subvolumes' in info else []) + self.__default_subvolid = info["default_subvolid"] if "default_subvolid" in info else None + self.__update_mountpoints(info["mountpoints"] if "mountpoints" in info else []) + self.__update_subvolumes(info["subvolumes"] if "subvolumes" in info else []) @property def uuid(self):
@@ -299,8 +296,8 @@ def refresh_mountpoints(self): def __update_mountpoints(self, mountpoints): self.__mountpoints = dict() for i in mountpoints: - subvolid = i['subvolid'] - mountpoint = i['mountpoint'] + subvolid = i["subvolid"] + mountpoint = i["mountpoint"] if subvolid not in self.__mountpoints: self.__mountpoints[subvolid] = [] self.__mountpoints[subvolid].append(mountpoint) @@ -315,7 +312,7 @@ def __update_subvolumes(self, subvolumes): # TODO strategy for retaining information on deleted subvolumes? self.__subvolumes = dict() for subvolume in subvolumes: - self.__subvolumes[subvolume['id']] = subvolume + self.__subvolumes[subvolume["id"]] = subvolume def refresh_default_subvolume(self): filesystem_path = self.get_any_mountpoint() @@ -336,8 +333,8 @@ def get_subvolume_info_for_id(self, subvolume_id): def get_subvolume_by_name(self, subvolume): for subvolume_info in self.__subvolumes.values(): - if subvolume_info['path'] == subvolume: - return BtrfsSubvolume(self, subvolume_info['id']) + if subvolume_info["path"] == subvolume: + return BtrfsSubvolume(self, subvolume_info["id"]) return None def get_any_mountpoint(self): @@ -361,9 +358,9 @@ def get_nearest_subvolume(self, subvolume): subvolumes_by_path = self.__get_subvolumes_by_path() while len(subvolume) > 1: if subvolume in subvolumes_by_path: - return BtrfsSubvolume(self, subvolumes_by_path[subvolume]['id']) + return BtrfsSubvolume(self, subvolumes_by_path[subvolume]["id"]) else: - subvolume = re.sub(r'/[^/]+$', '', subvolume) + subvolume = re.sub(r"/[^/]+$", "", subvolume) return BtrfsSubvolume(self, 5) @@ -378,12 +375,12 @@ def get_mountpath_as_child(self, subvolume_name): return nearest.get_mounted_path() + os.path.sep + nearest.get_child_relative_path(subvolume_name) def get_subvolume_children(self, subvolume_id): - return [BtrfsSubvolume(self, x['id']) for x in self.__subvolumes.values() if x['parent'] == subvolume_id] + return [BtrfsSubvolume(self, x["id"]) for x in self.__subvolumes.values() if x["parent"] == subvolume_id] def __get_subvolumes_by_path(self): result = {} for s in self.__subvolumes.values(): - path = s['path'] + path = s["path"] result[path] = s return result @@ -394,25 +391,26 @@ def get_summary(self): subvolumes = [] sources = self.__subvolumes.values() if self.__subvolumes is not None else [] for subvolume in sources: - id = subvolume['id'] - subvolumes.append({ - 'id': id, - 'path': subvolume['path'], - 'parent': subvolume['parent'], - 'mountpoints': self.get_mountpoints_by_subvolume_id(id), - }) + id = subvolume["id"] + subvolumes.append( + { + "id": id, + "path": subvolume["path"], + "parent": subvolume["parent"], + "mountpoints": self.get_mountpoints_by_subvolume_id(id), + } + ) return { - 'default_subvolume': self.__default_subvolid, - 'devices': self.__devices, - 'label': self.__label, - 'uuid': self.__uuid, - 'subvolumes': subvolumes, + "default_subvolume": self.__default_subvolid, + "devices": self.__devices, + "label": self.__label, + "uuid": self.__uuid, + "subvolumes": subvolumes, } class BtrfsFilesystemsProvider: - """ Provides methods to query available btrfs filesystems """ @@ -423,8 +421,8 @@ def __init__(self, module): self.__filesystems = None def get_matching_filesystem(self, criteria): - if criteria['device'] is not None: - criteria['device'] = os.path.realpath(criteria['device']) + if criteria["device"] is not None: + criteria["device"] = os.path.realpath(criteria["device"]) self.__check_init() matching = [f for f in self.__filesystems.values() if self.__filesystem_matches_criteria(f, 
criteria)] @@ -436,9 +434,11 @@ def get_matching_filesystem(self, criteria): ) def __filesystem_matches_criteria(self, filesystem, criteria): - return ((criteria['uuid'] is None or filesystem.uuid == criteria['uuid']) and - (criteria['label'] is None or filesystem.label == criteria['label']) and - (criteria['device'] is None or filesystem.contains_device(criteria['device']))) + return ( + (criteria["uuid"] is None or filesystem.uuid == criteria["uuid"]) + and (criteria["label"] is None or filesystem.label == criteria["label"]) + and (criteria["device"] is None or filesystem.contains_device(criteria["device"])) + ) def get_filesystem_for_device(self, device): real_device = os.path.realpath(device) @@ -456,5 +456,5 @@ def __check_init(self): if self.__filesystems is None: self.__filesystems = dict() for f in self.__provider.get_filesystems(): - uuid = f['uuid'] + uuid = f["uuid"] self.__filesystems[uuid] = BtrfsFilesystem(f, self.__provider, self.__module) diff --git a/plugins/module_utils/cloud.py b/plugins/module_utils/cloud.py index d3569472dbb..d38cae8b26e 100644 --- a/plugins/module_utils/cloud.py +++ b/plugins/module_utils/cloud.py @@ -36,7 +36,7 @@ def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60): - """ Customizable exponential backoff strategy. + """Customizable exponential backoff strategy. Args: retries (int): Maximum number of times to retry a request. delay (float): Initial (base) delay. @@ -54,15 +54,17 @@ def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60): >>> list(backoff()) [2, 4, 8, 16, 32, 60, 60, 60, 60, 60] """ + def backoff_gen(): for retry in range(0, retries): - sleep = delay * backoff ** retry + sleep = delay * backoff**retry yield sleep if max_delay is None else min(sleep, max_delay) + return backoff_gen def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random): - """ Implements the "Full Jitter" backoff strategy described here + """Implements the "Full Jitter" backoff strategy described here https://www.awsarchitectureblog.com/2015/03/backoff.html Args: retries (int): Maximum number of times to retry a request. @@ -83,23 +85,26 @@ def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random): >>> list(backoff()) [2, 1, 6, 6, 31] """ + def backoff_gen(): for retry in range(0, retries): - yield _random.randint(0, min(max_delay, delay * 2 ** retry)) + yield _random.randint(0, min(max_delay, delay * 2**retry)) + return backoff_gen class CloudRetry: - """ CloudRetry can be used by any cloud provider, in order to implement a - backoff algorithm/retry effect based on Status Code from Exceptions. + """CloudRetry can be used by any cloud provider, in order to implement a + backoff algorithm/retry effect based on Status Code from Exceptions. """ + # This is the base class of the exception. # AWS Example botocore.exceptions.ClientError base_class = None @staticmethod def status_code_from_exception(error): - """ Return the status code from the exception object + """Return the status code from the exception object Args: error (object): The exception itself. """ @@ -107,7 +112,7 @@ def status_code_from_exception(error): @staticmethod def found(response_code, catch_extra_error_codes=None): - """ Return True if the Response Code to retry on was found. + """Return True if the Response Code to retry on was found. Args: response_code (str): This is the Response Code that is being matched against. 
""" @@ -115,13 +120,14 @@ def found(response_code, catch_extra_error_codes=None): @classmethod def _backoff(cls, backoff_strategy, catch_extra_error_codes=None): - """ Retry calling the Cloud decorated function using the provided + """Retry calling the Cloud decorated function using the provided backoff strategy. Args: backoff_strategy (callable): Callable that returns a generator. The generator should yield sleep times for each retry of the decorated function. """ + def deco(f): @wraps(f) def retry_func(*args, **kwargs): @@ -163,8 +169,10 @@ def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch max_delay (int or None): maximum amount of time to wait between retries. default=60 """ - return cls._backoff(_exponential_backoff( - retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes) + return cls._backoff( + _exponential_backoff(retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), + catch_extra_error_codes, + ) @classmethod def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None): @@ -182,8 +190,9 @@ def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_c max_delay (int): maximum amount of time to wait between retries. default=60 """ - return cls._backoff(_full_jitter_backoff( - retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes) + return cls._backoff( + _full_jitter_backoff(retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes + ) @classmethod def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None): @@ -204,4 +213,9 @@ def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None): default=1.1 """ return cls.exponential_backoff( - retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes) + retries=tries - 1, + delay=delay, + backoff=backoff, + max_delay=None, + catch_extra_error_codes=catch_extra_error_codes, + ) diff --git a/plugins/module_utils/cmd_runner.py b/plugins/module_utils/cmd_runner.py index 0571b7577de..be81ca1fda3 100644 --- a/plugins/module_utils/cmd_runner.py +++ b/plugins/module_utils/cmd_runner.py @@ -75,8 +75,17 @@ class CmdRunner: def _prepare_args_order(order): return tuple(order) if is_sequence(order) else tuple(order.split()) - def __init__(self, module, command, arg_formats=None, default_args_order=(), - check_rc=False, force_lang="C", path_prefix=None, environ_update=None): + def __init__( + self, + module, + command, + arg_formats=None, + default_args_order=(), + check_rc=False, + force_lang="C", + path_prefix=None, + environ_update=None, + ): self.module = module self.command = _ensure_list(command) self.default_args_order = self._prepare_args_order(default_args_order) @@ -101,7 +110,11 @@ def __init__(self, module, command, arg_formats=None, default_args_order=(), self.environ_update = environ_update _cmd = self.command[0] - self.command[0] = _cmd if (os.path.isabs(_cmd) or '/' in _cmd) else module.get_bin_path(_cmd, opt_dirs=path_prefix, required=True) + self.command[0] = ( + _cmd + if (os.path.isabs(_cmd) or "/" in _cmd) + else module.get_bin_path(_cmd, opt_dirs=path_prefix, required=True) + ) @property def binary(self): @@ -116,11 +129,14 @@ def __call__(self, args_order=None, output_process=None, check_mode_skip=False, for p in args_order: if p not in self.arg_formats: raise MissingArgumentFormat(p, args_order, tuple(self.arg_formats.keys())) - return _CmdRunnerContext(runner=self, - 
args_order=args_order, - output_process=output_process, - check_mode_skip=check_mode_skip, - check_mode_return=check_mode_return, **kwargs) + return _CmdRunnerContext( + runner=self, + args_order=args_order, + output_process=output_process, + check_mode_skip=check_mode_skip, + check_mode_return=check_mode_return, + **kwargs, + ) def has_arg_format(self, arg): return arg in self.arg_formats @@ -139,17 +155,19 @@ def __init__(self, runner, args_order, output_process, check_mode_skip, check_mo self.run_command_args = dict(kwargs) self.environ_update = runner.environ_update - self.environ_update.update(self.run_command_args.get('environ_update', {})) + self.environ_update.update(self.run_command_args.get("environ_update", {})) if runner.force_lang: - self.environ_update.update({ - 'LANGUAGE': runner.force_lang, - 'LC_ALL': runner.force_lang, - }) - self.run_command_args['environ_update'] = self.environ_update - - if 'check_rc' not in self.run_command_args: - self.run_command_args['check_rc'] = runner.check_rc - self.check_rc = self.run_command_args['check_rc'] + self.environ_update.update( + { + "LANGUAGE": runner.force_lang, + "LC_ALL": runner.force_lang, + } + ) + self.run_command_args["environ_update"] = self.environ_update + + if "check_rc" not in self.run_command_args: + self.run_command_args["check_rc"] = runner.check_rc + self.check_rc = self.run_command_args["check_rc"] self.cmd = None self.results_rc = None diff --git a/plugins/module_utils/cmd_runner_fmt.py b/plugins/module_utils/cmd_runner_fmt.py index b0b27f13fe1..535a012947e 100644 --- a/plugins/module_utils/cmd_runner_fmt.py +++ b/plugins/module_utils/cmd_runner_fmt.py @@ -46,7 +46,9 @@ def as_bool(args_true, args_false=None, ignore_none=None): ignore_none = False else: args_false = [] - return _ArgFormat(lambda value: _ensure_list(args_true) if value else _ensure_list(args_false), ignore_none=ignore_none) + return _ArgFormat( + lambda value: _ensure_list(args_true) if value else _ensure_list(args_false), ignore_none=ignore_none + ) def as_bool_not(args): @@ -73,6 +75,7 @@ def func(value): if max_len is not None and len(value) > max_len: raise ValueError(f"Parameter must have at most {max_len} element(s)") return value + return _ArgFormat(func, ignore_none=ignore_none) @@ -96,6 +99,7 @@ def unpack_args(func): @wraps(func) def wrapper(v): return func(*v) + return wrapper @@ -103,6 +107,7 @@ def unpack_kwargs(func): @wraps(func) def wrapper(v): return func(**v) + return wrapper @@ -115,7 +120,9 @@ def stacking(value): stack = [new_func(v) for v in value if v] stack = [x for args in stack for x in args] return stack + return _ArgFormat(stacking, ignore_none=True) + return wrapper diff --git a/plugins/module_utils/consul.py b/plugins/module_utils/consul.py index c0963499166..1d405d20ae3 100644 --- a/plugins/module_utils/consul.py +++ b/plugins/module_utils/consul.py @@ -1,4 +1,3 @@ - # Copyright (c) 2022, Håkon Lerring # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -65,12 +64,12 @@ def camel_case_key(key): def validate_check(check): - validate_duration_keys = ['Interval', 'Ttl', 'Timeout'] + validate_duration_keys = ["Interval", "Ttl", "Timeout"] validate_tcp_regex = r"(?P<host>.*):(?P<port>(?:[0-9]+))$" - if check.get('Tcp') is not None: - match = re.match(validate_tcp_regex, check['Tcp']) + if check.get("Tcp") is not None: + match = re.match(validate_tcp_regex, check["Tcp"]) if not match: - raise Exception('tcp check must be in host:port format') +
raise Exception("tcp check must be in host:port format") for duration in validate_duration_keys: if duration in check and check[duration] is not None: check[duration] = validate_duration(check[duration]) @@ -99,12 +98,7 @@ def _normalize_params(params, arg_spec): if k not in arg_spec or v is None: # Alias continue spec = arg_spec[k] - if ( - spec.get("type") == "list" - and spec.get("elements") == "dict" - and spec.get("options") - and v - ): + if spec.get("type") == "list" and spec.get("elements") == "dict" and spec.get("options") and v: v = [_normalize_params(d, spec["options"]) for d in v] elif spec.get("type") == "dict" and spec.get("options") and v: v = _normalize_params(v, spec["options"]) @@ -130,9 +124,7 @@ def __init__(self, module): self._module = module self.params = _normalize_params(module.params, module.argument_spec) self.api_params = { - k: camel_case_key(k) - for k in self.params - if k not in STATE_PARAMETER and k not in AUTH_ARGUMENTS_SPEC + k: camel_case_key(k) for k in self.params if k not in STATE_PARAMETER and k not in AUTH_ARGUMENTS_SPEC } self.operational_attributes.update({"CreateIndex", "CreateTime", "Hash", "ModifyIndex"}) @@ -192,11 +184,9 @@ def helper(item): def needs_camel_case(k): spec = self._module.argument_spec[k] - return ( - spec.get("type") == "list" - and spec.get("elements") == "dict" - and spec.get("options") - ) or (spec.get("type") == "dict" and spec.get("options")) + return (spec.get("type") == "list" and spec.get("elements") == "dict" and spec.get("options")) or ( + spec.get("type") == "dict" and spec.get("options") + ) if k in self.api_params and v is not None: if isinstance(v, dict) and needs_camel_case(k): @@ -221,9 +211,7 @@ def needs_update(self, api_obj, module_obj): return False def prepare_object(self, existing, obj): - existing = { - k: v for k, v in existing.items() if k not in self.operational_attributes - } + existing = {k: v for k, v in existing.items() if k not in self.operational_attributes} for k, v in obj.items(): existing[k] = v return existing @@ -319,9 +307,7 @@ def _request(self, method, url_parts, data=None, params=None): ca_path=ca_path, ) response_data = response.read() - status = ( - response.status if hasattr(response, "status") else response.getcode() - ) + status = response.status if hasattr(response, "status") else response.getcode() except urllib_error.URLError as e: if isinstance(e, urllib_error.HTTPError): diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py index 3003875c093..8246be14ab3 100644 --- a/plugins/module_utils/csv.py +++ b/plugins/module_utils/csv.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Andrew Pantuso (@ajpantuso) # Copyright (c) 2018, Dag Wieers (@dagwieers) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -27,11 +26,12 @@ def initialize_dialect(dialect, **kwargs): # Add Unix dialect from Python 3 class unix_dialect(csv.Dialect): """Describe the usual properties of Unix-generated CSV files.""" - delimiter = ',' + + delimiter = "," quotechar = '"' doublequote = True skipinitialspace = False - lineterminator = '\n' + lineterminator = "\n" quoting = csv.QUOTE_ALL csv.register_dialect("unix", unix_dialect) @@ -43,19 +43,19 @@ class unix_dialect(csv.Dialect): dialect_params = {k: v for k, v in kwargs.items() if v is not None} if dialect_params: try: - csv.register_dialect('custom', dialect, **dialect_params) + csv.register_dialect("custom", dialect, **dialect_params) except TypeError as e: raise 
CustomDialectFailureError(f"Unable to create custom dialect: {e}") - dialect = 'custom' + dialect = "custom" return dialect def read_csv(data, dialect, fieldnames=None): - BOM = to_native('\ufeff') - data = to_native(data, errors='surrogate_or_strict') + BOM = to_native("\ufeff") + data = to_native(data, errors="surrogate_or_strict") if data.startswith(BOM): - data = data[len(BOM):] + data = data[len(BOM) :] fake_fh = StringIO(data) diff --git a/plugins/module_utils/database.py b/plugins/module_utils/database.py index bb4c0efcee3..86d8bd5231d 100644 --- a/plugins/module_utils/database.py +++ b/plugins/module_utils/database.py @@ -18,13 +18,13 @@ # # 1. '"' in string and '--' in string or # "'" in string and '--' in string -PATTERN_1 = re.compile(r'(\'|\").*--') +PATTERN_1 = re.compile(r"(\'|\").*--") # 2. union \ intersect \ except + select -PATTERN_2 = re.compile(r'(UNION|INTERSECT|EXCEPT).*SELECT', re.IGNORECASE) +PATTERN_2 = re.compile(r"(UNION|INTERSECT|EXCEPT).*SELECT", re.IGNORECASE) # 3. ';' and any KEY_WORDS -PATTERN_3 = re.compile(r';.*(SELECT|UPDATE|INSERT|DELETE|DROP|TRUNCATE|ALTER)', re.IGNORECASE) +PATTERN_3 = re.compile(r";.*(SELECT|UPDATE|INSERT|DELETE|DROP|TRUNCATE|ALTER)", re.IGNORECASE) class SQLParseError(Exception): @@ -65,7 +65,7 @@ def _find_end_quote(identifier, quote_char): return accumulate if next_char == quote_char: try: - identifier = identifier[quote + 2:] + identifier = identifier[quote + 2 :] accumulate = accumulate + 2 except IndexError: raise UnclosedQuoteError @@ -75,7 +75,7 @@ def _find_end_quote(identifier, quote_char): def _identifier_parse(identifier, quote_char): if not identifier: - raise SQLParseError('Identifier name unspecified or unquoted trailing dot') + raise SQLParseError("Identifier name unspecified or unquoted trailing dot") already_quoted = False if identifier.startswith(quote_char): @@ -86,20 +86,20 @@ def _identifier_parse(identifier, quote_char): already_quoted = False else: if end_quote < len(identifier) - 1: - if identifier[end_quote + 1] == '.': + if identifier[end_quote + 1] == ".": dot = end_quote + 1 first_identifier = identifier[:dot] - next_identifier = identifier[dot + 1:] + next_identifier = identifier[dot + 1 :] further_identifiers = _identifier_parse(next_identifier, quote_char) further_identifiers.insert(0, first_identifier) else: - raise SQLParseError('User escaped identifiers must escape extra quotes') + raise SQLParseError("User escaped identifiers must escape extra quotes") else: further_identifiers = [identifier] if not already_quoted: try: - dot = identifier.index('.') + dot = identifier.index(".") except ValueError: identifier = identifier.replace(quote_char, quote_char * 2) identifier = f"{quote_char}{identifier}{quote_char}" @@ -111,7 +111,7 @@ def _identifier_parse(identifier, quote_char): further_identifiers = [identifier] else: first_identifier = identifier[:dot] - next_identifier = identifier[dot + 1:] + next_identifier = identifier[dot + 1 :] further_identifiers = _identifier_parse(next_identifier, quote_char) first_identifier = first_identifier.replace(quote_char, quote_char * 2) first_identifier = f"{quote_char}{first_identifier}{quote_char}" @@ -123,23 +123,27 @@ def _identifier_parse(identifier, quote_char): def pg_quote_identifier(identifier, id_type): identifier_fragments = _identifier_parse(identifier, quote_char='"') if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]: - raise SQLParseError(f'PostgreSQL does not support {id_type} with more than {_PG_IDENTIFIER_TO_DOT_LEVEL[id_type]} 
dots') - return '.'.join(identifier_fragments) + raise SQLParseError( + f"PostgreSQL does not support {id_type} with more than {_PG_IDENTIFIER_TO_DOT_LEVEL[id_type]} dots" + ) + return ".".join(identifier_fragments) def mysql_quote_identifier(identifier, id_type): - identifier_fragments = _identifier_parse(identifier, quote_char='`') + identifier_fragments = _identifier_parse(identifier, quote_char="`") if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]: - raise SQLParseError(f'MySQL does not support {id_type} with more than {_MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]} dots') + raise SQLParseError( + f"MySQL does not support {id_type} with more than {_MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]} dots" + ) special_cased_fragments = [] for fragment in identifier_fragments: - if fragment == '`*`': - special_cased_fragments.append('*') + if fragment == "`*`": + special_cased_fragments.append("*") else: special_cased_fragments.append(fragment) - return '.'.join(special_cased_fragments) + return ".".join(special_cased_fragments) def is_input_dangerous(string): diff --git a/plugins/module_utils/dimensiondata.py b/plugins/module_utils/dimensiondata.py index 4964dc500b9..0a7a06b78e6 100644 --- a/plugins/module_utils/dimensiondata.py +++ b/plugins/module_utils/dimensiondata.py @@ -70,34 +70,30 @@ def __init__(self, module): self.module = module if not HAS_LIBCLOUD: - self.module.fail_json(msg=missing_required_lib('libcloud'), exception=LIBCLOUD_IMP_ERR) + self.module.fail_json(msg=missing_required_lib("libcloud"), exception=LIBCLOUD_IMP_ERR) # Credentials are common to all Dimension Data modules. credentials = self.get_credentials() - self.user_id = credentials['user_id'] - self.key = credentials['key'] + self.user_id = credentials["user_id"] + self.key = credentials["key"] # Region and location are common to all Dimension Data modules. - region = self.module.params['region'] - self.region = f'dd-{region}' - self.location = self.module.params['location'] + region = self.module.params["region"] + self.region = f"dd-{region}" + self.location = self.module.params["location"] - libcloud.security.VERIFY_SSL_CERT = self.module.params['validate_certs'] + libcloud.security.VERIFY_SSL_CERT = self.module.params["validate_certs"] - self.driver = get_driver(Provider.DIMENSIONDATA)( - self.user_id, - self.key, - region=self.region - ) + self.driver = get_driver(Provider.DIMENSIONDATA)(self.user_id, self.key, region=self.region) # Determine the MCP API version (this depends on the target datacenter). 
self.mcp_version = self.get_mcp_version(self.location) # Optional "wait-for-completion" arguments - if 'wait' in self.module.params: - self.wait = self.module.params['wait'] - self.wait_time = self.module.params['wait_time'] - self.wait_poll_interval = self.module.params['wait_poll_interval'] + if "wait" in self.module.params: + self.wait = self.module.params["wait"] + self.wait_time = self.module.params["wait_time"] + self.wait_poll_interval = self.module.params["wait_poll_interval"] else: self.wait = False self.wait_time = 0 @@ -122,29 +118,29 @@ def get_credentials(self): """ if not HAS_LIBCLOUD: - self.module.fail_json(msg='libcloud is required for this module.') + self.module.fail_json(msg="libcloud is required for this module.") user_id = None key = None # First, try the module configuration - if 'mcp_user' in self.module.params: - if 'mcp_password' not in self.module.params: + if "mcp_user" in self.module.params: + if "mcp_password" not in self.module.params: self.module.fail_json( msg='"mcp_user" parameter was specified, but not "mcp_password" (either both must be specified, or neither).' ) - user_id = self.module.params['mcp_user'] - key = self.module.params['mcp_password'] + user_id = self.module.params["mcp_user"] + key = self.module.params["mcp_password"] # Fall back to environment if not user_id or not key: - user_id = os.environ.get('MCP_USER', None) - key = os.environ.get('MCP_PASSWORD', None) + user_id = os.environ.get("MCP_USER", None) + key = os.environ.get("MCP_PASSWORD", None) # Finally, try dotfile (~/.dimensiondata) if not user_id or not key: - home = expanduser('~') + home = expanduser("~") config = configparser.RawConfigParser() config.read(f"{home}/.dimensiondata") @@ -171,9 +167,9 @@ def get_mcp_version(self, location): location = self.driver.ex_get_location_by_id(location) if MCP_2_LOCATION_NAME_PATTERN.match(location.name): - return '2.0' + return "2.0" - return '1.0' + return "1.0" def get_network_domain(self, locator, location): """ @@ -184,7 +180,8 @@ def get_network_domain(self, locator, location): network_domain = self.driver.ex_get_network_domain(locator) else: matching_network_domains = [ - network_domain for network_domain in self.driver.ex_list_network_domains(location=location) + network_domain + for network_domain in self.driver.ex_list_network_domains(location=location) if network_domain.name == locator ] @@ -206,8 +203,7 @@ def get_vlan(self, locator, location, network_domain): vlan = self.driver.ex_get_vlan(locator) else: matching_vlans = [ - vlan for vlan in self.driver.ex_list_vlans(location, network_domain) - if vlan.name == locator + vlan for vlan in self.driver.ex_list_vlans(location, network_domain) if vlan.name == locator ] if matching_vlans: @@ -229,11 +225,11 @@ def argument_spec(**additional_argument_spec): """ spec = dict( - region=dict(type='str', default='na'), - mcp_user=dict(type='str', required=False), - mcp_password=dict(type='str', required=False, no_log=True), - location=dict(type='str', required=True), - validate_certs=dict(type='bool', required=False, default=True) + region=dict(type="str", default="na"), + mcp_user=dict(type="str", required=False), + mcp_password=dict(type="str", required=False, no_log=True), + location=dict(type="str", required=True), + validate_certs=dict(type="bool", required=False, default=True), ) if additional_argument_spec: @@ -250,9 +246,9 @@ def argument_spec_with_wait(**additional_argument_spec): """ spec = DimensionDataModule.argument_spec( - wait=dict(type='bool', required=False, default=False), - 
wait_time=dict(type='int', required=False, default=600), - wait_poll_interval=dict(type='int', required=False, default=2) + wait=dict(type="bool", required=False, default=False), + wait_time=dict(type="int", required=False, default=600), + wait_poll_interval=dict(type="int", required=False, default=2), ) if additional_argument_spec: @@ -268,9 +264,7 @@ def required_together(*additional_required_together): :return: An array containing the argument specifications. """ - required_together = [ - ['mcp_user', 'mcp_password'] - ] + required_together = [["mcp_user", "mcp_password"]] if additional_required_together: required_together.extend(additional_required_together) @@ -319,7 +313,7 @@ def get_dd_regions(): all_regions = API_ENDPOINTS.keys() # Only Dimension Data endpoints (no prefix) - regions = [region[3:] for region in all_regions if region.startswith('dd-')] + regions = [region[3:] for region in all_regions if region.startswith("dd-")] return regions diff --git a/plugins/module_utils/django.py b/plugins/module_utils/django.py index c39ac88ed8b..0f807e673a1 100644 --- a/plugins/module_utils/django.py +++ b/plugins/module_utils/django.py @@ -71,10 +71,10 @@ # keys can be used in _django_args _args_menu = dict( std=(django_std_args, _django_std_arg_fmts), - database=(_database_dash, {"database": _django_std_arg_fmts["database_dash"]}), # deprecate, remove in 13.0.0 + database=(_database_dash, {"database": _django_std_arg_fmts["database_dash"]}), # deprecate, remove in 13.0.0 noinput=({}, {"noinput": cmd_runner_fmt.as_fixed("--noinput")}), # deprecate, remove in 13.0.0 - dry_run=({}, {"dry_run": cmd_runner_fmt.as_bool("--dry-run")}), # deprecate, remove in 13.0.0 - check=({}, {"check": cmd_runner_fmt.as_bool("--check")}), # deprecate, remove in 13.0.0 + dry_run=({}, {"dry_run": cmd_runner_fmt.as_bool("--dry-run")}), # deprecate, remove in 13.0.0 + check=({}, {"check": cmd_runner_fmt.as_bool("--check")}), # deprecate, remove in 13.0.0 database_dash=(_database_dash, {}), data=(_data, {}), ) @@ -89,9 +89,17 @@ def __init__(self, module, arg_formats=None, **kwargs): def __call__(self, output_process=None, check_mode_skip=False, check_mode_return=None, **kwargs): args_order = ( - ("command", "no_color", "settings", "pythonpath", "traceback", "verbosity", "skip_checks") + self._prepare_args_order(self.default_args_order) + "command", + "no_color", + "settings", + "pythonpath", + "traceback", + "verbosity", + "skip_checks", + ) + self._prepare_args_order(self.default_args_order) + return super().__call__( + args_order, output_process, check_mode_skip=check_mode_skip, check_mode_return=check_mode_return, **kwargs ) - return super().__call__(args_order, output_process, check_mode_skip=check_mode_skip, check_mode_return=check_mode_return, **kwargs) def bare_context(self, *args, **kwargs): return super().__call__(*args, **kwargs) @@ -106,9 +114,9 @@ class DjangoModuleHelper(ModuleHelper): _check_mode_arg: str = "" def __init__(self): - self.module["argument_spec"], self.arg_formats = self._build_args(self.module.get("argument_spec", {}), - self.arg_formats, - *(["std"] + self._django_args)) + self.module["argument_spec"], self.arg_formats = self._build_args( + self.module.get("argument_spec", {}), self.arg_formats, *(["std"] + self._django_args) + ) super().__init__(self.module) if self.django_admin_cmd is not None: self.vars.command = self.django_admin_cmd @@ -127,11 +135,13 @@ def _build_args(arg_spec, arg_format, *names): return res_arg_spec, res_arg_fmts def __run__(self): - runner = 
_DjangoRunner(self.module, - default_args_order=self.django_admin_arg_order, - arg_formats=self.arg_formats, - venv=self.vars.venv, - check_rc=True) + runner = _DjangoRunner( + self.module, + default_args_order=self.django_admin_arg_order, + arg_formats=self.arg_formats, + venv=self.vars.venv, + check_rc=True, + ) run_params = self.vars.as_dict() if self._check_mode_arg: diff --git a/plugins/module_utils/gandi_livedns_api.py b/plugins/module_utils/gandi_livedns_api.py index 948d2309d8c..344e6b6bef4 100644 --- a/plugins/module_utils/gandi_livedns_api.py +++ b/plugins/module_utils/gandi_livedns_api.py @@ -11,51 +11,42 @@ class GandiLiveDNSAPI: - - api_endpoint = 'https://api.gandi.net/v5/livedns' + api_endpoint = "https://api.gandi.net/v5/livedns" changed = False error_strings = { - 400: 'Bad request', - 401: 'Permission denied', - 404: 'Resource not found', + 400: "Bad request", + 401: "Permission denied", + 404: "Resource not found", } - attribute_map = { - 'record': 'rrset_name', - 'type': 'rrset_type', - 'ttl': 'rrset_ttl', - 'values': 'rrset_values' - } + attribute_map = {"record": "rrset_name", "type": "rrset_type", "ttl": "rrset_ttl", "values": "rrset_values"} def __init__(self, module): self.module = module - self.api_key = module.params['api_key'] - self.personal_access_token = module.params['personal_access_token'] + self.api_key = module.params["api_key"] + self.personal_access_token = module.params["personal_access_token"] def _build_error_message(self, module, info): - s = '' - body = info.get('body') + s = "" + body = info.get("body") if body: - errors = module.from_json(body).get('errors') + errors = module.from_json(body).get("errors") if errors: error = errors[0] - name = error.get('name') + name = error.get("name") if name: - s += f'{name} :' - description = error.get('description') + s += f"{name} :" + description = error.get("description") if description: s += description return s - def _gandi_api_call(self, api_call, method='GET', payload=None, error_on_404=True): + def _gandi_api_call(self, api_call, method="GET", payload=None, error_on_404=True): authorization_header = ( - f'Bearer {self.personal_access_token}' - if self.personal_access_token - else f'Apikey {self.api_key}' + f"Bearer {self.personal_access_token}" if self.personal_access_token else f"Apikey {self.api_key}" ) - headers = {'Authorization': authorization_header, - 'Content-Type': 'application/json'} + headers = {"Authorization": authorization_header, "Content-Type": "application/json"} data = None if payload: try: @@ -63,15 +54,11 @@ def _gandi_api_call(self, api_call, method='GET', payload=None, error_on_404=Tru except Exception as e: self.module.fail_json(msg=f"Failed to encode payload as JSON: {e} ") - resp, info = fetch_url(self.module, - self.api_endpoint + api_call, - headers=headers, - data=data, - method=method) + resp, info = fetch_url(self.module, self.api_endpoint + api_call, headers=headers, data=data, method=method) - error_msg = '' - if info['status'] >= 400 and (info['status'] != 404 or error_on_404): - err_s = self.error_strings.get(info['status'], '') + error_msg = "" + if info["status"] >= 400 and (info["status"] != 404 or error_on_404): + err_s = self.error_strings.get(info["status"], "") error_msg = f"API Error {err_s}: {self._build_error_message(self.module, info)}" @@ -83,14 +70,14 @@ def _gandi_api_call(self, api_call, method='GET', payload=None, error_on_404=Tru if content: try: - result = json.loads(to_text(content, errors='surrogate_or_strict')) - except (getattr(json, 
'JSONDecodeError', ValueError)) as e: + result = json.loads(to_text(content, errors="surrogate_or_strict")) + except getattr(json, "JSONDecodeError", ValueError) as e: error_msg += f"; Failed to parse API response with error {e}: {content}" if error_msg: self.module.fail_json(msg=error_msg) - return result, info['status'] + return result, info["status"] def build_result(self, result, domain): if result is None: @@ -100,11 +87,11 @@ def build_result(self, result, domain): for k in self.attribute_map: v = result.get(self.attribute_map[k], None) if v is not None: - if k == 'record' and v == '@': - v = '' + if k == "record" and v == "@": + v = "" res[k] = v - res['domain'] = domain + res["domain"] = domain return res @@ -114,11 +101,11 @@ def build_results(self, results, domain): return [self.build_result(r, domain) for r in results] def get_records(self, record, type, domain): - url = f'/domains/{domain}/records' + url = f"/domains/{domain}/records" if record: - url += f'/{record}' + url += f"/{record}" if type: - url += f'/{type}' + url += f"/{type}" records, status = self._gandi_api_call(url, error_on_404=False) @@ -130,44 +117,45 @@ def get_records(self, record, type, domain): # filter by type if record is not set if not record and type: - records = [r - for r in records - if r['rrset_type'] == type] + records = [r for r in records if r["rrset_type"] == type] return records def create_record(self, record, type, values, ttl, domain): - url = f'/domains/{domain}/records' + url = f"/domains/{domain}/records" new_record = { - 'rrset_name': record, - 'rrset_type': type, - 'rrset_values': values, - 'rrset_ttl': ttl, + "rrset_name": record, + "rrset_type": type, + "rrset_values": values, + "rrset_ttl": ttl, } - record, status = self._gandi_api_call(url, method='POST', payload=new_record) + record, status = self._gandi_api_call(url, method="POST", payload=new_record) - if status in (200, 201,): + if status in ( + 200, + 201, + ): return new_record return None def update_record(self, record, type, values, ttl, domain): - url = f'/domains/{domain}/records/{record}/{type}' + url = f"/domains/{domain}/records/{record}/{type}" new_record = { - 'rrset_values': values, - 'rrset_ttl': ttl, + "rrset_values": values, + "rrset_ttl": ttl, } - record = self._gandi_api_call(url, method='PUT', payload=new_record)[0] + record = self._gandi_api_call(url, method="PUT", payload=new_record)[0] return record def delete_record(self, record, type, domain): - url = f'/domains/{domain}/records/{record}/{type}' + url = f"/domains/{domain}/records/{record}/{type}" - self._gandi_api_call(url, method='DELETE') + self._gandi_api_call(url, method="DELETE") def delete_dns_record(self, record, type, values, domain): - if record == '': - record = '@' + if record == "": + record = "@" records = self.get_records(record, type, domain) @@ -176,11 +164,11 @@ def delete_dns_record(self, record, type, values, domain): self.changed = True - if values is not None and set(cur_record['rrset_values']) != set(values): - new_values = set(cur_record['rrset_values']) - set(values) + if values is not None and set(cur_record["rrset_values"]) != set(values): + new_values = set(cur_record["rrset_values"]) - set(values) if new_values: # Removing one or more values from a record, we update the record with the remaining values - self.update_record(record, type, list(new_values), cur_record['rrset_ttl'], domain) + self.update_record(record, type, list(new_values), cur_record["rrset_ttl"], domain) records = self.get_records(record, type, domain) return 
records[0], self.changed @@ -192,8 +180,8 @@ def delete_dns_record(self, record, type, values, domain): return None, self.changed def ensure_dns_record(self, record, type, ttl, values, domain): - if record == '': - record = '@' + if record == "": + record = "@" records = self.get_records(record, type, domain) @@ -201,19 +189,14 @@ def ensure_dns_record(self, record, type, ttl, values, domain): cur_record = records[0] do_update = False - if ttl is not None and cur_record['rrset_ttl'] != ttl: + if ttl is not None and cur_record["rrset_ttl"] != ttl: do_update = True - if values is not None and set(cur_record['rrset_values']) != set(values): + if values is not None and set(cur_record["rrset_values"]) != set(values): do_update = True if do_update: if self.module.check_mode: - result = dict( - rrset_type=type, - rrset_name=record, - rrset_values=values, - rrset_ttl=ttl - ) + result = dict(rrset_type=type, rrset_name=record, rrset_values=values, rrset_ttl=ttl) else: self.update_record(record, type, values, ttl, domain) @@ -225,12 +208,7 @@ def ensure_dns_record(self, record, type, ttl, values, domain): return cur_record, self.changed if self.module.check_mode: - new_record = dict( - rrset_type=type, - rrset_name=record, - rrset_values=values, - rrset_ttl=ttl - ) + new_record = dict(rrset_type=type, rrset_name=record, rrset_values=values, rrset_ttl=ttl) result = new_record else: result = self.create_record(record, type, values, ttl, domain) diff --git a/plugins/module_utils/gconftool2.py b/plugins/module_utils/gconftool2.py index 7d11078edf2..9eafa553fd2 100644 --- a/plugins/module_utils/gconftool2.py +++ b/plugins/module_utils/gconftool2.py @@ -17,7 +17,7 @@ def gconftool2_runner(module, **kwargs): return CmdRunner( module, - command='gconftool-2', + command="gconftool-2", arg_formats=dict( state=cmd_runner_fmt.as_map(_state_map), key=cmd_runner_fmt.as_list(), @@ -27,5 +27,5 @@ def gconftool2_runner(module, **kwargs): config_source=cmd_runner_fmt.as_opt_val("--config-source"), version=cmd_runner_fmt.as_fixed("--version"), ), - **kwargs + **kwargs, ) diff --git a/plugins/module_utils/gio_mime.py b/plugins/module_utils/gio_mime.py index 15122b1ef13..e6987c6e7d3 100644 --- a/plugins/module_utils/gio_mime.py +++ b/plugins/module_utils/gio_mime.py @@ -10,14 +10,14 @@ def gio_mime_runner(module, **kwargs): return CmdRunner( module, - command=['gio'], + command=["gio"], arg_formats=dict( - mime=cmd_runner_fmt.as_fixed('mime'), + mime=cmd_runner_fmt.as_fixed("mime"), mime_type=cmd_runner_fmt.as_list(), handler=cmd_runner_fmt.as_list(), - version=cmd_runner_fmt.as_fixed('--version'), + version=cmd_runner_fmt.as_fixed("--version"), ), - **kwargs + **kwargs, ) diff --git a/plugins/module_utils/gitlab.py b/plugins/module_utils/gitlab.py index b1b0082fa28..a2092d4aa32 100644 --- a/plugins/module_utils/gitlab.py +++ b/plugins/module_utils/gitlab.py @@ -1,4 +1,3 @@ - # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # Copyright (c) 2018, Marcus Watkins # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -19,20 +18,21 @@ def _determine_list_all_kwargs(version) -> dict[str, t.Any]: gitlab_version = LooseVersion(version) - if gitlab_version >= LooseVersion('4.0.0'): + if gitlab_version >= LooseVersion("4.0.0"): # 4.0.0 removed 'as_list' - return {'iterator': True, 'per_page': 100} - elif gitlab_version >= LooseVersion('3.7.0'): + return {"iterator": True, "per_page": 100} + elif gitlab_version >= LooseVersion("3.7.0"): # 3.7.0 added 
'get_all' - return {'as_list': False, 'get_all': True, 'per_page': 100} + return {"as_list": False, "get_all": True, "per_page": 100} else: - return {'as_list': False, 'all': True, 'per_page': 100} + return {"as_list": False, "all": True, "per_page": 100} GITLAB_IMP_ERR: str | None = None try: import gitlab import requests + HAS_GITLAB_PACKAGE = True list_all_kwargs = _determine_list_all_kwargs(gitlab.__version__) except Exception: @@ -43,12 +43,12 @@ def _determine_list_all_kwargs(version) -> dict[str, t.Any]: def auth_argument_spec(spec=None): - arg_spec = (dict( - ca_path=dict(type='str'), - api_token=dict(type='str', no_log=True), - api_oauth_token=dict(type='str', no_log=True), - api_job_token=dict(type='str', no_log=True), - )) + arg_spec = dict( + ca_path=dict(type="str"), + api_token=dict(type="str", no_log=True), + api_oauth_token=dict(type="str", no_log=True), + api_job_token=dict(type="str", no_log=True), + ) if spec: arg_spec.update(spec) return arg_spec @@ -79,28 +79,30 @@ def find_group(gitlab_instance, identifier): def ensure_gitlab_package(module, min_version=None): if not HAS_GITLAB_PACKAGE: module.fail_json( - msg=missing_required_lib("python-gitlab", url='https://python-gitlab.readthedocs.io/en/stable/'), - exception=GITLAB_IMP_ERR + msg=missing_required_lib("python-gitlab", url="https://python-gitlab.readthedocs.io/en/stable/"), + exception=GITLAB_IMP_ERR, ) gitlab_version = gitlab.__version__ if min_version is not None and LooseVersion(gitlab_version) < LooseVersion(min_version): - module.fail_json(msg=( - f"This module requires python-gitlab Python module >= {min_version} (installed version: " - f"{gitlab_version}). Please upgrade python-gitlab to version {min_version} or above." - )) + module.fail_json( + msg=( + f"This module requires python-gitlab Python module >= {min_version} (installed version: " + f"{gitlab_version}). Please upgrade python-gitlab to version {min_version} or above." 
+ ) + ) def gitlab_authentication(module, min_version=None): ensure_gitlab_package(module, min_version=min_version) - gitlab_url = module.params['api_url'] - validate_certs = module.params['validate_certs'] - ca_path = module.params['ca_path'] - gitlab_user = module.params['api_username'] - gitlab_password = module.params['api_password'] - gitlab_token = module.params['api_token'] - gitlab_oauth_token = module.params['api_oauth_token'] - gitlab_job_token = module.params['api_job_token'] + gitlab_url = module.params["api_url"] + validate_certs = module.params["validate_certs"] + ca_path = module.params["ca_path"] + gitlab_user = module.params["api_username"] + gitlab_password = module.params["api_password"] + gitlab_token = module.params["api_token"] + gitlab_oauth_token = module.params["api_oauth_token"] + gitlab_job_token = module.params["api_job_token"] verify = ca_path if validate_certs and ca_path else validate_certs @@ -108,21 +110,29 @@ def gitlab_authentication(module, min_version=None): # We can create an oauth_token using a username and password # https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow if gitlab_user: - data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password} + data = {"grant_type": "password", "username": gitlab_user, "password": gitlab_password} resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=verify) resp_data = resp.json() gitlab_oauth_token = resp_data["access_token"] - gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=verify, private_token=gitlab_token, - oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4) + gitlab_instance = gitlab.Gitlab( + url=gitlab_url, + ssl_verify=verify, + private_token=gitlab_token, + oauth_token=gitlab_oauth_token, + job_token=gitlab_job_token, + api_version=4, + ) gitlab_instance.auth() except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e: module.fail_json(msg=f"Failed to connect to GitLab server: {e}") - except (gitlab.exceptions.GitlabHttpError) as e: - module.fail_json(msg=( - f"Failed to connect to GitLab server: {e}. GitLab remove Session API now " - "that private tokens are removed from user API endpoints since version 10.2." - )) + except gitlab.exceptions.GitlabHttpError as e: + module.fail_json( + msg=( + f"Failed to connect to GitLab server: {e}. GitLab remove Session API now " + "that private tokens are removed from user API endpoints since version 10.2." 
+ ) + ) return gitlab_instance @@ -130,7 +140,17 @@ def gitlab_authentication(module, min_version=None): def filter_returned_variables(gitlab_variables): # pop properties we don't know existing_variables = [dict(x.attributes) for x in gitlab_variables] - KNOWN = ['key', 'value', 'description', 'masked', 'hidden', 'protected', 'variable_type', 'environment_scope', 'raw'] + KNOWN = [ + "key", + "value", + "description", + "masked", + "hidden", + "protected", + "variable_type", + "environment_scope", + "raw", + ] for item in existing_variables: for key in list(item.keys()): if key not in KNOWN: @@ -159,17 +179,17 @@ def vars_to_variables(vars, module): elif isinstance(value, dict): new_item = { "name": item, - "value": value.get('value'), - "description": value.get('description'), - "masked": value.get('masked'), - "hidden": value.get('hidden'), - "protected": value.get('protected'), - "raw": value.get('raw'), - "variable_type": value.get('variable_type'), + "value": value.get("value"), + "description": value.get("description"), + "masked": value.get("masked"), + "hidden": value.get("hidden"), + "protected": value.get("protected"), + "raw": value.get("raw"), + "variable_type": value.get("variable_type"), } - if value.get('environment_scope'): - new_item['environment_scope'] = value.get('environment_scope') + if value.get("environment_scope"): + new_item["environment_scope"] = value.get("environment_scope") variables.append(new_item) diff --git a/plugins/module_utils/heroku.py b/plugins/module_utils/heroku.py index 149e11162e6..ed9f8c59cd0 100644 --- a/plugins/module_utils/heroku.py +++ b/plugins/module_utils/heroku.py @@ -12,12 +12,13 @@ HEROKU_IMP_ERR = None try: import heroku3 + HAS_HEROKU = True except ImportError: HEROKU_IMP_ERR = traceback.format_exc() -class HerokuHelper(): +class HerokuHelper: def __init__(self, module): self.module = module self.check_lib() @@ -25,17 +26,18 @@ def __init__(self, module): def check_lib(self): if not HAS_HEROKU: - self.module.fail_json(msg=missing_required_lib('heroku3'), exception=HEROKU_IMP_ERR) + self.module.fail_json(msg=missing_required_lib("heroku3"), exception=HEROKU_IMP_ERR) @staticmethod def heroku_argument_spec(): return dict( - api_key=dict(fallback=(env_fallback, ['HEROKU_API_KEY', 'TF_VAR_HEROKU_API_KEY']), type='str', no_log=True)) + api_key=dict(fallback=(env_fallback, ["HEROKU_API_KEY", "TF_VAR_HEROKU_API_KEY"]), type="str", no_log=True) + ) def get_heroku_client(self): client = heroku3.from_key(self.api_key) if not client.is_authenticated: - self.module.fail_json(msg='Heroku authentication failure, please check your API Key') + self.module.fail_json(msg="Heroku authentication failure, please check your API Key") return client diff --git a/plugins/module_utils/homebrew.py b/plugins/module_utils/homebrew.py index 5747dc57c05..2908bee72ee 100644 --- a/plugins/module_utils/homebrew.py +++ b/plugins/module_utils/homebrew.py @@ -92,9 +92,7 @@ def valid_brew_path(cls, brew_path): if brew_path is None: return True - return isinstance( - brew_path, str - ) and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) + return isinstance(brew_path, str) and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) @classmethod def valid_package(cls, package): @@ -103,9 +101,7 @@ def valid_package(cls, package): if package is None: return True - return isinstance( - package, str - ) and not cls.INVALID_PACKAGE_REGEX.search(package) + return isinstance(package, str) and not cls.INVALID_PACKAGE_REGEX.search(package) def parse_brew_path(module): diff --git 
a/plugins/module_utils/hwc_utils.py b/plugins/module_utils/hwc_utils.py index c9d554c2846..a23f339b513 100644 --- a/plugins/module_utils/hwc_utils.py +++ b/plugins/module_utils/hwc_utils.py @@ -13,13 +13,13 @@ from keystoneauth1.adapter import Adapter from keystoneauth1.identity import v3 from keystoneauth1 import session + HAS_THIRD_LIBRARIES = True except ImportError: THIRD_LIBRARIES_IMP_ERR = traceback.format_exc() HAS_THIRD_LIBRARIES = False -from ansible.module_utils.basic import (AnsibleModule, env_fallback, - missing_required_lib) +from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib from ansible.module_utils.common.text.converters import to_text @@ -59,21 +59,19 @@ def _wrap(self, url, *args, **kwargs): url = self.endpoint + url r = f(self, url, *args, **kwargs) except Exception as ex: - raise HwcClientException( - 0, f"Sending request failed, error={ex}") + raise HwcClientException(0, f"Sending request failed, error={ex}") result = None if r.content: try: result = r.json() except Exception as ex: - raise HwcClientException( - 0, f"Parsing response to json failed, error: {ex}") + raise HwcClientException(0, f"Parsing response to json failed, error: {ex}") code = r.status_code if code not in [200, 201, 202, 203, 204, 205, 206, 207, 208, 226]: msg = "" - for i in ['message', 'error.message']: + for i in ["message", "error.message"]: try: msg = navigate_value(result, i) break @@ -97,8 +95,8 @@ def __init__(self, client, endpoint, product): self._client = client self._endpoint = endpoint self._default_header = { - 'User-Agent': f"Huawei-Ansible-MM-{product}", - 'Accept': 'application/json', + "User-Agent": f"Huawei-Ansible-MM-{product}", + "Accept": "application/json", } @property @@ -111,23 +109,19 @@ def endpoint(self, e): @session_method_wrapper def get(self, url, body=None, header=None, timeout=None): - return self._client.get(url, json=body, timeout=timeout, - headers=self._header(header)) + return self._client.get(url, json=body, timeout=timeout, headers=self._header(header)) @session_method_wrapper def post(self, url, body=None, header=None, timeout=None): - return self._client.post(url, json=body, timeout=timeout, - headers=self._header(header)) + return self._client.post(url, json=body, timeout=timeout, headers=self._header(header)) @session_method_wrapper def delete(self, url, body=None, header=None, timeout=None): - return self._client.delete(url, json=body, timeout=timeout, - headers=self._header(header)) + return self._client.delete(url, json=body, timeout=timeout, headers=self._header(header)) @session_method_wrapper def put(self, url, body=None, header=None, timeout=None): - return self._client.put(url, json=body, timeout=timeout, - headers=self._header(header)) + return self._client.put(url, json=body, timeout=timeout, headers=self._header(header)) def _header(self, header): if header and isinstance(header, dict): @@ -167,22 +161,18 @@ def client(self, region, service_type, service_level): def _gen_provider_client(self): m = self._module p = { - "auth_url": m.params['identity_endpoint'], - "password": m.params['password'], - "username": m.params['user'], - "project_name": m.params['project'], - "user_domain_name": m.params['domain'], - "reauthenticate": True + "auth_url": m.params["identity_endpoint"], + "password": m.params["password"], + "username": m.params["user"], + "project_name": m.params["project"], + "user_domain_name": m.params["domain"], + "reauthenticate": True, } - self._project_client = Adapter( - 
session.Session(auth=v3.Password(**p)), - raise_exc=False) + self._project_client = Adapter(session.Session(auth=v3.Password(**p)), raise_exc=False) p.pop("project_name") - self._domain_client = Adapter( - session.Session(auth=v3.Password(**p)), - raise_exc=False) + self._domain_client = Adapter(session.Session(auth=v3.Password(**p)), raise_exc=False) def _get_service_endpoint(self, client, service_type, region): k = f"{service_type}.{region if region else ''}" @@ -192,15 +182,12 @@ url = None try: - url = client.get_endpoint(service_type=service_type, - region_name=region, interface="public") + url = client.get_endpoint(service_type=service_type, region_name=region, interface="public") except Exception as ex: - raise HwcClientException( - 0, f"Getting endpoint failed, error={ex}") + raise HwcClientException(0, f"Getting endpoint failed, error={ex}") if url == "": - raise HwcClientException( - 0, f"Cannot find the endpoint for {service_type}") + raise HwcClientException(0, f"Cannot find the endpoint for {service_type}") if url[-1] != "/": url += "/" @@ -210,42 +197,46 @@ def _validate(self): if not HAS_THIRD_LIBRARIES: - self.module.fail_json( - msg=missing_required_lib('keystoneauth1'), - exception=THIRD_LIBRARIES_IMP_ERR) + self.module.fail_json(msg=missing_required_lib("keystoneauth1"), exception=THIRD_LIBRARIES_IMP_ERR) class HwcModule(AnsibleModule): def __init__(self, *args, **kwargs): - arg_spec = kwargs.setdefault('argument_spec', {}) + arg_spec = kwargs.setdefault("argument_spec", {}) arg_spec.update( dict( identity_endpoint=dict( - required=True, type='str', - fallback=(env_fallback, ['ANSIBLE_HWC_IDENTITY_ENDPOINT']), + required=True, + type="str", + fallback=(env_fallback, ["ANSIBLE_HWC_IDENTITY_ENDPOINT"]), ), user=dict( - required=True, type='str', - fallback=(env_fallback, ['ANSIBLE_HWC_USER']), + required=True, + type="str", + fallback=(env_fallback, ["ANSIBLE_HWC_USER"]), ), password=dict( - required=True, type='str', no_log=True, - fallback=(env_fallback, ['ANSIBLE_HWC_PASSWORD']), + required=True, + type="str", + no_log=True, + fallback=(env_fallback, ["ANSIBLE_HWC_PASSWORD"]), ), domain=dict( - required=True, type='str', - fallback=(env_fallback, ['ANSIBLE_HWC_DOMAIN']), + required=True, + type="str", + fallback=(env_fallback, ["ANSIBLE_HWC_DOMAIN"]), ), project=dict( - required=True, type='str', - fallback=(env_fallback, ['ANSIBLE_HWC_PROJECT']), + required=True, + type="str", + fallback=(env_fallback, ["ANSIBLE_HWC_PROJECT"]), ), region=dict( - type='str', - fallback=(env_fallback, ['ANSIBLE_HWC_REGION']), + type="str", + fallback=(env_fallback, ["ANSIBLE_HWC_REGION"]), ), - id=dict(type='str') + id=dict(type="str"), ) ) @@ -253,14 +244,14 @@ class _DictComparison: - ''' This class takes in two dictionaries `a` and `b`. - These are dictionaries of arbitrary depth, but made up of standard - Python types only. - This differ will compare all values in `a` to those in `b`. - If value in `a` is None, always returns True, indicating - this value is no need to compare. - Note: On all lists, order does matter. - ''' + """This class takes in two dictionaries `a` and `b`. + These are dictionaries of arbitrary depth, but made up of standard + Python types only. + This differ will compare all values in `a` to those in `b`. + If value in `a` is None, always returns True, indicating + there is no need to compare this value.
+ Note: On all lists, order does matter. + """ def __init__(self, request): self.request = request @@ -316,8 +307,7 @@ def _compare_value(self, value1, value2): return self._compare_dicts(value1, value2) # Always use to_text values to avoid unicode issues. - return (to_text(value1, errors='surrogate_or_strict') == to_text( - value2, errors='surrogate_or_strict')) + return to_text(value1, errors="surrogate_or_strict") == to_text(value2, errors="surrogate_or_strict") def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3): @@ -338,8 +328,7 @@ def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3): not_found_times += 1 if not_found_times > 10: - raise HwcModuleException( - f"not found the object for {not_found_times} times") + raise HwcModuleException(f"object not found after {not_found_times} consecutive attempts") else: not_found_times = 0 @@ -347,8 +336,7 @@ return obj if pending and status not in pending: - raise HwcModuleException( - f"unexpected status({status}) occurred") + raise HwcModuleException(f"unexpected status({status}) occurred") if not is_last_time: wait *= 2 @@ -372,13 +360,11 @@ def navigate_value(data, index, array_index=None): return None if not isinstance(d, dict): - raise HwcModuleException( - "can't navigate value from a non-dict object") + raise HwcModuleException("can't navigate value from a non-dict object") i = index[n] if i not in d: - raise HwcModuleException( - f"navigate value failed: key({i}) is not exist in dict") + raise HwcModuleException(f"navigate value failed: key({i}) does not exist in dict") d = d[i] if not array_index: @@ -392,13 +378,11 @@ return None if not isinstance(d, list): - raise HwcModuleException( - "can't navigate value from a non-list object") + raise HwcModuleException("can't navigate value from a non-list object") j = array_index.get(k) if j >= len(d): - raise HwcModuleException( - "navigate value failed: the index is out of list") + raise HwcModuleException("navigate value failed: the index is out of range") d = d[j] return d @@ -425,14 +409,14 @@ def build_path(module, path, kv=None): def get_region(module): - if module.params['region']: - return module.params['region'] + if module.params["region"]: + return module.params["region"] - return module.params['project'].split("_")[0] + return module.params["project"].split("_")[0] def is_empty_value(v): - return (not v) + return not v def are_different_dicts(dict1, dict2): diff --git a/plugins/module_utils/ibm_sa_utils.py b/plugins/module_utils/ibm_sa_utils.py index 5153cabd816..89995ce51cc 100644 --- a/plugins/module_utils/ibm_sa_utils.py +++ b/plugins/module_utils/ibm_sa_utils.py @@ -20,48 +20,61 @@ PYXCLI_IMP_ERR = traceback.format_exc() PYXCLI_INSTALLED = False -AVAILABLE_PYXCLI_FIELDS = ['pool', 'size', 'snapshot_size', - 'domain', 'perf_class', 'vol', - 'iscsi_chap_name', 'iscsi_chap_secret', - 'cluster', 'host', 'lun', 'override', - 'fcaddress', 'iscsi_name', 'max_dms', - 'max_cgs', 'ldap_id', 'max_mirrors', - 'max_pools', 'max_volumes', 'hard_capacity', - 'soft_capacity'] +AVAILABLE_PYXCLI_FIELDS = [ + "pool", + "size", + "snapshot_size", + "domain", + "perf_class", + "vol", + "iscsi_chap_name", + "iscsi_chap_secret", + "cluster", + "host", + "lun", + "override", + "fcaddress", + "iscsi_name", + "max_dms", + "max_cgs", + "ldap_id", + "max_mirrors", + "max_pools", + "max_volumes", + "hard_capacity", + "soft_capacity", +] def
xcli_wrapper(func): - """ Catch xcli errors and return a proper message""" + """Catch xcli errors and return a proper message""" + @wraps(func) def wrapper(module, *args, **kwargs): try: return func(module, *args, **kwargs) except errors.CommandExecutionError as e: module.fail_json(msg=to_native(e)) + return wrapper @xcli_wrapper def connect_ssl(module): - endpoints = module.params['endpoints'] - username = module.params['username'] - password = module.params['password'] + endpoints = module.params["endpoints"] + username = module.params["username"] + password = module.params["password"] if not (username and password and endpoints): - module.fail_json( - msg="Username, password or endpoints arguments " - "are missing from the module arguments") + module.fail_json(msg="Username, password or endpoints arguments are missing from the module arguments") try: - return client.XCLIClient.connect_multiendpoint_ssl(username, - password, - endpoints) + return client.XCLIClient.connect_multiendpoint_ssl(username, password, endpoints) except errors.CommandFailedConnectionError as e: - module.fail_json( - msg=f"Connection with Spectrum Accelerate system has failed: {e}.") + module.fail_json(msg=f"Connection with Spectrum Accelerate system has failed: {e}.") def spectrum_accelerate_spec(): - """ Return arguments spec for AnsibleModule """ + """Return arguments spec for AnsibleModule""" return dict( endpoints=dict(required=True), username=dict(required=True), @@ -77,17 +90,16 @@ def execute_pyxcli_command(module, xcli_command, xcli_client): def build_pyxcli_command(fields): - """ Builds the args for pyxcli using the exact args from ansible""" + """Builds the args for pyxcli using the exact args from ansible""" pyxcli_args = {} for field in fields: if not fields[field]: continue - if field in AVAILABLE_PYXCLI_FIELDS and fields[field] != '': + if field in AVAILABLE_PYXCLI_FIELDS and fields[field] != "": pyxcli_args[field] = fields[field] return pyxcli_args def is_pyxcli_installed(module): if not PYXCLI_INSTALLED: - module.fail_json(msg=missing_required_lib('pyxcli'), - exception=PYXCLI_IMP_ERR) + module.fail_json(msg=missing_required_lib("pyxcli"), exception=PYXCLI_IMP_ERR) diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index f8c6c86b3cb..b961d4c6144 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -63,8 +63,12 @@ URL_CLIENT_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes/{id}" URL_CLIENT_GROUP_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}" -URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available" -URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite" +URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE = ( + "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available" +) +URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE = ( + "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite" +) URL_USERS = "{url}/admin/realms/{realm}/users" URL_USER = "{url}/admin/realms/{realm}/users/{id}" @@ -78,8 +82,12 @@ URL_CLIENT_SERVICE_ACCOUNT_USER = "{url}/admin/realms/{realm}/clients/{id}/service-account-user" URL_CLIENT_USER_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}" 
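A note on the long-URL rewrites in this region of the patch: the parenthesized form relies on Python's implicit concatenation of adjacent string literals, so each constant's value is unchanged. A minimal sketch of that mechanism (the constant name here is hypothetical, not one of the patched ones):

# Adjacent string literals inside parentheses are joined at compile time,
# so wrapping a long URL template across lines does not change its value.
URL_EXAMPLE = (
    "{url}/admin/realms/{realm}/users/{id}"
    "/role-mappings/clients/{client}/available"
)
assert URL_EXAMPLE == "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/available"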
-URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/available" -URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/composite" +URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE = ( + "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/available" +) +URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE = ( + "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/composite" +) URL_REALM_GROUP_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{group}/role-mappings/realm" @@ -89,7 +97,9 @@ URL_AUTHENTICATION_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{id}" URL_AUTHENTICATION_FLOW_COPY = "{url}/admin/realms/{realm}/authentication/flows/{copyfrom}/copy" URL_AUTHENTICATION_FLOW_EXECUTIONS = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions" -URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions/execution" +URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION = ( + "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions/execution" +) URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions/flow" URL_AUTHENTICATION_EXECUTION_CONFIG = "{url}/admin/realms/{realm}/authentication/executions/{id}/config" URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/raise-priority" @@ -117,8 +127,12 @@ URL_AUTHZ_POLICIES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy" URL_AUTHZ_POLICY = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy/{id}" -URL_AUTHZ_PERMISSION = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/permission/{permission_type}/{id}" -URL_AUTHZ_PERMISSIONS = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/permission/{permission_type}" +URL_AUTHZ_PERMISSION = ( + "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/permission/{permission_type}/{id}" +) +URL_AUTHZ_PERMISSIONS = ( + "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/permission/{permission_type}" +) URL_AUTHZ_RESOURCES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/resource" @@ -133,22 +147,22 @@ def keycloak_argument_spec(): :return: argument_spec dict """ return dict( - auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False), - auth_client_id=dict(type='str', default='admin-cli'), - auth_realm=dict(type='str'), - auth_client_secret=dict(type='str', default=None, no_log=True), - auth_username=dict(type='str', aliases=['username']), - auth_password=dict(type='str', aliases=['password'], no_log=True), - validate_certs=dict(type='bool', default=True), - connection_timeout=dict(type='int', default=10), - token=dict(type='str', no_log=True), - refresh_token=dict(type='str', no_log=True), - http_agent=dict(type='str', default='Ansible'), + auth_keycloak_url=dict(type="str", aliases=["url"], required=True, no_log=False), + auth_client_id=dict(type="str", default="admin-cli"), + auth_realm=dict(type="str"), + auth_client_secret=dict(type="str", default=None, no_log=True), + auth_username=dict(type="str", aliases=["username"]), + auth_password=dict(type="str", aliases=["password"], no_log=True), + validate_certs=dict(type="bool", default=True), + connection_timeout=dict(type="int", default=10), + token=dict(type="str", 
no_log=True), + refresh_token=dict(type="str", no_log=True), + http_agent=dict(type="str", default="Ansible"), ) def camel(words): - return words.split('_')[0] + ''.join(x.capitalize() or '_' for x in words.split('_')[1:]) + return words.split("_")[0] + "".join(x.capitalize() or "_" for x in words.split("_")[1:]) class KeycloakError(Exception): @@ -161,7 +175,7 @@ def __str__(self): def _token_request(module_params, payload): - """ Obtains connection header with token for the authentication, + """Obtains connection header with token for the authentication, using the provided auth_username/auth_password :param module_params: parameters of the module :param payload: @@ -175,48 +189,55 @@ def _token_request(module_params, payload): 'refresh_token' for type 'refresh_token'. :return: access token """ - base_url = module_params.get('auth_keycloak_url') - if not base_url.lower().startswith(('http', 'https')): + base_url = module_params.get("auth_keycloak_url") + if not base_url.lower().startswith(("http", "https")): raise KeycloakError(f"auth_url '{base_url}' should either start with 'http' or 'https'.") - auth_realm = module_params.get('auth_realm') + auth_realm = module_params.get("auth_realm") auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm) - http_agent = module_params.get('http_agent') - validate_certs = module_params.get('validate_certs') - connection_timeout = module_params.get('connection_timeout') + http_agent = module_params.get("http_agent") + validate_certs = module_params.get("validate_certs") + connection_timeout = module_params.get("connection_timeout") try: - r = json.loads(to_native(open_url(auth_url, method='POST', - validate_certs=validate_certs, http_agent=http_agent, timeout=connection_timeout, - data=urlencode(payload)).read())) + r = json.loads( + to_native( + open_url( + auth_url, + method="POST", + validate_certs=validate_certs, + http_agent=http_agent, + timeout=connection_timeout, + data=urlencode(payload), + ).read() + ) + ) - return r['access_token'] + return r["access_token"] except ValueError as e: - raise KeycloakError( - f'API returned invalid JSON when trying to obtain access token from {auth_url}: {e}') + raise KeycloakError(f"API returned invalid JSON when trying to obtain access token from {auth_url}: {e}") except KeyError: - raise KeycloakError( - f'API did not include access_token field in response from {auth_url}') + raise KeycloakError(f"API did not include access_token field in response from {auth_url}") except Exception as e: - raise KeycloakError(f'Could not obtain access token from {auth_url}: {e}', authError=e) + raise KeycloakError(f"Could not obtain access token from {auth_url}: {e}", authError=e) def _request_token_using_credentials(module_params): - """ Obtains connection header with token for the authentication, + """Obtains connection header with token for the authentication, using the provided auth_username/auth_password :param module_params: parameters of the module. Must include 'auth_username' and 'auth_password'. 
:return: connection header """ - client_id = module_params.get('auth_client_id') - auth_username = module_params.get('auth_username') - auth_password = module_params.get('auth_password') - client_secret = module_params.get('auth_client_secret') + client_id = module_params.get("auth_client_id") + auth_username = module_params.get("auth_username") + auth_password = module_params.get("auth_password") + client_secret = module_params.get("auth_client_secret") temp_payload = { - 'grant_type': 'password', - 'client_id': client_id, - 'client_secret': client_secret, - 'username': auth_username, - 'password': auth_password, + "grant_type": "password", + "client_id": client_id, + "client_secret": client_secret, + "username": auth_username, + "password": auth_password, } # Remove empty items, for instance missing client_secret payload = {k: v for k, v in temp_payload.items() if v is not None} @@ -225,20 +246,20 @@ def _request_token_using_credentials(module_params): def _request_token_using_refresh_token(module_params): - """ Obtains connection header with token for the authentication, + """Obtains connection header with token for the authentication, using the provided refresh_token :param module_params: parameters of the module. Must include 'refresh_token'. :return: connection header """ - client_id = module_params.get('auth_client_id') - refresh_token = module_params.get('refresh_token') - client_secret = module_params.get('auth_client_secret') + client_id = module_params.get("auth_client_id") + refresh_token = module_params.get("refresh_token") + client_secret = module_params.get("auth_client_secret") temp_payload = { - 'grant_type': 'refresh_token', - 'client_id': client_id, - 'client_secret': client_secret, - 'refresh_token': refresh_token, + "grant_type": "refresh_token", + "client_id": client_id, + "client_secret": client_secret, + "refresh_token": refresh_token, } # Remove empty items, for instance missing client_secret payload = {k: v for k, v in temp_payload.items() if v is not None} @@ -247,7 +268,7 @@ def _request_token_using_refresh_token(module_params): def _request_token_using_client_credentials(module_params): - """ Obtains connection header with token for the authentication, + """Obtains connection header with token for the authentication, using the provided auth_client_id and auth_client_secret by grant_type client_credentials. Ensure that the used client uses client authorization with service account roles enabled and required service roles assigned. @@ -255,13 +276,13 @@ def _request_token_using_client_credentials(module_params): and 'auth_client_secret'.. 
:return: connection header """ - client_id = module_params.get('auth_client_id') - client_secret = module_params.get('auth_client_secret') + client_id = module_params.get("auth_client_id") + client_secret = module_params.get("auth_client_secret") temp_payload = { - 'grant_type': 'client_credentials', - 'client_id': client_id, - 'client_secret': client_secret, + "grant_type": "client_credentials", + "client_id": client_id, + "client_secret": client_secret, } # Remove empty items, for instance missing client_secret payload = {k: v for k, v in temp_payload.items() if v is not None} @@ -270,26 +291,23 @@ def _request_token_using_client_credentials(module_params): def get_token(module_params): - """ Obtains connection header with token for the authentication, + """Obtains connection header with token for the authentication, token already given or obtained from credentials :param module_params: parameters of the module :return: connection header """ - token = module_params.get('token') + token = module_params.get("token") if token is None: - auth_client_id = module_params.get('auth_client_id') - auth_client_secret = module_params.get('auth_client_secret') - auth_username = module_params.get('auth_username') + auth_client_id = module_params.get("auth_client_id") + auth_client_secret = module_params.get("auth_client_secret") + auth_username = module_params.get("auth_username") if auth_client_id is not None and auth_client_secret is not None and auth_username is None: token = _request_token_using_client_credentials(module_params) else: token = _request_token_using_credentials(module_params) - return { - 'Authorization': f"Bearer {token}", - 'Content-Type': 'application/json' - } + return {"Authorization": f"Bearer {token}", "Content-Type": "application/json"} def is_struct_included(struct1, struct2, exclude=None): @@ -348,24 +366,24 @@ def is_struct_included(struct1, struct2, exclude=None): elif isinstance(struct1, bool) and isinstance(struct2, bool): return struct1 == struct2 else: - return to_text(struct1, 'utf-8') == to_text(struct2, 'utf-8') + return to_text(struct1, "utf-8") == to_text(struct2, "utf-8") class KeycloakAPI: - """ Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which - is obtained through OpenID connect + """Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which + is obtained through OpenID connect """ def __init__(self, module, connection_header): self.module = module - self.baseurl = self.module.params.get('auth_keycloak_url') - self.validate_certs = self.module.params.get('validate_certs') - self.connection_timeout = self.module.params.get('connection_timeout') + self.baseurl = self.module.params.get("auth_keycloak_url") + self.validate_certs = self.module.params.get("validate_certs") + self.connection_timeout = self.module.params.get("connection_timeout") self.restheaders = connection_header - self.http_agent = self.module.params.get('http_agent') + self.http_agent = self.module.params.get("http_agent") def _request(self, url, method, data=None): - """ Makes a request to Keycloak and returns the raw response. + """Makes a request to Keycloak and returns the raw response. If a 401 is returned, attempts to re-authenticate using first the module's refresh_token (if provided) and then the module's username/password (if provided). 
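The three token helpers above share one pattern: assemble the grant payload, drop parameters that were not configured, and form-encode the result for the token endpoint. A standalone sketch of that pattern, with all values hypothetical:

from urllib.parse import urlencode

temp_payload = {
    "grant_type": "password",
    "client_id": "admin-cli",
    "client_secret": None,  # not configured, so it must be omitted entirely
    "username": "admin",
    "password": "changeme",
}
# Remove unset items so optional parameters never reach the token endpoint.
payload = {k: v for k, v in temp_payload.items() if v is not None}
body = urlencode(payload)  # grant_type=password&client_id=admin-cli&username=admin&password=changeme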
@@ -377,12 +395,18 @@ def _request(self, url, method, data=None): :param data: (optional) data for request :return: raw API response """ + def make_request_catching_401(): try: - return open_url(url, method=method, data=data, - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return open_url( + url, + method=method, + data=data, + http_agent=self.http_agent, + headers=self.restheaders, + timeout=self.connection_timeout, + validate_certs=self.validate_certs, + ) except HTTPError as e: if e.code != 401: raise e @@ -392,11 +416,11 @@ def make_request_catching_401(): if isinstance(r, Exception): # Try to refresh token and retry, if available - refresh_token = self.module.params.get('refresh_token') + refresh_token = self.module.params.get("refresh_token") if refresh_token is not None: try: token = _request_token_using_refresh_token(self.module.params) - self.restheaders['Authorization'] = f"Bearer {token}" + self.restheaders["Authorization"] = f"Bearer {token}" r = make_request_catching_401() except KeycloakError as e: @@ -406,22 +430,22 @@ def make_request_catching_401(): if isinstance(r, Exception): # Try to re-auth with username/password, if available - auth_username = self.module.params.get('auth_username') - auth_password = self.module.params.get('auth_password') + auth_username = self.module.params.get("auth_username") + auth_password = self.module.params.get("auth_password") if auth_username is not None and auth_password is not None: token = _request_token_using_credentials(self.module.params) - self.restheaders['Authorization'] = f"Bearer {token}" + self.restheaders["Authorization"] = f"Bearer {token}" r = make_request_catching_401() if isinstance(r, Exception): # Try to re-auth with client_id and client_secret, if available - auth_client_id = self.module.params.get('auth_client_id') - auth_client_secret = self.module.params.get('auth_client_secret') + auth_client_id = self.module.params.get("auth_client_id") + auth_client_secret = self.module.params.get("auth_client_secret") if auth_client_id is not None and auth_client_secret is not None: try: token = _request_token_using_client_credentials(self.module.params) - self.restheaders['Authorization'] = f"Bearer {token}" + self.restheaders["Authorization"] = f"Bearer {token}" r = make_request_catching_401() except KeycloakError as e: @@ -436,7 +460,7 @@ def make_request_catching_401(): return r def _request_and_deserialize(self, url, method, data=None): - """ Wraps the _request method with JSON deserialization of the response. + """Wraps the _request method with JSON deserialization of the response. :param url: request path :param method: request method (e.g., 'GET', 'POST', etc.) 
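The _request retry chain above is easier to see in isolation: perform the call once, and on a 401 walk an ordered list of credential sources (refresh token, then password grant, then client credentials) until one yields a token that works. A condensed sketch of that control flow; the helper names here are hypothetical, not part of the module:

from urllib.error import HTTPError

def request_with_reauth(do_request, set_bearer, token_sources):
    # do_request() performs the HTTP call; set_bearer(token) updates the
    # Authorization header; token_sources yield fresh tokens in priority order.
    def attempt():
        try:
            return do_request()
        except HTTPError as e:
            if e.code != 401:
                raise
            return e  # signal: credentials rejected, try the next source

    r = attempt()
    for get_token in token_sources:
        if not isinstance(r, Exception):
            break
        set_bearer(get_token())
        r = attempt()
    if isinstance(r, Exception):
        raise r  # every credential source failed; surface the 401
    return r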
@@ -445,8 +469,8 @@ def _request_and_deserialize(self, url, method, data=None): """ return json.loads(to_native(self._request(url, method, data).read())) - def get_realm_info_by_id(self, realm='master'): - """ Obtain realm public info by id + def get_realm_info_by_id(self, realm="master"): + """Obtain realm public info by id :param realm: realm id :return: dict of real, representation or None if none matching exist @@ -454,22 +478,22 @@ def get_realm_info_by_id(self, realm='master'): realm_info_url = URL_REALM_INFO.format(url=self.baseurl, realm=realm) try: - return self._request_and_deserialize(realm_info_url, method='GET') + return self._request_and_deserialize(realm_info_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.fail_request(e, msg=f'Could not obtain realm {realm}: {e}', - exception=traceback.format_exc()) + self.fail_request(e, msg=f"Could not obtain realm {realm}: {e}", exception=traceback.format_exc()) except ValueError as e: - self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain realm {realm}: {e}', - exception=traceback.format_exc()) + self.module.fail_json( + msg=f"API returned incorrect JSON when trying to obtain realm {realm}: {e}", + exception=traceback.format_exc(), + ) except Exception as e: - self.module.fail_json(msg=f'Could not obtain realm {realm}: {e}', - exception=traceback.format_exc()) + self.module.fail_json(msg=f"Could not obtain realm {realm}: {e}", exception=traceback.format_exc()) - def get_realm_keys_metadata_by_id(self, realm='master'): + def get_realm_keys_metadata_by_id(self, realm="master"): """Obtain realm public info by id :param realm: realm id @@ -489,19 +513,19 @@ def get_realm_keys_metadata_by_id(self, realm='master'): if e.code == 404: return None else: - self.fail_request(e, msg=f'Could not obtain realm {realm}: {e}', - exception=traceback.format_exc()) + self.fail_request(e, msg=f"Could not obtain realm {realm}: {e}", exception=traceback.format_exc()) except ValueError as e: - self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain realm {realm}: {e}', - exception=traceback.format_exc()) + self.module.fail_json( + msg=f"API returned incorrect JSON when trying to obtain realm {realm}: {e}", + exception=traceback.format_exc(), + ) except Exception as e: - self.module.fail_json(msg=f'Could not obtain realm {realm}: {e}', - exception=traceback.format_exc()) + self.module.fail_json(msg=f"Could not obtain realm {realm}: {e}", exception=traceback.format_exc()) # The Keycloak API expects the realm name (like `master`) not the ID when fetching the realm data. 
# See the Keycloak API docs: https://www.keycloak.org/docs-api/latest/rest-api/#_realms_admin - def get_realm_by_id(self, realm='master'): - """ Obtain realm representation by id + def get_realm_by_id(self, realm="master"): + """Obtain realm representation by id :param realm: realm id :return: dict of real, representation or None if none matching exist @@ -509,23 +533,23 @@ def get_realm_by_id(self, realm='master'): realm_url = URL_REALM.format(url=self.baseurl, realm=realm) try: - return self._request_and_deserialize(realm_url, method='GET') + return self._request_and_deserialize(realm_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.fail_request(e, msg=f'Could not obtain realm {realm}: {e}', - exception=traceback.format_exc()) + self.fail_request(e, msg=f"Could not obtain realm {realm}: {e}", exception=traceback.format_exc()) except ValueError as e: - self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain realm {realm}: {e}', - exception=traceback.format_exc()) + self.module.fail_json( + msg=f"API returned incorrect JSON when trying to obtain realm {realm}: {e}", + exception=traceback.format_exc(), + ) except Exception as e: - self.module.fail_json(msg=f'Could not obtain realm {realm}: {e}', - exception=traceback.format_exc()) + self.module.fail_json(msg=f"Could not obtain realm {realm}: {e}", exception=traceback.format_exc()) def update_realm(self, realmrep, realm="master"): - """ Update an existing realm + """Update an existing realm :param realmrep: corresponding (partial/full) realm representation with updates :param realm: realm to be updated in Keycloak :return: HTTPResponse object on success @@ -533,26 +557,24 @@ def update_realm(self, realmrep, realm="master"): realm_url = URL_REALM.format(url=self.baseurl, realm=realm) try: - return self._request(realm_url, method='PUT', data=json.dumps(realmrep)) + return self._request(realm_url, method="PUT", data=json.dumps(realmrep)) except Exception as e: - self.fail_request(e, msg=f'Could not update realm {realm}: {e}', - exception=traceback.format_exc()) + self.fail_request(e, msg=f"Could not update realm {realm}: {e}", exception=traceback.format_exc()) def create_realm(self, realmrep): - """ Create a realm in keycloak + """Create a realm in keycloak :param realmrep: Realm representation of realm to be created. 
:return: HTTPResponse object on success """ realm_url = URL_REALMS.format(url=self.baseurl) try: - return self._request(realm_url, method='POST', data=json.dumps(realmrep)) + return self._request(realm_url, method="POST", data=json.dumps(realmrep)) except Exception as e: - self.fail_request(e, msg=f"Could not create realm {realmrep['id']}: {e}", - exception=traceback.format_exc()) + self.fail_request(e, msg=f"Could not create realm {realmrep['id']}: {e}", exception=traceback.format_exc()) def delete_realm(self, realm="master"): - """ Delete a realm from Keycloak + """Delete a realm from Keycloak :param realm: realm to be deleted :return: HTTPResponse object on success @@ -560,13 +582,12 @@ def delete_realm(self, realm="master"): realm_url = URL_REALM.format(url=self.baseurl, realm=realm) try: - return self._request(realm_url, method='DELETE') + return self._request(realm_url, method="DELETE") except Exception as e: - self.fail_request(e, msg=f'Could not delete realm {realm}: {e}', - exception=traceback.format_exc()) + self.fail_request(e, msg=f"Could not delete realm {realm}: {e}", exception=traceback.format_exc()) - def get_clients(self, realm='master', filter=None): - """ Obtains client representations for clients in a realm + def get_clients(self, realm="master", filter=None): + """Obtains client representations for clients in a realm :param realm: realm to be queried :param filter: if defined, only the client with clientId specified in the filter is returned @@ -574,17 +595,19 @@ def get_clients(self, realm='master', filter=None): """ clientlist_url = URL_CLIENTS.format(url=self.baseurl, realm=realm) if filter is not None: - clientlist_url += f'?clientId={filter}' + clientlist_url += f"?clientId={filter}" try: - return self._request_and_deserialize(clientlist_url, method='GET') + return self._request_and_deserialize(clientlist_url, method="GET") except ValueError as e: - self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain list of clients for realm {realm}: {e}') + self.module.fail_json( + msg=f"API returned incorrect JSON when trying to obtain list of clients for realm {realm}: {e}" + ) except Exception as e: - self.fail_request(e, msg=f'Could not obtain list of clients for realm {realm}: {e}') + self.fail_request(e, msg=f"Could not obtain list of clients for realm {realm}: {e}") - def get_client_by_clientid(self, client_id, realm='master'): - """ Get client representation by clientId + def get_client_by_clientid(self, client_id, realm="master"): + """Get client representation by clientId :param client_id: The clientId to be queried :param realm: realm from which to obtain the client representation :return: dict with a client representation or None if none matching exist @@ -595,8 +618,8 @@ def get_client_by_clientid(self, client_id, realm='master'): else: return None - def get_client_by_id(self, id, realm='master'): - """ Obtain client representation by id + def get_client_by_id(self, id, realm="master"): + """Obtain client representation by id :param id: id (not clientId) of client to be queried :param realm: client from this realm @@ -605,33 +628,35 @@ def get_client_by_id(self, id, realm='master'): client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) try: - return self._request_and_deserialize(client_url, method='GET') + return self._request_and_deserialize(client_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.fail_request(e, msg=f'Could not obtain client {id} for realm {realm}: {e}') + 
self.fail_request(e, msg=f"Could not obtain client {id} for realm {realm}: {e}") except ValueError as e: - self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain client {id} for realm {realm}: {e}') + self.module.fail_json( + msg=f"API returned incorrect JSON when trying to obtain client {id} for realm {realm}: {e}" + ) except Exception as e: - self.module.fail_json(msg=f'Could not obtain client {id} for realm {realm}: {e}') + self.module.fail_json(msg=f"Could not obtain client {id} for realm {realm}: {e}") - def get_client_id(self, client_id, realm='master'): - """ Obtain id of client by client_id + def get_client_id(self, client_id, realm="master"): + """Obtain id of client by client_id :param client_id: client_id of client to be queried :param realm: client template from this realm :return: id of client (usually a UUID) """ result = self.get_client_by_clientid(client_id, realm) - if isinstance(result, dict) and 'id' in result: - return result['id'] + if isinstance(result, dict) and "id" in result: + return result["id"] else: return None def update_client(self, id, clientrep, realm="master"): - """ Update an existing client + """Update an existing client :param id: id (not clientId) of client to be updated in Keycloak :param clientrep: corresponding (partial/full) client representation with updates :param realm: realm the client is in @@ -640,12 +665,12 @@ def update_client(self, id, clientrep, realm="master"): client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) try: - return self._request(client_url, method='PUT', data=json.dumps(clientrep)) + return self._request(client_url, method="PUT", data=json.dumps(clientrep)) except Exception as e: - self.fail_request(e, msg=f'Could not update client {id} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not update client {id} in realm {realm}: {e}") def create_client(self, clientrep, realm="master"): - """ Create a client in keycloak + """Create a client in keycloak :param clientrep: Client representation of client to be created. Must at least contain field clientId. :param realm: realm for client to be created. :return: HTTPResponse object on success @@ -653,12 +678,12 @@ def create_client(self, clientrep, realm="master"): client_url = URL_CLIENTS.format(url=self.baseurl, realm=realm) try: - return self._request(client_url, method='POST', data=json.dumps(clientrep)) + return self._request(client_url, method="POST", data=json.dumps(clientrep)) except Exception as e: self.fail_request(e, msg=f"Could not create client {clientrep['clientId']} in realm {realm}: {e}") def delete_client(self, id, realm="master"): - """ Delete a client from Keycloak + """Delete a client from Keycloak :param id: id (not clientId) of client to be deleted :param realm: realm of client to be deleted @@ -667,12 +692,12 @@ def delete_client(self, id, realm="master"): client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) try: - return self._request(client_url, method='DELETE') + return self._request(client_url, method="DELETE") except Exception as e: - self.fail_request(e, msg=f'Could not delete client {id} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not delete client {id} in realm {realm}: {e}") def get_client_roles_by_id(self, cid, realm="master"): - """ Fetch the roles of the a client on the Keycloak server. + """Fetch the roles of the a client on the Keycloak server. :param cid: ID of the client from which to obtain the rolemappings. :param realm: Realm from which to obtain the rolemappings. 
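get_client_id above reduces to a small guard: resolve the human-facing clientId to the server-side UUID, tolerating a missing client rather than raising. The same guard in isolation (sample data hypothetical):

def extract_id(result):
    # A successful lookup returns a dict with an "id" key; a missing client
    # arrives as None, and anything else is treated as "not found" too.
    if isinstance(result, dict) and "id" in result:
        return result["id"]
    return None

assert extract_id({"id": "4711", "clientId": "my-app"}) == "4711"
assert extract_id(None) is None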
@@ -685,7 +710,7 @@ def get_client_roles_by_id(self, cid, realm="master"): self.fail_request(e, msg=f"Could not fetch rolemappings for client {cid} in realm {realm}: {e}") def get_client_role_id_by_name(self, cid, name, realm="master"): - """ Get the role ID of a client. + """Get the role ID of a client. :param cid: ID of the client from which to obtain the rolemappings. :param name: Name of the role. @@ -694,12 +719,12 @@ def get_client_role_id_by_name(self, cid, name, realm="master"): """ rolemappings = self.get_client_roles_by_id(cid, realm=realm) for role in rolemappings: - if name == role['name']: - return role['id'] + if name == role["name"]: + return role["id"] return None - def get_client_group_rolemapping_by_id(self, gid, cid, rid, realm='master'): - """ Obtain client representation by id + def get_client_group_rolemapping_by_id(self, gid, cid, rid, realm="master"): + """Obtain client representation by id :param gid: ID of the group from which to obtain the rolemappings. :param cid: ID of the client from which to obtain the rolemappings. @@ -711,42 +736,52 @@ def get_client_group_rolemapping_by_id(self, gid, cid, rid, realm='master'): try: rolemappings = self._request_and_deserialize(rolemappings_url, method="GET") for role in rolemappings: - if rid == role['id']: + if rid == role["id"]: return role except Exception as e: - self.fail_request(e, msg=f"Could not fetch rolemappings for client {cid} in group {gid}, realm {realm}: {e}") + self.fail_request( + e, msg=f"Could not fetch rolemappings for client {cid} in group {gid}, realm {realm}: {e}" + ) return None def get_client_group_available_rolemappings(self, gid, cid, realm="master"): - """ Fetch the available role of a client in a specified group on the Keycloak server. + """Fetch the available role of a client in a specified group on the Keycloak server. :param gid: ID of the group from which to obtain the rolemappings. :param cid: ID of the client from which to obtain the rolemappings. :param realm: Realm from which to obtain the rolemappings. :return: The rollemappings of specified group and client of the realm (default "master"). """ - available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid) + available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE.format( + url=self.baseurl, realm=realm, id=gid, client=cid + ) try: return self._request_and_deserialize(available_rolemappings_url, method="GET") except Exception as e: - self.fail_request(e, msg=f"Could not fetch available rolemappings for client {cid} in group {gid}, realm {realm}: {e}") + self.fail_request( + e, msg=f"Could not fetch available rolemappings for client {cid} in group {gid}, realm {realm}: {e}" + ) def get_client_group_composite_rolemappings(self, gid, cid, realm="master"): - """ Fetch the composite role of a client in a specified group on the Keycloak server. + """Fetch the composite role of a client in a specified group on the Keycloak server. :param gid: ID of the group from which to obtain the rolemappings. :param cid: ID of the client from which to obtain the rolemappings. :param realm: Realm from which to obtain the rolemappings. :return: The rollemappings of specified group and client of the realm (default "master"). 
""" - composite_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid) + composite_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE.format( + url=self.baseurl, realm=realm, id=gid, client=cid + ) try: return self._request_and_deserialize(composite_rolemappings_url, method="GET") except Exception as e: - self.fail_request(e, msg=f"Could not fetch available rolemappings for client {cid} in group {gid}, realm {realm}: {e}") + self.fail_request( + e, msg=f"Could not fetch available rolemappings for client {cid} in group {gid}, realm {realm}: {e}" + ) def get_role_by_id(self, rid, realm="master"): - """ Fetch a role by its id on the Keycloak server. + """Fetch a role by its id on the Keycloak server. :param rid: ID of the role. :param realm: Realm from which to obtain the rolemappings. @@ -759,7 +794,7 @@ def get_role_by_id(self, rid, realm="master"): self.fail_request(e, msg=f"Could not fetch role for id {rid} in realm {realm}: {e}") def get_client_roles_by_id_composite_rolemappings(self, rid, cid, realm="master"): - """ Fetch a role by its id on the Keycloak server. + """Fetch a role by its id on the Keycloak server. :param rid: ID of the composite role. :param cid: ID of the client from which to obtain the rolemappings. @@ -773,7 +808,7 @@ def get_client_roles_by_id_composite_rolemappings(self, rid, cid, realm="master" self.fail_request(e, msg=f"Could not fetch role for id {rid} and cid {cid} in realm {realm}: {e}") def add_client_roles_by_id_composite_rolemapping(self, rid, roles_rep, realm="master"): - """ Assign roles to composite role + """Assign roles to composite role :param rid: ID of the composite role. :param roles_rep: Representation of the roles to assign. @@ -787,7 +822,7 @@ def add_client_roles_by_id_composite_rolemapping(self, rid, roles_rep, realm="ma self.fail_request(e, msg=f"Could not assign roles to composite role {rid} and realm {realm}: {e}") def add_group_realm_rolemapping(self, gid, role_rep, realm="master"): - """ Add the specified realm role to specified group on the Keycloak server. + """Add the specified realm role to specified group on the Keycloak server. :param gid: ID of the group to add the role mapping. :param role_rep: Representation of the role to assign. @@ -801,7 +836,7 @@ def add_group_realm_rolemapping(self, gid, role_rep, realm="master"): self.fail_request(e, msg=f"Could add realm role mappings for group {gid}, realm {realm}: {e}") def delete_group_realm_rolemapping(self, gid, role_rep, realm="master"): - """ Delete the specified realm role from the specified group on the Keycloak server. + """Delete the specified realm role from the specified group on the Keycloak server. :param gid: ID of the group from which to obtain the rolemappings. :param role_rep: Representation of the role to assign. @@ -815,7 +850,7 @@ def delete_group_realm_rolemapping(self, gid, role_rep, realm="master"): self.fail_request(e, msg=f"Could not delete realm role mappings for group {gid}, realm {realm}: {e}") def add_group_rolemapping(self, gid, cid, role_rep, realm="master"): - """ Fetch the composite role of a client in a specified group on the Keycloak server. + """Fetch the composite role of a client in a specified group on the Keycloak server. :param gid: ID of the group from which to obtain the rolemappings. :param cid: ID of the client from which to obtain the rolemappings. 
@@ -823,14 +858,18 @@ def add_group_rolemapping(self, gid, cid, role_rep, realm="master"): :param realm: Realm from which to obtain the rolemappings. :return: None. """ - available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) + available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format( + url=self.baseurl, realm=realm, id=gid, client=cid + ) try: self._request(available_rolemappings_url, method="POST", data=json.dumps(role_rep)) except Exception as e: - self.fail_request(e, msg=f"Could not fetch available rolemappings for client {cid} in group {gid}, realm {realm}: {e}") + self.fail_request( + e, msg=f"Could not fetch available rolemappings for client {cid} in group {gid}, realm {realm}: {e}" + ) def delete_group_rolemapping(self, gid, cid, role_rep, realm="master"): - """ Delete the rolemapping of a client in a specified group on the Keycloak server. + """Delete the rolemapping of a client in a specified group on the Keycloak server. :param gid: ID of the group from which to obtain the rolemappings. :param cid: ID of the client from which to obtain the rolemappings. @@ -838,14 +877,18 @@ def delete_group_rolemapping(self, gid, cid, role_rep, realm="master"): :param realm: Realm from which to obtain the rolemappings. :return: None. """ - available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) + available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format( + url=self.baseurl, realm=realm, id=gid, client=cid + ) try: self._request(available_rolemappings_url, method="DELETE", data=json.dumps(role_rep)) except Exception as e: - self.fail_request(e, msg=f"Could not delete available rolemappings for client {cid} in group {gid}, realm {realm}: {e}") + self.fail_request( + e, msg=f"Could not delete available rolemappings for client {cid} in group {gid}, realm {realm}: {e}" + ) - def get_client_user_rolemapping_by_id(self, uid, cid, rid, realm='master'): - """ Obtain client representation by id + def get_client_user_rolemapping_by_id(self, uid, cid, rid, realm="master"): + """Obtain client representation by id :param uid: ID of the user from which to obtain the rolemappings. :param cid: ID of the client from which to obtain the rolemappings. @@ -857,42 +900,50 @@ def get_client_user_rolemapping_by_id(self, uid, cid, rid, realm='master'): try: rolemappings = self._request_and_deserialize(rolemappings_url, method="GET") for role in rolemappings: - if rid == role['id']: + if rid == role["id"]: return role except Exception as e: - self.fail_request(e, msg=f"Could not fetch rolemappings for client {cid} and user {uid}, realm {realm}: {e}") + self.fail_request( + e, msg=f"Could not fetch rolemappings for client {cid} and user {uid}, realm {realm}: {e}" + ) return None def get_client_user_available_rolemappings(self, uid, cid, realm="master"): - """ Fetch the available role of a client for a specified user on the Keycloak server. + """Fetch the available role of a client for a specified user on the Keycloak server. :param uid: ID of the user from which to obtain the rolemappings. :param cid: ID of the client from which to obtain the rolemappings. :param realm: Realm from which to obtain the rolemappings. :return: The effective rollemappings of specified client and user of the realm (default "master"). 
""" - available_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid, client=cid) + available_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE.format( + url=self.baseurl, realm=realm, id=uid, client=cid + ) try: return self._request_and_deserialize(available_rolemappings_url, method="GET") except Exception as e: - self.fail_request(e, msg=f"Could not fetch effective rolemappings for client {cid} and user {uid}, realm {realm}: {e}") + self.fail_request( + e, msg=f"Could not fetch effective rolemappings for client {cid} and user {uid}, realm {realm}: {e}" + ) def get_client_user_composite_rolemappings(self, uid, cid, realm="master"): - """ Fetch the composite role of a client for a specified user on the Keycloak server. + """Fetch the composite role of a client for a specified user on the Keycloak server. :param uid: ID of the user from which to obtain the rolemappings. :param cid: ID of the client from which to obtain the rolemappings. :param realm: Realm from which to obtain the rolemappings. :return: The rollemappings of specified group and client of the realm (default "master"). """ - composite_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid, client=cid) + composite_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE.format( + url=self.baseurl, realm=realm, id=uid, client=cid + ) try: return self._request_and_deserialize(composite_rolemappings_url, method="GET") except Exception as e: self.fail_request(e, msg=f"Could not fetch available rolemappings for user {uid} of realm {realm}: {e}") - def get_realm_user_rolemapping_by_id(self, uid, rid, realm='master'): - """ Obtain role representation by id + def get_realm_user_rolemapping_by_id(self, uid, rid, realm="master"): + """Obtain role representation by id :param uid: ID of the user from which to obtain the rolemappings. :param rid: ID of the role. @@ -903,14 +954,14 @@ def get_realm_user_rolemapping_by_id(self, uid, rid, realm='master'): try: rolemappings = self._request_and_deserialize(rolemappings_url, method="GET") for role in rolemappings: - if rid == role['id']: + if rid == role["id"]: return role except Exception as e: self.fail_request(e, msg=f"Could not fetch rolemappings for user {uid}, realm {realm}: {e}") return None def get_realm_user_available_rolemappings(self, uid, realm="master"): - """ Fetch the available role of a realm for a specified user on the Keycloak server. + """Fetch the available role of a realm for a specified user on the Keycloak server. :param uid: ID of the user from which to obtain the rolemappings. :param realm: Realm from which to obtain the rolemappings. @@ -923,7 +974,7 @@ def get_realm_user_available_rolemappings(self, uid, realm="master"): self.fail_request(e, msg=f"Could not fetch available rolemappings for user {uid} of realm {realm}: {e}") def get_realm_user_composite_rolemappings(self, uid, realm="master"): - """ Fetch the composite role of a realm for a specified user on the Keycloak server. + """Fetch the composite role of a realm for a specified user on the Keycloak server. :param uid: ID of the user from which to obtain the rolemappings. :param realm: Realm from which to obtain the rolemappings. 
@@ -936,30 +987,32 @@ def get_realm_user_composite_rolemappings(self, uid, realm="master"): self.fail_request(e, msg=f"Could not fetch effective rolemappings for user {uid}, realm {realm}: {e}") def get_user_by_username(self, username, realm="master"): - """ Fetch a keycloak user within a realm based on its username. + """Fetch a keycloak user within a realm based on its username. If the user does not exist, None is returned. :param username: Username of the user to fetch. :param realm: Realm in which the user resides; default 'master' """ users_url = URL_USERS.format(url=self.baseurl, realm=realm) - users_url += f'?username={username}&exact=true' + users_url += f"?username={username}&exact=true" try: userrep = None - users = self._request_and_deserialize(users_url, method='GET') + users = self._request_and_deserialize(users_url, method="GET") for user in users: - if user['username'] == username: + if user["username"] == username: userrep = user break return userrep except ValueError as e: - self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain the user for realm {realm} and username {username}: {e}') + self.module.fail_json( + msg=f"API returned incorrect JSON when trying to obtain the user for realm {realm} and username {username}: {e}" + ) except Exception as e: - self.fail_request(e, msg=f'Could not obtain the user for realm {realm} and username {username}: {e}') + self.fail_request(e, msg=f"Could not obtain the user for realm {realm} and username {username}: {e}") def get_service_account_user_by_client_id(self, client_id, realm="master"): - """ Fetch a keycloak service account user within a realm based on its client_id. + """Fetch a keycloak service account user within a realm based on its client_id. If the user does not exist, None is returned. :param client_id: clientId of the service account user to fetch. @@ -969,16 +1022,18 @@ def get_service_account_user_by_client_id(self, client_id, realm="master"): service_account_user_url = URL_CLIENT_SERVICE_ACCOUNT_USER.format(url=self.baseurl, realm=realm, id=cid) try: - return self._request_and_deserialize(service_account_user_url, method='GET') + return self._request_and_deserialize(service_account_user_url, method="GET") except ValueError as e: self.module.fail_json( - msg=f'API returned incorrect JSON when trying to obtain the service-account-user for realm {realm} and client_id {client_id}: {e}' + msg=f"API returned incorrect JSON when trying to obtain the service-account-user for realm {realm} and client_id {client_id}: {e}" ) except Exception as e: - self.fail_request(e, msg=f'Could not obtain the service-account-user for realm {realm} and client_id {client_id}: {e}') + self.fail_request( + e, msg=f"Could not obtain the service-account-user for realm {realm} and client_id {client_id}: {e}" + ) def add_user_rolemapping(self, uid, cid, role_rep, realm="master"): - """ Assign a realm or client role to a specified user on the Keycloak server. + """Assign a realm or client role to a specified user on the Keycloak server. :param uid: ID of the user roles are assigned to. :param cid: ID of the client from which to obtain the rolemappings. 
            If empty, roles are from the realm
@@ -991,16 +1046,24 @@ def add_user_rolemapping(self, uid, cid, role_rep, realm="master"):
            try:
                self._request(user_realm_rolemappings_url, method="POST", data=json.dumps(role_rep))
            except Exception as e:
-                self.fail_request(e, msg=f"Could not map roles to userId {uid} for realm {realm} and roles {json.dumps(role_rep)}: {e}")
+                self.fail_request(
+                    e,
+                    msg=f"Could not map roles to userId {uid} for realm {realm} and roles {json.dumps(role_rep)}: {e}",
+                )
         else:
-            user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
+            user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(
+                url=self.baseurl, realm=realm, id=uid, client=cid
+            )
            try:
                self._request(user_client_rolemappings_url, method="POST", data=json.dumps(role_rep))
            except Exception as e:
-                self.fail_request(e, msg=f"Could not map roles to userId {cid} for client {uid}, realm {realm} and roles {json.dumps(role_rep)}: {e}")
+                self.fail_request(
+                    e,
+                    msg=f"Could not map roles to userId {uid} for client {cid}, realm {realm} and roles {json.dumps(role_rep)}: {e}",
+                )

     def delete_user_rolemapping(self, uid, cid, role_rep, realm="master"):
-        """ Delete the rolemapping of a client in a specified user on the Keycloak server.
+        """Delete the rolemapping of a client in a specified user on the Keycloak server.

         :param uid: ID of the user from which to remove the rolemappings.
         :param cid: ID of the client from which to remove the rolemappings.
@@ -1013,16 +1076,23 @@ def delete_user_rolemapping(self, uid, cid, role_rep, realm="master"):
            try:
                self._request(user_realm_rolemappings_url, method="DELETE", data=json.dumps(role_rep))
            except Exception as e:
-                self.fail_request(e, msg=f"Could not remove roles {json.dumps(role_rep)} from userId {uid}, realm {realm}: {e}")
+                self.fail_request(
+                    e, msg=f"Could not remove roles {json.dumps(role_rep)} from userId {uid}, realm {realm}: {e}"
+                )
         else:
-            user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
+            user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(
+                url=self.baseurl, realm=realm, id=uid, client=cid
+            )
            try:
                self._request(user_client_rolemappings_url, method="DELETE", data=json.dumps(role_rep))
            except Exception as e:
-                self.fail_request(e, msg=f"Could not remove roles {json.dumps(role_rep)} for client {cid} from userId {uid}, realm {realm}: {e}")
+                self.fail_request(
+                    e,
+                    msg=f"Could not remove roles {json.dumps(role_rep)} for client {cid} from userId {uid}, realm {realm}: {e}",
+                )

-    def get_client_templates(self, realm='master'):
-        """ Obtains client template representations for client templates in a realm
+    def get_client_templates(self, realm="master"):
+        """Obtains client template representations for client templates in a realm

         :param realm: realm to be queried
         :return: list of dicts of client representations
@@ -1030,14 +1100,16 @@
         url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)

         try:
-            return self._request_and_deserialize(url, method='GET')
+            return self._request_and_deserialize(url, method="GET")
         except ValueError as e:
-            self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain list of client templates for realm {realm}: {e}')
+            self.module.fail_json(
+                msg=f"API returned incorrect JSON when trying to obtain list of client templates for realm {realm}: {e}"
+            )
         except Exception as e:
-            self.fail_request(e, msg=f'Could not obtain list of client templates for realm {realm}: {e}')
+            self.fail_request(e, msg=f"Could not obtain list of client templates for realm {realm}: {e}")

-    def get_client_template_by_id(self, id, realm='master'):
-        """ Obtain client template representation by id
+    def get_client_template_by_id(self, id, realm="master"):
+        """Obtain client template representation by id

         :param id: id (not name) of client template to be queried
         :param realm: client template from this realm
@@ -1046,14 +1118,16 @@
         url = URL_CLIENTTEMPLATE.format(url=self.baseurl, id=id, realm=realm)

         try:
-            return self._request_and_deserialize(url, method='GET')
+            return self._request_and_deserialize(url, method="GET")
         except ValueError as e:
-            self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain client templates {id} for realm {realm}: {e}')
+            self.module.fail_json(
+                msg=f"API returned incorrect JSON when trying to obtain client templates {id} for realm {realm}: {e}"
+            )
         except Exception as e:
-            self.fail_request(e, msg=f'Could not obtain client template {id} for realm {realm}: {e}')
+            self.fail_request(e, msg=f"Could not obtain client template {id} for realm {realm}: {e}")

-    def get_client_template_by_name(self, name, realm='master'):
-        """ Obtain client template representation by name
+    def get_client_template_by_name(self, name, realm="master"):
+        """Obtain client template representation by name

         :param name: name of client template to be queried
         :param realm: client template from this realm
@@ -1061,26 +1135,26 @@
         """
         result = self.get_client_templates(realm)
         if isinstance(result, list):
-            result = [x for x in result if x['name'] == name]
+            result = [x for x in result if x["name"] == name]

             if len(result) > 0:
                 return result[0]

         return None

-    def get_client_template_id(self, name, realm='master'):
-        """ Obtain client template id by name
+    def get_client_template_id(self, name, realm="master"):
+        """Obtain client template id by name

         :param name: name of client template to be queried
         :param realm: client template from this realm
         :return: client template id (usually a UUID)
         """
         result = self.get_client_template_by_name(name, realm)
-        if isinstance(result, dict) and 'id' in result:
-            return result['id']
+        if isinstance(result, dict) and "id" in result:
+            return result["id"]
         else:
             return None

     def update_client_template(self, id, clienttrep, realm="master"):
-        """ Update an existing client template
+        """Update an existing client template

         :param id: id (not name) of client template to be updated in Keycloak
         :param clienttrep: corresponding (partial/full) client template representation with updates
         :param realm: realm the client template is in
@@ -1089,12 +1163,12 @@
         url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)

         try:
-            return self._request(url, method='PUT', data=json.dumps(clienttrep))
+            return self._request(url, method="PUT", data=json.dumps(clienttrep))
         except Exception as e:
-            self.fail_request(e, msg=f'Could not update client template {id} in realm {realm}: {e}')
+            self.fail_request(e, msg=f"Could not update client template {id} in realm {realm}: {e}")

     def create_client_template(self, clienttrep, realm="master"):
-        """ Create a client in keycloak
+        """Create a client in keycloak

         :param clienttrep: Client template representation of client template to be created.
            Must at least contain field name
         :param realm: realm for client template to be created in
         :return: HTTPResponse object on success
@@ -1102,12 +1176,12 @@
         url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)

         try:
-            return self._request(url, method='POST', data=json.dumps(clienttrep))
+            return self._request(url, method="POST", data=json.dumps(clienttrep))
         except Exception as e:
             self.fail_request(e, msg=f"Could not create client template {clienttrep['clientId']} in realm {realm}: {e}")

     def delete_client_template(self, id, realm="master"):
-        """ Delete a client template from Keycloak
+        """Delete a client template from Keycloak

         :param id: id (not name) of client to be deleted
         :param realm: realm of client template to be deleted
@@ -1116,12 +1190,12 @@
         url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)

         try:
-            return self._request(url, method='DELETE')
+            return self._request(url, method="DELETE")
         except Exception as e:
-            self.fail_request(e, msg=f'Could not delete client template {id} in realm {realm}: {e}')
+            self.fail_request(e, msg=f"Could not delete client template {id} in realm {realm}: {e}")

     def get_clientscopes(self, realm="master"):
-        """ Fetch the name and ID of all clientscopes on the Keycloak server.
+        """Fetch the name and ID of all clientscopes on the Keycloak server.

         To fetch the full data of the group, make a subsequent call to
         get_clientscope_by_clientscopeid, passing in the ID of the group you wish to return.
@@ -1136,7 +1210,7 @@
             self.fail_request(e, msg=f"Could not fetch list of clientscopes in realm {realm}: {e}")

     def get_clientscope_by_clientscopeid(self, cid, realm="master"):
-        """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID.
+        """Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID.

         If the clientscope does not exist, None is returned.
@@ -1157,7 +1231,7 @@
             self.module.fail_json(msg=f"Could not clientscope group {cid} in realm {realm}: {e}")

     def get_clientscope_by_name(self, name, realm="master"):
-        """ Fetch a keycloak clientscope within a realm based on its name.
+        """Fetch a keycloak clientscope within a realm based on its name.

         The Keycloak API does not allow filtering of the clientscopes resource by name.
         As a result, this method first retrieves the entire list of clientscopes - name and ID -
@@ -1171,8 +1245,8 @@
             all_clientscopes = self.get_clientscopes(realm=realm)

             for clientscope in all_clientscopes:
-                if clientscope['name'] == name:
-                    return self.get_clientscope_by_clientscopeid(clientscope['id'], realm=realm)
+                if clientscope["name"] == name:
+                    return self.get_clientscope_by_clientscopeid(clientscope["id"], realm=realm)

             return None

@@ -1180,33 +1254,33 @@
             self.module.fail_json(msg=f"Could not fetch clientscope {name} in realm {realm}: {e}")

     def create_clientscope(self, clientscoperep, realm="master"):
-        """ Create a Keycloak clientscope.
+        """Create a Keycloak clientscope.

         :param clientscoperep: a ClientScopeRepresentation of the clientscope to be created. Must contain at minimum the field name.
         :return: HTTPResponse object on success
         """
         clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm)
         try:
-            return self._request(clientscopes_url, method='POST', data=json.dumps(clientscoperep))
+            return self._request(clientscopes_url, method="POST", data=json.dumps(clientscoperep))
         except Exception as e:
             self.fail_request(e, msg=f"Could not create clientscope {clientscoperep['name']} in realm {realm}: {e}")

     def update_clientscope(self, clientscoperep, realm="master"):
-        """ Update an existing clientscope.
+        """Update an existing clientscope.

         :param grouprep: A GroupRepresentation of the updated group.
         :return HTTPResponse object on success
         """
-        clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=clientscoperep['id'])
+        clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=clientscoperep["id"])

         try:
-            return self._request(clientscope_url, method='PUT', data=json.dumps(clientscoperep))
+            return self._request(clientscope_url, method="PUT", data=json.dumps(clientscoperep))
         except Exception as e:
             self.fail_request(e, msg=f"Could not update clientscope {clientscoperep['name']} in realm {realm}: {e}")

     def delete_clientscope(self, name=None, cid=None, realm="master"):
-        """ Delete a clientscope. One of name or cid must be provided.
+        """Delete a clientscope. One of name or cid must be provided.

         Providing the clientscope ID is preferred as it avoids a second lookup to
         convert a clientscope name to an ID.
@@ -1225,8 +1299,8 @@
         # less lookup.
         if cid is None and name is not None:
             for clientscope in self.get_clientscopes(realm=realm):
-                if clientscope['name'] == name:
-                    cid = clientscope['id']
+                if clientscope["name"] == name:
+                    cid = clientscope["id"]
                     break

         # if the group doesn't exist - no problem, nothing to delete.
@@ -1236,13 +1310,13 @@
         # should have a good cid by here.
         clientscope_url = URL_CLIENTSCOPE.format(realm=realm, id=cid, url=self.baseurl)
         try:
-            return self._request(clientscope_url, method='DELETE')
+            return self._request(clientscope_url, method="DELETE")
         except Exception as e:
             self.fail_request(e, msg=f"Unable to delete clientscope {cid}: {e}")

     def get_clientscope_protocolmappers(self, cid, realm="master"):
-        """ Fetch the name and ID of all clientscopes on the Keycloak server.
+        """Fetch the name and ID of all protocolmappers of a clientscope on the Keycloak server.

         To fetch the full data of the group, make a subsequent call to
         get_clientscope_by_clientscopeid, passing in the ID of the group you wish to return.
@@ -1258,7 +1332,7 @@
             self.fail_request(e, msg=f"Could not fetch list of protocolmappers in realm {realm}: {e}")

     def get_clientscope_protocolmapper_by_protocolmapperid(self, pid, cid, realm="master"):
-        """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID.
+        """Fetch a keycloak protocolmapper of a clientscope from the provided realm using its unique ID.

         If the clientscope does not exist, None is returned.
@@ -1281,7 +1355,7 @@ def get_clientscope_protocolmapper_by_protocolmapperid(self, pid, cid, realm="ma
             self.module.fail_json(msg=f"Could not fetch protocolmapper {cid} in realm {realm}: {e}")

     def get_clientscope_protocolmapper_by_name(self, cid, name, realm="master"):
-        """ Fetch a keycloak clientscope within a realm based on its name.
+        """Fetch a keycloak protocolmapper of a clientscope within a realm based on its name.
         The Keycloak API does not allow filtering of the clientscopes resource by name.
         As a result, this method first retrieves the entire list of clientscopes - name and ID -
@@ -1296,8 +1370,10 @@
             all_protocolmappers = self.get_clientscope_protocolmappers(cid, realm=realm)

             for protocolmapper in all_protocolmappers:
-                if protocolmapper['name'] == name:
-                    return self.get_clientscope_protocolmapper_by_protocolmapperid(protocolmapper['id'], cid, realm=realm)
+                if protocolmapper["name"] == name:
+                    return self.get_clientscope_protocolmapper_by_protocolmapperid(
+                        protocolmapper["id"], cid, realm=realm
+                    )

             return None

@@ -1305,7 +1381,7 @@
             self.module.fail_json(msg=f"Could not fetch protocolmapper {name} in realm {realm}: {e}")

     def create_clientscope_protocolmapper(self, cid, mapper_rep, realm="master"):
-        """ Create a Keycloak clientscope protocolmapper.
+        """Create a Keycloak clientscope protocolmapper.

         :param cid: Id of the clientscope.
         :param mapper_rep: a ProtocolMapperRepresentation of the protocolmapper to be created. Must contain at minimum the field name.
@@ -1313,24 +1389,28 @@
         """
         protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(url=self.baseurl, id=cid, realm=realm)
         try:
-            return self._request(protocolmappers_url, method='POST', data=json.dumps(mapper_rep))
+            return self._request(protocolmappers_url, method="POST", data=json.dumps(mapper_rep))
         except Exception as e:
             self.fail_request(e, msg=f"Could not create protocolmapper {mapper_rep['name']} in realm {realm}: {e}")

     def update_clientscope_protocolmappers(self, cid, mapper_rep, realm="master"):
-        """ Update an existing clientscope.
+        """Update an existing clientscope protocolmapper.

         :param cid: Id of the clientscope.
         :param mapper_rep: A ProtocolMapperRepresentation of the updated protocolmapper.
         :return HTTPResponse object on success
         """
-        protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=mapper_rep['id'])
+        protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(
+            url=self.baseurl, realm=realm, id=cid, mapper_id=mapper_rep["id"]
+        )

         try:
-            return self._request(protocolmapper_url, method='PUT', data=json.dumps(mapper_rep))
+            return self._request(protocolmapper_url, method="PUT", data=json.dumps(mapper_rep))
         except Exception as e:
-            self.fail_request(e, msg=f'Could not update protocolmappers for clientscope {mapper_rep} in realm {realm}: {e}')
+            self.fail_request(
+                e, msg=f"Could not update protocolmappers for clientscope {mapper_rep} in realm {realm}: {e}"
+            )

     def get_default_clientscopes(self, realm, client_id=None):
         """Fetch the name and ID of all clientscopes on the Keycloak server.
@@ -1343,7 +1423,7 @@
         :return The default clientscopes of this realm or client
         """
         url = URL_DEFAULT_CLIENTSCOPES if client_id is None else URL_CLIENT_DEFAULT_CLIENTSCOPES
-        return self._get_clientscopes_of_type(realm, url, 'default', client_id)
+        return self._get_clientscopes_of_type(realm, url, "default", client_id)

     def get_optional_clientscopes(self, realm, client_id=None):
         """Fetch the name and ID of all clientscopes on the Keycloak server.
@@ -1356,7 +1436,7 @@
         :return The optional clientscopes of this realm or client
         """
         url = URL_OPTIONAL_CLIENTSCOPES if client_id is None else URL_CLIENT_OPTIONAL_CLIENTSCOPES
-        return self._get_clientscopes_of_type(realm, url, 'optional', client_id)
+        return self._get_clientscopes_of_type(realm, url, "optional", client_id)

     def _get_clientscopes_of_type(self, realm, url_template, scope_type, client_id=None):
         """Fetch the name and ID of all clientscopes on the Keycloak server.
@@ -1382,7 +1462,10 @@
         try:
             return self._request_and_deserialize(clientscopes_url, method="GET")
         except Exception as e:
-            self.fail_request(e, msg=f"Could not fetch list of {scope_type} clientscopes in client {client_id}: {clientscopes_url}")
+            self.fail_request(
+                e,
+                msg=f"Could not fetch list of {scope_type} clientscopes in client {client_id}: {clientscopes_url}",
+            )

     def _decide_url_type_clientscope(self, client_id=None, scope_type="default"):
         """Decides which url to use.
@@ -1407,7 +1490,7 @@
         :param realm: Realm in which the clientscope resides.
         :param client_id: The client in which the clientscope resides.
         """
-        self._action_type_clientscope(id, client_id, "default", realm, 'add')
+        self._action_type_clientscope(id, client_id, "default", realm, "add")

     def add_optional_clientscope(self, id, realm="master", client_id=None):
         """Add a client scope as optional either on realm or client level.
@@ -1416,7 +1499,7 @@
         :param realm: Realm in which the clientscope resides.
         :param client_id: The client in which the clientscope resides.
         """
-        self._action_type_clientscope(id, client_id, "optional", realm, 'add')
+        self._action_type_clientscope(id, client_id, "optional", realm, "add")

     def delete_default_clientscope(self, id, realm="master", client_id=None):
         """Remove a client scope as default either on realm or client level.
@@ -1425,7 +1508,7 @@
         :param realm: Realm in which the clientscope resides.
         :param client_id: The client in which the clientscope resides.
         """
-        self._action_type_clientscope(id, client_id, "default", realm, 'delete')
+        self._action_type_clientscope(id, client_id, "default", realm, "delete")

     def delete_optional_clientscope(self, id, realm="master", client_id=None):
         """Remove a client scope as optional either on realm or client level.
@@ -1434,10 +1517,10 @@
         :param realm: Realm in which the clientscope resides.
         :param client_id: The client in which the clientscope resides.
         """
-        self._action_type_clientscope(id, client_id, "optional", realm, 'delete')
+        self._action_type_clientscope(id, client_id, "optional", realm, "delete")

-    def _action_type_clientscope(self, id=None, client_id=None, scope_type="default", realm="master", action='add'):
-        """ Delete or add a clientscope of type.
+    def _action_type_clientscope(self, id=None, client_id=None, scope_type="default", realm="master", action="add"):
+        """Delete or add a clientscope of type.

         :param name: The name of the clientscope. A lookup will be performed to retrieve the clientscope ID.
         :param client_id: The ID of the clientscope (preferred to name).
         :param scope_type 'default' or 'optional'
@@ -1445,17 +1528,19 @@
         """
         cid = None if client_id is None else self.get_client_id(client_id=client_id, realm=realm)
         # should have a good cid by here.
-        clientscope_type_url = self._decide_url_type_clientscope(client_id, scope_type).format(realm=realm, id=id, cid=cid, url=self.baseurl)
+        clientscope_type_url = self._decide_url_type_clientscope(client_id, scope_type).format(
+            realm=realm, id=id, cid=cid, url=self.baseurl
+        )
         try:
-            method = 'PUT' if action == "add" else 'DELETE'
+            method = "PUT" if action == "add" else "DELETE"
             return self._request(clientscope_type_url, method=method)
         except Exception as e:
-            place = 'realm' if client_id is None else f"client {client_id}"
+            place = "realm" if client_id is None else f"client {client_id}"
             self.fail_request(e, msg=f"Unable to {action} {scope_type} clientscope {id} @ {place} : {e}")

     def create_clientsecret(self, id, realm="master"):
-        """ Generate a new client secret by id
+        """Generate a new client secret by id

         :param id: id (not clientId) of client to be queried
         :param realm: client from this realm
@@ -1464,18 +1549,18 @@
         clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id)

         try:
-            return self._request_and_deserialize(clientsecret_url, method='POST')
+            return self._request_and_deserialize(clientsecret_url, method="POST")
         except HTTPError as e:
             if e.code == 404:
                 return None
             else:
-                self.fail_request(e, msg=f'Could not obtain clientsecret of client {id} for realm {realm}: {e}')
+                self.fail_request(e, msg=f"Could not obtain clientsecret of client {id} for realm {realm}: {e}")
         except Exception as e:
-            self.module.fail_json(msg=f'Could not obtain clientsecret of client {id} for realm {realm}: {e}')
+            self.module.fail_json(msg=f"Could not obtain clientsecret of client {id} for realm {realm}: {e}")

     def get_clientsecret(self, id, realm="master"):
-        """ Obtain client secret by id
+        """Obtain client secret by id

         :param id: id (not clientId) of client to be queried
         :param realm: client from this realm
@@ -1484,18 +1569,18 @@
         clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id)

         try:
-            return self._request_and_deserialize(clientsecret_url, method='GET')
+            return self._request_and_deserialize(clientsecret_url, method="GET")
         except HTTPError as e:
             if e.code == 404:
                 return None
             else:
-                self.fail_request(e, msg=f'Could not obtain clientsecret of client {id} for realm {realm}: {e}')
+                self.fail_request(e, msg=f"Could not obtain clientsecret of client {id} for realm {realm}: {e}")
         except Exception as e:
-            self.module.fail_json(msg=f'Could not obtain clientsecret of client {id} for realm {realm}: {e}')
+            self.module.fail_json(msg=f"Could not obtain clientsecret of client {id} for realm {realm}: {e}")

     def get_groups(self, realm="master"):
-        """ Fetch the name and ID of all groups on the Keycloak server.
+        """Fetch the name and ID of all groups on the Keycloak server.

         To fetch the full data of the group, make a subsequent call to
         get_group_by_groupid, passing in the ID of the group you wish to return.
@@ -1509,7 +1594,7 @@
             self.fail_request(e, msg=f"Could not fetch list of groups in realm {realm}: {e}")

     def get_group_by_groupid(self, gid, realm="master"):
-        """ Fetch a keycloak group from the provided realm using the group's unique ID.
+ """Fetch a keycloak group from the provided realm using the group's unique ID. If the group does not exist, None is returned. @@ -1529,22 +1614,22 @@ def get_group_by_groupid(self, gid, realm="master"): self.module.fail_json(msg=f"Could not fetch group {gid} in realm {realm}: {e}") def get_subgroups(self, parent, realm="master"): - if 'subGroupCount' in parent: + if "subGroupCount" in parent: # Since version 23, when GETting a group Keycloak does not # return subGroups but only a subGroupCount. # Children must be fetched in a second request. - if parent['subGroupCount'] == 0: + if parent["subGroupCount"] == 0: group_children = [] else: group_children_url = f"{URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent['id'])}?max={parent['subGroupCount']}" group_children = self._request_and_deserialize(group_children_url, method="GET") subgroups = group_children else: - subgroups = parent['subGroups'] + subgroups = parent["subGroups"] return subgroups def get_group_by_name(self, name, realm="master", parents=None): - """ Fetch a keycloak group within a realm based on its name. + """Fetch a keycloak group within a realm based on its name. The Keycloak API does not allow filtering of the Groups resource by name. As a result, this method first retrieves the entire list of groups - name and ID - @@ -1567,8 +1652,8 @@ def get_group_by_name(self, name, realm="master", parents=None): all_groups = self.get_groups(realm=realm) for group in all_groups: - if group['name'] == name: - return self.get_group_by_groupid(group['id'], realm=realm) + if group["name"] == name: + return self.get_group_by_groupid(group["id"], realm=realm) return None @@ -1576,17 +1661,17 @@ def get_group_by_name(self, name, realm="master", parents=None): self.module.fail_json(msg=f"Could not fetch group {name} in realm {realm}: {e}") def _get_normed_group_parent(self, parent): - """ Converts parent dict information into a more easy to use form. + """Converts parent dict information into a more easy to use form. :param parent: parent describing dict """ - if parent['id']: - return (parent['id'], True) + if parent["id"]: + return (parent["id"], True) - return (parent['name'], False) + return (parent["name"], False) def get_subgroup_by_chain(self, name_chain, realm="master"): - """ Access a subgroup API object by walking down a given name/id chain. + """Access a subgroup API object by walking down a given name/id chain. Groups can be given either as by name or by ID, the first element must either be a toplvl group or given as ID, all parents must exist. @@ -1628,7 +1713,7 @@ def get_subgroup_by_chain(self, name_chain, realm="master"): return tmp def get_subgroup_direct_parent(self, parents, realm="master", children_to_resolve=None): - """ Get keycloak direct parent group API object for a given chain of parents. + """Get keycloak direct parent group API object for a given chain of parents. 
         To successfully work the API for subgroups we actually don't need
         to "walk the whole tree" for nested groups but only need to know
@@ -1666,25 +1751,22 @@
                 # current parent is given as name, it must be resolved
                 # later, try next parent (recurse)
                 children_to_resolve.append(cp)
-            return self.get_subgroup_direct_parent(
-                parents[1:],
-                realm=realm, children_to_resolve=children_to_resolve
-            )
+            return self.get_subgroup_direct_parent(parents[1:], realm=realm, children_to_resolve=children_to_resolve)

     def create_group(self, grouprep, realm="master"):
-        """ Create a Keycloak group.
+        """Create a Keycloak group.

         :param grouprep: a GroupRepresentation of the group to be created. Must contain at minimum the field name.
         :return: HTTPResponse object on success
         """
         groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
         try:
-            return self._request(groups_url, method='POST', data=json.dumps(grouprep))
+            return self._request(groups_url, method="POST", data=json.dumps(grouprep))
         except Exception as e:
             self.fail_request(e, msg=f"Could not create group {grouprep['name']} in realm {realm}: {e}")

     def create_subgroup(self, parents, grouprep, realm="master"):
-        """ Create a Keycloak subgroup.
+        """Create a Keycloak subgroup.

         :param parents: list of one or more parent groups
         :param grouprep: a GroupRepresentation of the group to be created. Must contain at minimum the field name.
@@ -1705,25 +1787,28 @@
             parent_id = parent_id["id"]

             url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent_id)
-            return self._request(url, method='POST', data=json.dumps(grouprep))
+            return self._request(url, method="POST", data=json.dumps(grouprep))
         except Exception as e:
-            self.fail_request(e, msg=f"Could not create subgroup {grouprep['name']} for parent group {parent_id} in realm {realm}: {e}")
+            self.fail_request(
+                e,
+                msg=f"Could not create subgroup {grouprep['name']} for parent group {parent_id} in realm {realm}: {e}",
+            )

     def update_group(self, grouprep, realm="master"):
-        """ Update an existing group.
+        """Update an existing group.

         :param grouprep: A GroupRepresentation of the updated group.
         :return HTTPResponse object on success
         """
-        group_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=grouprep['id'])
+        group_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=grouprep["id"])

         try:
-            return self._request(group_url, method='PUT', data=json.dumps(grouprep))
+            return self._request(group_url, method="PUT", data=json.dumps(grouprep))
         except Exception as e:
             self.fail_request(e, msg=f"Could not update group {grouprep['name']} in realm {realm}: {e}")

     def delete_group(self, name=None, groupid=None, realm="master"):
-        """ Delete a group. One of name or groupid must be provided.
+        """Delete a group. One of name or groupid must be provided.

         Providing the group ID is preferred as it avoids a second lookup to
         convert a group name to an ID.
@@ -1742,8 +1827,8 @@
         # less lookup.
         if groupid is None and name is not None:
             for group in self.get_groups(realm=realm):
-                if group['name'] == name:
-                    groupid = group['id']
+                if group["name"] == name:
+                    groupid = group["id"]
                     break

         # if the group doesn't exist - no problem, nothing to delete.
@@ -1753,44 +1838,46 @@
         # should have a good groupid by here.
         group_url = URL_GROUP.format(realm=realm, groupid=groupid, url=self.baseurl)
         try:
-            return self._request(group_url, method='DELETE')
+            return self._request(group_url, method="DELETE")
         except Exception as e:
             self.fail_request(e, msg=f"Unable to delete group {groupid}: {e}")

-    def get_realm_roles(self, realm='master'):
-        """ Obtains role representations for roles in a realm
+    def get_realm_roles(self, realm="master"):
+        """Obtains role representations for roles in a realm

         :param realm: realm to be queried
         :return: list of dicts of role representations
         """
         rolelist_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm)
         try:
-            return self._request_and_deserialize(rolelist_url, method='GET')
+            return self._request_and_deserialize(rolelist_url, method="GET")
         except ValueError as e:
-            self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain list of roles for realm {realm}: {e}')
+            self.module.fail_json(
+                msg=f"API returned incorrect JSON when trying to obtain list of roles for realm {realm}: {e}"
+            )
         except Exception as e:
-            self.fail_request(e, msg=f'Could not obtain list of roles for realm {realm}: {e}')
+            self.fail_request(e, msg=f"Could not obtain list of roles for realm {realm}: {e}")

-    def get_realm_role(self, name, realm='master'):
-        """ Fetch a keycloak role from the provided realm using the role's name.
+    def get_realm_role(self, name, realm="master"):
+        """Fetch a keycloak role from the provided realm using the role's name.

         If the role does not exist, None is returned.

         :param name: Name of the role to fetch.
         :param realm: Realm in which the role resides; default 'master'.
         """
-        role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe=''))
+        role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe=""))
         try:
             return self._request_and_deserialize(role_url, method="GET")
         except HTTPError as e:
             if e.code == 404:
                 return None
             else:
-                self.fail_request(e, msg=f'Could not fetch role {name} in realm {realm}: {e}')
+                self.fail_request(e, msg=f"Could not fetch role {name} in realm {realm}: {e}")
         except Exception as e:
-            self.module.fail_json(msg=f'Could not fetch role {name} in realm {realm}: {e}')
+            self.module.fail_json(msg=f"Could not fetch role {name} in realm {realm}: {e}")

-    def create_realm_role(self, rolerep, realm='master'):
-        """ Create a Keycloak realm role.
+    def create_realm_role(self, rolerep, realm="master"):
+        """Create a Keycloak realm role.

         :param rolerep: a RoleRepresentation of the role to be created. Must contain at minimum the field name.
         :return: HTTPResponse object on success
@@ -1800,74 +1887,86 @@
             if "composites" in rolerep:
                 keycloak_compatible_composites = self.convert_role_composites(rolerep["composites"])
                 rolerep["composites"] = keycloak_compatible_composites
-            return self._request(roles_url, method='POST', data=json.dumps(rolerep))
+            return self._request(roles_url, method="POST", data=json.dumps(rolerep))
         except Exception as e:
             self.fail_request(e, msg=f"Could not create role {rolerep['name']} in realm {realm}: {e}")

-    def update_realm_role(self, rolerep, realm='master'):
-        """ Update an existing realm role.
+    def update_realm_role(self, rolerep, realm="master"):
+        """Update an existing realm role.

         :param rolerep: A RoleRepresentation of the updated role.
         :return HTTPResponse object on success
         """
-        role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep['name']), safe='')
+        role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=""))
         try:
             composites = None
             if "composites" in rolerep:
                 composites = copy.deepcopy(rolerep["composites"])
                 del rolerep["composites"]
-            role_response = self._request(role_url, method='PUT', data=json.dumps(rolerep))
+            role_response = self._request(role_url, method="PUT", data=json.dumps(rolerep))
             if composites is not None:
                 self.update_role_composites(rolerep=rolerep, composites=composites, realm=realm)
             return role_response
         except Exception as e:
             self.fail_request(e, msg=f"Could not update role {rolerep['name']} in realm {realm}: {e}")

-    def get_role_composites(self, rolerep, clientid=None, realm='master'):
-        composite_url = ''
+    def get_role_composites(self, rolerep, clientid=None, realm="master"):
+        composite_url = ""
         try:
             if clientid is not None:
                 client = self.get_client_by_clientid(client_id=clientid, realm=realm)
-                cid = client['id']
-                composite_url = URL_CLIENT_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe=''))
+                cid = client["id"]
+                composite_url = URL_CLIENT_ROLE_COMPOSITES.format(
+                    url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe="")
+                )
             else:
-                composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=''))
+                composite_url = URL_REALM_ROLE_COMPOSITES.format(
+                    url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe="")
+                )
             # Get existing composites
-            return self._request_and_deserialize(composite_url, method='GET')
+            return self._request_and_deserialize(composite_url, method="GET")
         except Exception as e:
             self.fail_request(e, msg=f"Could not get role {rolerep['name']} composites in realm {realm}: {e}")

-    def create_role_composites(self, rolerep, composites, clientid=None, realm='master'):
-        composite_url = ''
+    def create_role_composites(self, rolerep, composites, clientid=None, realm="master"):
+        composite_url = ""
         try:
             if clientid is not None:
                 client = self.get_client_by_clientid(client_id=clientid, realm=realm)
-                cid = client['id']
-                composite_url = URL_CLIENT_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe=''))
+                cid = client["id"]
+                composite_url = URL_CLIENT_ROLE_COMPOSITES.format(
+                    url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe="")
+                )
             else:
-                composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=''))
+                composite_url = URL_REALM_ROLE_COMPOSITES.format(
+                    url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe="")
+                )
             # Get existing composites
             # create new composites
-            return self._request(composite_url, method='POST', data=json.dumps(composites))
+            return self._request(composite_url, method="POST", data=json.dumps(composites))
         except Exception as e:
             self.fail_request(e, msg=f"Could not create role {rolerep['name']} composites in realm {realm}: {e}")

-    def delete_role_composites(self, rolerep, composites, clientid=None, realm='master'):
-        composite_url = ''
+    def delete_role_composites(self, rolerep, composites, clientid=None, realm="master"):
+        composite_url = ""
         try:
             if clientid is not None:
                 client = self.get_client_by_clientid(client_id=clientid, realm=realm)
-                cid = client['id']
-                composite_url = URL_CLIENT_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe=''))
id=cid, name=quote(rolerep["name"], safe='')) + cid = client["id"] + composite_url = URL_CLIENT_ROLE_COMPOSITES.format( + url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe="") + ) else: - composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe='')) + composite_url = URL_REALM_ROLE_COMPOSITES.format( + url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe="") + ) # Get existing composites # create new composites - return self._request(composite_url, method='DELETE', data=json.dumps(composites)) + return self._request(composite_url, method="DELETE", data=json.dumps(composites)) except Exception as e: self.fail_request(e, msg=f"Could not create role {rolerep['name']} composites in realm {realm}: {e}") - def update_role_composites(self, rolerep, composites, clientid=None, realm='master'): + def update_role_composites(self, rolerep, composites, clientid=None, realm="master"): # Get existing composites existing_composites = self.get_role_composites(rolerep=rolerep, clientid=clientid, realm=realm) composites_to_be_created = [] @@ -1878,32 +1977,35 @@ def update_role_composites(self, rolerep, composites, clientid=None, realm='mast for existing_composite in existing_composites: if existing_composite["clientRole"]: existing_composite_client = self.get_client_by_id(existing_composite["containerId"], realm=realm) - if ("client_id" in composite - and composite['client_id'] is not None - and existing_composite_client["clientId"] == composite["client_id"] - and composite["name"] == existing_composite["name"]): + if ( + "client_id" in composite + and composite["client_id"] is not None + and existing_composite_client["clientId"] == composite["client_id"] + and composite["name"] == existing_composite["name"] + ): composite_found = True break else: - if (("client_id" not in composite or composite['client_id'] is None) - and composite["name"] == existing_composite["name"]): + if ("client_id" not in composite or composite["client_id"] is None) and composite[ + "name" + ] == existing_composite["name"]: composite_found = True break - if not composite_found and ('state' not in composite or composite['state'] == 'present'): - if "client_id" in composite and composite['client_id'] is not None: - client_roles = self.get_client_roles(clientid=composite['client_id'], realm=realm) + if not composite_found and ("state" not in composite or composite["state"] == "present"): + if "client_id" in composite and composite["client_id"] is not None: + client_roles = self.get_client_roles(clientid=composite["client_id"], realm=realm) for client_role in client_roles: - if client_role['name'] == composite['name']: + if client_role["name"] == composite["name"]: composites_to_be_created.append(client_role) break else: realm_role = self.get_realm_role(name=composite["name"], realm=realm) composites_to_be_created.append(realm_role) - elif composite_found and 'state' in composite and composite['state'] == 'absent': - if "client_id" in composite and composite['client_id'] is not None: - client_roles = self.get_client_roles(clientid=composite['client_id'], realm=realm) + elif composite_found and "state" in composite and composite["state"] == "absent": + if "client_id" in composite and composite["client_id"] is not None: + client_roles = self.get_client_roles(clientid=composite["client_id"], realm=realm) for client_role in client_roles: - if client_role['name'] == composite['name']: + if client_role["name"] == composite["name"]: 
composites_to_be_deleted.append(client_role) break else: @@ -1912,25 +2014,29 @@ def update_role_composites(self, rolerep, composites, clientid=None, realm='mast if len(composites_to_be_created) > 0: # create new composites - self.create_role_composites(rolerep=rolerep, composites=composites_to_be_created, clientid=clientid, realm=realm) + self.create_role_composites( + rolerep=rolerep, composites=composites_to_be_created, clientid=clientid, realm=realm + ) if len(composites_to_be_deleted) > 0: # delete new composites - self.delete_role_composites(rolerep=rolerep, composites=composites_to_be_deleted, clientid=clientid, realm=realm) + self.delete_role_composites( + rolerep=rolerep, composites=composites_to_be_deleted, clientid=clientid, realm=realm + ) - def delete_realm_role(self, name, realm='master'): - """ Delete a realm role. + def delete_realm_role(self, name, realm="master"): + """Delete a realm role. :param name: The name of the role. :param realm: The realm in which this role resides, default "master". """ - role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe='')) + role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe="")) try: - return self._request(role_url, method='DELETE') + return self._request(role_url, method="DELETE") except Exception as e: - self.fail_request(e, msg=f'Unable to delete role {name} in realm {realm}: {e}') + self.fail_request(e, msg=f"Unable to delete role {name} in realm {realm}: {e}") - def get_client_roles(self, clientid, realm='master'): - """ Obtains role representations for client roles in a specific client + def get_client_roles(self, clientid, realm="master"): + """Obtains role representations for client roles in a specific client :param clientid: Client id to be queried :param realm: Realm to be queried @@ -1938,17 +2044,19 @@ def get_client_roles(self, clientid, realm='master'): """ cid = self.get_client_id(clientid, realm=realm) if cid is None: - self.module.fail_json(msg=f'Could not find client {clientid} in realm {realm}') + self.module.fail_json(msg=f"Could not find client {clientid} in realm {realm}") rolelist_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) try: - return self._request_and_deserialize(rolelist_url, method='GET') + return self._request_and_deserialize(rolelist_url, method="GET") except ValueError as e: - self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain list of roles for client {clientid} in realm {realm}: {e}') + self.module.fail_json( + msg=f"API returned incorrect JSON when trying to obtain list of roles for client {clientid} in realm {realm}: {e}" + ) except Exception as e: - self.fail_request(e, msg=f'Could not obtain list of roles for client {clientid} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not obtain list of roles for client {clientid} in realm {realm}: {e}") - def get_client_role(self, name, clientid, realm='master'): - """ Fetch a keycloak client role from the provided realm using the role's name. + def get_client_role(self, name, clientid, realm="master"): + """Fetch a keycloak client role from the provided realm using the role's name. :param name: Name of the role to fetch. 
:param clientid: Client id for the client role @@ -1958,20 +2066,20 @@ def get_client_role(self, name, clientid, realm='master'): """ cid = self.get_client_id(clientid, realm=realm) if cid is None: - self.module.fail_json(msg=f'Could not find client {clientid} in realm {realm}') - role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe='')) + self.module.fail_json(msg=f"Could not find client {clientid} in realm {realm}") + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe="")) try: return self._request_and_deserialize(role_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.fail_request(e, msg=f'Could not fetch role {name} in client {clientid} of realm {realm}: {e}') + self.fail_request(e, msg=f"Could not fetch role {name} in client {clientid} of realm {realm}: {e}") except Exception as e: - self.module.fail_json(msg=f'Could not fetch role {name} for client {clientid} in realm {realm}: {e}') + self.module.fail_json(msg=f"Could not fetch role {name} for client {clientid} in realm {realm}: {e}") - def create_client_role(self, rolerep, clientid, realm='master'): - """ Create a Keycloak client role. + def create_client_role(self, rolerep, clientid, realm="master"): + """Create a Keycloak client role. :param rolerep: a RoleRepresentation of the role to be created. Must contain at minimum the field name. :param clientid: Client id for the client role @@ -1980,23 +2088,22 @@ def create_client_role(self, rolerep, clientid, realm='master'): """ cid = self.get_client_id(clientid, realm=realm) if cid is None: - self.module.fail_json(msg=f'Could not find client {clientid} in realm {realm}') + self.module.fail_json(msg=f"Could not find client {clientid} in realm {realm}") roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) try: if "composites" in rolerep: keycloak_compatible_composites = self.convert_role_composites(rolerep["composites"]) rolerep["composites"] = keycloak_compatible_composites - return self._request(roles_url, method='POST', data=json.dumps(rolerep)) + return self._request(roles_url, method="POST", data=json.dumps(rolerep)) except Exception as e: - self.fail_request(e, msg=f"Could not create role {rolerep['name']} for client {clientid} in realm {realm}: {e}") + self.fail_request( + e, msg=f"Could not create role {rolerep['name']} for client {clientid} in realm {realm}: {e}" + ) def convert_role_composites(self, composites): - keycloak_compatible_composites = { - 'client': {}, - 'realm': [] - } + keycloak_compatible_composites = {"client": {}, "realm": []} for composite in composites: - if 'state' not in composite or composite['state'] == 'present': + if "state" not in composite or composite["state"] == "present": if "client_id" in composite and composite["client_id"] is not None: if composite["client_id"] not in keycloak_compatible_composites["client"]: keycloak_compatible_composites["client"][composite["client_id"]] = [] @@ -2006,7 +2113,7 @@ def convert_role_composites(self, composites): return keycloak_compatible_composites def update_client_role(self, rolerep, clientid, realm="master"): - """ Update an existing client role. + """Update an existing client role. :param rolerep: A RoleRepresentation of the updated role. 
         :param clientid: Client id for the client role
@@ -2015,22 +2122,24 @@
         """
         cid = self.get_client_id(clientid, realm=realm)
         if cid is None:
-            self.module.fail_json(msg=f'Could not find client {clientid} in realm {realm}')
-        role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep['name'], safe=''))
+            self.module.fail_json(msg=f"Could not find client {clientid} in realm {realm}")
+        role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe=""))
         try:
             composites = None
             if "composites" in rolerep:
                 composites = copy.deepcopy(rolerep["composites"])
-                del rolerep['composites']
-            update_role_response = self._request(role_url, method='PUT', data=json.dumps(rolerep))
+                del rolerep["composites"]
+            update_role_response = self._request(role_url, method="PUT", data=json.dumps(rolerep))
             if composites is not None:
                 self.update_role_composites(rolerep=rolerep, clientid=clientid, composites=composites, realm=realm)
             return update_role_response
         except Exception as e:
-            self.fail_request(e, msg=f"Could not update role {rolerep['name']} for client {clientid} in realm {realm}: {e}")
+            self.fail_request(
+                e, msg=f"Could not update role {rolerep['name']} for client {clientid} in realm {realm}: {e}"
+            )

     def delete_client_role(self, name, clientid, realm="master"):
-        """ Delete a role. One of name or roleid must be provided.
+        """Delete a client role by name.

         :param name: The name of the role.
         :param clientid: Client id for the client role
@@ -2038,14 +2147,14 @@
         """
         cid = self.get_client_id(clientid, realm=realm)
         if cid is None:
-            self.module.fail_json(msg=f'Could not find client {clientid} in realm {realm}')
-        role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe=''))
+            self.module.fail_json(msg=f"Could not find client {clientid} in realm {realm}")
+        role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe=""))
         try:
-            return self._request(role_url, method='DELETE')
+            return self._request(role_url, method="DELETE")
         except Exception as e:
-            self.fail_request(e, msg=f'Unable to delete role {name} for client {clientid} in realm {realm}: {e}')
+            self.fail_request(e, msg=f"Unable to delete role {name} for client {clientid} in realm {realm}: {e}")

-    def get_authentication_flow_by_alias(self, alias, realm='master'):
+    def get_authentication_flow_by_alias(self, alias, realm="master"):
         """
         Get an authentication flow by its alias
         :param alias: Alias of the authentication flow to get.
@@ -2055,7 +2164,9 @@
         try:
             authentication_flow = {}
             # Check if the authentication flow exists on the Keycloak serveraders
-            authentications = json.load(self._request(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET'))
+            authentications = json.load(
+                self._request(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method="GET")
+            )
             for authentication in authentications:
                 if authentication["alias"] == alias:
                     authentication_flow = authentication
@@ -2064,7 +2175,7 @@
         except Exception as e:
             self.fail_request(e, msg=f"Unable get authentication flow {alias}: {e}")

-    def delete_authentication_flow_by_id(self, id, realm='master'):
+    def delete_authentication_flow_by_id(self, id, realm="master"):
         """
         Delete an authentication flow from Keycloak
         :param id: id of authentication flow to be deleted
@@ -2074,11 +2185,11 @@
         flow_url = URL_AUTHENTICATION_FLOW.format(url=self.baseurl, realm=realm, id=id)

         try:
-            return self._request(flow_url, method='DELETE')
+            return self._request(flow_url, method="DELETE")
         except Exception as e:
-            self.fail_request(e, msg=f'Could not delete authentication flow {id} in realm {realm}: {e}')
+            self.fail_request(e, msg=f"Could not delete authentication flow {id} in realm {realm}: {e}")

-    def copy_auth_flow(self, config, realm='master'):
+    def copy_auth_flow(self, config, realm="master"):
         """
         Create a new authentication flow from a copy of another.
         :param config: Representation of the authentication flow to create.
@@ -2086,21 +2197,17 @@
         :return: Representation of the new authentication flow.
         """
         try:
-            new_name = dict(
-                newName=config["alias"]
-            )
+            new_name = dict(newName=config["alias"])
             self._request(
                 URL_AUTHENTICATION_FLOW_COPY.format(
-                    url=self.baseurl,
-                    realm=realm,
-                    copyfrom=quote(config["copyFrom"], safe='')),
-                method='POST',
-                data=json.dumps(new_name))
+                    url=self.baseurl, realm=realm, copyfrom=quote(config["copyFrom"], safe="")
+                ),
+                method="POST",
+                data=json.dumps(new_name),
+            )
             flow_list = json.load(
-                self._request(
-                    URL_AUTHENTICATION_FLOWS.format(url=self.baseurl,
-                                                    realm=realm),
-                    method='GET'))
+                self._request(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method="GET")
+            )
             for flow in flow_list:
                 if flow["alias"] == config["alias"]:
                     return flow
@@ -2108,7 +2215,7 @@
         except Exception as e:
             self.fail_request(e, msg=f"Could not copy authentication flow {config['alias']} in realm {realm}: {e}")

-    def create_empty_auth_flow(self, config, realm='master'):
+    def create_empty_auth_flow(self, config, realm="master"):
         """
         Create a new empty authentication flow.
         :param config: Representation of the authentication flow to create.
@@ -2117,32 +2224,25 @@ def create_empty_auth_flow(self, config, realm='master'): """ try: new_flow = dict( - alias=config["alias"], - providerId=config["providerId"], - description=config["description"], - topLevel=True + alias=config["alias"], providerId=config["providerId"], description=config["description"], topLevel=True ) self._request( - URL_AUTHENTICATION_FLOWS.format( - url=self.baseurl, - realm=realm), - method='POST', - data=json.dumps(new_flow)) + URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method="POST", data=json.dumps(new_flow) + ) flow_list = json.load( - self._request( - URL_AUTHENTICATION_FLOWS.format( - url=self.baseurl, - realm=realm), - method='GET')) + self._request(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method="GET") + ) for flow in flow_list: if flow["alias"] == config["alias"]: return flow return None except Exception as e: - self.fail_request(e, msg=f"Could not create empty authentication flow {config['alias']} in realm {realm}: {e}") + self.fail_request( + e, msg=f"Could not create empty authentication flow {config['alias']} in realm {realm}: {e}" + ) - def update_authentication_executions(self, flowAlias, updatedExec, realm='master'): - """ Update authentication executions + def update_authentication_executions(self, flowAlias, updatedExec, realm="master"): + """Update authentication executions :param flowAlias: name of the parent flow :param updatedExec: JSON containing updated execution @@ -2151,18 +2251,21 @@ def update_authentication_executions(self, flowAlias, updatedExec, realm='master try: self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS.format( - url=self.baseurl, - realm=realm, - flowalias=quote(flowAlias, safe='')), - method='PUT', - data=json.dumps(updatedExec)) + url=self.baseurl, realm=realm, flowalias=quote(flowAlias, safe="") + ), + method="PUT", + data=json.dumps(updatedExec), + ) except HTTPError as e: - self.fail_request(e, msg=f"Unable to update execution '{flowAlias}': {e!r}: {e.url};{e.msg};{e.code};{e.hdrs} {updatedExec}") + self.fail_request( + e, + msg=f"Unable to update execution '{flowAlias}': {e!r}: {e.url};{e.msg};{e.code};{e.hdrs} {updatedExec}", + ) except Exception as e: self.module.fail_json(msg=f"Unable to update executions {updatedExec}: {e}") - def add_authenticationConfig_to_execution(self, executionId, authenticationConfig, realm='master'): - """ Add autenticatorConfig to the execution + def add_authenticationConfig_to_execution(self, executionId, authenticationConfig, realm="master"): + """Add autenticatorConfig to the execution :param executionId: id of execution :param authenticationConfig: config to add to the execution @@ -2170,34 +2273,27 @@ def add_authenticationConfig_to_execution(self, executionId, authenticationConfi """ try: self._request( - URL_AUTHENTICATION_EXECUTION_CONFIG.format( - url=self.baseurl, - realm=realm, - id=executionId), - method='POST', - data=json.dumps(authenticationConfig)) + URL_AUTHENTICATION_EXECUTION_CONFIG.format(url=self.baseurl, realm=realm, id=executionId), + method="POST", + data=json.dumps(authenticationConfig), + ) except Exception as e: self.fail_request(e, msg=f"Unable to add authenticationConfig {executionId}: {e}") - def delete_authentication_config(self, configId, realm='master'): - """ Delete authenticator config + def delete_authentication_config(self, configId, realm="master"): + """Delete authenticator config :param configId: id of authentication config :param realm: realm of authentication config to be deleted """ try: # Send a 
DELETE request to remove the specified authentication config from the Keycloak server. - self._request( - URL_AUTHENTICATION_CONFIG.format( - url=self.baseurl, - realm=realm, - id=configId), - method='DELETE') + self._request(URL_AUTHENTICATION_CONFIG.format(url=self.baseurl, realm=realm, id=configId), method="DELETE") except Exception as e: self.fail_request(e, msg=f"Unable to delete authentication config {configId}: {e}") - def create_subflow(self, subflowName, flowAlias, realm='master', flowType='basic-flow'): - """ Create new sublow on the flow + def create_subflow(self, subflowName, flowAlias, realm="master", flowType="basic-flow"): + """Create new subflow on the flow :param subflowName: name of the subflow to create :param flowAlias: name of the parent flow @@ -2210,16 +2306,16 @@ def create_subflow(self, subflowName, flowAlias, realm='master', flowType='basic newSubFlow["type"] = flowType self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format( - url=self.baseurl, - realm=realm, - flowalias=quote(flowAlias, safe='')), - method='POST', - data=json.dumps(newSubFlow)) + url=self.baseurl, realm=realm, flowalias=quote(flowAlias, safe="") + ), + method="POST", + data=json.dumps(newSubFlow), + ) except Exception as e: self.fail_request(e, msg=f"Unable to create new subflow {subflowName}: {e}") - def create_execution(self, execution, flowAlias, realm='master'): - """ Create new execution on the flow + def create_execution(self, execution, flowAlias, realm="master"): + """Create new execution on the flow :param execution: name of execution to create :param flowAlias: name of the parent flow @@ -2231,20 +2327,21 @@ def create_execution(self, execution, flowAlias, realm='master'): newExec["requirement"] = execution["requirement"] self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION.format( - url=self.baseurl, - realm=realm, - flowalias=quote(flowAlias, safe='')), - method='POST', - data=json.dumps(newExec)) + url=self.baseurl, realm=realm, flowalias=quote(flowAlias, safe="") + ), + method="POST", + data=json.dumps(newExec), + ) except HTTPError as e: self.fail_request( - e, msg=f"Unable to create new execution '{flowAlias}' {execution['providerId']}: {e!r}: {e.url};{e.msg};{e.code};{e.hdrs} {newExec}" + e, + msg=f"Unable to create new execution '{flowAlias}' {execution['providerId']}: {e!r}: {e.url};{e.msg};{e.code};{e.hdrs} {newExec}", ) except Exception as e: self.module.fail_json(msg=f"Unable to create new execution '{flowAlias}' {execution['providerId']}: {e}") - def change_execution_priority(self, executionId, diff, realm='master'): - """ Raise or lower execution priority of diff time + def change_execution_priority(self, executionId, diff, realm="master"): + """Raise or lower execution priority by diff positions :param executionId: id of execution to lower priority :param realm: realm the client is in @@ -2256,22 +2353,22 @@ def change_execution_priority(self, executionId, diff, realm='master'): for i in range(diff): self._request( URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY.format( - url=self.baseurl, - realm=realm, - id=executionId), - method='POST') + url=self.baseurl, realm=realm, id=executionId + ), + method="POST", + ) elif diff < 0: for i in range(-diff): self._request( URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY.format( - url=self.baseurl, - realm=realm, - id=executionId), - method='POST') + url=self.baseurl, realm=realm, id=executionId + ), + method="POST", + ) except Exception as e: self.fail_request(e, msg=f"Unable to change execution priority {executionId}: {e}") - def
get_executions_representation(self, config, realm='master'): + def get_executions_representation(self, config, realm="master"): """ Get a representation of the executions for an authentication flow. :param config: Representation of the authentication flow @@ -2283,26 +2380,28 @@ def get_executions_representation(self, config, realm='master'): executions = json.load( self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS.format( - url=self.baseurl, - realm=realm, - flowalias=quote(config["alias"], safe='')), - method='GET')) + url=self.baseurl, realm=realm, flowalias=quote(config["alias"], safe="") + ), + method="GET", + ) + ) for execution in executions: if "authenticationConfig" in execution: execConfigId = execution["authenticationConfig"] execConfig = json.load( self._request( - URL_AUTHENTICATION_CONFIG.format( - url=self.baseurl, - realm=realm, - id=execConfigId), - method='GET')) + URL_AUTHENTICATION_CONFIG.format(url=self.baseurl, realm=realm, id=execConfigId), + method="GET", + ) + ) execution["authenticationConfig"] = execConfig return executions except Exception as e: - self.fail_request(e, msg=f"Could not get executions for authentication flow {config['alias']} in realm {realm}: {e}") + self.fail_request( + e, msg=f"Could not get executions for authentication flow {config['alias']} in realm {realm}: {e}" + ) - def get_required_actions(self, realm='master'): + def get_required_actions(self, realm="master"): """ Get required actions. :param realm: Realm name (not id). @@ -2311,20 +2410,14 @@ def get_required_actions(self, realm='master'): try: required_actions = json.load( - self._request( - URL_AUTHENTICATION_REQUIRED_ACTIONS.format( - url=self.baseurl, - realm=realm - ), - method='GET' - ) + self._request(URL_AUTHENTICATION_REQUIRED_ACTIONS.format(url=self.baseurl, realm=realm), method="GET") ) return required_actions except Exception: return None - def register_required_action(self, rep, realm='master'): + def register_required_action(self, rep, realm="master"): """ Register required action. :param rep: JSON containing 'providerId', and 'name' attributes. @@ -2332,27 +2425,18 @@ def register_required_action(self, rep, realm='master'): :return: Representation of the required action. """ - data = { - 'name': rep['name'], - 'providerId': rep['providerId'] - } + data = {"name": rep["name"], "providerId": rep["providerId"]} try: return self._request( - URL_AUTHENTICATION_REGISTER_REQUIRED_ACTION.format( - url=self.baseurl, - realm=realm - ), - method='POST', + URL_AUTHENTICATION_REGISTER_REQUIRED_ACTION.format(url=self.baseurl, realm=realm), + method="POST", data=json.dumps(data), ) except Exception as e: - self.fail_request( - e, - msg=f"Unable to register required action {rep['name']} in realm {realm}: {e}" - ) + self.fail_request(e, msg=f"Unable to register required action {rep['name']} in realm {realm}: {e}") - def update_required_action(self, alias, rep, realm='master'): + def update_required_action(self, alias, rep, realm="master"): """ Update required action. :param alias: Alias of required action. 
@@ -2364,20 +2448,15 @@ def update_required_action(self, alias, rep, realm='master'): try: return self._request( URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS.format( - url=self.baseurl, - alias=quote(alias, safe=''), - realm=realm + url=self.baseurl, alias=quote(alias, safe=""), realm=realm ), - method='PUT', + method="PUT", data=json.dumps(rep), ) except Exception as e: - self.fail_request( - e, - msg=f'Unable to update required action {alias} in realm {realm}: {e}' - ) + self.fail_request(e, msg=f"Unable to update required action {alias} in realm {realm}: {e}") - def delete_required_action(self, alias, realm='master'): + def delete_required_action(self, alias, realm="master"): """ Delete required action. :param alias: Alias of required action. @@ -2388,33 +2467,30 @@ def delete_required_action(self, alias, realm='master'): try: return self._request( URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS.format( - url=self.baseurl, - alias=quote(alias, safe=''), - realm=realm + url=self.baseurl, alias=quote(alias, safe=""), realm=realm ), - method='DELETE', + method="DELETE", ) except Exception as e: - self.fail_request( - e, - msg=f'Unable to delete required action {alias} in realm {realm}: {e}' - ) + self.fail_request(e, msg=f"Unable to delete required action {alias} in realm {realm}: {e}") - def get_identity_providers(self, realm='master'): - """ Fetch representations for identity providers in a realm + def get_identity_providers(self, realm="master"): + """Fetch representations for identity providers in a realm :param realm: realm to be queried :return: list of representations for identity providers """ idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) try: - return self._request_and_deserialize(idps_url, method='GET') + return self._request_and_deserialize(idps_url, method="GET") except ValueError as e: - self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain list of identity providers for realm {realm}: {e}') + self.module.fail_json( + msg=f"API returned incorrect JSON when trying to obtain list of identity providers for realm {realm}: {e}" + ) except Exception as e: - self.fail_request(e, msg=f'Could not obtain list of identity providers for realm {realm}: {e}') + self.fail_request(e, msg=f"Could not obtain list of identity providers for realm {realm}: {e}") - def get_identity_provider(self, alias, realm='master'): - """ Fetch identity provider representation from a realm using the idp's alias. + def get_identity_provider(self, alias, realm="master"): + """Fetch identity provider representation from a realm using the idp's alias. If the identity provider does not exist, None is returned. :param alias: Alias of the identity provider to fetch. :param realm: Realm in which the identity provider resides; default 'master'. @@ -2426,80 +2502,79 @@ def get_identity_provider(self, alias, realm='master'): if e.code == 404: return None else: - self.fail_request(e, msg=f'Could not fetch identity provider {alias} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not fetch identity provider {alias} in realm {realm}: {e}") except Exception as e: - self.module.fail_json(msg=f'Could not fetch identity provider {alias} in realm {realm}: {e}') + self.module.fail_json(msg=f"Could not fetch identity provider {alias} in realm {realm}: {e}") - def create_identity_provider(self, idprep, realm='master'): - """ Create an identity provider. + def create_identity_provider(self, idprep, realm="master"): + """Create an identity provider. 
:param idprep: Identity provider representation of the idp to be created. :param realm: Realm in which this identity provider resides, default "master". :return: HTTPResponse object on success """ idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) try: - return self._request(idps_url, method='POST', data=json.dumps(idprep)) + return self._request(idps_url, method="POST", data=json.dumps(idprep)) except Exception as e: self.fail_request(e, msg=f"Could not create identity provider {idprep['alias']} in realm {realm}: {e}") - def update_identity_provider(self, idprep, realm='master'): - """ Update an existing identity provider. + def update_identity_provider(self, idprep, realm="master"): + """Update an existing identity provider. :param idprep: Identity provider representation of the idp to be updated. :param realm: Realm in which this identity provider resides, default "master". :return HTTPResponse object on success """ - idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=idprep['alias']) + idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=idprep["alias"]) try: - return self._request(idp_url, method='PUT', data=json.dumps(idprep)) + return self._request(idp_url, method="PUT", data=json.dumps(idprep)) except Exception as e: self.fail_request(e, msg=f"Could not update identity provider {idprep['alias']} in realm {realm}: {e}") - def delete_identity_provider(self, alias, realm='master'): - """ Delete an identity provider. + def delete_identity_provider(self, alias, realm="master"): + """Delete an identity provider. :param alias: Alias of the identity provider. :param realm: Realm in which this identity provider resides, default "master". """ idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias) try: - return self._request(idp_url, method='DELETE') + return self._request(idp_url, method="DELETE") except Exception as e: - self.fail_request(e, msg=f'Unable to delete identity provider {alias} in realm {realm}: {e}') + self.fail_request(e, msg=f"Unable to delete identity provider {alias} in realm {realm}: {e}") - def get_identity_provider_mappers(self, alias, realm='master'): - """ Fetch representations for identity provider mappers + def get_identity_provider_mappers(self, alias, realm="master"): + """Fetch representations for identity provider mappers :param alias: Alias of the identity provider. :param realm: realm to be queried :return: list of representations for identity provider mappers """ mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias) try: - return self._request_and_deserialize(mappers_url, method='GET') + return self._request_and_deserialize(mappers_url, method="GET") except ValueError as e: self.module.fail_json( - msg=f'API returned incorrect JSON when trying to obtain list of identity provider mappers for idp {alias} in realm {realm}: {e}' + msg=f"API returned incorrect JSON when trying to obtain list of identity provider mappers for idp {alias} in realm {realm}: {e}" ) except Exception as e: - self.fail_request(e, msg=f'Could not obtain list of identity provider mappers for idp {alias} in realm {realm}: {e}') + self.fail_request( + e, msg=f"Could not obtain list of identity provider mappers for idp {alias} in realm {realm}: {e}" + ) - def fetch_idp_endpoints_import_config_url(self, fromUrl, providerId='oidc', realm='master'): - """ Import an identity provider configuration through Keycloak server from a well-known URL. 
+ def fetch_idp_endpoints_import_config_url(self, fromUrl, providerId="oidc", realm="master"): + """Import an identity provider configuration through Keycloak server from a well-known URL. :param fromUrl: URL to import the identity provider configuration from. :param providerId: Provider ID of the identity provider to import, default 'oidc'. :param realm: Realm :return: IDP endpoints. """ try: - payload = { - "providerId": providerId, - "fromUrl": fromUrl - } + payload = {"providerId": providerId, "fromUrl": fromUrl} idps_url = URL_IDENTITY_PROVIDER_IMPORT.format(url=self.baseurl, realm=realm) - return self._request_and_deserialize(idps_url, method='POST', data=json.dumps(payload)) + return self._request_and_deserialize(idps_url, method="POST", data=json.dumps(payload)) except Exception as e: - self.fail_request(e, msg=f'Could not import the IdP config in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not import the IdP config in realm {realm}: {e}") - def get_identity_provider_mapper(self, mid, alias, realm='master'): - """ Fetch identity provider representation from a realm using the idp's alias. + def get_identity_provider_mapper(self, mid, alias, realm="master"): + """Fetch identity provider mapper representation from a realm using the idp's alias. If the mapper does not exist, None is returned. :param mid: Unique ID of the mapper to fetch. :param alias: Alias of the identity provider. @@ -2512,12 +2587,16 @@ def get_identity_provider_mapper(self, mid, alias, realm='master'): if e.code == 404: return None else: - self.fail_request(e, msg=f'Could not fetch mapper {mid} for identity provider {alias} in realm {realm}: {e}') + self.fail_request( + e, msg=f"Could not fetch mapper {mid} for identity provider {alias} in realm {realm}: {e}" + ) except Exception as e: - self.module.fail_json(msg=f'Could not fetch mapper {mid} for identity provider {alias} in realm {realm}: {e}') + self.module.fail_json( + msg=f"Could not fetch mapper {mid} for identity provider {alias} in realm {realm}: {e}" + ) - def create_identity_provider_mapper(self, mapper, alias, realm='master'): - """ Create an identity provider mapper. + def create_identity_provider_mapper(self, mapper, alias, realm="master"): + """Create an identity provider mapper. :param mapper: IdentityProviderMapperRepresentation of the mapper to be created. :param alias: Alias of the identity provider. :param realm: Realm in which this identity provider resides, default "master". @@ -2525,54 +2604,63 @@ def create_identity_provider_mapper(self, mapper, alias, realm='master'): """ mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias) try: - return self._request(mappers_url, method='POST', data=json.dumps(mapper)) + return self._request(mappers_url, method="POST", data=json.dumps(mapper)) except Exception as e: - self.fail_request(e, msg=f"Could not create identity provider mapper {mapper['name']} for idp {alias} in realm {realm}: {e}") + self.fail_request( + e, + msg=f"Could not create identity provider mapper {mapper['name']} for idp {alias} in realm {realm}: {e}", + ) - def update_identity_provider_mapper(self, mapper, alias, realm='master'): - """ Update an existing identity provider. + def update_identity_provider_mapper(self, mapper, alias, realm="master"): + """Update an existing identity provider mapper. :param mapper: IdentityProviderMapperRepresentation of the mapper to be updated. :param alias: Alias of the identity provider.
:param realm: Realm in which this identity provider resides, default "master". :return HTTPResponse object on success """ - mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mapper['id']) + mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mapper["id"]) try: - return self._request(mapper_url, method='PUT', data=json.dumps(mapper)) + return self._request(mapper_url, method="PUT", data=json.dumps(mapper)) except Exception as e: - self.fail_request(e, msg=f"Could not update mapper {mapper['id']} for identity provider {alias} in realm {realm}: {e}") + self.fail_request( + e, msg=f"Could not update mapper {mapper['id']} for identity provider {alias} in realm {realm}: {e}" + ) - def delete_identity_provider_mapper(self, mid, alias, realm='master'): - """ Delete an identity provider. + def delete_identity_provider_mapper(self, mid, alias, realm="master"): + """Delete an identity provider mapper. :param mid: Unique ID of the mapper to delete. :param alias: Alias of the identity provider. :param realm: Realm in which this identity provider resides, default "master". """ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid) try: - return self._request(mapper_url, method='DELETE') + return self._request(mapper_url, method="DELETE") except Exception as e: - self.fail_request(e, msg=f'Unable to delete mapper {mid} for identity provider {alias} in realm {realm}: {e}') + self.fail_request( + e, msg=f"Unable to delete mapper {mid} for identity provider {alias} in realm {realm}: {e}" + ) - def get_components(self, filter=None, realm='master'): - """ Fetch representations for components in a realm + def get_components(self, filter=None, realm="master"): + """Fetch representations for components in a realm :param realm: realm to be queried :param filter: search filter :return: list of representations for components """ comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm) if filter is not None: - comps_url += f'?{filter}' + comps_url += f"?{filter}" try: - return self._request_and_deserialize(comps_url, method='GET') + return self._request_and_deserialize(comps_url, method="GET") except ValueError as e: - self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain list of components for realm {realm}: {e}') + self.module.fail_json( + msg=f"API returned incorrect JSON when trying to obtain list of components for realm {realm}: {e}" + ) except Exception as e: - self.fail_request(e, msg=f'Could not obtain list of components for realm {realm}: {e}') + self.fail_request(e, msg=f"Could not obtain list of components for realm {realm}: {e}") - def get_component(self, cid, realm='master'): - """ Fetch component representation from a realm using its cid. + def get_component(self, cid, realm="master"): + """Fetch component representation from a realm using its cid. If the component does not exist, None is returned. :param cid: Unique ID of the component to fetch. :param realm: Realm in which the component resides; default 'master'.
@@ -2584,58 +2672,58 @@ def get_component(self, cid, realm='master'): if e.code == 404: return None else: - self.fail_request(e, msg=f'Could not fetch component {cid} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not fetch component {cid} in realm {realm}: {e}") except Exception as e: - self.module.fail_json(msg=f'Could not fetch component {cid} in realm {realm}: {e}') + self.module.fail_json(msg=f"Could not fetch component {cid} in realm {realm}: {e}") - def create_component(self, comprep, realm='master'): - """ Create an component. + def create_component(self, comprep, realm="master"): + """Create a component. :param comprep: Component representation of the component to be created. :param realm: Realm in which this component resides, default "master". :return: Component representation of the created component """ comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm) try: - resp = self._request(comps_url, method='POST', data=json.dumps(comprep)) - comp_url = resp.getheader('Location') + resp = self._request(comps_url, method="POST", data=json.dumps(comprep)) + comp_url = resp.getheader("Location") if comp_url is None: - self.module.fail_json(msg=f'Could not create component in realm {realm}: unexpected response') + self.module.fail_json(msg=f"Could not create component in realm {realm}: unexpected response") return self._request_and_deserialize(comp_url, method="GET") except Exception as e: - self.fail_request(e, msg=f'Could not create component in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not create component in realm {realm}: {e}") - def update_component(self, comprep, realm='master'): - """ Update an existing component. + def update_component(self, comprep, realm="master"): + """Update an existing component. :param comprep: Component representation of the component to be updated. :param realm: Realm in which this component resides, default "master". :return HTTPResponse object on success """ - cid = comprep.get('id') + cid = comprep.get("id") if cid is None: - self.module.fail_json(msg='Cannot update component without id') + self.module.fail_json(msg="Cannot update component without id") comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid) try: - return self._request(comp_url, method='PUT', data=json.dumps(comprep)) + return self._request(comp_url, method="PUT", data=json.dumps(comprep)) except Exception as e: - self.fail_request(e, msg=f'Could not update component {cid} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not update component {cid} in realm {realm}: {e}") - def delete_component(self, cid, realm='master'): - """ Delete an component. + def delete_component(self, cid, realm="master"): + """Delete a component. :param cid: Unique ID of the component. :param realm: Realm in which this component resides, default "master".
""" comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid) try: - return self._request(comp_url, method='DELETE') + return self._request(comp_url, method="DELETE") except Exception as e: - self.fail_request(e, msg=f'Unable to delete component {cid} in realm {realm}: {e}') + self.fail_request(e, msg=f"Unable to delete component {cid} in realm {realm}: {e}") def get_authz_authorization_scope_by_name(self, name, client_id, realm): url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm) search_url = f"{url}/search?name={quote(name, safe='')}" try: - return self._request_and_deserialize(search_url, method='GET') + return self._request_and_deserialize(search_url, method="GET") except Exception: return False @@ -2644,29 +2732,34 @@ def create_authz_authorization_scope(self, payload, client_id, realm): url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm) try: - return self._request(url, method='POST', data=json.dumps(payload)) + return self._request(url, method="POST", data=json.dumps(payload)) except Exception as e: - self.fail_request(e, msg=f"Could not create authorization scope {payload['name']} for client {client_id} in realm {realm}: {e}") + self.fail_request( + e, + msg=f"Could not create authorization scope {payload['name']} for client {client_id} in realm {realm}: {e}", + ) def update_authz_authorization_scope(self, payload, id, client_id, realm): """Update an authorization scope for a Keycloak client""" url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm) try: - return self._request(url, method='PUT', data=json.dumps(payload)) + return self._request(url, method="PUT", data=json.dumps(payload)) except Exception as e: - self.fail_request(e, msg=f"Could not create update scope {payload['name']} for client {client_id} in realm {realm}: {e}") + self.fail_request( + e, msg=f"Could not create update scope {payload['name']} for client {client_id} in realm {realm}: {e}" + ) def remove_authz_authorization_scope(self, id, client_id, realm): """Remove an authorization scope from a Keycloak client""" url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm) try: - return self._request(url, method='DELETE') + return self._request(url, method="DELETE") except Exception as e: - self.fail_request(e, msg=f'Could not delete scope {id} for client {client_id} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not delete scope {id} for client {client_id} in realm {realm}: {e}") - def get_user_by_id(self, user_id, realm='master'): + def get_user_by_id(self, user_id, realm="master"): """ Get a User by its ID. :param user_id: ID of the user. @@ -2674,19 +2767,13 @@ def get_user_by_id(self, user_id, realm='master'): :return: Representation of the user. """ try: - user_url = URL_USER.format( - url=self.baseurl, - realm=realm, - id=user_id) - userrep = json.load( - self._request( - user_url, - method='GET')) + user_url = URL_USER.format(url=self.baseurl, realm=realm, id=user_id) + userrep = json.load(self._request(user_url, method="GET")) return userrep except Exception as e: - self.fail_request(e, msg=f'Could not get user {user_id} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not get user {user_id} in realm {realm}: {e}") - def create_user(self, userrep, realm='master'): + def create_user(self, userrep, realm="master"): """ Create a new User. 
:param userrep: Representation of the user to create @@ -2694,18 +2781,12 @@ def create_user(self, userrep, realm='master'): :return: Representation of the user created. """ try: - if 'attributes' in userrep and isinstance(userrep['attributes'], list): - attributes = copy.deepcopy(userrep['attributes']) - userrep['attributes'] = self.convert_user_attributes_to_keycloak_dict(attributes=attributes) - users_url = URL_USERS.format( - url=self.baseurl, - realm=realm) - self._request(users_url, - method='POST', - data=json.dumps(userrep)) - created_user = self.get_user_by_username( - username=userrep['username'], - realm=realm) + if "attributes" in userrep and isinstance(userrep["attributes"], list): + attributes = copy.deepcopy(userrep["attributes"]) + userrep["attributes"] = self.convert_user_attributes_to_keycloak_dict(attributes=attributes) + users_url = URL_USERS.format(url=self.baseurl, realm=realm) + self._request(users_url, method="POST", data=json.dumps(userrep)) + created_user = self.get_user_by_username(username=userrep["username"], realm=realm) return created_user except Exception as e: self.fail_request(e, msg=f"Could not create user {userrep['username']} in realm {realm}: {e}") @@ -2713,20 +2794,20 @@ def create_user(self, userrep, realm='master'): def convert_user_attributes_to_keycloak_dict(self, attributes): keycloak_user_attributes_dict = {} for attribute in attributes: - if ('state' not in attribute or attribute['state'] == 'present') and 'name' in attribute: - keycloak_user_attributes_dict[attribute['name']] = attribute['values'] if 'values' in attribute else [] + if ("state" not in attribute or attribute["state"] == "present") and "name" in attribute: + keycloak_user_attributes_dict[attribute["name"]] = attribute["values"] if "values" in attribute else [] return keycloak_user_attributes_dict def convert_keycloak_user_attributes_dict_to_module_list(self, attributes): module_attributes_list = [] for key in attributes: attr = {} - attr['name'] = key - attr['values'] = attributes[key] + attr["name"] = key + attr["values"] = attributes[key] module_attributes_list.append(attr) return module_attributes_list - def update_user(self, userrep, realm='master'): + def update_user(self, userrep, realm="master"): """ Update a User. :param userrep: Representation of the user to update. This representation must include the ID of the user. @@ -2734,25 +2815,17 @@ def update_user(self, userrep, realm='master'): :return: Representation of the updated user. 
""" try: - if 'attributes' in userrep and isinstance(userrep['attributes'], list): - attributes = copy.deepcopy(userrep['attributes']) - userrep['attributes'] = self.convert_user_attributes_to_keycloak_dict(attributes=attributes) - user_url = URL_USER.format( - url=self.baseurl, - realm=realm, - id=userrep["id"]) - self._request( - user_url, - method='PUT', - data=json.dumps(userrep)) - updated_user = self.get_user_by_id( - user_id=userrep['id'], - realm=realm) + if "attributes" in userrep and isinstance(userrep["attributes"], list): + attributes = copy.deepcopy(userrep["attributes"]) + userrep["attributes"] = self.convert_user_attributes_to_keycloak_dict(attributes=attributes) + user_url = URL_USER.format(url=self.baseurl, realm=realm, id=userrep["id"]) + self._request(user_url, method="PUT", data=json.dumps(userrep)) + updated_user = self.get_user_by_id(user_id=userrep["id"], realm=realm) return updated_user except Exception as e: self.fail_request(e, msg=f"Could not update user {userrep['username']} in realm {realm}: {e}") - def delete_user(self, user_id, realm='master'): + def delete_user(self, user_id, realm="master"): """ Delete a User. :param user_id: ID of the user to be deleted @@ -2760,17 +2833,12 @@ def delete_user(self, user_id, realm='master'): :return: HTTP response. """ try: - user_url = URL_USER.format( - url=self.baseurl, - realm=realm, - id=user_id) - return self._request( - user_url, - method='DELETE') + user_url = URL_USER.format(url=self.baseurl, realm=realm, id=user_id) + return self._request(user_url, method="DELETE") except Exception as e: - self.fail_request(e, msg=f'Could not delete user {user_id} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not delete user {user_id} in realm {realm}: {e}") - def get_user_groups(self, user_id, realm='master'): + def get_user_groups(self, user_id, realm="master"): """ Get the group names for a user. :param user_id: User ID @@ -2778,9 +2846,9 @@ def get_user_groups(self, user_id, realm='master'): :return: The client group names as a list of strings. """ user_groups = self.get_user_group_details(user_id, realm) - return [user_group['name'] for user_group in user_groups if 'name' in user_group] + return [user_group["name"] for user_group in user_groups if "name" in user_group] - def get_user_group_details(self, user_id, realm='master'): + def get_user_group_details(self, user_id, realm="master"): """ Get the group details for a user. :param user_id: User ID @@ -2789,15 +2857,15 @@ def get_user_group_details(self, user_id, realm='master'): """ try: user_groups_url = URL_USER_GROUPS.format(url=self.baseurl, realm=realm, id=user_id) - return self._request_and_deserialize(user_groups_url, method='GET') + return self._request_and_deserialize(user_groups_url, method="GET") except Exception as e: - self.fail_request(e, msg=f'Could not get groups for user {user_id} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not get groups for user {user_id} in realm {realm}: {e}") - def add_user_in_group(self, user_id, group_id, realm='master'): + def add_user_in_group(self, user_id, group_id, realm="master"): """DEPRECATED: Call add_user_to_group(...) instead. This method is scheduled for removal in community.general 13.0.0.""" return self.add_user_to_group(user_id, group_id, realm) - def add_user_to_group(self, user_id, group_id, realm='master'): + def add_user_to_group(self, user_id, group_id, realm="master"): """ Add a user to a group. 
:param user_id: User ID @@ -2806,18 +2874,12 @@ def add_user_to_group(self, user_id, group_id, realm='master'): :return: HTTP Response """ try: - user_group_url = URL_USER_GROUP.format( - url=self.baseurl, - realm=realm, - id=user_id, - group_id=group_id) - return self._request( - user_group_url, - method='PUT') + user_group_url = URL_USER_GROUP.format(url=self.baseurl, realm=realm, id=user_id, group_id=group_id) + return self._request(user_group_url, method="PUT") except Exception as e: - self.fail_request(e, msg=f'Could not add user {user_id} to group {group_id} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not add user {user_id} to group {group_id} in realm {realm}: {e}") - def remove_user_from_group(self, user_id, group_id, realm='master'): + def remove_user_from_group(self, user_id, group_id, realm="master"): """ Remove a user from a group. :param user_id: User ID @@ -2826,18 +2888,12 @@ def remove_user_from_group(self, user_id, group_id, realm='master'): :return: HTTP response """ try: - user_group_url = URL_USER_GROUP.format( - url=self.baseurl, - realm=realm, - id=user_id, - group_id=group_id) - return self._request( - user_group_url, - method='DELETE') + user_group_url = URL_USER_GROUP.format(url=self.baseurl, realm=realm, id=user_id, group_id=group_id) + return self._request(user_group_url, method="DELETE") except Exception as e: - self.fail_request(e, msg=f'Could not remove user {user_id} from group {group_id} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not remove user {user_id} from group {group_id} in realm {realm}: {e}") - def update_user_groups_membership(self, userrep, groups, realm='master'): + def update_user_groups_membership(self, userrep, groups, realm="master"): """ Update user's group membership :param userrep: Representation of the user. This representation must include the ID.
@@ -2849,60 +2905,68 @@ def update_user_groups_membership(self, userrep, groups, realm='master'): if not groups_to_add and not groups_to_remove: return False - user_groups = self.get_user_group_details(user_id=userrep['id'], realm=realm) - user_group_names = [user_group['name'] for user_group in user_groups if 'name' in user_group] - user_group_paths = [user_group['path'] for user_group in user_groups if 'path' in user_group] - - groups_to_add = [group_to_add for group_to_add in groups_to_add - if group_to_add not in user_group_names and group_to_add not in user_group_paths] - groups_to_remove = [group_to_remove for group_to_remove in groups_to_remove - if group_to_remove in user_group_names or group_to_remove in user_group_paths] + user_groups = self.get_user_group_details(user_id=userrep["id"], realm=realm) + user_group_names = [user_group["name"] for user_group in user_groups if "name" in user_group] + user_group_paths = [user_group["path"] for user_group in user_groups if "path" in user_group] + + groups_to_add = [ + group_to_add + for group_to_add in groups_to_add + if group_to_add not in user_group_names and group_to_add not in user_group_paths + ] + groups_to_remove = [ + group_to_remove + for group_to_remove in groups_to_remove + if group_to_remove in user_group_names or group_to_remove in user_group_paths + ] if not groups_to_add and not groups_to_remove: return False for group_to_add in groups_to_add: realm_group = self.find_group_by_path(group_to_add, realm=realm) if realm_group: - self.add_user_to_group(user_id=userrep['id'], group_id=realm_group['id'], realm=realm) + self.add_user_to_group(user_id=userrep["id"], group_id=realm_group["id"], realm=realm) for group_to_remove in groups_to_remove: realm_group = self.find_group_by_path(group_to_remove, realm=realm) if realm_group: - self.remove_user_from_group(user_id=userrep['id'], group_id=realm_group['id'], realm=realm) + self.remove_user_from_group(user_id=userrep["id"], group_id=realm_group["id"], realm=realm) return True except Exception as e: - self.module.fail_json(msg=f"Could not update group membership for user {userrep['username']} in realm {realm}: {e}") + self.module.fail_json( + msg=f"Could not update group membership for user {userrep['username']} in realm {realm}: {e}" + ) def extract_groups_to_add_to_and_remove_from_user(self, groups): groups_to_add = [] groups_to_remove = [] if isinstance(groups, list): for group in groups: - group_name = group['name'] if isinstance(group, dict) and 'name' in group else group + group_name = group["name"] if isinstance(group, dict) and "name" in group else group if isinstance(group, dict): - if 'state' not in group or group['state'] == 'present': + if "state" not in group or group["state"] == "present": groups_to_add.append(group_name) else: groups_to_remove.append(group_name) return groups_to_add, groups_to_remove - def find_group_by_path(self, target, realm='master'): + def find_group_by_path(self, target, realm="master"): """ Finds a realm group by path, e.g. '/my/group'. The path is formed by prepending a '/' character to `target` unless it's already present. This adds support for finding top level groups by name and subgroups by path. 
""" groups = self.get_groups(realm=realm) - path = target if target.startswith('/') else f"/{target}" - for segment in path.split('/'): + path = target if target.startswith("/") else f"/{target}" + for segment in path.split("/"): if not segment: continue abort = True for group in groups: - if group['path'] == path: - return self.get_group_by_groupid(group['id'], realm=realm) - if group['name'] == segment: + if group["path"] == path: + return self.get_group_by_groupid(group["id"], realm=realm) + if group["name"] == segment: groups = self.get_subgroups(group, realm=realm) abort = False break @@ -2916,18 +2980,22 @@ def convert_user_group_list_of_str_to_list_of_dict(self, groups): for group in groups: if isinstance(group, str): group_dict = {} - group_dict['name'] = group + group_dict["name"] = group list_of_groups.append(group_dict) return list_of_groups def create_authz_custom_policy(self, policy_type, payload, client_id, realm): """Create a custom policy for a Keycloak client""" - url = URL_AUTHZ_CUSTOM_POLICY.format(url=self.baseurl, policy_type=policy_type, client_id=client_id, realm=realm) + url = URL_AUTHZ_CUSTOM_POLICY.format( + url=self.baseurl, policy_type=policy_type, client_id=client_id, realm=realm + ) try: - return self._request(url, method='POST', data=json.dumps(payload)) + return self._request(url, method="POST", data=json.dumps(payload)) except Exception as e: - self.fail_request(e, msg=f"Could not create permission {payload['name']} for client {client_id} in realm {realm}: {e}") + self.fail_request( + e, msg=f"Could not create permission {payload['name']} for client {client_id} in realm {realm}: {e}" + ) def remove_authz_custom_policy(self, policy_id, client_id, realm): """Remove a custom policy from a Keycloak client""" @@ -2935,128 +3003,145 @@ def remove_authz_custom_policy(self, policy_id, client_id, realm): delete_url = f"{url}/{policy_id}" try: - return self._request(delete_url, method='DELETE') + return self._request(delete_url, method="DELETE") except Exception as e: - self.fail_request(e, msg=f'Could not delete custom policy {id} for client {client_id} in realm {realm}: {e}') + self.fail_request( + e, msg=f"Could not delete custom policy {id} for client {client_id} in realm {realm}: {e}" + ) def get_authz_permission_by_name(self, name, client_id, realm): """Get authorization permission by name""" url = URL_AUTHZ_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm) - search_url = "%s/search?name=%s" % (url, name.replace(' ', '%20')) + search_url = "%s/search?name=%s" % (url, name.replace(" ", "%20")) try: - return self._request_and_deserialize(search_url, method='GET') + return self._request_and_deserialize(search_url, method="GET") except Exception: return False def create_authz_permission(self, payload, permission_type, client_id, realm): """Create an authorization permission for a Keycloak client""" - url = URL_AUTHZ_PERMISSIONS.format(url=self.baseurl, permission_type=permission_type, client_id=client_id, realm=realm) + url = URL_AUTHZ_PERMISSIONS.format( + url=self.baseurl, permission_type=permission_type, client_id=client_id, realm=realm + ) try: - return self._request(url, method='POST', data=json.dumps(payload)) + return self._request(url, method="POST", data=json.dumps(payload)) except Exception as e: - self.fail_request(e, msg=f"Could not create permission {payload['name']} for client {client_id} in realm {realm}: {e}") + self.fail_request( + e, msg=f"Could not create permission {payload['name']} for client {client_id} in realm {realm}: {e}" 
+ ) def remove_authz_permission(self, id, client_id, realm): """Remove an authorization permission from a Keycloak client""" url = URL_AUTHZ_POLICY.format(url=self.baseurl, id=id, client_id=client_id, realm=realm) try: - return self._request(url, method='DELETE') + return self._request(url, method="DELETE") except Exception as e: - self.fail_request(e, msg=f'Could not delete permission {id} for client {client_id} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not delete permission {id} for client {client_id} in realm {realm}: {e}") def update_authz_permission(self, payload, permission_type, id, client_id, realm): """Update a permission for a Keycloak client""" - url = URL_AUTHZ_PERMISSION.format(url=self.baseurl, permission_type=permission_type, id=id, client_id=client_id, realm=realm) + url = URL_AUTHZ_PERMISSION.format( + url=self.baseurl, permission_type=permission_type, id=id, client_id=client_id, realm=realm + ) try: - return self._request(url, method='PUT', data=json.dumps(payload)) + return self._request(url, method="PUT", data=json.dumps(payload)) except Exception as e: - self.fail_request(e, msg=f"Could not create update permission {payload['name']} for client {client_id} in realm {realm}: {e}") + self.fail_request( + e, + msg=f"Could not update permission {payload['name']} for client {client_id} in realm {realm}: {e}", + ) def get_authz_resource_by_name(self, name, client_id, realm): """Get authorization resource by name""" url = URL_AUTHZ_RESOURCES.format(url=self.baseurl, client_id=client_id, realm=realm) - search_url = "%s/search?name=%s" % (url, name.replace(' ', '%20')) + search_url = "%s/search?name=%s" % (url, name.replace(" ", "%20")) try: - return self._request_and_deserialize(search_url, method='GET') + return self._request_and_deserialize(search_url, method="GET") except Exception: return False def get_authz_policy_by_name(self, name, client_id, realm): """Get authorization policy by name""" url = URL_AUTHZ_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm) - search_url = "%s/search?name=%s&permission=false" % (url, name.replace(' ', '%20')) + search_url = "%s/search?name=%s&permission=false" % (url, name.replace(" ", "%20")) try: - return self._request_and_deserialize(search_url, method='GET') + return self._request_and_deserialize(search_url, method="GET") except Exception: return False def get_client_role_scope_from_client(self, clientid, clientscopeid, realm="master"): - """ Fetch the roles associated with the client's scope for a specific client on the Keycloak server. + """Fetch the roles associated with the client's scope for a specific client on the Keycloak server. :param clientid: ID of the client from which to obtain the associated roles. :param clientscopeid: ID of the client who owns the roles. :param realm: Realm from which to obtain the scope. :return: The client scope of roles from specified client.
""" - client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format( + url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid + ) try: - return self._request_and_deserialize(client_role_scope_url, method='GET') + return self._request_and_deserialize(client_role_scope_url, method="GET") except Exception as e: - self.fail_request(e, msg=f'Could not fetch roles scope for client {clientid} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not fetch roles scope for client {clientid} in realm {realm}: {e}") def update_client_role_scope_from_client(self, payload, clientid, clientscopeid, realm="master"): - """ Update and fetch the roles associated with the client's scope on the Keycloak server. + """Update and fetch the roles associated with the client's scope on the Keycloak server. :param payload: List of roles to be added to the scope. :param clientid: ID of the client to update scope. :param clientscopeid: ID of the client who owns the roles. :param realm: Realm from which to obtain the clients. :return: The client scope of roles from specified client. """ - client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format( + url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid + ) try: - self._request(client_role_scope_url, method='POST', data=json.dumps(payload)) + self._request(client_role_scope_url, method="POST", data=json.dumps(payload)) except Exception as e: - self.fail_request(e, msg=f'Could not update roles scope for client {clientid} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not update roles scope for client {clientid} in realm {realm}: {e}") return self.get_client_role_scope_from_client(clientid, clientscopeid, realm) def delete_client_role_scope_from_client(self, payload, clientid, clientscopeid, realm="master"): - """ Delete the roles contains in the payload from the client's scope on the Keycloak server. + """Delete the roles contains in the payload from the client's scope on the Keycloak server. :param payload: List of roles to be deleted. :param clientid: ID of the client to delete roles from scope. :param clientscopeid: ID of the client who owns the roles. :param realm: Realm from which to obtain the clients. :return: The client scope of roles from specified client. """ - client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format( + url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid + ) try: - self._request(client_role_scope_url, method='DELETE', data=json.dumps(payload)) + self._request(client_role_scope_url, method="DELETE", data=json.dumps(payload)) except Exception as e: - self.fail_request(e, msg=f'Could not delete roles scope for client {clientid} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not delete roles scope for client {clientid} in realm {realm}: {e}") return self.get_client_role_scope_from_client(clientid, clientscopeid, realm) def get_client_role_scope_from_realm(self, clientid, realm="master"): - """ Fetch the realm roles from the client's scope on the Keycloak server. + """Fetch the realm roles from the client's scope on the Keycloak server. 
:param clientid: ID of the client from which to obtain the associated realm roles. :param realm: Realm from which to obtain the clients. :return: The client realm roles scope. """ client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) try: - return self._request_and_deserialize(client_role_scope_url, method='GET') + return self._request_and_deserialize(client_role_scope_url, method="GET") except Exception as e: - self.fail_request(e, msg=f'Could not fetch roles scope for client {clientid} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not fetch roles scope for client {clientid} in realm {realm}: {e}") def update_client_role_scope_from_realm(self, payload, clientid, realm="master"): - """ Update and fetch the realm roles from the client's scope on the Keycloak server. + """Update and fetch the realm roles from the client's scope on the Keycloak server. :param payload: List of realm roles to add. :param clientid: ID of the client to update scope. :param realm: Realm from which to obtain the clients. @@ -3064,15 +3149,15 @@ def update_client_role_scope_from_realm(self, payload, clientid, realm="master") """ client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) try: - self._request(client_role_scope_url, method='POST', data=json.dumps(payload)) + self._request(client_role_scope_url, method="POST", data=json.dumps(payload)) except Exception as e: - self.fail_request(e, msg=f'Could not update roles scope for client {clientid} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not update roles scope for client {clientid} in realm {realm}: {e}") return self.get_client_role_scope_from_realm(clientid, realm) def delete_client_role_scope_from_realm(self, payload, clientid, realm="master"): - """ Delete the realm roles contains in the payload from the client's scope on the Keycloak server. + """Delete the realm roles contained in the payload from the client's scope on the Keycloak server. :param payload: List of realm roles to delete. :param clientid: ID of the client to delete roles from scope. :param realm: Realm from which to obtain the clients. @@ -3080,15 +3165,15 @@ def delete_client_role_scope_from_realm(self, payload, clientid, realm="master") """ client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) try: - self._request(client_role_scope_url, method='DELETE', data=json.dumps(payload)) + self._request(client_role_scope_url, method="DELETE", data=json.dumps(payload)) except Exception as e: - self.fail_request(e, msg=f'Could not delete roles scope for client {clientid} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not delete roles scope for client {clientid} in realm {realm}: {e}") return self.get_client_role_scope_from_realm(clientid, realm) def fail_request(self, e, msg, **kwargs): - """ Triggers a module failure. This should be called + """Triggers a module failure. This should be called when an exception occurs during/after a request. Attempts to parse the exception e as an HTTP error and append it to msg. @@ -3106,7 +3191,7 @@ def fail_request(self, e, msg, **kwargs): self.module.fail_json(msg, **kwargs) def fail_open_url(self, e, msg, **kwargs): - """ DEPRECATED: Use fail_request instead. + """DEPRECATED: Use fail_request instead. Triggers a module failure. This should be called when an exception occurs during/after a request.
@@ -3120,7 +3205,9 @@ def fail_open_url(self, e, msg, **kwargs): """ return self.fail_request(e, msg, **kwargs) - def send_execute_actions_email(self, user_id, realm='master', client_id=None, data=None, redirect_uri=None, lifespan=None): + def send_execute_actions_email( + self, user_id, realm="master", client_id=None, data=None, redirect_uri=None, lifespan=None + ): """ Send an email to the user with a link they can click to perform required actions (e.g. reset password). Uses execute-actions-email endpoint with provided required actions (defaults handled by caller). @@ -3138,11 +3225,11 @@ def send_execute_actions_email(self, user_id, realm='master', client_id=None, da params = {} if client_id is not None: - params['client_id'] = client_id + params["client_id"] = client_id if redirect_uri is not None: - params['redirect_uri'] = redirect_uri + params["redirect_uri"] = redirect_uri if lifespan is not None: - params['lifespan'] = lifespan + params["lifespan"] = lifespan if params: execute_action_url = f"{execute_action_url}?{urlencode(params)}" @@ -3152,6 +3239,6 @@ def send_execute_actions_email(self, user_id, realm='master', client_id=None, da # API expects JSON array of action names body = json.dumps(data) - return self._request(execute_action_url, method='PUT', data=body) + return self._request(execute_action_url, method="PUT", data=body) except Exception as e: - self.fail_request(e, msg=f'Could not send execute actions email to user {user_id} in realm {realm}: {e}') + self.fail_request(e, msg=f"Could not send execute actions email to user {user_id} in realm {realm}: {e}") diff --git a/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py b/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py index 2118e8f6e2e..f3dc2b95d2e 100644 --- a/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py +++ b/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py @@ -8,8 +8,7 @@ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import \ - keycloak_argument_spec +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import keycloak_argument_spec def keycloak_clientsecret_module(): @@ -22,9 +21,9 @@ def keycloak_clientsecret_module(): argument_spec = keycloak_argument_spec() meta_args = dict( - realm=dict(default='master'), - id=dict(type='str'), - client_id=dict(type='str', aliases=['clientId']), + realm=dict(default="master"), + id=dict(type="str"), + client_id=dict(type="str", aliases=["clientId"]), ) argument_spec.update(meta_args) @@ -32,14 +31,15 @@ def keycloak_clientsecret_module(): module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - required_one_of=([['id', 'client_id'], - ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - mutually_exclusive=[ - ['token', 'auth_realm'], - ['token', 'auth_username'], - ['token', 'auth_password'] - ]) + required_one_of=( + [ + ["id", "client_id"], + ["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"], + ] + ), + required_together=([["auth_username", "auth_password"]]), + mutually_exclusive=[["token", "auth_realm"], ["token", "auth_username"], ["token", "auth_password"]], + ) return module @@ -54,9 +54,9 @@ def keycloak_clientsecret_module_resolve_params(module, kc): :return: tuple of id, realm """ - realm = 
module.params.get('realm') - id = module.params.get('id') - client_id = module.params.get('client_id') + realm = module.params.get("realm") + id = module.params.get("id") + client_id = module.params.get("client_id") # only lookup the client_id if id isn't provided. # in the case that both are provided, prefer the ID, since it is one @@ -66,10 +66,8 @@ def keycloak_clientsecret_module_resolve_params(module, kc): client = kc.get_client_by_clientid(client_id, realm=realm) if client is None: - module.fail_json( - msg=f'Client does not exist {client_id}' - ) + module.fail_json(msg=f"Client does not exist {client_id}") - id = client['id'] + id = client["id"] return id, realm diff --git a/plugins/module_utils/ilo_redfish_utils.py b/plugins/module_utils/ilo_redfish_utils.py index fd5b7fe64dd..c76477d3e03 100644 --- a/plugins/module_utils/ilo_redfish_utils.py +++ b/plugins/module_utils/ilo_redfish_utils.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -10,38 +9,37 @@ class iLORedfishUtils(RedfishUtils): - def get_ilo_sessions(self): result = {} # listing all users has always been slower than other operations, why? session_list = [] sessions_results = [] # Get these entries, but does not fail if not found - properties = ['Description', 'Id', 'Name', 'UserName'] + properties = ["Description", "Id", "Name", "UserName"] # Changed self.sessions_uri to Hardcoded string. response = self.get_request(f"{self.root_uri}{self.service_root}SessionService/Sessions/") - if not response['ret']: + if not response["ret"]: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] current_session = None - if 'Oem' in data: + if "Oem" in data: if data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]: current_session = data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"] - for sessions in data['Members']: + for sessions in data["Members"]: # session_list[] are URIs - session_list.append(sessions['@odata.id']) + session_list.append(sessions["@odata.id"]) # for each session, get details for uri in session_list: session = {} if uri != current_session: response = self.get_request(self.root_uri + uri) - if not response['ret']: + if not response["ret"]: return response - data = response['data'] + data = response["data"] for property in properties: if property in data: session[property] = data[property] @@ -52,41 +50,37 @@ def get_ilo_sessions(self): def set_ntp_server(self, mgr_attributes): result = {} - setkey = mgr_attributes['mgr_attr_name'] + setkey = mgr_attributes["mgr_attr_name"] nic_info = self.get_manager_ethernet_uri() ethuri = nic_info["nic_addr"] response = self.get_request(self.root_uri + ethuri) - if not response['ret']: + if not response["ret"]: return response - result['ret'] = True - data = response['data'] - payload = {"DHCPv4": { - "UseNTPServers": "" - }} + result["ret"] = True + data = response["data"] + payload = {"DHCPv4": {"UseNTPServers": ""}} if data["DHCPv4"]["UseNTPServers"]: payload["DHCPv4"]["UseNTPServers"] = False res_dhv4 = self.patch_request(self.root_uri + ethuri, payload) - if not res_dhv4['ret']: + if not res_dhv4["ret"]: return res_dhv4 - payload = {"DHCPv6": { - "UseNTPServers": "" - }} + payload = {"DHCPv6": {"UseNTPServers": ""}} if data["DHCPv6"]["UseNTPServers"]: payload["DHCPv6"]["UseNTPServers"] = False res_dhv6 = 
self.patch_request(self.root_uri + ethuri, payload) - if not res_dhv6['ret']: + if not res_dhv6["ret"]: return res_dhv6 datetime_uri = f"{self.manager_uri}DateTime" - listofips = mgr_attributes['mgr_attr_value'].split(" ") + listofips = mgr_attributes["mgr_attr_value"].split(" ") if len(listofips) > 2: - return {'ret': False, 'changed': False, 'msg': "More than 2 NTP Servers mentioned"} + return {"ret": False, "changed": False, "msg": "More than 2 NTP Servers mentioned"} ntp_list = [] for ips in listofips: @@ -98,46 +92,46 @@ def set_ntp_server(self, mgr_attributes): payload = {setkey: ntp_list} response1 = self.patch_request(self.root_uri + datetime_uri, payload) - if not response1['ret']: + if not response1["ret"]: return response1 - return {'ret': True, 'changed': True, 'msg': f"Modified {mgr_attributes['mgr_attr_name']}"} + return {"ret": True, "changed": True, "msg": f"Modified {mgr_attributes['mgr_attr_name']}"} def set_time_zone(self, attr): - key = attr['mgr_attr_name'] + key = attr["mgr_attr_name"] uri = f"{self.manager_uri}DateTime/" response = self.get_request(self.root_uri + uri) - if not response['ret']: + if not response["ret"]: return response data = response["data"] if key not in data: - return {'ret': False, 'changed': False, 'msg': f"Key {key} not found"} + return {"ret": False, "changed": False, "msg": f"Key {key} not found"} timezones = data["TimeZoneList"] index = "" for tz in timezones: - if attr['mgr_attr_value'] in tz["Name"]: + if attr["mgr_attr_value"] in tz["Name"]: index = tz["Index"] break payload = {key: {"Index": index}} response = self.patch_request(self.root_uri + uri, payload) - if not response['ret']: + if not response["ret"]: return response - return {'ret': True, 'changed': True, 'msg': f"Modified {attr['mgr_attr_name']}"} + return {"ret": True, "changed": True, "msg": f"Modified {attr['mgr_attr_name']}"} def set_dns_server(self, attr): - key = attr['mgr_attr_name'] + key = attr["mgr_attr_name"] nic_info = self.get_manager_ethernet_uri() uri = nic_info["nic_addr"] - listofips = attr['mgr_attr_value'].split(" ") + listofips = attr["mgr_attr_value"].split(" ") if len(listofips) > 3: - return {'ret': False, 'changed': False, 'msg': "More than 3 DNS Servers mentioned"} + return {"ret": False, "changed": False, "msg": "More than 3 DNS Servers mentioned"} dns_list = [] for ips in listofips: @@ -146,87 +140,63 @@ def set_dns_server(self, attr): while len(dns_list) < 3: dns_list.append("0.0.0.0") - payload = { - "Oem": { - "Hpe": { - "IPv4": { - key: dns_list - } - } - } - } + payload = {"Oem": {"Hpe": {"IPv4": {key: dns_list}}}} response = self.patch_request(self.root_uri + uri, payload) - if not response['ret']: + if not response["ret"]: return response - return {'ret': True, 'changed': True, 'msg': f"Modified {attr['mgr_attr_name']}"} + return {"ret": True, "changed": True, "msg": f"Modified {attr['mgr_attr_name']}"} def set_domain_name(self, attr): - key = attr['mgr_attr_name'] + key = attr["mgr_attr_name"] nic_info = self.get_manager_ethernet_uri() ethuri = nic_info["nic_addr"] response = self.get_request(self.root_uri + ethuri) - if not response['ret']: + if not response["ret"]: return response - data = response['data'] + data = response["data"] - payload = {"DHCPv4": { - "UseDomainName": "" - }} + payload = {"DHCPv4": {"UseDomainName": ""}} if data["DHCPv4"]["UseDomainName"]: payload["DHCPv4"]["UseDomainName"] = False res_dhv4 = self.patch_request(self.root_uri + ethuri, payload) - if not res_dhv4['ret']: + if not res_dhv4["ret"]: return res_dhv4 - 
payload = {"DHCPv6": { - "UseDomainName": "" - }} + payload = {"DHCPv6": {"UseDomainName": ""}} if data["DHCPv6"]["UseDomainName"]: payload["DHCPv6"]["UseDomainName"] = False res_dhv6 = self.patch_request(self.root_uri + ethuri, payload) - if not res_dhv6['ret']: + if not res_dhv6["ret"]: return res_dhv6 - domain_name = attr['mgr_attr_value'] + domain_name = attr["mgr_attr_value"] - payload = {"Oem": { - "Hpe": { - key: domain_name - } - }} + payload = {"Oem": {"Hpe": {key: domain_name}}} response = self.patch_request(self.root_uri + ethuri, payload) - if not response['ret']: + if not response["ret"]: return response - return {'ret': True, 'changed': True, 'msg': f"Modified {attr['mgr_attr_name']}"} + return {"ret": True, "changed": True, "msg": f"Modified {attr['mgr_attr_name']}"} def set_wins_registration(self, mgrattr): - Key = mgrattr['mgr_attr_name'] + Key = mgrattr["mgr_attr_name"] nic_info = self.get_manager_ethernet_uri() ethuri = nic_info["nic_addr"] - payload = { - "Oem": { - "Hpe": { - "IPv4": { - Key: False - } - } - } - } + payload = {"Oem": {"Hpe": {"IPv4": {Key: False}}}} response = self.patch_request(self.root_uri + ethuri, payload) - if not response['ret']: + if not response["ret"]: return response - return {'ret': True, 'changed': True, 'msg': f"Modified {mgrattr['mgr_attr_name']}"} + return {"ret": True, "changed": True, "msg": f"Modified {mgrattr['mgr_attr_name']}"} def get_server_poststate(self): # Get server details @@ -236,15 +206,9 @@ def get_server_poststate(self): server_data = response["data"] if "Hpe" in server_data["Oem"]: - return { - "ret": True, - "server_poststate": server_data["Oem"]["Hpe"]["PostState"] - } + return {"ret": True, "server_poststate": server_data["Oem"]["Hpe"]["PostState"]} else: - return { - "ret": True, - "server_poststate": server_data["Oem"]["Hp"]["PostState"] - } + return {"ret": True, "server_poststate": server_data["Oem"]["Hp"]["PostState"]} def wait_for_ilo_reboot_completion(self, polling_interval=60, max_polling_time=1800): # This method checks if OOB controller reboot is completed @@ -270,19 +234,11 @@ def wait_for_ilo_reboot_completion(self, polling_interval=60, max_polling_time=1 break pcount = pcount + 1 if state["server_poststate"] in ["PowerOff", "Off"]: - return { - "ret": False, - "changed": False, - "msg": "Server is powered OFF" - } + return {"ret": False, "changed": False, "msg": "Server is powered OFF"} # When server is not rebooting if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]: - return { - "ret": True, - "changed": False, - "msg": "Server is not rebooting" - } + return {"ret": True, "changed": False, "msg": "Server is not rebooting"} while state["server_poststate"] not in ["InPostDiscoveryComplete", "FinishedPost"] and count > times: state = self.get_server_poststate() @@ -290,16 +246,8 @@ def wait_for_ilo_reboot_completion(self, polling_interval=60, max_polling_time=1 return state if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]: - return { - "ret": True, - "changed": True, - "msg": "Server reboot is completed" - } + return {"ret": True, "changed": True, "msg": "Server reboot is completed"} time.sleep(polling_interval) times = times + 1 - return { - "ret": False, - "changed": False, - "msg": f"Server Reboot has failed, server state: {state} " - } + return {"ret": False, "changed": False, "msg": f"Server Reboot has failed, server state: {state} "} diff --git a/plugins/module_utils/influxdb.py b/plugins/module_utils/influxdb.py index 9eed90cfda7..6cd864782da 
100644 --- a/plugins/module_utils/influxdb.py +++ b/plugins/module_utils/influxdb.py @@ -1,4 +1,3 @@ - # Copyright (c) 2017, Ansible Project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause @@ -14,6 +13,7 @@ REQUESTS_IMP_ERR = None try: import requests.exceptions # noqa: F401, pylint: disable=unused-import + HAS_REQUESTS = True except ImportError: REQUESTS_IMP_ERR = traceback.format_exc() @@ -24,46 +24,47 @@ from influxdb import InfluxDBClient from influxdb import __version__ as influxdb_version from influxdb import exceptions # noqa: F401, pylint: disable=unused-import + HAS_INFLUXDB = True except ImportError: INFLUXDB_IMP_ERR = traceback.format_exc() HAS_INFLUXDB = False -class InfluxDb(): +class InfluxDb: def __init__(self, module): self.module = module self.params = self.module.params self.check_lib() - self.hostname = self.params['hostname'] - self.port = self.params['port'] - self.path = self.params['path'] - self.username = self.params['username'] - self.password = self.params['password'] - self.database_name = self.params.get('database_name') + self.hostname = self.params["hostname"] + self.port = self.params["port"] + self.path = self.params["path"] + self.username = self.params["username"] + self.password = self.params["password"] + self.database_name = self.params.get("database_name") def check_lib(self): if not HAS_REQUESTS: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + self.module.fail_json(msg=missing_required_lib("requests"), exception=REQUESTS_IMP_ERR) if not HAS_INFLUXDB: - self.module.fail_json(msg=missing_required_lib('influxdb'), exception=INFLUXDB_IMP_ERR) + self.module.fail_json(msg=missing_required_lib("influxdb"), exception=INFLUXDB_IMP_ERR) @staticmethod def influxdb_argument_spec(): return dict( - hostname=dict(type='str', default='localhost'), - port=dict(type='int', default=8086), - path=dict(type='str', default=''), - username=dict(type='str', default='root', aliases=['login_username']), - password=dict(type='str', default='root', no_log=True, aliases=['login_password']), - ssl=dict(type='bool', default=False), - validate_certs=dict(type='bool', default=True), - timeout=dict(type='int'), - retries=dict(type='int', default=3), - proxies=dict(type='dict', default={}), - use_udp=dict(type='bool', default=False), - udp_port=dict(type='int', default=4444), + hostname=dict(type="str", default="localhost"), + port=dict(type="int", default=8086), + path=dict(type="str", default=""), + username=dict(type="str", default="root", aliases=["login_username"]), + password=dict(type="str", default="root", no_log=True, aliases=["login_password"]), + ssl=dict(type="bool", default=False), + validate_certs=dict(type="bool", default=True), + timeout=dict(type="int"), + retries=dict(type="int", default=3), + proxies=dict(type="dict", default={}), + use_udp=dict(type="bool", default=False), + udp_port=dict(type="int", default=4444), ) def connect_to_influxdb(self): @@ -73,19 +74,19 @@ def connect_to_influxdb(self): username=self.username, password=self.password, database=self.database_name, - ssl=self.params['ssl'], - verify_ssl=self.params['validate_certs'], - timeout=self.params['timeout'], - use_udp=self.params['use_udp'], - udp_port=self.params['udp_port'], - proxies=self.params['proxies'], + ssl=self.params["ssl"], + verify_ssl=self.params["validate_certs"], + timeout=self.params["timeout"], + use_udp=self.params["use_udp"], + 
udp_port=self.params["udp_port"], + proxies=self.params["proxies"], ) influxdb_api_version = LooseVersion(influxdb_version) - if influxdb_api_version >= LooseVersion('4.1.0'): + if influxdb_api_version >= LooseVersion("4.1.0"): # retries option is added in version 4.1.0 - args.update(retries=self.params['retries']) + args.update(retries=self.params["retries"]) - if influxdb_api_version >= LooseVersion('5.1.0'): + if influxdb_api_version >= LooseVersion("5.1.0"): # path argument is added in version 5.1.0 args.update(path=self.path) diff --git a/plugins/module_utils/ipa.py b/plugins/module_utils/ipa.py index 6141680c745..7a2fa6b2648 100644 --- a/plugins/module_utils/ipa.py +++ b/plugins/module_utils/ipa.py @@ -24,10 +24,10 @@ def _env_then_dns_fallback(*args, **kwargs): - ''' Load value from environment or DNS in that order''' + """Load value from environment or DNS in that order""" try: result = env_fallback(*args, **kwargs) - if result == '': + if result == "": raise AnsibleFallbackNotFound return result except AnsibleFallbackNotFound: @@ -35,7 +35,7 @@ def _env_then_dns_fallback(*args, **kwargs): # The ipa-ca entry is a standard entry that IPA will have set for # the CA. try: - return socket.gethostbyaddr(socket.gethostbyname('ipa-ca'))[0] + return socket.gethostbyaddr(socket.gethostbyname("ipa-ca"))[0] except Exception: raise AnsibleFallbackNotFound @@ -47,61 +47,67 @@ def __init__(self, module, host, port, protocol): self.protocol = protocol self.module = module self.headers = None - self.timeout = module.params.get('ipa_timeout') + self.timeout = module.params.get("ipa_timeout") self.use_gssapi = False def get_base_url(self): - return f'{self.protocol}://{self.host}/ipa' + return f"{self.protocol}://{self.host}/ipa" def get_json_url(self): - return f'{self.get_base_url()}/session/json' + return f"{self.get_base_url()}/session/json" def login(self, username, password): - if 'KRB5CCNAME' in os.environ and HAS_GSSAPI: + if "KRB5CCNAME" in os.environ and HAS_GSSAPI: self.use_gssapi = True - elif 'KRB5_CLIENT_KTNAME' in os.environ and HAS_GSSAPI: + elif "KRB5_CLIENT_KTNAME" in os.environ and HAS_GSSAPI: ccache = f"MEMORY:{uuid.uuid4()!s}" - os.environ['KRB5CCNAME'] = ccache + os.environ["KRB5CCNAME"] = ccache self.use_gssapi = True else: if not password: - if 'KRB5CCNAME' in os.environ or 'KRB5_CLIENT_KTNAME' in os.environ: + if "KRB5CCNAME" in os.environ or "KRB5_CLIENT_KTNAME" in os.environ: self.module.warn("In order to use GSSAPI, you need to install 'urllib_gssapi'") - self._fail('login', 'Password is required if not using ' - 'GSSAPI. To use GSSAPI, please set the ' - 'KRB5_CLIENT_KTNAME or KRB5CCNAME (or both) ' - ' environment variables.') - url = f'{self.get_base_url()}/session/login_password' + self._fail( + "login", + "Password is required if not using " + "GSSAPI. 
To use GSSAPI, please set the " + "KRB5_CLIENT_KTNAME or KRB5CCNAME (or both) " + " environment variables.", + ) + url = f"{self.get_base_url()}/session/login_password" data = f"user={quote(username, safe='')}&password={quote(password, safe='')}" - headers = {'referer': self.get_base_url(), - 'Content-Type': 'application/x-www-form-urlencoded', - 'Accept': 'text/plain'} + headers = { + "referer": self.get_base_url(), + "Content-Type": "application/x-www-form-urlencoded", + "Accept": "text/plain", + } try: - resp, info = fetch_url(module=self.module, url=url, data=to_bytes(data), headers=headers, timeout=self.timeout) - status_code = info['status'] + resp, info = fetch_url( + module=self.module, url=url, data=to_bytes(data), headers=headers, timeout=self.timeout + ) + status_code = info["status"] if status_code not in [200, 201, 204]: - self._fail('login', info['msg']) + self._fail("login", info["msg"]) - self.headers = {'Cookie': info.get('set-cookie')} + self.headers = {"Cookie": info.get("set-cookie")} except Exception as e: - self._fail('login', to_native(e)) + self._fail("login", to_native(e)) if not self.headers: self.headers = dict() - self.headers.update({ - 'referer': self.get_base_url(), - 'Content-Type': 'application/json', - 'Accept': 'application/json'}) + self.headers.update( + {"referer": self.get_base_url(), "Content-Type": "application/json", "Accept": "application/json"} + ) def _fail(self, msg, e): - if 'message' in e: - err_string = e.get('message') + if "message" in e: + err_string = e.get("message") else: err_string = e - self.module.fail_json(msg=f'{msg}: {err_string}') + self.module.fail_json(msg=f"{msg}: {err_string}") def get_ipa_version(self): - response = self.ping()['summary'] - ipa_ver_regex = re.compile(r'IPA server version (\d+\.\d+\.\d+).*') + response = self.ping()["summary"] + ipa_ver_regex = re.compile(r"IPA server version (\d+\.\d+\.\d+).*") version_match = ipa_ver_regex.match(response) ipa_version = None if version_match: @@ -109,41 +115,47 @@ def get_ipa_version(self): return ipa_version def ping(self): - return self._post_json(method='ping', name=None) + return self._post_json(method="ping", name=None) def _post_json(self, method, name, item=None): if item is None: item = {} - url = f'{self.get_base_url()}/session/json' + url = f"{self.get_base_url()}/session/json" data = dict(method=method) # TODO: We should probably handle this a little better. 
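
(Editorial aside, not part of the diff: the `_post_json` hunk that follows is easier to review with the JSON-RPC body shape written out. A minimal sketch, assuming a hypothetical `user_add` call; the method name and example values are invented for illustration and do not come from this change.)

    import json

    def build_ipa_payload(method, name, item):
        # Mirrors the branching in _post_json: the config-style methods take no
        # positional name; every other method passes [name] as params[0].
        data = {"method": method}
        if method in ("ping", "config_show", "otpconfig_show"):
            data["params"] = [[], {}]
        elif method in ("config_mod", "otpconfig_mod"):
            data["params"] = [[], item]
        else:
            data["params"] = [[name], item]
        return json.dumps(data)

    # Hypothetical call:
    print(build_ipa_payload("user_add", "jdoe", {"givenname": "John"}))
    # {"method": "user_add", "params": [["jdoe"], {"givenname": "John"}]}
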
- if method in ('ping', 'config_show', 'otpconfig_show'): - data['params'] = [[], {}] - elif method in ('config_mod', 'otpconfig_mod'): - data['params'] = [[], item] + if method in ("ping", "config_show", "otpconfig_show"): + data["params"] = [[], {}] + elif method in ("config_mod", "otpconfig_mod"): + data["params"] = [[], item] else: - data['params'] = [[name], item] + data["params"] = [[name], item] try: - resp, info = fetch_url(module=self.module, url=url, data=to_bytes(json.dumps(data)), - headers=self.headers, timeout=self.timeout, use_gssapi=self.use_gssapi) - status_code = info['status'] + resp, info = fetch_url( + module=self.module, + url=url, + data=to_bytes(json.dumps(data)), + headers=self.headers, + timeout=self.timeout, + use_gssapi=self.use_gssapi, + ) + status_code = info["status"] if status_code not in [200, 201, 204]: - self._fail(method, info['msg']) + self._fail(method, info["msg"]) except Exception as e: - self._fail(f'post {method}', to_native(e)) + self._fail(f"post {method}", to_native(e)) - charset = resp.headers.get_content_charset('latin-1') + charset = resp.headers.get_content_charset("latin-1") resp = json.loads(to_text(resp.read(), encoding=charset)) - err = resp.get('error') + err = resp.get("error") if err is not None: - self._fail(f'response {method}', err) + self._fail(f"response {method}", err) - if 'result' in resp: - result = resp.get('result') - if 'result' in result: - result = result.get('result') + if "result" in resp: + result = resp.get("result") + if "result" in result: + result = result.get("result") if isinstance(result, list): if len(result) > 0: return result[0] @@ -195,11 +207,11 @@ def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, def ipa_argument_spec(): return dict( - ipa_prot=dict(type='str', default='https', choices=['http', 'https'], fallback=(env_fallback, ['IPA_PROT'])), - ipa_host=dict(type='str', default='ipa.example.com', fallback=(_env_then_dns_fallback, ['IPA_HOST'])), - ipa_port=dict(type='int', default=443, fallback=(env_fallback, ['IPA_PORT'])), - ipa_user=dict(type='str', default='admin', fallback=(env_fallback, ['IPA_USER'])), - ipa_pass=dict(type='str', no_log=True, fallback=(env_fallback, ['IPA_PASS'])), - ipa_timeout=dict(type='int', default=10, fallback=(env_fallback, ['IPA_TIMEOUT'])), - validate_certs=dict(type='bool', default=True), + ipa_prot=dict(type="str", default="https", choices=["http", "https"], fallback=(env_fallback, ["IPA_PROT"])), + ipa_host=dict(type="str", default="ipa.example.com", fallback=(_env_then_dns_fallback, ["IPA_HOST"])), + ipa_port=dict(type="int", default=443, fallback=(env_fallback, ["IPA_PORT"])), + ipa_user=dict(type="str", default="admin", fallback=(env_fallback, ["IPA_USER"])), + ipa_pass=dict(type="str", no_log=True, fallback=(env_fallback, ["IPA_PASS"])), + ipa_timeout=dict(type="int", default=10, fallback=(env_fallback, ["IPA_TIMEOUT"])), + validate_certs=dict(type="bool", default=True), ) diff --git a/plugins/module_utils/jenkins.py b/plugins/module_utils/jenkins.py index 26334f89b8f..9c9f16c969c 100644 --- a/plugins/module_utils/jenkins.py +++ b/plugins/module_utils/jenkins.py @@ -1,4 +1,3 @@ - # Copyright (c) 2022, Alexei Znamensky # # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -12,8 +11,8 @@ def download_updates_file(updates_expiration): - updates_filename = 'jenkins-plugin-cache.json' - updates_dir = os.path.expanduser('~/.ansible/tmp') + updates_filename = 
"jenkins-plugin-cache.json" + updates_dir = os.path.expanduser("~/.ansible/tmp") updates_file = os.path.join(updates_dir, updates_filename) download_updates = True diff --git a/plugins/module_utils/known_hosts.py b/plugins/module_utils/known_hosts.py index 15c630391ee..6f400b0166c 100644 --- a/plugins/module_utils/known_hosts.py +++ b/plugins/module_utils/known_hosts.py @@ -26,8 +26,7 @@ def is_ssh_url(url): - - """ check if url is ssh """ + """check if url is ssh""" if "@" in url and "://" not in url: return True @@ -38,12 +37,11 @@ def is_ssh_url(url): def get_fqdn_and_port(repo_url): - - """ chop the hostname and port out of a url """ + """chop the hostname and port out of a url""" fqdn = None port = None - ipv6_re = re.compile(r'(\[[^]]*\])(?::([0-9]+))?') + ipv6_re = re.compile(r"(\[[^]]*\])(?::([0-9]+))?") if "@" in repo_url and "://" not in repo_url: # most likely an user@host:path or user@host/path type URL repo_url = repo_url.split("@", 1)[1] @@ -76,9 +74,9 @@ def check_hostkey(module, fqdn): # this is a variant of code found in connection_plugins/paramiko.py and we should modify # the paramiko code to import and use this. -def not_in_host_file(self, host): - if 'USER' in os.environ: +def not_in_host_file(self, host): + if "USER" in os.environ: user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts") else: user_host_file = "~/.ssh/known_hosts" @@ -111,10 +109,10 @@ def not_in_host_file(self, host): if tokens[0].find(HASHED_KEY_MAGIC) == 0: # this is a hashed known host entry try: - (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2) - hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1) + (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC) :].split("|", 2) + hash = hmac.new(kn_salt.decode("base64"), digestmod=sha1) hash.update(host) - if hash.digest() == kn_host.decode('base64'): + if hash.digest() == kn_host.decode("base64"): return False except Exception: # invalid hashed host key, skip it @@ -128,12 +126,11 @@ def not_in_host_file(self, host): def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False): + """use ssh-keyscan to add the hostkey""" - """ use ssh-keyscan to add the hostkey """ - - keyscan_cmd = module.get_bin_path('ssh-keyscan', True) + keyscan_cmd = module.get_bin_path("ssh-keyscan", True) - if 'USER' in os.environ: + if "USER" in os.environ: user_ssh_dir = os.path.expandvars("~${USER}/.ssh/") user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts") else: @@ -144,7 +141,7 @@ def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False): if not os.path.exists(user_ssh_dir): if create_dir: try: - os.makedirs(user_ssh_dir, int('700', 8)) + os.makedirs(user_ssh_dir, int("700", 8)) except Exception: module.fail_json(msg=f"failed to create host key directory: {user_ssh_dir}") else: @@ -160,14 +157,14 @@ def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False): rc, out, err = module.run_command(this_cmd) # ssh-keyscan gives a 0 exit code and prints nothing on timeout if rc != 0 or not out: - msg = 'failed to retrieve hostkey' + msg = "failed to retrieve hostkey" if not out: msg += f'. "{this_cmd}" returned no matches.' else: msg += f' using command "{this_cmd}". 
[stdout]: {out}' if err: - msg += f' [stderr]: {err}' + msg += f" [stderr]: {err}" module.fail_json(msg=msg) diff --git a/plugins/module_utils/ldap.py b/plugins/module_utils/ldap.py index cca92958d89..c67e645c8a2 100644 --- a/plugins/module_utils/ldap.py +++ b/plugins/module_utils/ldap.py @@ -1,4 +1,3 @@ - # Copyright (c) 2016, Peter Sagerson # Copyright (c) 2016, Jiri Tyr # Copyright (c) 2017-2018 Keller Fuchs (@KellerFuchs) @@ -21,51 +20,53 @@ HAS_LDAP = True SASCL_CLASS = { - 'gssapi': ldap.sasl.gssapi, - 'external': ldap.sasl.external, + "gssapi": ldap.sasl.gssapi, + "external": ldap.sasl.external, } except ImportError: HAS_LDAP = False def gen_specs(**specs): - specs.update({ - 'bind_dn': dict(), - 'bind_pw': dict(default='', no_log=True), - 'ca_path': dict(type='path'), - 'dn': dict(required=True), - 'referrals_chasing': dict(type='str', default='anonymous', choices=['disabled', 'anonymous']), - 'server_uri': dict(default='ldapi:///'), - 'start_tls': dict(default=False, type='bool'), - 'validate_certs': dict(default=True, type='bool'), - 'sasl_class': dict(choices=['external', 'gssapi'], default='external', type='str'), - 'xorder_discovery': dict(choices=['enable', 'auto', 'disable'], default='auto', type='str'), - 'client_cert': dict(default=None, type='path'), - 'client_key': dict(default=None, type='path'), - }) + specs.update( + { + "bind_dn": dict(), + "bind_pw": dict(default="", no_log=True), + "ca_path": dict(type="path"), + "dn": dict(required=True), + "referrals_chasing": dict(type="str", default="anonymous", choices=["disabled", "anonymous"]), + "server_uri": dict(default="ldapi:///"), + "start_tls": dict(default=False, type="bool"), + "validate_certs": dict(default=True, type="bool"), + "sasl_class": dict(choices=["external", "gssapi"], default="external", type="str"), + "xorder_discovery": dict(choices=["enable", "auto", "disable"], default="auto", type="str"), + "client_cert": dict(default=None, type="path"), + "client_key": dict(default=None, type="path"), + } + ) return specs def ldap_required_together(): - return [['client_cert', 'client_key']] + return [["client_cert", "client_key"]] class LdapGeneric: def __init__(self, module): # Shortcuts self.module = module - self.bind_dn = self.module.params['bind_dn'] - self.bind_pw = self.module.params['bind_pw'] - self.ca_path = self.module.params['ca_path'] - self.referrals_chasing = self.module.params['referrals_chasing'] - self.server_uri = self.module.params['server_uri'] - self.start_tls = self.module.params['start_tls'] - self.verify_cert = self.module.params['validate_certs'] - self.sasl_class = self.module.params['sasl_class'] - self.xorder_discovery = self.module.params['xorder_discovery'] - self.client_cert = self.module.params['client_cert'] - self.client_key = self.module.params['client_key'] + self.bind_dn = self.module.params["bind_dn"] + self.bind_pw = self.module.params["bind_pw"] + self.ca_path = self.module.params["ca_path"] + self.referrals_chasing = self.module.params["referrals_chasing"] + self.server_uri = self.module.params["server_uri"] + self.start_tls = self.module.params["start_tls"] + self.verify_cert = self.module.params["validate_certs"] + self.sasl_class = self.module.params["sasl_class"] + self.xorder_discovery = self.module.params["xorder_discovery"] + self.client_cert = self.module.params["client_cert"] + self.client_key = self.module.params["client_key"] # Establish connection self.connection = self._connect_to_ldap() @@ -74,17 +75,13 @@ def __init__(self, module): # Try to find the 
X_ORDERed version of the DN self.dn = self._find_dn() else: - self.dn = self.module.params['dn'] + self.dn = self.module.params["dn"] def fail(self, msg, exn): - self.module.fail_json( - msg=msg, - details=to_native(exn), - exception=traceback.format_exc() - ) + self.module.fail_json(msg=msg, details=to_native(exn), exception=traceback.format_exc()) def _find_dn(self): - dn = self.module.params['dn'] + dn = self.module.params["dn"] explode_dn = ldap.dn.explode_dn(dn) @@ -92,8 +89,7 @@ def _find_dn(self): try: escaped_value = ldap.filter.escape_filter_chars(explode_dn[0]) filterstr = f"({escaped_value})" - dns = self.connection.search_s(','.join(explode_dn[1:]), - ldap.SCOPE_ONELEVEL, filterstr) + dns = self.connection.search_s(",".join(explode_dn[1:]), ldap.SCOPE_ONELEVEL, filterstr) if len(dns) == 1: dn, dummy = dns[0] except Exception: @@ -114,7 +110,7 @@ def _connect_to_ldap(self): connection = ldap.initialize(self.server_uri) - if self.referrals_chasing == 'disabled': + if self.referrals_chasing == "disabled": # Switch off chasing of referrals (https://github.com/ansible-collections/community.general/issues/1067) connection.set_option(ldap.OPT_REFERRALS, 0) @@ -129,7 +125,7 @@ def _connect_to_ldap(self): connection.simple_bind_s(self.bind_dn, self.bind_pw) else: klass = SASCL_CLASS.get(self.sasl_class, ldap.sasl.external) - connection.sasl_interactive_bind_s('', klass()) + connection.sasl_interactive_bind_s("", klass()) except ldap.LDAPError as e: self.fail("Cannot bind to the server.", e) @@ -138,6 +134,6 @@ def _connect_to_ldap(self): def _xorder_dn(self): # match X_ORDERed DNs regex = r".+\{\d+\}.+" - explode_dn = ldap.dn.explode_dn(self.module.params['dn']) + explode_dn = ldap.dn.explode_dn(self.module.params["dn"]) return re.match(regex, explode_dn[0]) is not None diff --git a/plugins/module_utils/linode.py b/plugins/module_utils/linode.py index 3700082bd87..b5279c85c18 100644 --- a/plugins/module_utils/linode.py +++ b/plugins/module_utils/linode.py @@ -16,4 +16,4 @@ def get_user_agent(module): """Retrieve a user-agent to send with LinodeClient requests.""" - return f'Ansible-{module}/{ansible_version}' + return f"Ansible-{module}/{ansible_version}" diff --git a/plugins/module_utils/locale_gen.py b/plugins/module_utils/locale_gen.py index b8a48d320b1..22661ae2619 100644 --- a/plugins/module_utils/locale_gen.py +++ b/plugins/module_utils/locale_gen.py @@ -22,7 +22,7 @@ def locale_gen_runner(module): command="locale-gen", arg_formats=dict( name=cmd_runner_fmt.as_list(), - purge=cmd_runner_fmt.as_fixed('--purge'), + purge=cmd_runner_fmt.as_fixed("--purge"), ), check_rc=True, ) diff --git a/plugins/module_utils/lxd.py b/plugins/module_utils/lxd.py index 364c4f85d24..cc3836b3e4b 100644 --- a/plugins/module_utils/lxd.py +++ b/plugins/module_utils/lxd.py @@ -1,4 +1,3 @@ - # Copyright (c) 2016, Hiroaki Nakamura # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause @@ -23,7 +22,7 @@ class UnixHTTPConnection(HTTPConnection): def __init__(self, path): - HTTPConnection.__init__(self, 'localhost') + HTTPConnection.__init__(self, "localhost") self.path = path def connect(self): @@ -39,7 +38,9 @@ def __init__(self, msg, **kwargs): class LXDClient: - def __init__(self, url, key_file=None, cert_file=None, debug=False, server_cert_file=None, server_check_hostname=True): + def __init__( + self, url, key_file=None, cert_file=None, debug=False, server_cert_file=None, server_check_hostname=True + ): """LXD 
Client. :param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1) @@ -58,7 +59,7 @@ def __init__(self, url, key_file=None, cert_file=None, debug=False, server_cert_ self.url = url self.debug = debug self.logs = [] - if url.startswith('https:'): + if url.startswith("https:"): self.cert_file = cert_file self.key_file = key_file parts = generic_urlparse(urlparse(self.url)) @@ -68,28 +69,28 @@ def __init__(self, url, key_file=None, cert_file=None, debug=False, server_cert_ ctx.load_verify_locations(cafile=server_cert_file) ctx.check_hostname = server_check_hostname ctx.load_cert_chain(cert_file, keyfile=key_file) - self.connection = HTTPSConnection(parts.get('netloc'), context=ctx) - elif url.startswith('unix:'): - unix_socket_path = url[len('unix:'):] + self.connection = HTTPSConnection(parts.get("netloc"), context=ctx) + elif url.startswith("unix:"): + unix_socket_path = url[len("unix:") :] self.connection = UnixHTTPConnection(unix_socket_path) else: - raise LXDClientException('URL scheme must be unix: or https:') + raise LXDClientException("URL scheme must be unix: or https:") def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None, wait_for_container=None): resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout) - if resp_json['type'] == 'async': + if resp_json["type"] == "async": url = f"{resp_json['operation']}/wait" - resp_json = self._send_request('GET', url) + resp_json = self._send_request("GET", url) if wait_for_container: - while resp_json['metadata']['status'] == 'Running': - resp_json = self._send_request('GET', url) - if resp_json['metadata']['status'] != 'Success': + while resp_json["metadata"]["status"] == "Running": + resp_json = self._send_request("GET", url) + if resp_json["metadata"]["status"] != "Success": self._raise_err_from_json(resp_json) return resp_json def authenticate(self, trust_password): - body_json = {'type': 'client', 'password': trust_password} - return self._send_request('POST', '/1.0/certificates', body_json=body_json) + body_json = {"type": "client", "password": trust_password} + return self._send_request("POST", "/1.0/certificates", body_json=body_json) def _send_request(self, method, url, body_json=None, ok_error_codes=None, timeout=None): try: @@ -97,44 +98,46 @@ def _send_request(self, method, url, body_json=None, ok_error_codes=None, timeou self.connection.request(method, url, body=body) resp = self.connection.getresponse() resp_data = resp.read() - resp_data = to_text(resp_data, errors='surrogate_or_strict') + resp_data = to_text(resp_data, errors="surrogate_or_strict") resp_json = json.loads(resp_data) - self.logs.append({ - 'type': 'sent request', - 'request': {'method': method, 'url': url, 'json': body_json, 'timeout': timeout}, - 'response': {'json': resp_json} - }) - resp_type = resp_json.get('type', None) - if resp_type == 'error': - if ok_error_codes is not None and resp_json['error_code'] in ok_error_codes: + self.logs.append( + { + "type": "sent request", + "request": {"method": method, "url": url, "json": body_json, "timeout": timeout}, + "response": {"json": resp_json}, + } + ) + resp_type = resp_json.get("type", None) + if resp_type == "error": + if ok_error_codes is not None and resp_json["error_code"] in ok_error_codes: return resp_json - if resp_json['error'] == "Certificate already in trust store": + if resp_json["error"] == "Certificate already in trust store": return resp_json self._raise_err_from_json(resp_json) 
return resp_json except socket.error as e: - raise LXDClientException('cannot connect to the LXD server', err=e) + raise LXDClientException("cannot connect to the LXD server", err=e) def _raise_err_from_json(self, resp_json): err_params = {} if self.debug: - err_params['logs'] = self.logs + err_params["logs"] = self.logs raise LXDClientException(self._get_err_from_resp_json(resp_json), **err_params) @staticmethod def _get_err_from_resp_json(resp_json): err = None - metadata = resp_json.get('metadata', None) + metadata = resp_json.get("metadata", None) if metadata is not None: - err = metadata.get('err', None) + err = metadata.get("err", None) if err is None: - err = resp_json.get('error', None) + err = resp_json.get("error", None) return err def default_key_file(): - return os.path.expanduser('~/.config/lxc/client.key') + return os.path.expanduser("~/.config/lxc/client.key") def default_cert_file(): - return os.path.expanduser('~/.config/lxc/client.crt') + return os.path.expanduser("~/.config/lxc/client.crt") diff --git a/plugins/module_utils/manageiq.py b/plugins/module_utils/manageiq.py index 56b031aad76..707dcf06012 100644 --- a/plugins/module_utils/manageiq.py +++ b/plugins/module_utils/manageiq.py @@ -21,6 +21,7 @@ CLIENT_IMP_ERR = None try: from manageiq_client.api import ManageIQClient + HAS_CLIENT = True except ImportError: CLIENT_IMP_ERR = traceback.format_exc() @@ -29,54 +30,61 @@ def manageiq_argument_spec(): options = dict( - url=dict(default=os.environ.get('MIQ_URL', None)), - username=dict(default=os.environ.get('MIQ_USERNAME', None)), - password=dict(default=os.environ.get('MIQ_PASSWORD', None), no_log=True), - token=dict(default=os.environ.get('MIQ_TOKEN', None), no_log=True), - validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']), - ca_cert=dict(required=False, default=None, aliases=['ca_bundle_path']), + url=dict(default=os.environ.get("MIQ_URL", None)), + username=dict(default=os.environ.get("MIQ_USERNAME", None)), + password=dict(default=os.environ.get("MIQ_PASSWORD", None), no_log=True), + token=dict(default=os.environ.get("MIQ_TOKEN", None), no_log=True), + validate_certs=dict(default=True, type="bool", aliases=["verify_ssl"]), + ca_cert=dict(required=False, default=None, aliases=["ca_bundle_path"]), ) return dict( - manageiq_connection=dict(type='dict', - apply_defaults=True, - options=options), + manageiq_connection=dict(type="dict", apply_defaults=True, options=options), ) def check_client(module): if not HAS_CLIENT: - module.fail_json(msg=missing_required_lib('manageiq-client'), exception=CLIENT_IMP_ERR) + module.fail_json(msg=missing_required_lib("manageiq-client"), exception=CLIENT_IMP_ERR) def validate_connection_params(module): - params = module.params['manageiq_connection'] + params = module.params["manageiq_connection"] error_str = "missing required argument: manageiq_connection[{}]" - url = params['url'] - token = params['token'] - username = params['username'] - password = params['password'] + url = params["url"] + token = params["token"] + username = params["username"] + password = params["password"] if (url and username and password) or (url and token): return params - for arg in ['url', 'username', 'password']: - if params[arg] in (None, ''): + for arg in ["url", "username", "password"]: + if params[arg] in (None, ""): module.fail_json(msg=error_str.format(arg)) def manageiq_entities(): return { - 'provider': 'providers', 'host': 'hosts', 'vm': 'vms', - 'category': 'categories', 'cluster': 'clusters', 'data store': 'data_stores', - 
'group': 'groups', 'resource pool': 'resource_pools', 'service': 'services', - 'service template': 'service_templates', 'template': 'templates', - 'tenant': 'tenants', 'user': 'users', 'blueprint': 'blueprints' + "provider": "providers", + "host": "hosts", + "vm": "vms", + "category": "categories", + "cluster": "clusters", + "data store": "data_stores", + "group": "groups", + "resource pool": "resource_pools", + "service": "services", + "service template": "service_templates", + "template": "templates", + "tenant": "tenants", + "user": "users", + "blueprint": "blueprints", } class ManageIQ: """ - class encapsulating ManageIQ API client. + class encapsulating ManageIQ API client. """ def __init__(self, module): @@ -85,24 +93,26 @@ def __init__(self, module): params = validate_connection_params(module) - url = params['url'] - username = params['username'] - password = params['password'] - token = params['token'] - verify_ssl = params['validate_certs'] - ca_bundle_path = params['ca_cert'] + url = params["url"] + username = params["username"] + password = params["password"] + token = params["token"] + verify_ssl = params["validate_certs"] + ca_bundle_path = params["ca_cert"] self._module = module self._api_url = f"{url}/api" self._auth = dict(user=username, password=password, token=token) try: - self._client = ManageIQClient(self._api_url, self._auth, verify_ssl=verify_ssl, ca_bundle_path=ca_bundle_path) + self._client = ManageIQClient( + self._api_url, self._auth, verify_ssl=verify_ssl, ca_bundle_path=ca_bundle_path + ) except Exception as e: self.module.fail_json(msg=f"failed to open connection ({url}): {e}") @property def module(self): - """ Ansible module module + """Ansible module module Returns: the ansible module @@ -111,7 +121,7 @@ def module(self): @property def api_url(self): - """ Base ManageIQ API + """Base ManageIQ API Returns: the base ManageIQ API @@ -120,7 +130,7 @@ def api_url(self): @property def client(self): - """ ManageIQ client + """ManageIQ client Returns: the ManageIQ client @@ -128,7 +138,7 @@ def client(self): return self._client def find_collection_resource_by(self, collection_name, **params): - """ Searches the collection resource by the collection name and the param passed. + """Searches the collection resource by the collection name and the param passed. Returns: the resource as an object if it exists in manageiq, None otherwise. @@ -142,7 +152,7 @@ def find_collection_resource_by(self, collection_name, **params): return vars(entity) def find_collection_resource_or_fail(self, collection_name, **params): - """ Searches the collection resource by the collection name and the param passed. + """Searches the collection resource by the collection name and the param passed. Returns: the resource as an object if it exists in manageiq, Fail otherwise. @@ -159,12 +169,12 @@ def policies(self, resource_id, resource_type, resource_name): # query resource id, fail if resource does not exist if resource_id is None: - resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id'] + resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)["id"] return ManageIQPolicies(manageiq, resource_type, resource_id) def query_resource_id(self, resource_type, resource_name): - """ Query the resource name in ManageIQ. + """Query the resource name in ManageIQ. Returns: the resource ID if it exists in ManageIQ, Fail otherwise. 
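
(Editorial aside, not part of the diff: a hedged usage sketch of the ManageIQ wrapper reformatted above, assuming an already-constructed AnsibleModule; the VM name is invented.)

    # Hypothetical consumer code for the helper class in this file.
    manageiq = ManageIQ(module)  # opens the REST connection, fail_json() on error

    # find_collection_resource_by() returns vars(entity), or None if not found
    vm = manageiq.find_collection_resource_by("vms", name="database-01")
    if vm is None:
        module.fail_json(msg="VM database-01 does not exist in ManageIQ")

    # the *_or_fail variant folds the None check into the helper
    vm_id = manageiq.find_collection_resource_or_fail("vms", name="database-01")["id"]
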
@@ -179,7 +189,7 @@ def query_resource_id(self, resource_type, resource_name): class ManageIQPolicies: """ - Object to execute policies management operations of manageiq resources. + Object to execute policies management operations of manageiq resources. """ def __init__(self, manageiq, resource_type, resource_id): @@ -191,29 +201,27 @@ def __init__(self, manageiq, resource_type, resource_id): self.resource_type = resource_type self.resource_id = resource_id - self.resource_url = f'{self.api_url}/{resource_type}/{resource_id}' + self.resource_url = f"{self.api_url}/{resource_type}/{resource_id}" def query_profile_href(self, profile): - """ Add or Update the policy_profile href field + """Add or Update the policy_profile href field Example: {name: STR, ...} => {name: STR, href: STR} """ - resource = self.manageiq.find_collection_resource_or_fail( - "policy_profiles", **profile) - return dict(name=profile['name'], href=resource['href']) + resource = self.manageiq.find_collection_resource_or_fail("policy_profiles", **profile) + return dict(name=profile["name"], href=resource["href"]) def query_resource_profiles(self): - """ Returns a set of the profile objects objects assigned to the resource - """ - url = '{resource_url}/policy_profiles?expand=resources' + """Returns a set of the profile objects objects assigned to the resource""" + url = "{resource_url}/policy_profiles?expand=resources" try: response = self.client.get(url.format(resource_url=self.resource_url)) except Exception as e: msg = f"Failed to query {self.resource_type} policies: {e}" self.module.fail_json(msg=msg) - resources = response.get('resources', []) + resources = response.get("resources", []) # clean the returned rest api profile object to look like: # {profile_name: STR, profile_description: STR, policies: ARR} @@ -222,16 +230,15 @@ def query_resource_profiles(self): return profiles def query_profile_policies(self, profile_id): - """ Returns a set of the policy objects assigned to the resource - """ - url = '{api_url}/policy_profiles/{profile_id}?expand=policies' + """Returns a set of the policy objects assigned to the resource""" + url = "{api_url}/policy_profiles/{profile_id}?expand=policies" try: response = self.client.get(url.format(api_url=self.api_url, profile_id=profile_id)) except Exception as e: msg = f"Failed to query {self.resource_type} policies: {e}" self.module.fail_json(msg=msg) - resources = response.get('policies', []) + resources = response.get("policies", []) # clean the returned rest api policy object to look like: # {name: STR, description: STR, active: BOOL} @@ -240,42 +247,36 @@ def query_profile_policies(self, profile_id): return policies def clean_policy_object(self, policy): - """ Clean a policy object to have human readable form of: + """Clean a policy object to have human readable form of: { name: STR, description: STR, active: BOOL } """ - name = policy.get('name') - description = policy.get('description') - active = policy.get('active') + name = policy.get("name") + description = policy.get("description") + active = policy.get("active") - return dict( - name=name, - description=description, - active=active) + return dict(name=name, description=description, active=active) def clean_profile_object(self, profile): - """ Clean a profile object to have human readable form of: + """Clean a profile object to have human readable form of: { profile_name: STR, profile_description: STR, policies: ARR } """ - profile_id = profile['id'] - name = profile.get('name') - description = 
profile.get('description') + profile_id = profile["id"] + name = profile.get("name") + description = profile.get("description") policies = self.query_profile_policies(profile_id) - return dict( - profile_name=name, - profile_description=description, - policies=policies) + return dict(profile_name=name, profile_description=description, policies=policies) def profiles_to_update(self, profiles, action): - """ Create a list of policies we need to update in ManageIQ. + """Create a list of policies we need to update in ManageIQ. Returns: Whether or not a change took place and a message describing the @@ -286,12 +287,12 @@ def profiles_to_update(self, profiles, action): # make a list of assigned full profile names strings # e.g. ['openscap profile', ...] - assigned_profiles_set = set(profile['profile_name'] for profile in assigned_profiles) + assigned_profiles_set = set(profile["profile_name"] for profile in assigned_profiles) for profile in profiles: - assigned = profile.get('name') in assigned_profiles_set + assigned = profile.get("name") in assigned_profiles_set - if (action == 'unassign' and assigned) or (action == 'assign' and not assigned): + if (action == "unassign" and assigned) or (action == "assign" and not assigned): # add/update the policy profile href field # {name: STR, ...} => {name: STR, href: STR} profile = self.query_profile_href(profile) @@ -300,17 +301,14 @@ def profiles_to_update(self, profiles, action): return profiles_to_post def assign_or_unassign_profiles(self, profiles, action): - """ Perform assign/unassign action - """ + """Perform assign/unassign action""" # get a list of profiles needed to be changed profiles_to_post = self.profiles_to_update(profiles, action) if not profiles_to_post: - return dict( - changed=False, - msg=f"Profiles {profiles} already {action}ed, nothing to do") + return dict(changed=False, msg=f"Profiles {profiles} already {action}ed, nothing to do") # try to assign or unassign profiles to resource - url = f'{self.resource_url}/policy_profiles' + url = f"{self.resource_url}/policy_profiles" try: response = self.client.post(url, action=action, resources=profiles_to_post) except Exception as e: @@ -318,20 +316,18 @@ def assign_or_unassign_profiles(self, profiles, action): self.module.fail_json(msg=msg) # check all entities in result to be successful - for result in response['results']: - if not result['success']: + for result in response["results"]: + if not result["success"]: msg = f"Failed to {action}: {result['message']}" self.module.fail_json(msg=msg) # successfully changed all needed profiles - return dict( - changed=True, - msg=f"Successfully {action}ed profiles: {profiles}") + return dict(changed=True, msg=f"Successfully {action}ed profiles: {profiles}") class ManageIQTags: """ - Object to execute tags management operations of manageiq resources. + Object to execute tags management operations of manageiq resources. 
""" def __init__(self, manageiq, resource_type, resource_id): @@ -343,15 +339,14 @@ def __init__(self, manageiq, resource_type, resource_id): self.resource_type = resource_type self.resource_id = resource_id - self.resource_url = f'{self.api_url}/{resource_type}/{resource_id}' + self.resource_url = f"{self.api_url}/{resource_type}/{resource_id}" def full_tag_name(self, tag): - """ Returns the full tag name in manageiq - """ + """Returns the full tag name in manageiq""" return f"/managed/{tag['category']}/{tag['name']}" def clean_tag_object(self, tag): - """ Clean a tag object to have human readable form of: + """Clean a tag object to have human readable form of: { full_name: STR, name: STR, @@ -359,26 +354,26 @@ def clean_tag_object(self, tag): category: STR } """ - full_name = tag.get('name') - categorization = tag.get('categorization', {}) + full_name = tag.get("name") + categorization = tag.get("categorization", {}) return dict( full_name=full_name, - name=categorization.get('name'), - display_name=categorization.get('display_name'), - category=categorization.get('category', {}).get('name')) + name=categorization.get("name"), + display_name=categorization.get("display_name"), + category=categorization.get("category", {}).get("name"), + ) def query_resource_tags(self): - """ Returns a set of the tag objects assigned to the resource - """ - url = '{resource_url}/tags?expand=resources&attributes=categorization' + """Returns a set of the tag objects assigned to the resource""" + url = "{resource_url}/tags?expand=resources&attributes=categorization" try: response = self.client.get(url.format(resource_url=self.resource_url)) except Exception as e: msg = f"Failed to query {self.resource_type} tags: {e}" self.module.fail_json(msg=msg) - resources = response.get('resources', []) + resources = response.get("resources", []) # clean the returned rest api tag object to look like: # {full_name: STR, name: STR, display_name: STR, category: STR} @@ -387,7 +382,7 @@ def query_resource_tags(self): return tags def tags_to_update(self, tags, action): - """ Create a list of tags we need to update in ManageIQ. + """Create a list of tags we need to update in ManageIQ. Returns: Whether or not a change took place and a message describing the @@ -398,30 +393,27 @@ def tags_to_update(self, tags, action): # make a list of assigned full tag names strings # e.g. ['/managed/environment/prod', ...] 
- assigned_tags_set = set(tag['full_name'] for tag in assigned_tags) + assigned_tags_set = set(tag["full_name"] for tag in assigned_tags) for tag in tags: assigned = self.full_tag_name(tag) in assigned_tags_set - if assigned and action == 'unassign': + if assigned and action == "unassign": tags_to_post.append(tag) - elif (not assigned) and action == 'assign': + elif (not assigned) and action == "assign": tags_to_post.append(tag) return tags_to_post def assign_or_unassign_tags(self, tags, action): - """ Perform assign/unassign action - """ + """Perform assign/unassign action""" # get a list of tags needed to be changed tags_to_post = self.tags_to_update(tags, action) if not tags_to_post: - return dict( - changed=False, - msg=f"Tags already {action}ed, nothing to do") + return dict(changed=False, msg=f"Tags already {action}ed, nothing to do") # try to assign or unassign tags to resource - url = f'{self.resource_url}/tags' + url = f"{self.resource_url}/tags" try: response = self.client.post(url, action=action, resources=tags) except Exception as e: @@ -429,12 +421,10 @@ def assign_or_unassign_tags(self, tags, action): self.module.fail_json(msg=msg) # check all entities in result to be successful - for result in response['results']: - if not result['success']: + for result in response["results"]: + if not result["success"]: msg = f"Failed to {action}: {result['message']}" self.module.fail_json(msg=msg) # successfully changed all needed tags - return dict( - changed=True, - msg=f"Successfully {action}ed tags") + return dict(changed=True, msg=f"Successfully {action}ed tags") diff --git a/plugins/module_utils/memset.py b/plugins/module_utils/memset.py index 0156f5b8fec..6d1038b2968 100644 --- a/plugins/module_utils/memset.py +++ b/plugins/module_utils/memset.py @@ -18,9 +18,9 @@ class Response: - ''' + """ Create a response object to mimic that of requests. - ''' + """ def __init__(self): self.content = None @@ -32,12 +32,12 @@ def json(self): def memset_api_call(api_key, api_method, payload=None): - ''' + """ Generic function which returns results back to calling function. Requires an API key and an API method to assemble the API URL. Returns response text to be analysed. - ''' + """ # instantiate a response object response = Response() @@ -53,13 +53,13 @@ def memset_api_call(api_key, api_method, payload=None): msg = None data = urlencode(payload) - headers = {'Content-Type': 'application/x-www-form-urlencoded'} - api_uri_base = 'https://api.memset.com/v1/json/' - api_uri = f'{api_uri_base}{api_method}/' + headers = {"Content-Type": "application/x-www-form-urlencoded"} + api_uri_base = "https://api.memset.com/v1/json/" + api_uri = f"{api_uri_base}{api_method}/" try: resp = open_url(api_uri, data=data, headers=headers, method="POST", force_basic_auth=True, url_username=api_key) - response.content = resp.read().decode('utf-8') + response.content = resp.read().decode("utf-8") response.status_code = resp.getcode() except urllib_error.HTTPError as e: try: @@ -68,7 +68,7 @@ def memset_api_call(api_key, api_method, payload=None): errorcode = None has_failed = True - response.content = e.read().decode('utf8') + response.content = e.read().decode("utf8") response.status_code = errorcode if response.status_code is not None: @@ -87,29 +87,29 @@ def memset_api_call(api_key, api_method, payload=None): def check_zone_domain(data, domain): - ''' + """ Returns true if domain already exists, and false if not. 
- ''' + """ exists = False if data.status_code in [201, 200]: for zone_domain in data.json(): - if zone_domain['domain'] == domain: + if zone_domain["domain"] == domain: exists = True return exists def check_zone(data, name): - ''' + """ Returns true if zone already exists, and false if not. - ''' + """ counter = 0 exists = False if data.status_code in [201, 200]: for zone in data.json(): - if zone['nickname'] == name: + if zone["nickname"] == name: counter += 1 if counter == 1: exists = True @@ -118,26 +118,26 @@ def check_zone(data, name): def get_zone_id(zone_name, current_zones): - ''' + """ Returns the zone's id if it exists and is unique - ''' + """ zone_exists = False zone_id, msg = None, None zone_list = [] for zone in current_zones: - if zone['nickname'] == zone_name: - zone_list.append(zone['id']) + if zone["nickname"] == zone_name: + zone_list.append(zone["id"]) counter = len(zone_list) if counter == 0: - msg = 'No matching zone found' + msg = "No matching zone found" elif counter == 1: zone_id = zone_list[0] zone_exists = True elif counter > 1: zone_id = None - msg = 'Zone ID could not be returned as duplicate zone names were detected' + msg = "Zone ID could not be returned as duplicate zone names were detected" return zone_exists, msg, counter, zone_id diff --git a/plugins/module_utils/mh/base.py b/plugins/module_utils/mh/base.py index 85682b9f62c..871753f0d35 100644 --- a/plugins/module_utils/mh/base.py +++ b/plugins/module_utils/mh/base.py @@ -16,7 +16,11 @@ class ModuleHelperBase: module: dict[str, t.Any] | None = None # TODO: better spec using t.TypedDict ModuleHelperException = _MHE _delegated_to_module: tuple[str, ...] = ( - 'check_mode', 'get_bin_path', 'warn', 'deprecate', 'debug', + "check_mode", + "get_bin_path", + "warn", + "deprecate", + "debug", ) def __init__(self, module=None): @@ -80,8 +84,8 @@ def run(self): self.__run__() self.__quit_module__() output = self.output - if 'failed' not in output: - output['failed'] = False + if "failed" not in output: + output["failed"] = False self.module.exit_json(changed=self.has_changed(), **output) @classmethod diff --git a/plugins/module_utils/mh/deco.py b/plugins/module_utils/mh/deco.py index 0be576ccfa9..e05492b66a9 100644 --- a/plugins/module_utils/mh/deco.py +++ b/plugins/module_utils/mh/deco.py @@ -33,7 +33,7 @@ def wrapper(self, *args, **kwargs): def module_fails_on_exception(func): - conflict_list = ('msg', 'exception', 'output', 'vars', 'changed') + conflict_list = ("msg", "exception", "output", "vars", "changed") @wraps(func) def wrapper(self, *args, **kwargs): @@ -51,14 +51,17 @@ def fix_var_conflicts(output): self.update_output(e.update_output) # patchy solution to resolve conflict with output variables output = fix_var_conflicts(self.output) - self.module.fail_json(msg=e.msg, exception=traceback.format_exc(), - output=self.output, vars=self.vars.output(), **output) + self.module.fail_json( + msg=e.msg, exception=traceback.format_exc(), output=self.output, vars=self.vars.output(), **output + ) except Exception as e: # patchy solution to resolve conflict with output variables output = fix_var_conflicts(self.output) msg = f"Module failed with exception: {str(e).strip()}" - self.module.fail_json(msg=msg, exception=traceback.format_exc(), - output=self.output, vars=self.vars.output(), **output) + self.module.fail_json( + msg=msg, exception=traceback.format_exc(), output=self.output, vars=self.vars.output(), **output + ) + return wrapper @@ -72,22 +75,25 @@ def wrapper(self, *args, **kwargs): def 
check_mode_skip_returns(callable=None, value=None): - def deco(func): if callable is not None: + @wraps(func) def wrapper_callable(self, *args, **kwargs): if self.module.check_mode: return callable(self, *args, **kwargs) return func(self, *args, **kwargs) + return wrapper_callable else: + @wraps(func) def wrapper_value(self, *args, **kwargs): if self.module.check_mode: return value return func(self, *args, **kwargs) + return wrapper_value return deco diff --git a/plugins/module_utils/mh/mixins/deprecate_attrs.py b/plugins/module_utils/mh/mixins/deprecate_attrs.py index 539af923837..c2991a66f1b 100644 --- a/plugins/module_utils/mh/mixins/deprecate_attrs.py +++ b/plugins/module_utils/mh/mixins/deprecate_attrs.py @@ -10,7 +10,6 @@ class DeprecateAttrsMixin: - def _deprecate_setup(self, attr, target, module): if target is None: target = self @@ -22,7 +21,9 @@ def _deprecate_setup(self, attr, target, module): elif hasattr(target, "module") and isinstance(target.module, AnsibleModule): module = target.module else: - raise ValueError("Failed to automatically discover the AnsibleModule instance. Pass 'module' parameter explicitly.") + raise ValueError( + "Failed to automatically discover the AnsibleModule instance. Pass 'module' parameter explicitly." + ) # setup internal state dicts value_attr = "__deprecated_attr_value" @@ -35,7 +36,9 @@ def _deprecate_setup(self, attr, target, module): trigger_dict = getattr(target, trigger_attr) return target, module, value_dict, trigger_dict - def _deprecate_attr(self, attr, msg, version=None, date=None, collection_name=None, target=None, value=None, module=None): + def _deprecate_attr( + self, attr, msg, version=None, date=None, collection_name=None, target=None, value=None, module=None + ): target, module, value_dict, trigger_dict = self._deprecate_setup(attr, target, module) value_dict[attr] = getattr(target, attr, value) diff --git a/plugins/module_utils/mh/mixins/state.py b/plugins/module_utils/mh/mixins/state.py index 50cce4743f4..7382def5b3e 100644 --- a/plugins/module_utils/mh/mixins/state.py +++ b/plugins/module_utils/mh/mixins/state.py @@ -7,7 +7,7 @@ class StateMixin: - state_param: str = 'state' + state_param: str = "state" default_state: str | None = None def _state(self): @@ -23,7 +23,7 @@ def __run__(self): # resolve aliases if state not in self.module.params: - aliased = [name for name, param in self.module.argument_spec.items() if state in param.get('aliases', [])] + aliased = [name for name, param in self.module.argument_spec.items() if state in param.get("aliases", [])] if aliased: state = aliased[0] self.vars.effective_state = state diff --git a/plugins/module_utils/mh/module_helper.py b/plugins/module_utils/mh/module_helper.py index 684e46924ff..9047de262d5 100644 --- a/plugins/module_utils/mh/module_helper.py +++ b/plugins/module_utils/mh/module_helper.py @@ -31,7 +31,8 @@ def __init__(self, module=None): self.vars = VarDict() for name, value in self.module.params.items(): self.vars.set( - name, value, + name, + value, diff=name in self.diff_params, output=name in self.output_params, change=None if not self.change_params else name in self.change_params, @@ -62,11 +63,11 @@ def output(self): if self.facts_name: facts = self.vars.facts() if facts is not None: - result['ansible_facts'] = {self.facts_name: facts} + result["ansible_facts"] = {self.facts_name: facts} if self.diff_mode: - diff = result.get('diff', {}) + diff = result.get("diff", {}) vars_diff = self.vars.diff() or {} - result['diff'] = dict_merge(dict(diff), vars_diff) + 
result["diff"] = dict_merge(dict(diff), vars_diff) return result diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py index f5c6275741a..9c067ff2216 100644 --- a/plugins/module_utils/module_helper.py +++ b/plugins/module_utils/module_helper.py @@ -8,9 +8,13 @@ # pylint: disable=unused-import from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ( - ModuleHelper, StateModuleHelper, + ModuleHelper, + StateModuleHelper, ) from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException # noqa: F401 from ansible_collections.community.general.plugins.module_utils.mh.deco import ( - cause_changes, module_fails_on_exception, check_mode_skip, check_mode_skip_returns, + cause_changes, + module_fails_on_exception, + check_mode_skip, + check_mode_skip_returns, ) diff --git a/plugins/module_utils/net_tools/pritunl/api.py b/plugins/module_utils/net_tools/pritunl/api.py index 7d6bd7fe86b..13847869bd9 100644 --- a/plugins/module_utils/net_tools/pritunl/api.py +++ b/plugins/module_utils/net_tools/pritunl/api.py @@ -54,9 +54,7 @@ def _get_pritunl_organizations(api_token, api_secret, base_url, validate_certs=T ) -def _delete_pritunl_organization( - api_token, api_secret, base_url, organization_id, validate_certs=True -): +def _delete_pritunl_organization(api_token, api_secret, base_url, organization_id, validate_certs=True): return pritunl_auth_request( base_url=base_url, api_token=api_token, @@ -67,9 +65,7 @@ def _delete_pritunl_organization( ) -def _post_pritunl_organization( - api_token, api_secret, base_url, organization_data, validate_certs=True -): +def _post_pritunl_organization(api_token, api_secret, base_url, organization_data, validate_certs=True): return pritunl_auth_request( api_token=api_token, api_secret=api_secret, @@ -82,9 +78,7 @@ def _post_pritunl_organization( ) -def _get_pritunl_users( - api_token, api_secret, base_url, organization_id, validate_certs=True -): +def _get_pritunl_users(api_token, api_secret, base_url, organization_id, validate_certs=True): return pritunl_auth_request( api_token=api_token, api_secret=api_secret, @@ -95,9 +89,7 @@ def _get_pritunl_users( ) -def _delete_pritunl_user( - api_token, api_secret, base_url, organization_id, user_id, validate_certs=True -): +def _delete_pritunl_user(api_token, api_secret, base_url, organization_id, user_id, validate_certs=True): return pritunl_auth_request( api_token=api_token, api_secret=api_secret, @@ -108,9 +100,7 @@ def _delete_pritunl_user( ) -def _post_pritunl_user( - api_token, api_secret, base_url, organization_id, user_data, validate_certs=True -): +def _post_pritunl_user(api_token, api_secret, base_url, organization_id, user_data, validate_certs=True): return pritunl_auth_request( api_token=api_token, api_secret=api_secret, @@ -144,9 +134,7 @@ def _put_pritunl_user( ) -def list_pritunl_organizations( - api_token, api_secret, base_url, validate_certs=True, filters=None -): +def list_pritunl_organizations(api_token, api_secret, base_url, validate_certs=True, filters=None): orgs = [] response = _get_pritunl_organizations( @@ -164,18 +152,13 @@ def list_pritunl_organizations( if filters is None: orgs.append(org) else: - if not any( - filter_val != org[filter_key] - for filter_key, filter_val in filters.items() - ): + if not any(filter_val != org[filter_key] for filter_key, filter_val in filters.items()): orgs.append(org) return orgs -def list_pritunl_users( - api_token, api_secret, base_url, organization_id, 
validate_certs=True, filters=None -): +def list_pritunl_users(api_token, api_secret, base_url, organization_id, validate_certs=True, filters=None): users = [] response = _get_pritunl_users( @@ -195,10 +178,7 @@ def list_pritunl_users( users.append(user) else: - if not any( - filter_val != user[filter_key] - for filter_key, filter_val in filters.items() - ): + if not any(filter_val != user[filter_key] for filter_key, filter_val in filters.items()): users.append(user) return users @@ -220,9 +200,7 @@ def post_pritunl_organization( ) if response.getcode() != 200: - raise PritunlException( - f"Could not add organization {organization_name} to Pritunl" - ) + raise PritunlException(f"Could not add organization {organization_name} to Pritunl") # The user PUT request returns the updated user object return json.loads(response.read()) @@ -248,9 +226,7 @@ def post_pritunl_user( ) if response.getcode() != 200: - raise PritunlException( - f"Could not remove user {user_id} from organization {organization_id} from Pritunl" - ) + raise PritunlException(f"Could not remove user {user_id} from organization {organization_id} from Pritunl") # user POST request returns an array of a single item, # so return this item instead of the list return json.loads(response.read())[0] @@ -266,16 +242,12 @@ def post_pritunl_user( ) if response.getcode() != 200: - raise PritunlException( - f"Could not update user {user_id} from organization {organization_id} from Pritunl" - ) + raise PritunlException(f"Could not update user {user_id} from organization {organization_id} from Pritunl") # The user PUT request returns the updated user object return json.loads(response.read()) -def delete_pritunl_organization( - api_token, api_secret, base_url, organization_id, validate_certs=True -): +def delete_pritunl_organization(api_token, api_secret, base_url, organization_id, validate_certs=True): response = _delete_pritunl_organization( api_token=api_token, api_secret=api_secret, @@ -285,16 +257,12 @@ def delete_pritunl_organization( ) if response.getcode() != 200: - raise PritunlException( - f"Could not remove organization {organization_id} from Pritunl" - ) + raise PritunlException(f"Could not remove organization {organization_id} from Pritunl") return json.loads(response.read()) -def delete_pritunl_user( - api_token, api_secret, base_url, organization_id, user_id, validate_certs=True -): +def delete_pritunl_user(api_token, api_secret, base_url, organization_id, user_id, validate_certs=True): response = _delete_pritunl_user( api_token=api_token, api_secret=api_secret, @@ -305,9 +273,7 @@ def delete_pritunl_user( ) if response.getcode() != 200: - raise PritunlException( - f"Could not remove user {user_id} from organization {organization_id} from Pritunl" - ) + raise PritunlException(f"Could not remove user {user_id} from organization {organization_id} from Pritunl") return json.loads(response.read()) @@ -332,9 +298,7 @@ def pritunl_auth_request( auth_string = f"{api_token}&{auth_timestamp}&{auth_nonce}&{method.upper()}&{path}" auth_signature = base64.b64encode( - hmac.new( - api_secret.encode("utf-8"), auth_string.encode("utf-8"), hashlib.sha256 - ).digest() + hmac.new(api_secret.encode("utf-8"), auth_string.encode("utf-8"), hashlib.sha256).digest() ) auth_headers = { diff --git a/plugins/module_utils/ocapi_utils.py b/plugins/module_utils/ocapi_utils.py index f316a3e4c47..abbd066b620 100644 --- a/plugins/module_utils/ocapi_utils.py +++ b/plugins/module_utils/ocapi_utils.py @@ -15,16 +15,15 @@ from 
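
[Editor's note, not part of the patch] The request signing shown above in pritunl_auth_request reduces to the following runnable fragment; the token, secret, method, and path values are made up, and the nonce scheme is assumed since its generation falls outside the hunk:

import base64
import hashlib
import hmac
import time
import uuid

api_token, api_secret = "tok", "sec"  # hypothetical credentials
method, path = "GET", "/organization"
auth_timestamp = str(int(time.time()))
auth_nonce = uuid.uuid4().hex  # assumed nonce scheme

auth_string = f"{api_token}&{auth_timestamp}&{auth_nonce}&{method.upper()}&{path}"
auth_signature = base64.b64encode(
    hmac.new(api_secret.encode("utf-8"), auth_string.encode("utf-8"), hashlib.sha256).digest()
)
print(auth_signature.decode())
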
ansible.module_utils.common.text.converters import to_native -GET_HEADERS = {'accept': 'application/json'} -PUT_HEADERS = {'content-type': 'application/json', 'accept': 'application/json'} -POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json'} -DELETE_HEADERS = {'accept': 'application/json'} +GET_HEADERS = {"accept": "application/json"} +PUT_HEADERS = {"content-type": "application/json", "accept": "application/json"} +POST_HEADERS = {"content-type": "application/json", "accept": "application/json"} +DELETE_HEADERS = {"accept": "application/json"} HEALTH_OK = 5 class OcapiUtils: - def __init__(self, creds, base_uri, proxy_slot_number, timeout, module): self.root_uri = base_uri self.proxy_slot_number = proxy_slot_number @@ -38,8 +37,8 @@ def _auth_params(self): :return: tuple of username, password """ - username = self.creds['user'] - password = self.creds['pswd'] + username = self.creds["user"] + password = self.creds["pswd"] force_basic_auth = True return username, password, force_basic_auth @@ -47,77 +46,89 @@ def get_request(self, uri): req_headers = dict(GET_HEADERS) username, password, basic_auth = self._auth_params() try: - resp = open_url(uri, method="GET", headers=req_headers, - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) + resp = open_url( + uri, + method="GET", + headers=req_headers, + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + validate_certs=False, + follow_redirects="all", + use_proxy=True, + timeout=self.timeout, + ) data = json.loads(to_native(resp.read())) headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: - return {'ret': False, - 'msg': f"HTTP Error {e.code} on GET request to '{uri}'", - 'status': e.code} + return {"ret": False, "msg": f"HTTP Error {e.code} on GET request to '{uri}'", "status": e.code} except URLError as e: - return {'ret': False, 'msg': f"URL Error on GET request to '{uri}': '{e.reason}'"} + return {"ret": False, "msg": f"URL Error on GET request to '{uri}': '{e.reason}'"} # Almost all errors should be caught above, but just in case except Exception as e: - return {'ret': False, - 'msg': f"Failed GET request to '{uri}': '{e}'"} - return {'ret': True, 'data': data, 'headers': headers} + return {"ret": False, "msg": f"Failed GET request to '{uri}': '{e}'"} + return {"ret": True, "data": data, "headers": headers} def delete_request(self, uri, etag=None): req_headers = dict(DELETE_HEADERS) if etag is not None: - req_headers['If-Match'] = etag + req_headers["If-Match"] = etag username, password, basic_auth = self._auth_params() try: - resp = open_url(uri, method="DELETE", headers=req_headers, - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) + resp = open_url( + uri, + method="DELETE", + headers=req_headers, + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + validate_certs=False, + follow_redirects="all", + use_proxy=True, + timeout=self.timeout, + ) if resp.status != 204: data = json.loads(to_native(resp.read())) else: data = "" headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: - return {'ret': False, - 'msg': f"HTTP Error {e.code} on DELETE request to '{uri}'", - 'status': e.code} + return {"ret": False, "msg": f"HTTP Error {e.code} on DELETE request to '{uri}'", 
"status": e.code} except URLError as e: - return {'ret': False, 'msg': f"URL Error on DELETE request to '{uri}': '{e.reason}'"} + return {"ret": False, "msg": f"URL Error on DELETE request to '{uri}': '{e.reason}'"} # Almost all errors should be caught above, but just in case except Exception as e: - return {'ret': False, - 'msg': f"Failed DELETE request to '{uri}': '{e}'"} - return {'ret': True, 'data': data, 'headers': headers} + return {"ret": False, "msg": f"Failed DELETE request to '{uri}': '{e}'"} + return {"ret": True, "data": data, "headers": headers} def put_request(self, uri, payload, etag=None): req_headers = dict(PUT_HEADERS) if etag is not None: - req_headers['If-Match'] = etag + req_headers["If-Match"] = etag username, password, basic_auth = self._auth_params() try: - resp = open_url(uri, data=json.dumps(payload), - headers=req_headers, method="PUT", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) + resp = open_url( + uri, + data=json.dumps(payload), + headers=req_headers, + method="PUT", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + validate_certs=False, + follow_redirects="all", + use_proxy=True, + timeout=self.timeout, + ) headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: - return {'ret': False, - 'msg': f"HTTP Error {e.code} on PUT request to '{uri}'", - 'status': e.code} + return {"ret": False, "msg": f"HTTP Error {e.code} on PUT request to '{uri}'", "status": e.code} except URLError as e: - return {'ret': False, 'msg': f"URL Error on PUT request to '{uri}': '{e.reason}'"} + return {"ret": False, "msg": f"URL Error on PUT request to '{uri}': '{e.reason}'"} # Almost all errors should be caught above, but just in case except Exception as e: - return {'ret': False, - 'msg': f"Failed PUT request to '{uri}': '{e}'"} - return {'ret': True, 'headers': headers, 'resp': resp} + return {"ret": False, "msg": f"Failed PUT request to '{uri}': '{e}'"} + return {"ret": True, "headers": headers, "resp": resp} def post_request(self, uri, payload, content_type="application/json", timeout=None): req_headers = dict(POST_HEADERS) @@ -129,24 +140,28 @@ def post_request(self, uri, payload, content_type="application/json", timeout=No else: request_data = payload try: - resp = open_url(uri, data=request_data, - headers=req_headers, method="POST", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout if timeout is None else timeout) + resp = open_url( + uri, + data=request_data, + headers=req_headers, + method="POST", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + validate_certs=False, + follow_redirects="all", + use_proxy=True, + timeout=self.timeout if timeout is None else timeout, + ) headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: - return {'ret': False, - 'msg': f"HTTP Error {e.code} on POST request to '{uri}'", - 'status': e.code} + return {"ret": False, "msg": f"HTTP Error {e.code} on POST request to '{uri}'", "status": e.code} except URLError as e: - return {'ret': False, 'msg': f"URL Error on POST request to '{uri}': '{e.reason}'"} + return {"ret": False, "msg": f"URL Error on POST request to '{uri}': '{e.reason}'"} # Almost all errors should be caught above, but just in case except Exception as e: - return {'ret': False, - 
'msg': f"Failed POST request to '{uri}': '{e}'"} - return {'ret': True, 'headers': headers, 'resp': resp} + return {"ret": False, "msg": f"Failed POST request to '{uri}': '{e}'"} + return {"ret": True, "headers": headers, "resp": resp} def get_uri_with_slot_number_query_param(self, uri): """Return the URI with proxy slot number added as a query param, if there is one. @@ -172,29 +187,25 @@ def manage_system_power(self, command): # Get the resource so that we have the Etag response = self.get_request(resource_uri) - if 'etag' not in response['headers']: - return {'ret': False, 'msg': 'Etag not found in response.'} - etag = response['headers']['etag'] - if response['ret'] is False: + if "etag" not in response["headers"]: + return {"ret": False, "msg": "Etag not found in response."} + etag = response["headers"]["etag"] + if response["ret"] is False: return response # Issue the PUT to do the reboot (unless we are in check mode) if self.module.check_mode: - return { - 'ret': True, - 'changed': True, - 'msg': 'Update not performed in check mode.' - } - payload = {'Reboot': True} + return {"ret": True, "changed": True, "msg": "Update not performed in check mode."} + payload = {"Reboot": True} response = self.put_request(resource_uri, payload, etag) - if response['ret'] is False: + if response["ret"] is False: return response elif command.startswith("PowerMode"): return self.manage_power_mode(command) else: - return {'ret': False, 'msg': f"Invalid command: {command}"} + return {"ret": False, "msg": f"Invalid command: {command}"} - return {'ret': True} + return {"ret": True} def manage_chassis_indicator_led(self, command): """Process a command to manage the chassis indicator LED. @@ -214,91 +225,73 @@ def manage_indicator_led(self, command, resource_uri=None): resource_uri = self.root_uri resource_uri = self.get_uri_with_slot_number_query_param(resource_uri) - payloads = { - 'IndicatorLedOn': { - 'ID': 2 - }, - 'IndicatorLedOff': { - 'ID': 4 - } - } + payloads = {"IndicatorLedOn": {"ID": 2}, "IndicatorLedOff": {"ID": 4}} response = self.get_request(resource_uri) - if 'etag' not in response['headers']: - return {'ret': False, 'msg': 'Etag not found in response.'} - etag = response['headers']['etag'] - if response['ret'] is False: + if "etag" not in response["headers"]: + return {"ret": False, "msg": "Etag not found in response."} + etag = response["headers"]["etag"] + if response["ret"] is False: return response - data = response['data'] + data = response["data"] if key not in data: - return {'ret': False, 'msg': f"Key {key} not found"} - if 'ID' not in data[key]: - return {'ret': False, 'msg': 'IndicatorLED for resource has no ID.'} + return {"ret": False, "msg": f"Key {key} not found"} + if "ID" not in data[key]: + return {"ret": False, "msg": "IndicatorLED for resource has no ID."} if command in payloads.keys(): # See if the LED is already set as requested. - current_led_status = data[key]['ID'] - if current_led_status == payloads[command]['ID']: - return {'ret': True, 'changed': False} + current_led_status = data[key]["ID"] + if current_led_status == payloads[command]["ID"]: + return {"ret": True, "changed": False} # Set the LED (unless we are in check mode) if self.module.check_mode: - return { - 'ret': True, - 'changed': True, - 'msg': 'Update not performed in check mode.' 
- } - payload = {'IndicatorLED': payloads[command]} + return {"ret": True, "changed": True, "msg": "Update not performed in check mode."} + payload = {"IndicatorLED": payloads[command]} response = self.put_request(resource_uri, payload, etag) - if response['ret'] is False: + if response["ret"] is False: return response else: - return {'ret': False, 'msg': 'Invalid command'} + return {"ret": False, "msg": "Invalid command"} - return {'ret': True} + return {"ret": True} def manage_power_mode(self, command): key = "PowerState" resource_uri = self.get_uri_with_slot_number_query_param(self.root_uri) - payloads = { - "PowerModeNormal": 2, - "PowerModeLow": 4 - } + payloads = {"PowerModeNormal": 2, "PowerModeLow": 4} response = self.get_request(resource_uri) - if 'etag' not in response['headers']: - return {'ret': False, 'msg': 'Etag not found in response.'} - etag = response['headers']['etag'] - if response['ret'] is False: + if "etag" not in response["headers"]: + return {"ret": False, "msg": "Etag not found in response."} + etag = response["headers"]["etag"] + if response["ret"] is False: return response - data = response['data'] + data = response["data"] if key not in data: - return {'ret': False, 'msg': f"Key {key} not found"} - if 'ID' not in data[key]: - return {'ret': False, 'msg': 'PowerState for resource has no ID.'} + return {"ret": False, "msg": f"Key {key} not found"} + if "ID" not in data[key]: + return {"ret": False, "msg": "PowerState for resource has no ID."} if command in payloads.keys(): # See if the PowerState is already set as requested. - current_power_state = data[key]['ID'] + current_power_state = data[key]["ID"] if current_power_state == payloads[command]: - return {'ret': True, 'changed': False} + return {"ret": True, "changed": False} # Set the Power State (unless we are in check mode) if self.module.check_mode: - return { - 'ret': True, - 'changed': True, - 'msg': 'Update not performed in check mode.' - } - payload = {'PowerState': {"ID": payloads[command]}} + return {"ret": True, "changed": True, "msg": "Update not performed in check mode."} + payload = {"PowerState": {"ID": payloads[command]}} response = self.put_request(resource_uri, payload, etag) - if response['ret'] is False: + if response["ret"] is False: return response else: - return {'ret': False, 'msg': f"Invalid command: {command}"} + return {"ret": False, "msg": f"Invalid command: {command}"} - return {'ret': True} + return {"ret": True} def prepare_multipart_firmware_upload(self, filename): """Prepare a multipart/form-data body for OCAPI firmware upload. @@ -315,13 +308,12 @@ def prepare_multipart_firmware_upload(self, filename): boundary = str(uuid.uuid4()) # Generate a random boundary body = f"--{boundary}\r\n" body += f'Content-Disposition: form-data; name="FirmwareFile"; filename="{to_native(os.path.basename(filename))}"\r\n' - body += 'Content-Type: application/octet-stream\r\n\r\n' - body_bytes = bytearray(body, 'utf-8') - with open(filename, 'rb') as f: + body += "Content-Type: application/octet-stream\r\n\r\n" + body_bytes = bytearray(body, "utf-8") + with open(filename, "rb") as f: body_bytes += f.read() - body_bytes += bytearray(f"\r\n--{boundary}--", 'utf-8') - return (f"multipart/form-data; boundary={boundary}", - body_bytes) + body_bytes += bytearray(f"\r\n--{boundary}--", "utf-8") + return (f"multipart/form-data; boundary={boundary}", body_bytes) def upload_firmware_image(self, update_image_path): """Perform Firmware Upload to the OCAPI storage device. 
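
[Editor's note, not part of the patch] The multipart body assembled by prepare_multipart_firmware_upload above can be exercised standalone; this sketch inlines the same construction with dummy file contents:

import os
import uuid

def multipart_firmware_body(filename, file_bytes):
    boundary = str(uuid.uuid4())  # random boundary, as above
    body = f"--{boundary}\r\n"
    body += f'Content-Disposition: form-data; name="FirmwareFile"; filename="{os.path.basename(filename)}"\r\n'
    body += "Content-Type: application/octet-stream\r\n\r\n"
    body_bytes = bytearray(body, "utf-8")
    body_bytes += file_bytes
    body_bytes += bytearray(f"\r\n--{boundary}--", "utf-8")
    return f"multipart/form-data; boundary={boundary}", bytes(body_bytes)

content_type, payload = multipart_firmware_body("fw.bin", b"\x00\x01")
print(content_type, len(payload))
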
@@ -329,22 +321,18 @@ def upload_firmware_image(self, update_image_path): :param str update_image_path: The path/filename of the firmware image, on the local filesystem. """ if not (os.path.exists(update_image_path) and os.path.isfile(update_image_path)): - return {'ret': False, 'msg': 'File does not exist.'} + return {"ret": False, "msg": "File does not exist."} url = f"{self.root_uri}OperatingSystem" url = self.get_uri_with_slot_number_query_param(url) content_type, b_form_data = self.prepare_multipart_firmware_upload(update_image_path) # Post the firmware (unless we are in check mode) if self.module.check_mode: - return { - 'ret': True, - 'changed': True, - 'msg': 'Update not performed in check mode.' - } + return {"ret": True, "changed": True, "msg": "Update not performed in check mode."} result = self.post_request(url, b_form_data, content_type=content_type, timeout=300) - if result['ret'] is False: + if result["ret"] is False: return result - return {'ret': True} + return {"ret": True} def update_firmware_image(self): """Perform a Firmware Update on the OCAPI storage device.""" @@ -352,25 +340,21 @@ def update_firmware_image(self): resource_uri = self.get_uri_with_slot_number_query_param(resource_uri) # We have to do a GET to obtain the Etag. It's required on the PUT. response = self.get_request(resource_uri) - if response['ret'] is False: + if response["ret"] is False: return response - if 'etag' not in response['headers']: - return {'ret': False, 'msg': 'Etag not found in response.'} - etag = response['headers']['etag'] + if "etag" not in response["headers"]: + return {"ret": False, "msg": "Etag not found in response."} + etag = response["headers"]["etag"] # Issue the PUT (unless we are in check mode) if self.module.check_mode: - return { - 'ret': True, - 'changed': True, - 'msg': 'Update not performed in check mode.' - } - payload = {'FirmwareUpdate': True} + return {"ret": True, "changed": True, "msg": "Update not performed in check mode."} + payload = {"FirmwareUpdate": True} response = self.put_request(resource_uri, payload, etag) - if response['ret'] is False: + if response["ret"] is False: return response - return {'ret': True, 'jobUri': response["headers"]["location"]} + return {"ret": True, "jobUri": response["headers"]["location"]} def activate_firmware_image(self): """Perform a Firmware Activate on the OCAPI storage device.""" @@ -378,25 +362,21 @@ def activate_firmware_image(self): resource_uri = self.get_uri_with_slot_number_query_param(resource_uri) # We have to do a GET to obtain the Etag. It's required on the PUT. response = self.get_request(resource_uri) - if 'etag' not in response['headers']: - return {'ret': False, 'msg': 'Etag not found in response.'} - etag = response['headers']['etag'] - if response['ret'] is False: + if "etag" not in response["headers"]: + return {"ret": False, "msg": "Etag not found in response."} + etag = response["headers"]["etag"] + if response["ret"] is False: return response # Issue the PUT (unless we are in check mode) if self.module.check_mode: - return { - 'ret': True, - 'changed': True, - 'msg': 'Update not performed in check mode.' 
- } - payload = {'FirmwareActivate': True} + return {"ret": True, "changed": True, "msg": "Update not performed in check mode."} + payload = {"FirmwareActivate": True} response = self.put_request(resource_uri, payload, etag) - if response['ret'] is False: + if response["ret"] is False: return response - return {'ret': True, 'jobUri': response["headers"]["location"]} + return {"ret": True, "jobUri": response["headers"]["location"]} def get_job_status(self, job_uri): """Get the status of a job. @@ -405,8 +385,8 @@ def get_job_status(self, job_uri): """ job_uri = self.get_uri_with_slot_number_query_param(job_uri) response = self.get_request(job_uri) - if response['ret'] is False: - if response.get('status') == 404: + if response["ret"] is False: + if response.get("status") == 404: # Job not found -- assume 0% return { "ret": True, @@ -416,7 +396,7 @@ def get_job_status(self, job_uri): "operationHealth": None, "operationHealthId": None, "details": "Job does not exist.", - "jobExists": False + "jobExists": False, } else: return response @@ -432,7 +412,7 @@ def get_job_status(self, job_uri): "operationHealth": health_list[0]["Name"] if len(health_list) > 0 else None, "operationHealthId": health_list[0]["ID"] if len(health_list) > 0 else None, "details": details, - "jobExists": True + "jobExists": True, } return return_value @@ -442,50 +422,28 @@ def delete_job(self, job_uri): # We have to do a GET to obtain the Etag. It's required on the DELETE. response = self.get_request(job_uri) - if response['ret'] is True: - if 'etag' not in response['headers']: - return {'ret': False, 'msg': 'Etag not found in response.'} + if response["ret"] is True: + if "etag" not in response["headers"]: + return {"ret": False, "msg": "Etag not found in response."} else: - etag = response['headers']['etag'] + etag = response["headers"]["etag"] - if response['data']['PercentComplete'] != 100: - return { - 'ret': False, - 'changed': False, - 'msg': 'Cannot delete job because it is in progress.' - } + if response["data"]["PercentComplete"] != 100: + return {"ret": False, "changed": False, "msg": "Cannot delete job because it is in progress."} - if response['ret'] is False: - if response['status'] == 404: - return { - 'ret': True, - 'changed': False, - 'msg': 'Job already deleted.' - } + if response["ret"] is False: + if response["status"] == 404: + return {"ret": True, "changed": False, "msg": "Job already deleted."} return response if self.module.check_mode: - return { - 'ret': True, - 'changed': True, - 'msg': 'Update not performed in check mode.' - } + return {"ret": True, "changed": True, "msg": "Update not performed in check mode."} # Do the DELETE (unless we are in check mode) response = self.delete_request(job_uri, etag) - if response['ret'] is False: - if response['status'] == 404: - return { - 'ret': True, - 'changed': False - } - elif response['status'] == 409: - return { - 'ret': False, - 'changed': False, - 'msg': 'Cannot delete job because it is in progress.' 
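
[Editor's note, not part of the patch] One observation on the GET-then-PUT handshake reformatted above: update_firmware_image checks response["ret"] before touching response["headers"], but activate_firmware_image, manage_system_power, and manage_indicator_led read the headers first, which raises KeyError whenever the GET itself failed, since the error envelope carries no "headers" key. The safe ordering, sketched with utils standing for an OcapiUtils instance:

def put_with_etag(utils, uri, payload):
    response = utils.get_request(uri)
    if response["ret"] is False:  # check transport success before reading headers
        return response
    if "etag" not in response["headers"]:
        return {"ret": False, "msg": "Etag not found in response."}
    etag = response["headers"]["etag"]  # replayed as If-Match by put_request
    return utils.put_request(uri, payload, etag)
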
- } + if response["ret"] is False: + if response["status"] == 404: + return {"ret": True, "changed": False} + elif response["status"] == 409: + return {"ret": False, "changed": False, "msg": "Cannot delete job because it is in progress."} return response - return { - 'ret': True, - 'changed': True - } + return {"ret": True, "changed": True} diff --git a/plugins/module_utils/oneandone.py b/plugins/module_utils/oneandone.py index 9a6a2488af6..164ebbcba69 100644 --- a/plugins/module_utils/oneandone.py +++ b/plugins/module_utils/oneandone.py @@ -14,28 +14,28 @@ class OneAndOneResources: - firewall_policy = 'firewall_policy' - load_balancer = 'load_balancer' - monitoring_policy = 'monitoring_policy' - private_network = 'private_network' - public_ip = 'public_ip' - role = 'role' - server = 'server' - user = 'user' - vpn = 'vpn' + firewall_policy = "firewall_policy" + load_balancer = "load_balancer" + monitoring_policy = "monitoring_policy" + private_network = "private_network" + public_ip = "public_ip" + role = "role" + server = "server" + user = "user" + vpn = "vpn" def get_resource(oneandone_conn, resource_type, resource_id): switcher = { - 'firewall_policy': oneandone_conn.get_firewall, - 'load_balancer': oneandone_conn.get_load_balancer, - 'monitoring_policy': oneandone_conn.get_monitoring_policy, - 'private_network': oneandone_conn.get_private_network, - 'public_ip': oneandone_conn.get_public_ip, - 'role': oneandone_conn.get_role, - 'server': oneandone_conn.get_server, - 'user': oneandone_conn.get_user, - 'vpn': oneandone_conn.get_vpn, + "firewall_policy": oneandone_conn.get_firewall, + "load_balancer": oneandone_conn.get_load_balancer, + "monitoring_policy": oneandone_conn.get_monitoring_policy, + "private_network": oneandone_conn.get_private_network, + "public_ip": oneandone_conn.get_public_ip, + "role": oneandone_conn.get_role, + "server": oneandone_conn.get_server, + "user": oneandone_conn.get_user, + "vpn": oneandone_conn.get_vpn, } return switcher.get(resource_type, None)(resource_id) @@ -47,10 +47,10 @@ def get_datacenter(oneandone_conn, datacenter, full_object=False): Returns the datacenter ID. """ for _datacenter in oneandone_conn.list_datacenters(): - if datacenter in (_datacenter['id'], _datacenter['country_code']): + if datacenter in (_datacenter["id"], _datacenter["country_code"]): if full_object: return _datacenter - return _datacenter['id'] + return _datacenter["id"] def get_fixed_instance_size(oneandone_conn, fixed_instance_size, full_object=False): @@ -59,11 +59,10 @@ def get_fixed_instance_size(oneandone_conn, fixed_instance_size, full_object=Fal Return the instance size ID. """ for _fixed_instance_size in oneandone_conn.fixed_server_flavors(): - if fixed_instance_size in (_fixed_instance_size['id'], - _fixed_instance_size['name']): + if fixed_instance_size in (_fixed_instance_size["id"], _fixed_instance_size["name"]): if full_object: return _fixed_instance_size - return _fixed_instance_size['id'] + return _fixed_instance_size["id"] def get_appliance(oneandone_conn, appliance, full_object=False): @@ -71,11 +70,11 @@ def get_appliance(oneandone_conn, appliance, full_object=False): Validates the appliance exists by ID or name. Return the appliance ID. 
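
[Editor's note, not part of the patch] Note that get_resource above invokes switcher.get(resource_type, None)(resource_id) directly, so an unknown resource_type calls None and fails with "TypeError: 'NoneType' object is not callable". A guarded variant, illustrative only and abbreviated to two entries:

def get_resource(oneandone_conn, resource_type, resource_id):
    switcher = {
        "server": oneandone_conn.get_server,
        "user": oneandone_conn.get_user,
        # ... remaining resource types as in the mapping above
    }
    getter = switcher.get(resource_type)
    if getter is None:
        raise ValueError(f"Unsupported resource type: {resource_type}")
    return getter(resource_id)
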
""" - for _appliance in oneandone_conn.list_appliances(q='IMAGE'): - if appliance in (_appliance['id'], _appliance['name']): + for _appliance in oneandone_conn.list_appliances(q="IMAGE"): + if appliance in (_appliance["id"], _appliance["name"]): if full_object: return _appliance - return _appliance['id'] + return _appliance["id"] def get_private_network(oneandone_conn, private_network, full_object=False): @@ -84,11 +83,10 @@ def get_private_network(oneandone_conn, private_network, full_object=False): Return the private network ID. """ for _private_network in oneandone_conn.list_private_networks(): - if private_network in (_private_network['name'], - _private_network['id']): + if private_network in (_private_network["name"], _private_network["id"]): if full_object: return _private_network - return _private_network['id'] + return _private_network["id"] def get_monitoring_policy(oneandone_conn, monitoring_policy, full_object=False): @@ -97,11 +95,10 @@ def get_monitoring_policy(oneandone_conn, monitoring_policy, full_object=False): Return the monitoring policy ID. """ for _monitoring_policy in oneandone_conn.list_monitoring_policies(): - if monitoring_policy in (_monitoring_policy['name'], - _monitoring_policy['id']): + if monitoring_policy in (_monitoring_policy["name"], _monitoring_policy["id"]): if full_object: return _monitoring_policy - return _monitoring_policy['id'] + return _monitoring_policy["id"] def get_firewall_policy(oneandone_conn, firewall_policy, full_object=False): @@ -110,11 +107,10 @@ def get_firewall_policy(oneandone_conn, firewall_policy, full_object=False): Return the firewall policy ID. """ for _firewall_policy in oneandone_conn.list_firewall_policies(): - if firewall_policy in (_firewall_policy['name'], - _firewall_policy['id']): + if firewall_policy in (_firewall_policy["name"], _firewall_policy["id"]): if full_object: return _firewall_policy - return _firewall_policy['id'] + return _firewall_policy["id"] def get_load_balancer(oneandone_conn, load_balancer, full_object=False): @@ -123,11 +119,10 @@ def get_load_balancer(oneandone_conn, load_balancer, full_object=False): Return the load balancer ID. """ for _load_balancer in oneandone_conn.list_load_balancers(): - if load_balancer in (_load_balancer['name'], - _load_balancer['id']): + if load_balancer in (_load_balancer["name"], _load_balancer["id"]): if full_object: return _load_balancer - return _load_balancer['id'] + return _load_balancer["id"] def get_server(oneandone_conn, instance, full_object=False): @@ -136,10 +131,10 @@ def get_server(oneandone_conn, instance, full_object=False): Returns the server if one was found. """ for server in oneandone_conn.list_servers(per_page=1000): - if instance in (server['id'], server['name']): + if instance in (server["id"], server["name"]): if full_object: return server - return server['id'] + return server["id"] def get_user(oneandone_conn, user, full_object=False): @@ -148,10 +143,10 @@ def get_user(oneandone_conn, user, full_object=False): Returns the user if one was found. """ for _user in oneandone_conn.list_users(per_page=1000): - if user in (_user['id'], _user['name']): + if user in (_user["id"], _user["name"]): if full_object: return _user - return _user['id'] + return _user["id"] def get_role(oneandone_conn, role, full_object=False): @@ -161,10 +156,10 @@ def get_role(oneandone_conn, role, full_object=False): Returns the role if one was found, else None. 
""" for _role in oneandone_conn.list_roles(per_page=1000): - if role in (_role['id'], _role['name']): + if role in (_role["id"], _role["name"]): if full_object: return _role - return _role['id'] + return _role["id"] def get_vpn(oneandone_conn, vpn, full_object=False): @@ -173,10 +168,10 @@ def get_vpn(oneandone_conn, vpn, full_object=False): Returns the vpn if one was found. """ for _vpn in oneandone_conn.list_vpns(per_page=1000): - if vpn in (_vpn['id'], _vpn['name']): + if vpn in (_vpn["id"], _vpn["name"]): if full_object: return _vpn - return _vpn['id'] + return _vpn["id"] def get_public_ip(oneandone_conn, public_ip, full_object=False): @@ -185,17 +180,13 @@ def get_public_ip(oneandone_conn, public_ip, full_object=False): Returns the public ip if one was found. """ for _public_ip in oneandone_conn.list_public_ips(per_page=1000): - if public_ip in (_public_ip['id'], _public_ip['ip']): + if public_ip in (_public_ip["id"], _public_ip["ip"]): if full_object: return _public_ip - return _public_ip['id'] + return _public_ip["id"] -def wait_for_resource_creation_completion(oneandone_conn, - resource_type, - resource_id, - wait_timeout, - wait_interval): +def wait_for_resource_creation_completion(oneandone_conn, resource_type, resource_id, wait_timeout, wait_interval): """ Waits for the resource create operation to complete based on the timeout period. """ @@ -207,33 +198,25 @@ def wait_for_resource_creation_completion(oneandone_conn, resource = get_resource(oneandone_conn, resource_type, resource_id) if resource_type == OneAndOneResources.server: - resource_state = resource['status']['state'] + resource_state = resource["status"]["state"] else: - resource_state = resource['state'] + resource_state = resource["state"] - if ((resource_type == OneAndOneResources.server and resource_state.lower() == 'powered_on') or - (resource_type != OneAndOneResources.server and resource_state.lower() == 'active')): + if (resource_type == OneAndOneResources.server and resource_state.lower() == "powered_on") or ( + resource_type != OneAndOneResources.server and resource_state.lower() == "active" + ): return - elif resource_state.lower() == 'failed': - raise Exception(f'{resource_type} creation failed for {resource_id}') - elif resource_state.lower() in ('active', - 'enabled', - 'deploying', - 'configuring'): + elif resource_state.lower() == "failed": + raise Exception(f"{resource_type} creation failed for {resource_id}") + elif resource_state.lower() in ("active", "enabled", "deploying", "configuring"): continue else: - raise Exception( - f'Unknown {resource_type} state {resource_state}') + raise Exception(f"Unknown {resource_type} state {resource_state}") - raise Exception( - f'Timed out waiting for {resource_type} completion for {resource_id}') + raise Exception(f"Timed out waiting for {resource_type} completion for {resource_id}") -def wait_for_resource_deletion_completion(oneandone_conn, - resource_type, - resource_id, - wait_timeout, - wait_interval): +def wait_for_resource_deletion_completion(oneandone_conn, resource_type, resource_id, wait_timeout, wait_interval): """ Waits for the resource delete operation to complete based on the timeout period. 
""" @@ -242,23 +225,21 @@ def wait_for_resource_deletion_completion(oneandone_conn, time.sleep(wait_interval) # Refresh the operation info - logs = oneandone_conn.list_logs(q='DELETE', - period='LAST_HOUR', - sort='-start_date') + logs = oneandone_conn.list_logs(q="DELETE", period="LAST_HOUR", sort="-start_date") if resource_type == OneAndOneResources.server: - _type = 'VM' + _type = "VM" elif resource_type == OneAndOneResources.private_network: - _type = 'PRIVATENETWORK' + _type = "PRIVATENETWORK" else: - raise Exception( - f'Unsupported wait_for delete operation for {resource_type} resource') + raise Exception(f"Unsupported wait_for delete operation for {resource_type} resource") for log in logs: - if (log['resource']['id'] == resource_id and - log['action'] == 'DELETE' and - log['type'] == _type and - log['status']['state'] == 'OK'): + if ( + log["resource"]["id"] == resource_id + and log["action"] == "DELETE" + and log["type"] == _type + and log["status"]["state"] == "OK" + ): return - raise Exception( - f'Timed out waiting for {resource_type} deletion for {resource_id}') + raise Exception(f"Timed out waiting for {resource_type} deletion for {resource_id}") diff --git a/plugins/module_utils/oneview.py b/plugins/module_utils/oneview.py index 9738e9203ef..f333d067c3c 100644 --- a/plugins/module_utils/oneview.py +++ b/plugins/module_utils/oneview.py @@ -20,6 +20,7 @@ HPE_ONEVIEW_IMP_ERR = None try: from hpOneView.oneview_client import OneViewClient + HAS_HPE_ONEVIEW = True except ImportError: HPE_ONEVIEW_IMP_ERR = traceback.format_exc() @@ -46,7 +47,7 @@ def transform_list_to_dict(list_): if isinstance(value, Mapping): ret.update(value) else: - ret[to_native(value, errors='surrogate_or_strict')] = True + ret[to_native(value, errors="surrogate_or_strict")] = True return ret @@ -121,7 +122,7 @@ class OneViewModuleException(Exception): Attributes: msg (str): Exception message. oneview_response (dict): OneView rest response. - """ + """ def __init__(self, data): self.msg = None @@ -133,7 +134,7 @@ def __init__(self, data): self.oneview_response = data if data and isinstance(data, dict): - self.msg = data.get('message') + self.msg = data.get("message") if self.oneview_response: Exception.__init__(self, self.msg, self.oneview_response) @@ -163,6 +164,7 @@ class OneViewModuleValueError(OneViewModuleException): Attributes: msg (str): Exception message. """ + pass @@ -174,27 +176,28 @@ class OneViewModuleResourceNotFound(OneViewModuleException): Attributes: msg (str): Exception message. """ + pass class OneViewModuleBase(metaclass=abc.ABCMeta): - MSG_CREATED = 'Resource created successfully.' - MSG_UPDATED = 'Resource updated successfully.' - MSG_DELETED = 'Resource deleted successfully.' - MSG_ALREADY_PRESENT = 'Resource is already present.' - MSG_ALREADY_ABSENT = 'Resource is already absent.' - MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. ' + MSG_CREATED = "Resource created successfully." + MSG_UPDATED = "Resource updated successfully." + MSG_DELETED = "Resource deleted successfully." + MSG_ALREADY_PRESENT = "Resource is already present." + MSG_ALREADY_ABSENT = "Resource is already absent." + MSG_DIFF_AT_KEY = "Difference found at key '{0}'. 
" ONEVIEW_COMMON_ARGS = dict( - config=dict(type='path'), - hostname=dict(type='str'), - username=dict(type='str'), - password=dict(type='str', no_log=True), - api_version=dict(type='int'), - image_streamer_hostname=dict(type='str') + config=dict(type="path"), + hostname=dict(type="str"), + username=dict(type="str"), + password=dict(type="str", no_log=True), + api_version=dict(type="int"), + image_streamer_hostname=dict(type="str"), ) - ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type='bool', default=True)) + ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type="bool", default=True)) resource_client = None @@ -212,19 +215,18 @@ def __init__(self, additional_arg_spec=None, validate_etag_support=False, suppor self._check_hpe_oneview_sdk() self._create_oneview_client() - self.state = self.module.params.get('state') - self.data = self.module.params.get('data') + self.state = self.module.params.get("state") + self.data = self.module.params.get("data") # Preload params for get_all - used by facts - self.facts_params = self.module.params.get('params') or {} + self.facts_params = self.module.params.get("params") or {} # Preload options as dict - used by facts - self.options = transform_list_to_dict(self.module.params.get('options')) + self.options = transform_list_to_dict(self.module.params.get("options")) self.validate_etag_support = validate_etag_support def _build_argument_spec(self, additional_arg_spec, validate_etag_support): - merged_arg_spec = dict() merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS) @@ -238,19 +240,21 @@ def _build_argument_spec(self, additional_arg_spec, validate_etag_support): def _check_hpe_oneview_sdk(self): if not HAS_HPE_ONEVIEW: - self.module.fail_json(msg=missing_required_lib('hpOneView'), exception=HPE_ONEVIEW_IMP_ERR) + self.module.fail_json(msg=missing_required_lib("hpOneView"), exception=HPE_ONEVIEW_IMP_ERR) def _create_oneview_client(self): - if self.module.params.get('hostname'): - config = dict(ip=self.module.params['hostname'], - credentials=dict(userName=self.module.params['username'], password=self.module.params['password']), - api_version=self.module.params['api_version'], - image_streamer_ip=self.module.params['image_streamer_hostname']) + if self.module.params.get("hostname"): + config = dict( + ip=self.module.params["hostname"], + credentials=dict(userName=self.module.params["username"], password=self.module.params["password"]), + api_version=self.module.params["api_version"], + image_streamer_ip=self.module.params["image_streamer_hostname"], + ) self.oneview_client = OneViewClient(config) - elif not self.module.params['config']: + elif not self.module.params["config"]: self.oneview_client = OneViewClient.from_environment_variables() else: - self.oneview_client = OneViewClient.from_json_file(self.module.params['config']) + self.oneview_client = OneViewClient.from_json_file(self.module.params["config"]) @abc.abstractmethod def execute_module(self): @@ -275,21 +279,21 @@ def run(self): """ try: if self.validate_etag_support: - if not self.module.params.get('validate_etag'): + if not self.module.params.get("validate_etag"): self.oneview_client.connection.disable_etag_validation() result = self.execute_module() if "changed" not in result: - result['changed'] = False + result["changed"] = False self.module.exit_json(**result) except OneViewModuleException as exception: - error_msg = '; '.join(to_native(e) for e in exception.args) + error_msg = "; ".join(to_native(e) for e in exception.args) self.module.fail_json(msg=error_msg, 
exception=traceback.format_exc()) - def resource_absent(self, resource, method='delete'): + def resource_absent(self, resource, method="delete"): """ Generic implementation of the absent state for the OneView resources. @@ -315,10 +319,10 @@ def get_by_name(self, name): :return: The resource found or None. """ - result = self.resource_client.get_by('name', name) + result = self.resource_client.get_by("name", name) return result[0] if result else None - def resource_present(self, resource, fact_name, create_method='create'): + def resource_present(self, resource, fact_name, create_method="create"): """ Generic implementation of the present state for the OneView resources. @@ -351,11 +355,7 @@ def resource_present(self, resource, fact_name, create_method='create'): changed = True msg = self.MSG_UPDATED - return dict( - msg=msg, - changed=changed, - ansible_facts={fact_name: resource} - ) + return dict(msg=msg, changed=changed, ansible_facts={fact_name: resource}) def resource_scopes_set(self, state, fact_name, scope_uris): """ @@ -370,13 +370,13 @@ def resource_scopes_set(self, state, fact_name, scope_uris): """ if scope_uris is None: scope_uris = [] - resource = state['ansible_facts'][fact_name] - operation_data = dict(operation='replace', path='/scopeUris', value=scope_uris) + resource = state["ansible_facts"][fact_name] + operation_data = dict(operation="replace", path="/scopeUris", value=scope_uris) - if resource['scopeUris'] is None or set(resource['scopeUris']) != set(scope_uris): - state['ansible_facts'][fact_name] = self.resource_client.patch(resource['uri'], **operation_data) - state['changed'] = True - state['msg'] = self.MSG_UPDATED + if resource["scopeUris"] is None or set(resource["scopeUris"]) != set(scope_uris): + state["ansible_facts"][fact_name] = self.resource_client.patch(resource["uri"], **operation_data) + state["changed"] = True + state["msg"] = self.MSG_UPDATED return state diff --git a/plugins/module_utils/online.py b/plugins/module_utils/online.py index 8bafc7426cf..bf5bcb4725a 100644 --- a/plugins/module_utils/online.py +++ b/plugins/module_utils/online.py @@ -13,22 +13,26 @@ def online_argument_spec(): return dict( - api_token=dict(required=True, fallback=(env_fallback, ['ONLINE_TOKEN', 'ONLINE_API_KEY', 'ONLINE_OAUTH_TOKEN', 'ONLINE_API_TOKEN']), - no_log=True, aliases=['oauth_token']), - api_url=dict(fallback=(env_fallback, ['ONLINE_API_URL']), default='https://api.online.net', aliases=['base_url']), - api_timeout=dict(type='int', default=30, aliases=['timeout']), - validate_certs=dict(default=True, type='bool'), + api_token=dict( + required=True, + fallback=(env_fallback, ["ONLINE_TOKEN", "ONLINE_API_KEY", "ONLINE_OAUTH_TOKEN", "ONLINE_API_TOKEN"]), + no_log=True, + aliases=["oauth_token"], + ), + api_url=dict( + fallback=(env_fallback, ["ONLINE_API_URL"]), default="https://api.online.net", aliases=["base_url"] + ), + api_timeout=dict(type="int", default=30, aliases=["timeout"]), + validate_certs=dict(default=True, type="bool"), ) class OnlineException(Exception): - def __init__(self, message): self.message = message class Response: - def __init__(self, resp, info): self.body = None if resp: @@ -56,26 +60,26 @@ def ok(self): class Online: - def __init__(self, module): self.module = module self.headers = { - 'Authorization': f"Bearer {self.module.params.get('api_token')}", - 'User-Agent': self.get_user_agent_string(module), - 'Content-type': 'application/json', + "Authorization": f"Bearer {self.module.params.get('api_token')}", + "User-Agent": 
self.get_user_agent_string(module), + "Content-type": "application/json", } self.name = None def get_resources(self): - results = self.get(f'/{self.name}') + results = self.get(f"/{self.name}") if not results.ok: raise OnlineException( - f"Error fetching {self.name} ({self.module.params.get('api_url')}/{self.name}) [{results.status_code}: {results.json['message']}]") + f"Error fetching {self.name} ({self.module.params.get('api_url')}/{self.name}) [{results.status_code}: {results.json['message']}]" + ) return results.json def _url_builder(self, path): - if path[0] == '/': + if path[0] == "/": path = path[1:] return f"{self.module.params.get('api_url')}/{path}" @@ -87,13 +91,17 @@ def send(self, method, path, data=None, headers=None): self.headers.update(headers) resp, info = fetch_url( - self.module, url, data=data, headers=self.headers, method=method, - timeout=self.module.params.get('api_timeout') + self.module, + url, + data=data, + headers=self.headers, + method=method, + timeout=self.module.params.get("api_timeout"), ) # Exceptions in fetch_url may result in a status -1, the ensures a proper error to the user in all cases - if info['status'] == -1: - self.module.fail_json(msg=info['msg']) + if info["status"] == -1: + self.module.fail_json(msg=info["msg"]) return Response(resp, info) @@ -102,16 +110,16 @@ def get_user_agent_string(module): return f"ansible {module.ansible_version} Python {sys.version.split(' ', 1)[0]}" def get(self, path, data=None, headers=None): - return self.send('GET', path, data, headers) + return self.send("GET", path, data, headers) def put(self, path, data=None, headers=None): - return self.send('PUT', path, data, headers) + return self.send("PUT", path, data, headers) def post(self, path, data=None, headers=None): - return self.send('POST', path, data, headers) + return self.send("POST", path, data, headers) def delete(self, path, data=None, headers=None): - return self.send('DELETE', path, data, headers) + return self.send("DELETE", path, data, headers) def patch(self, path, data=None, headers=None): return self.send("PATCH", path, data, headers) diff --git a/plugins/module_utils/opennebula.py b/plugins/module_utils/opennebula.py index bb28266f8f2..72165d93965 100644 --- a/plugins/module_utils/opennebula.py +++ b/plugins/module_utils/opennebula.py @@ -13,7 +13,19 @@ from ansible.module_utils.basic import AnsibleModule -IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] +IMAGE_STATES = [ + "INIT", + "READY", + "USED", + "DISABLED", + "LOCKED", + "ERROR", + "CLONE", + "DELETE", + "USED_PERS", + "LOCKED_USED", + "LOCKED_USED_PERS", +] HAS_PYONE = True try: @@ -29,8 +41,10 @@ # There are either lists of dictionaries (length > 1) or just dictionaries. def flatten(to_flatten, extract=False): """Flattens nested lists (with optional value extraction).""" + def recurse(to_flatten): return sum(map(recurse, to_flatten), []) if isinstance(to_flatten, list) else [to_flatten] + value = recurse(to_flatten) if extract and len(value) == 1: return value[0] @@ -41,6 +55,7 @@ def recurse(to_flatten): # It renders JSON-like template representation into OpenNebula's template syntax (string). 
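
[Editor's note, not part of the patch] A quick demonstration of the flatten() helper reformatted above, condensed so it runs standalone; the render() helper introduced by the preceding comment follows below:

def flatten(to_flatten, extract=False):
    def recurse(x):
        return sum(map(recurse, x), []) if isinstance(x, list) else [x]
    value = recurse(to_flatten)
    if extract and len(value) == 1:
        return value[0]
    return value

print(flatten([1, [2, [3]], 4]))            # [1, 2, 3, 4]
print(flatten([[{"a": 1}]], extract=True))  # {'a': 1}
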
def render(to_render): """Converts dictionary to OpenNebula template.""" + def recurse(to_render): for key, value in sorted(to_render.items()): if value is None: @@ -53,11 +68,12 @@ def recurse(to_render): yield f"{key}=[{','.join(recurse(item))}]" continue if isinstance(value, str): - _value = value.replace('\\', '\\\\').replace('"', '\\"') + _value = value.replace("\\", "\\\\").replace('"', '\\"') yield f'{key}="{_value}"' continue yield f'{key}="{value}"' - return '\n'.join(recurse(to_render)) + + return "\n".join(recurse(to_render)) class OpenNebulaModule: @@ -68,26 +84,27 @@ class OpenNebulaModule: """ common_args = dict( - api_url=dict(type='str', aliases=['api_endpoint'], default=environ.get("ONE_URL")), - api_username=dict(type='str', default=environ.get("ONE_USERNAME")), - api_password=dict(type='str', no_log=True, aliases=['api_token'], default=environ.get("ONE_PASSWORD")), - validate_certs=dict(default=True, type='bool'), - wait_timeout=dict(type='int', default=300), + api_url=dict(type="str", aliases=["api_endpoint"], default=environ.get("ONE_URL")), + api_username=dict(type="str", default=environ.get("ONE_USERNAME")), + api_password=dict(type="str", no_log=True, aliases=["api_token"], default=environ.get("ONE_PASSWORD")), + validate_certs=dict(default=True, type="bool"), + wait_timeout=dict(type="int", default=300), ) - def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None, required_one_of=None, required_if=None): - + def __init__( + self, argument_spec, supports_check_mode=False, mutually_exclusive=None, required_one_of=None, required_if=None + ): module_args = OpenNebulaModule.common_args.copy() module_args.update(argument_spec) - self.module = AnsibleModule(argument_spec=module_args, - supports_check_mode=supports_check_mode, - mutually_exclusive=mutually_exclusive, - required_one_of=required_one_of, - required_if=required_if) - self.result = dict(changed=False, - original_message='', - message='') + self.module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=supports_check_mode, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of, + required_if=required_if, + ) + self.result = dict(changed=False, original_message="", message="") self.one = self.create_one_client() self.resolved_parameters = self.resolve_parameters() @@ -101,7 +118,7 @@ def create_one_client(self): """ # context required for not validating SSL, old python versions won't validate anyway. - if hasattr(ssl, '_create_unverified_context'): + if hasattr(ssl, "_create_unverified_context"): no_ssl_validation_context = ssl._create_unverified_context() else: no_ssl_validation_context = None @@ -144,7 +161,7 @@ def fail(self, msg): Args: msg: human readable failure reason. """ - if hasattr(self, 'one'): + if hasattr(self, "one"): self.close_one_client() self.module.fail_json(msg=msg) @@ -153,7 +170,7 @@ def exit(self): Utility exit method, will ensure pyone is properly closed before exiting. 
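
[Editor's note, not part of the patch] Given the render() helper shown earlier in this file's diff, a small input/output example: keys come out sorted, scalar values are quoted, and backslashes and double quotes inside strings are escaped:

print(render({"CPU": 1, "NAME": 'my "vm"'}))
# CPU="1"
# NAME="my \"vm\""
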
""" - if hasattr(self, 'one'): + if hasattr(self, "one"): self.close_one_client() self.module.exit_json(**self.result) @@ -169,11 +186,11 @@ def resolve_parameters(self): resolved_params = dict(self.module.params) - if 'cluster_name' in self.module.params: + if "cluster_name" in self.module.params: clusters = self.one.clusterpool.info() for cluster in clusters.CLUSTER: - if cluster.NAME == self.module.params.get('cluster_name'): - resolved_params['cluster_id'] = cluster.ID + if cluster.NAME == self.module.params.get("cluster_name"): + resolved_params["cluster_id"] = cluster.ID return resolved_params @@ -196,14 +213,14 @@ def get_parameter(self, name): return self.resolved_parameters.get(name) def get_host_by_name(self, name): - ''' + """ Returns a host given its name. Args: name: the name of the host Returns: the host object or None if the host is absent. - ''' + """ hosts = self.one.hostpool.info() for h in hosts.HOST: if h.NAME == name: @@ -226,14 +243,14 @@ def get_cluster_by_name(self, name): return None def get_template_by_name(self, name): - ''' + """ Returns a template given its name. Args: name: the name of the template Returns: the template object or None if the host is absent. - ''' + """ templates = self.one.templatepool.info() for t in templates.TEMPLATE: if t.NAME == name: @@ -262,7 +279,7 @@ def cast_template(self, template): if isinstance(value, dict): self.cast_template(template[key]) elif isinstance(value, list): - template[key] = ', '.join(value) + template[key] = ", ".join(value) elif not isinstance(value, str): template[key] = str(value) @@ -290,9 +307,16 @@ def requires_template_update(self, current, desired): return True return not (desired == intersection) - def wait_for_state(self, element_name, state, state_name, target_states, - invalid_states=None, transition_states=None, - wait_timeout=None): + def wait_for_state( + self, + element_name, + state, + state_name, + target_states, + invalid_states=None, + transition_states=None, + wait_timeout=None, + ): """ Args: element_name: the name of the object we are waiting for: HOST, VM, etc. 
@@ -313,11 +337,11 @@ def wait_for_state(self, element_name, state, state_name, target_states, current_state = state() if current_state in invalid_states: - self.fail(f'invalid {element_name} state {state_name(current_state)}') + self.fail(f"invalid {element_name} state {state_name(current_state)}") if transition_states: if current_state not in transition_states: - self.fail(f'invalid {element_name} transition state {state_name(current_state)}') + self.fail(f"invalid {element_name} transition state {state_name(current_state)}") if current_state in target_states: return True @@ -353,17 +377,17 @@ def get_image_list_id(self, image, element): """ list_of_id = [] - if element == 'VMS': + if element == "VMS": image_list = image.VMS - if element == 'CLONES': + if element == "CLONES": image_list = image.CLONES - if element == 'APP_CLONES': + if element == "APP_CLONES": image_list = image.APP_CLONES for iter in image_list.ID: list_of_id.append( # These are optional so firstly check for presence - getattr(iter, 'ID', 'Null'), + getattr(iter, "ID", "Null"), ) return list_of_id @@ -374,16 +398,18 @@ def get_image_snapshots_list(self, image): list_of_snapshots = [] for iter in image.SNAPSHOTS.SNAPSHOT: - list_of_snapshots.append({ - 'date': iter['DATE'], - 'parent': iter['PARENT'], - 'size': iter['SIZE'], - # These are optional so firstly check for presence - 'allow_orhans': getattr(image.SNAPSHOTS, 'ALLOW_ORPHANS', 'Null'), - 'children': getattr(iter, 'CHILDREN', 'Null'), - 'active': getattr(iter, 'ACTIVE', 'Null'), - 'name': getattr(iter, 'NAME', 'Null'), - }) + list_of_snapshots.append( + { + "date": iter["DATE"], + "parent": iter["PARENT"], + "size": iter["SIZE"], + # These are optional so firstly check for presence + "allow_orhans": getattr(image.SNAPSHOTS, "ALLOW_ORPHANS", "Null"), + "children": getattr(iter, "CHILDREN", "Null"), + "active": getattr(iter, "ACTIVE", "Null"), + "name": getattr(iter, "NAME", "Null"), + } + ) return list_of_snapshots def get_image_info(self, image): @@ -393,43 +419,43 @@ def get_image_info(self, image): Returns: a copy of the parameters that includes the resolved parameters. 
""" info = { - 'id': image.ID, - 'name': image.NAME, - 'state': IMAGE_STATES[image.STATE], - 'running_vms': image.RUNNING_VMS, - 'used': bool(image.RUNNING_VMS), - 'user_name': image.UNAME, - 'user_id': image.UID, - 'group_name': image.GNAME, - 'group_id': image.GID, - 'permissions': { - 'owner_u': image.PERMISSIONS.OWNER_U, - 'owner_m': image.PERMISSIONS.OWNER_M, - 'owner_a': image.PERMISSIONS.OWNER_A, - 'group_u': image.PERMISSIONS.GROUP_U, - 'group_m': image.PERMISSIONS.GROUP_M, - 'group_a': image.PERMISSIONS.GROUP_A, - 'other_u': image.PERMISSIONS.OTHER_U, - 'other_m': image.PERMISSIONS.OTHER_M, - 'other_a': image.PERMISSIONS.OTHER_A + "id": image.ID, + "name": image.NAME, + "state": IMAGE_STATES[image.STATE], + "running_vms": image.RUNNING_VMS, + "used": bool(image.RUNNING_VMS), + "user_name": image.UNAME, + "user_id": image.UID, + "group_name": image.GNAME, + "group_id": image.GID, + "permissions": { + "owner_u": image.PERMISSIONS.OWNER_U, + "owner_m": image.PERMISSIONS.OWNER_M, + "owner_a": image.PERMISSIONS.OWNER_A, + "group_u": image.PERMISSIONS.GROUP_U, + "group_m": image.PERMISSIONS.GROUP_M, + "group_a": image.PERMISSIONS.GROUP_A, + "other_u": image.PERMISSIONS.OTHER_U, + "other_m": image.PERMISSIONS.OTHER_M, + "other_a": image.PERMISSIONS.OTHER_A, }, - 'type': image.TYPE, - 'disk_type': image.DISK_TYPE, - 'persistent': image.PERSISTENT, - 'regtime': image.REGTIME, - 'source': image.SOURCE, - 'path': image.PATH, - 'fstype': getattr(image, 'FSTYPE', 'Null'), - 'size': image.SIZE, - 'cloning_ops': image.CLONING_OPS, - 'cloning_id': image.CLONING_ID, - 'target_snapshot': image.TARGET_SNAPSHOT, - 'datastore_id': image.DATASTORE_ID, - 'datastore': image.DATASTORE, - 'vms': self.get_image_list_id(image, 'VMS'), - 'clones': self.get_image_list_id(image, 'CLONES'), - 'app_clones': self.get_image_list_id(image, 'APP_CLONES'), - 'snapshots': self.get_image_snapshots_list(image), - 'template': image.TEMPLATE, + "type": image.TYPE, + "disk_type": image.DISK_TYPE, + "persistent": image.PERSISTENT, + "regtime": image.REGTIME, + "source": image.SOURCE, + "path": image.PATH, + "fstype": getattr(image, "FSTYPE", "Null"), + "size": image.SIZE, + "cloning_ops": image.CLONING_OPS, + "cloning_id": image.CLONING_ID, + "target_snapshot": image.TARGET_SNAPSHOT, + "datastore_id": image.DATASTORE_ID, + "datastore": image.DATASTORE, + "vms": self.get_image_list_id(image, "VMS"), + "clones": self.get_image_list_id(image, "CLONES"), + "app_clones": self.get_image_list_id(image, "APP_CLONES"), + "snapshots": self.get_image_snapshots_list(image), + "template": image.TEMPLATE, } return info diff --git a/plugins/module_utils/oracle/oci_utils.py b/plugins/module_utils/oracle/oci_utils.py index 85b14e77c7d..b3c0f9f1f5b 100644 --- a/plugins/module_utils/oracle/oci_utils.py +++ b/plugins/module_utils/oracle/oci_utils.py @@ -14,6 +14,7 @@ import logging.config import os import tempfile + # (TODO: remove next line!) 
from datetime import datetime # noqa: F401, pylint: disable=unused-import from operator import eq @@ -117,9 +118,7 @@ def get_common_arg_spec(supports_create=False, supports_wait=False): if supports_wait: common_args.update( wait=dict(type="bool", default=True), - wait_timeout=dict( - type="int", default=MAX_WAIT_TIMEOUT_IN_SECONDS - ), + wait_timeout=dict(type="int", default=MAX_WAIT_TIMEOUT_IN_SECONDS), wait_until=dict(type="str"), ) @@ -156,9 +155,7 @@ def get_oci_config(module, service_client_class=None): if not config_file: if "OCI_CONFIG_FILE" in os.environ: config_file = os.environ["OCI_CONFIG_FILE"] - _debug( - f"Config file through OCI_CONFIG_FILE environment variable - {config_file}" - ) + _debug(f"Config file through OCI_CONFIG_FILE environment variable - {config_file}") else: config_file = "~/.oci/config" _debug(f"Config file (fallback) - {config_file} ") @@ -170,9 +167,7 @@ def get_oci_config(module, service_client_class=None): else: config_profile = "DEFAULT" try: - config = oci.config.from_file( - file_location=config_file, profile_name=config_profile - ) + config = oci.config.from_file(file_location=config_file, profile_name=config_profile) except ( ConfigFileNotFound, InvalidConfig, @@ -183,9 +178,7 @@ def get_oci_config(module, service_client_class=None): # When auth_type is not instance_principal, config file is required module.fail_json(msg=str(ex)) else: - _debug( - f"Ignore {ex} as the auth_type is set to instance_principal" - ) + _debug(f"Ignore {ex} as the auth_type is set to instance_principal") # if instance_principal auth is used, an empty 'config' map is used below. config["additional_user_agent"] = f"Oracle-Ansible/{__version__}" @@ -234,24 +227,16 @@ def get_oci_config(module, service_client_class=None): ) # Redirect calls to home region for IAM service. - do_not_redirect = module.params.get( - "do_not_redirect_to_home_region", False - ) or os.environ.get("OCI_IDENTITY_DO_NOT_REDIRECT_TO_HOME_REGION") + do_not_redirect = module.params.get("do_not_redirect_to_home_region", False) or os.environ.get( + "OCI_IDENTITY_DO_NOT_REDIRECT_TO_HOME_REGION" + ) if service_client_class == IdentityClient and not do_not_redirect: _debug("Region passed for module invocation - {0} ".format(config["region"])) identity_client = IdentityClient(config) - region_subscriptions = identity_client.list_region_subscriptions( - config["tenancy"] - ).data + region_subscriptions = identity_client.list_region_subscriptions(config["tenancy"]).data # Replace the region in the config with the home region. - [config["region"]] = [ - rs.region_name for rs in region_subscriptions if rs.is_home_region is True - ] - _debug( - "Setting region in the config to home region - {0} ".format( - config["region"] - ) - ) + [config["region"]] = [rs.region_name for rs in region_subscriptions if rs.is_home_region is True] + _debug("Setting region in the config to home region - {0} ".format(config["region"])) return config @@ -282,9 +267,7 @@ def create_service_client(module, service_client_class): try: oci.config.validate_config(config, **kwargs) except oci.exceptions.InvalidConfig as ic: - module.fail_json( - msg=f"Invalid OCI configuration. Exception: {ic}" - ) + module.fail_json(msg=f"Invalid OCI configuration. 
Exception: {ic}") # Create service client class with the signer client = service_client_class(config, **kwargs) @@ -294,43 +277,31 @@ def create_service_client(module, service_client_class): def _is_instance_principal_auth(module): # check if auth type is overridden via module params - instance_principal_auth = ( - "auth_type" in module.params - and module.params["auth_type"] == "instance_principal" - ) + instance_principal_auth = "auth_type" in module.params and module.params["auth_type"] == "instance_principal" if not instance_principal_auth: instance_principal_auth = ( - "OCI_ANSIBLE_AUTH_TYPE" in os.environ - and os.environ["OCI_ANSIBLE_AUTH_TYPE"] == "instance_principal" + "OCI_ANSIBLE_AUTH_TYPE" in os.environ and os.environ["OCI_ANSIBLE_AUTH_TYPE"] == "instance_principal" ) return instance_principal_auth -def _merge_auth_option( - config, module, module_option_name, env_var_name, config_attr_name -): +def _merge_auth_option(config, module, module_option_name, env_var_name, config_attr_name): """Merge the values for an authentication attribute from ansible module options and environment variables with the values specified in a configuration file""" _debug(f"Merging {module_option_name}") auth_attribute = module.params.get(module_option_name) - _debug( - f"\t Ansible module option {module_option_name} = {auth_attribute}" - ) + _debug(f"\t Ansible module option {module_option_name} = {auth_attribute}") if not auth_attribute: if env_var_name in os.environ: auth_attribute = os.environ[env_var_name] - _debug( - f"\t Environment variable {env_var_name} = {auth_attribute}" - ) + _debug(f"\t Environment variable {env_var_name} = {auth_attribute}") # An authentication attribute has been provided through an env-variable or an ansible # option and must override the corresponding attribute's value specified in the # config file [profile]. if auth_attribute: - _debug( - f"Updating config attribute {config_attr_name} -> {auth_attribute} " - ) + _debug(f"Updating config attribute {config_attr_name} -> {auth_attribute} ") config.update({config_attr_name: auth_attribute}) @@ -425,9 +396,7 @@ def setup_logging( return logging -def check_and_update_attributes( - target_instance, attr_name, input_value, existing_value, changed -): +def check_and_update_attributes(target_instance, attr_name, input_value, existing_value, changed): """ This function checks the difference between two resource attributes of literal types and sets the attribute value in the target instance type holding the attribute. @@ -460,7 +429,6 @@ def check_and_update_resource( wait_applicable=True, states=None, ): - """ This function handles update operation on a resource. It checks whether update is required and accordingly returns the resource and the changed status. @@ -486,9 +454,7 @@ def check_and_update_resource( """ try: result = dict(changed=False) - attributes_to_update, resource = get_attr_to_update( - get_fn, kwargs_get, module, update_attributes - ) + attributes_to_update, resource = get_attr_to_update(get_fn, kwargs_get, module, update_attributes) if attributes_to_update: kwargs_update = get_kwargs_update( @@ -501,9 +467,7 @@ def check_and_update_resource( resource = call_with_backoff(update_fn, **kwargs_update).data if wait_applicable: if client is None: - module.fail_json( - msg="wait_applicable is True, but client is not specified." 
- ) + module.fail_json(msg="wait_applicable is True, but client is not specified.") resource = wait_for_resource_lifecycle_state( client, module, True, kwargs_get, get_fn, None, resource, states ) @@ -528,10 +492,7 @@ def get_kwargs_update( update_object = param() for key in update_object.attribute_map: if key in attributes_to_update: - if ( - sub_attributes_of_update_model - and key in sub_attributes_of_update_model - ): + if sub_attributes_of_update_model and key in sub_attributes_of_update_model: setattr(update_object, key, sub_attributes_of_update_model[key]) else: setattr(update_object, key, module.params[key]) @@ -601,9 +562,9 @@ def get_attr_to_update(get_fn, kwargs_get, module, update_attributes): unequal_list_attr = ( isinstance(resources_attr_value, list) or isinstance(user_provided_attr_value, list) ) and not are_lists_equal(user_provided_attr_value, resources_attr_value) - unequal_attr = not isinstance(resources_attr_value, list) and to_dict( - resources_attr_value - ) != to_dict(user_provided_attr_value) + unequal_attr = not isinstance(resources_attr_value, list) and to_dict(resources_attr_value) != to_dict( + user_provided_attr_value + ) if unequal_list_attr or unequal_attr: # only update if the user has explicitly provided a value for this attribute # otherwise, no update is necessary because the user hasn't expressed a particular @@ -621,9 +582,7 @@ def get_taggable_arg_spec(supports_create=False, supports_wait=False): defined tags. """ tag_arg_spec = get_common_arg_spec(supports_create, supports_wait) - tag_arg_spec.update( - dict(freeform_tags=dict(type="dict"), defined_tags=dict(type="dict")) - ) + tag_arg_spec.update(dict(freeform_tags=dict(type="dict"), defined_tags=dict(type="dict"))) return tag_arg_spec @@ -723,15 +682,11 @@ def check_and_create_resource( result = dict() - attributes_to_consider = _get_attributes_to_consider( - exclude_attributes, model, module - ) + attributes_to_consider = _get_attributes_to_consider(exclude_attributes, model, module) if "defined_tags" not in default_attribute_values: default_attribute_values["defined_tags"] = {} resource_matched = None - _debug( - f"Trying to find a match within {len(existing_resources)} existing resources" - ) + _debug(f"Trying to find a match within {len(existing_resources)} existing resources") for resource in existing_resources: if _is_resource_active(resource, dead_states): @@ -888,24 +843,17 @@ def does_existing_resource_match_user_inputs( # If the user has not explicitly provided the value for attr and attr is in exclude_list, we can # consider this as a 'pass'. For example, if an attribute 'display_name' is not specified by user and # that attribute is in the 'exclude_list' according to the module author(Not User), then exclude - if ( - exclude_attributes.get(attr) is None - and resources_value_for_attr is not None - ): + if exclude_attributes.get(attr) is None and resources_value_for_attr is not None: if module.argument_spec.get(attr): attribute_with_default_metadata = module.argument_spec.get(attr) - default_attribute_value = attribute_with_default_metadata.get( - "default", None - ) + default_attribute_value = attribute_with_default_metadata.get("default", None) if default_attribute_value is not None: if existing_resource[attr] != default_attribute_value: return False # Check if attr has a value that is not default. For example, a custom `security_list_id` # is assigned to the subnet's attribute `security_list_ids`. 
If the attribute is assigned a # value that is not the default, then it must be considered a mismatch and false returned. - elif not is_attr_assigned_default( - default_attribute_values, attr, existing_resource[attr] - ): + elif not is_attr_assigned_default(default_attribute_values, attr, existing_resource[attr]): return False else: @@ -999,24 +947,17 @@ def check_if_user_value_matches_resources_attr( if isinstance(exclude_attributes.get(attribute_name), dict): exclude_attributes = exclude_attributes.get(attribute_name) - if isinstance(resources_value_for_attr, list) or isinstance( - user_provided_value_for_attr, list - ): + if isinstance(resources_value_for_attr, list) or isinstance(user_provided_value_for_attr, list): # Perform a deep equivalence check for a List attribute if exclude_attributes.get(attribute_name): return - if ( - user_provided_value_for_attr is None - and default_attribute_values.get(attribute_name) is not None - ): + if user_provided_value_for_attr is None and default_attribute_values.get(attribute_name) is not None: user_provided_value_for_attr = default_attribute_values.get(attribute_name) if resources_value_for_attr is None and user_provided_value_for_attr is None: return - if ( - resources_value_for_attr is None or user_provided_value_for_attr is None - ): + if resources_value_for_attr is None or user_provided_value_for_attr is None: res[0] = False return @@ -1028,17 +969,10 @@ def check_if_user_value_matches_resources_attr( res[0] = False return - if ( - user_provided_value_for_attr - and isinstance(user_provided_value_for_attr[0], dict) - ): + if user_provided_value_for_attr and isinstance(user_provided_value_for_attr[0], dict): # Process a list of dict - sorted_user_provided_value_for_attr = sort_list_of_dictionary( - user_provided_value_for_attr - ) - sorted_resources_value_for_attr = sort_list_of_dictionary( - resources_value_for_attr - ) + sorted_user_provided_value_for_attr = sort_list_of_dictionary(user_provided_value_for_attr) + sorted_resources_value_for_attr = sort_list_of_dictionary(resources_value_for_attr) else: sorted_user_provided_value_for_attr = sorted(user_provided_value_for_attr) @@ -1046,9 +980,7 @@ def check_if_user_value_matches_resources_attr( # Walk through the sorted list values of the resource's value for this attribute, and compare against user # provided values. 
- for index, resources_value_for_attr_part in enumerate( - sorted_resources_value_for_attr - ): + for index, resources_value_for_attr_part in enumerate(sorted_resources_value_for_attr): check_if_user_value_matches_resources_attr( attribute_name, resources_value_for_attr_part, @@ -1064,10 +996,7 @@ def check_if_user_value_matches_resources_attr( if not resources_value_for_attr and user_provided_value_for_attr: res[0] = False for key in resources_value_for_attr: - if ( - user_provided_value_for_attr is not None - and user_provided_value_for_attr - ): + if user_provided_value_for_attr is not None and user_provided_value_for_attr: check_if_user_value_matches_resources_attr( key, resources_value_for_attr.get(key), @@ -1096,17 +1025,12 @@ def check_if_user_value_matches_resources_attr( ) elif resources_value_for_attr != user_provided_value_for_attr: - if ( - exclude_attributes.get(attribute_name) is None - and default_attribute_values.get(attribute_name) is not None - ): + if exclude_attributes.get(attribute_name) is None and default_attribute_values.get(attribute_name) is not None: # As the user has not specified a value for an optional attribute, if the existing resource's # current state has a DEFAULT value for that attribute, we must not consider this incongruence # an issue and continue with other checks. If the existing resource's value for the attribute # is not the default value, then the existing resource is not a match. - if not is_attr_assigned_default( - default_attribute_values, attribute_name, resources_value_for_attr - ): + if not is_attr_assigned_default(default_attribute_values, attribute_name, resources_value_for_attr): res[0] = False elif user_provided_value_for_attr is not None: res[0] = False @@ -1123,9 +1047,7 @@ def are_dicts_equal( # User has not provided a value for the map option. In this case, the user hasn't expressed an intent around # this optional attribute. Check if existing_resource_dict matches default. # For example, source_details attribute in volume is optional and does not have any defaults. - return is_attr_assigned_default( - default_attribute_values, option_name, existing_resource_dict - ) + return is_attr_assigned_default(default_attribute_values, option_name, existing_resource_dict) # If the existing resource has an empty dict, while the user has provided entries, dicts are not equal if not existing_resource_dict and user_provided_dict: @@ -1145,9 +1067,7 @@ def are_dicts_equal( # If sub_attr not provided by user, check if the sub-attribute value of existing resource matches default value. else: if not should_dict_attr_be_excluded(option_name, sub_attr, exclude_list): - default_value_for_dict_attr = default_attribute_values.get( - option_name, None - ) + default_value_for_dict_attr = default_attribute_values.get(option_name, None) if default_value_for_dict_attr: # if a default value for the sub-attr was provided by the module author, fail if the existing # resource's value for the sub-attr is not the default @@ -1173,7 +1093,7 @@ def are_dicts_equal( def should_dict_attr_be_excluded(map_option_name, option_key, exclude_list): """An entry for the Exclude list for excluding a map's key is specified as a dict with the map option name as the key, and the value as a list of keys to be excluded within that map. 
For example, if the keys "k1" and "k2" of a map - option named "m1" needs to be excluded, the exclude list must have an entry {'m1': ['k1','k2']} """ + option named "m1" needs to be excluded, the exclude list must have an entry {'m1': ['k1','k2']}""" for exclude_item in exclude_list: if isinstance(exclude_item, dict): if map_option_name in exclude_item: @@ -1356,21 +1276,13 @@ def wait_for_resource_lifecycle_state( # 'Authorization failed or requested resource not found', 'status': 404}. # This is because it takes few seconds for the permissions on a compartment to be ready. # Wait for few seconds before attempting a get call on compartment. - _debug( - "Pausing execution for permission on the newly created compartment to be ready." - ) + _debug("Pausing execution for permission on the newly created compartment to be ready.") time.sleep(15) if kwargs_get: - _debug( - f"Waiting for resource to reach READY state. get_args: {kwargs_get}" - ) + _debug(f"Waiting for resource to reach READY state. get_args: {kwargs_get}") response_get = call_with_backoff(get_fn, **kwargs_get) else: - _debug( - "Waiting for resource with id {0} to reach READY state.".format( - resource["id"] - ) - ) + _debug("Waiting for resource with id {0} to reach READY state.".format(resource["id"])) response_get = call_with_backoff(get_fn, **{get_param: resource["id"]}) if states is None: states = module.params.get("wait_until") or DEFAULT_READY_STATES @@ -1379,9 +1291,7 @@ def wait_for_resource_lifecycle_state( client, response_get, evaluate_response=lambda r: r.data.lifecycle_state in states, - max_wait_seconds=module.params.get( - "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS - ), + max_wait_seconds=module.params.get("wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS), ).data ) return resource @@ -1390,28 +1300,20 @@ def wait_for_resource_lifecycle_state( def wait_on_work_request(client, response, module): try: if module.params.get("wait", None): - _debug( - f"Waiting for work request with id {response.data.id} to reach SUCCEEDED state." - ) + _debug(f"Waiting for work request with id {response.data.id} to reach SUCCEEDED state.") wait_response = oci.wait_until( client, response, evaluate_response=lambda r: r.data.status == "SUCCEEDED", - max_wait_seconds=module.params.get( - "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS - ), + max_wait_seconds=module.params.get("wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS), ) else: - _debug( - f"Waiting for work request with id {response.data.id} to reach ACCEPTED state." 
- ) + _debug(f"Waiting for work request with id {response.data.id} to reach ACCEPTED state.") wait_response = oci.wait_until( client, response, evaluate_response=lambda r: r.data.status == "ACCEPTED", - max_wait_seconds=module.params.get( - "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS - ), + max_wait_seconds=module.params.get("wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS), ) except MaximumWaitTimeExceeded as ex: _debug(str(ex)) @@ -1461,12 +1363,8 @@ def delete_and_wait( response = call_with_backoff(delete_fn, **kwargs_delete) if process_work_request: wr_id = response.headers.get("opc-work-request-id") - get_wr_response = call_with_backoff( - client.get_work_request, work_request_id=wr_id - ) - result["work_request"] = to_dict( - wait_on_work_request(client, get_wr_response, module) - ) + get_wr_response = call_with_backoff(client.get_work_request, work_request_id=wr_id) + result["work_request"] = to_dict(wait_on_work_request(client, get_wr_response, module)) # Set changed to True as work request has been created to delete the resource. result["changed"] = True resource = to_dict(call_with_backoff(get_fn, **kwargs_get).data) @@ -1476,19 +1374,13 @@ def delete_and_wait( if wait_applicable and module.params.get("wait", None): if states is None: - states = ( - module.params.get("wait_until") - or DEFAULT_TERMINATED_STATES - ) + states = module.params.get("wait_until") or DEFAULT_TERMINATED_STATES try: wait_response = oci.wait_until( client, get_fn(**kwargs_get), - evaluate_response=lambda r: r.data.lifecycle_state - in states, - max_wait_seconds=module.params.get( - "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS - ), + evaluate_response=lambda r: r.data.lifecycle_state in states, + max_wait_seconds=module.params.get("wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS), succeed_on_not_found=True, ) except MaximumWaitTimeExceeded as ex: @@ -1513,17 +1405,13 @@ def delete_and_wait( result[resource_type] = resource else: - _debug( - f"Resource {resource_type} with {kwargs_get} already deleted. So returning changed=False" - ) + _debug(f"Resource {resource_type} with {kwargs_get} already deleted. So returning changed=False") except ServiceError as ex: # DNS API throws a 400 InvalidParameter when a zone id is provided for zone_name_or_id and if the zone # resource is not available, instead of the expected 404. So working around this for now. if isinstance(client, oci.dns.DnsClient): if ex.status == 400 and ex.code == "InvalidParameter": - _debug( - f"Resource {resource_type} with {kwargs_get} already deleted. So returning changed=False" - ) + _debug(f"Resource {resource_type} with {kwargs_get} already deleted. 
So returning changed=False") elif ex.status != 404: module.fail_json(msg=ex.message) result[resource_type] = dict() @@ -1673,9 +1561,7 @@ def generate_subclass(parent_class): "__eq__": generic_eq, } subclass_name = "GeneratedSub" + parent_class.__name__ - generated_sub_class = type( - subclass_name, (parent_class,), dict_of_method_in_subclass - ) + generated_sub_class = type(subclass_name, (parent_class,), dict_of_method_in_subclass) return generated_sub_class @@ -1689,15 +1575,11 @@ def get_hashed_object_list(class_type, object_with_values, attributes_class_type return None hashed_class_instances = [] for object_with_value in object_with_values: - hashed_class_instances.append( - get_hashed_object(class_type, object_with_value, attributes_class_type) - ) + hashed_class_instances.append(get_hashed_object(class_type, object_with_value, attributes_class_type)) return hashed_class_instances -def get_hashed_object( - class_type, object_with_value, attributes_class_type=None, supported_attributes=None -): +def get_hashed_object(class_type, object_with_value, attributes_class_type=None, supported_attributes=None): """ Convert any class instance into hashable so that the instances are eligible for various comparison @@ -1718,9 +1600,7 @@ def get_hashed_object( hashed_class_instance = HashedClass() if supported_attributes: - class_attributes = list( - set(hashed_class_instance.attribute_map) & set(supported_attributes) - ) + class_attributes = list(set(hashed_class_instance.attribute_map) & set(supported_attributes)) else: class_attributes = hashed_class_instance.attribute_map @@ -1729,17 +1609,13 @@ def get_hashed_object( if attributes_class_type: for attribute_class_type in attributes_class_type: if isinstance(attribute_value, attribute_class_type): - attribute_value = get_hashed_object( - attribute_class_type, attribute_value - ) + attribute_value = get_hashed_object(attribute_class_type, attribute_value) hashed_class_instance.__setattr__(attribute, attribute_value) return hashed_class_instance -def update_class_type_attr_difference( - update_class_details, existing_instance, attr_name, attr_class, input_attr_value -): +def update_class_type_attr_difference(update_class_details, existing_instance, attr_name, attr_class, input_attr_value): """ Checks the difference and updates an attribute which is represented by a class instance. Not applicable if the attribute type is a primitive value. 
@@ -1757,9 +1633,7 @@ def update_class_type_attr_difference( """ changed = False # Here existing attribute values is an instance - existing_attr_value = get_hashed_object( - attr_class, getattr(existing_instance, attr_name) - ) + existing_attr_value = get_hashed_object(attr_class, getattr(existing_instance, attr_name)) if input_attr_value is None: update_class_details.__setattr__(attr_name, existing_attr_value) else: @@ -1791,9 +1665,7 @@ def get_existing_resource(target_fn, module, **kwargs): return existing_resource -def get_attached_instance_info( - module, lookup_attached_instance, list_attachments_fn, list_attachments_args -): +def get_attached_instance_info(module, lookup_attached_instance, list_attachments_fn, list_attachments_args): config = get_oci_config(module) identity_client = create_service_client(module, IdentityClient) @@ -1802,18 +1674,14 @@ def get_attached_instance_info( if lookup_attached_instance: # Get all the compartments in the tenancy compartments = to_dict( - identity_client.list_compartments( - config.get("tenancy"), compartment_id_in_subtree=True - ).data + identity_client.list_compartments(config.get("tenancy"), compartment_id_in_subtree=True).data ) # For each compartment, get the volume attachments for the compartment_id with the other args in # list_attachments_args. for compartment in compartments: list_attachments_args["compartment_id"] = compartment["id"] try: - volume_attachments += list_all_resources( - list_attachments_fn, **list_attachments_args - ) + volume_attachments += list_all_resources(list_attachments_fn, **list_attachments_args) # Pass ServiceError due to authorization issue in accessing volume attachments of a compartment except ServiceError as ex: @@ -1821,9 +1689,7 @@ def get_attached_instance_info( pass else: - volume_attachments = list_all_resources( - list_attachments_fn, **list_attachments_args - ) + volume_attachments = list_all_resources(list_attachments_fn, **list_attachments_args) volume_attachments = to_dict(volume_attachments) # volume_attachments has attachments in DETACHING or DETACHED state. 
Return the volume attachment in ATTACHING or @@ -1864,15 +1730,11 @@ def check_and_return_component_list_difference( return existing_components, changed -def get_component_list_difference( - input_component_list, existing_components, purge_components, delete_components=False -): +def get_component_list_difference(input_component_list, existing_components, purge_components, delete_components=False): if delete_components: if existing_components is None: return None, False - component_differences = set(existing_components).intersection( - set(input_component_list) - ) + component_differences = set(existing_components).intersection(set(input_component_list)) if component_differences: return list(set(existing_components) - component_differences), True else: @@ -1880,16 +1742,12 @@ def get_component_list_difference( if existing_components is None: return input_component_list, True if purge_components: - components_differences = set(input_component_list).symmetric_difference( - set(existing_components) - ) + components_differences = set(input_component_list).symmetric_difference(set(existing_components)) if components_differences: return input_component_list, True - components_differences = set(input_component_list).difference( - set(existing_components) - ) + components_differences = set(input_component_list).difference(set(existing_components)) if components_differences: return list(components_differences) + existing_components, True return None, False @@ -1900,9 +1758,7 @@ def write_to_file(path, content): dest_file.write(content) -def get_target_resource_from_list( - module, list_resource_fn, target_resource_id=None, **kwargs -): +def get_target_resource_from_list(module, list_resource_fn, target_resource_id=None, **kwargs): """ Returns a resource filtered by identifier from a list of resources. 
This method should be used as an alternative to the 'get resource' method when 'get resource' is not provided by diff --git a/plugins/module_utils/pacemaker.py b/plugins/module_utils/pacemaker.py index 355fd55cc22..b2085eff4a5 100644 --- a/plugins/module_utils/pacemaker.py +++ b/plugins/module_utils/pacemaker.py @@ -24,22 +24,26 @@ def fmt_resource_type(value): - return [":".join(value[k] for k in ['resource_standard', 'resource_provider', 'resource_name'] if value.get(k) is not None)] + return [ + ":".join( + value[k] for k in ["resource_standard", "resource_provider", "resource_name"] if value.get(k) is not None + ) + ] def fmt_resource_operation(value): cmd = [] for op in value: cmd.append("op") - cmd.append(op.get('operation_action')) - for operation_option in op.get('operation_option'): + cmd.append(op.get("operation_action")) + for operation_option in op.get("operation_option"): cmd.append(operation_option) return cmd def fmt_resource_argument(value): - return ['--group' if value['argument_action'] == 'group' else value['argument_action']] + value['argument_option'] + return ["--group" if value["argument_action"] == "group" else value["argument_action"]] + value["argument_option"] def get_pacemaker_maintenance_mode(runner): @@ -51,7 +55,7 @@ def get_pacemaker_maintenance_mode(runner): def pacemaker_runner(module, **kwargs): - runner_command = ['pcs'] + runner_command = ["pcs"] runner = CmdRunner( module, command=runner_command, @@ -74,6 +78,6 @@ def pacemaker_runner(module, **kwargs): version=cmd_runner_fmt.as_fixed("--version"), output_format=cmd_runner_fmt.as_opt_eq_val("--output-format"), ), - **kwargs + **kwargs, ) return runner diff --git a/plugins/module_utils/pipx.py b/plugins/module_utils/pipx.py index 3d81a6c5f22..f4bfe26c554 100644 --- a/plugins/module_utils/pipx.py +++ b/plugins/module_utils/pipx.py @@ -12,27 +12,27 @@ pipx_common_argspec = { - "global": dict(type='bool', default=False), - "executable": dict(type='path'), + "global": dict(type="bool", default=False), + "executable": dict(type="path"), } _state_map = dict( - install='install', - install_all='install-all', - present='install', - uninstall='uninstall', - absent='uninstall', - uninstall_all='uninstall-all', - inject='inject', - uninject='uninject', - upgrade='upgrade', - upgrade_shared='upgrade-shared', - upgrade_all='upgrade-all', - reinstall='reinstall', - reinstall_all='reinstall-all', - pin='pin', - unpin='unpin', + install="install", + install_all="install-all", + present="install", + uninstall="uninstall", + absent="uninstall", + uninstall_all="uninstall-all", + inject="inject", + uninject="uninject", + upgrade="upgrade", + upgrade_shared="upgrade-shared", + upgrade_all="upgrade-all", + reinstall="reinstall", + reinstall_all="reinstall-all", + pin="pin", + unpin="unpin", ) @@ -46,15 +46,15 @@ def pipx_runner(module, command, **kwargs): inject_packages=cmd_runner_fmt.as_list(), force=cmd_runner_fmt.as_bool("--force"), include_injected=cmd_runner_fmt.as_bool("--include-injected"), - index_url=cmd_runner_fmt.as_opt_val('--index-url'), - python=cmd_runner_fmt.as_opt_val('--python'), + index_url=cmd_runner_fmt.as_opt_val("--index-url"), + python=cmd_runner_fmt.as_opt_val("--python"), system_site_packages=cmd_runner_fmt.as_bool("--system-site-packages"), - _list=cmd_runner_fmt.as_fixed(['list', '--include-injected', '--json']), + _list=cmd_runner_fmt.as_fixed(["list", "--include-injected", "--json"]), editable=cmd_runner_fmt.as_bool("--editable"), - pip_args=cmd_runner_fmt.as_opt_eq_val('--pip-args'), -
suffix=cmd_runner_fmt.as_opt_val('--suffix'), + pip_args=cmd_runner_fmt.as_opt_eq_val("--pip-args"), + suffix=cmd_runner_fmt.as_opt_val("--suffix"), spec_metadata=cmd_runner_fmt.as_list(), - version=cmd_runner_fmt.as_fixed('--version'), + version=cmd_runner_fmt.as_fixed("--version"), ) arg_formats["global"] = cmd_runner_fmt.as_bool("--global") @@ -62,23 +62,23 @@ def pipx_runner(module, command, **kwargs): module, command=command, arg_formats=arg_formats, - environ_update={'USE_EMOJI': '0', 'PIPX_USE_EMOJI': '0'}, + environ_update={"USE_EMOJI": "0", "PIPX_USE_EMOJI": "0"}, check_rc=True, - **kwargs + **kwargs, ) return runner def _make_entry(venv_name, venv, include_injected, include_deps): entry = { - 'name': venv_name, - 'version': venv['metadata']['main_package']['package_version'], - 'pinned': venv['metadata']['main_package'].get('pinned'), + "name": venv_name, + "version": venv["metadata"]["main_package"]["package_version"], + "pinned": venv["metadata"]["main_package"].get("pinned"), } if include_injected: - entry['injected'] = {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()} + entry["injected"] = {k: v["package_version"] for k, v in venv["metadata"]["injected_packages"].items()} if include_deps: - entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies']) + entry["dependencies"] = list(venv["metadata"]["main_package"]["app_paths_of_dependencies"]) return entry @@ -89,7 +89,7 @@ def process_dict(rc, out, err): results = {} raw_data = json.loads(out) - for venv_name, venv in raw_data['venvs'].items(): + for venv_name, venv in raw_data["venvs"].items(): results[venv_name] = _make_entry(venv_name, venv, include_injected, include_deps) return results, raw_data @@ -111,9 +111,6 @@ def process_list(rc, out, err): if kwargs.get("include_raw"): mod_helper.vars.raw_output = raw_data - return [ - entry - for name, entry in res_dict.items() - if name == kwargs.get("name") - ] + return [entry for name, entry in res_dict.items() if name == kwargs.get("name")] + return process_list diff --git a/plugins/module_utils/pkg_req.py b/plugins/module_utils/pkg_req.py index 13c824440f1..8b3297e6e2d 100644 --- a/plugins/module_utils/pkg_req.py +++ b/plugins/module_utils/pkg_req.py @@ -37,7 +37,7 @@ def _parse_spec(self, name): return name, None # Quick check for simple package names - if not any(c in name for c in '>=== 400: try: - body = error.read().decode('utf-8') + body = error.read().decode("utf-8") data = json.loads(body) - ext_info = data['error']['@Message.ExtendedInfo'] + ext_info = data["error"]["@Message.ExtendedInfo"] # if the ExtendedInfo contains a user friendly message send it # otherwise try to send the entire contents of ExtendedInfo try: - msg = ext_info[0]['Message'] + msg = ext_info[0]["Message"] except Exception: - msg = str(data['error']['@Message.ExtendedInfo']) + msg = str(data["error"]["@Message.ExtendedInfo"]) except Exception: pass return msg, data @@ -433,216 +454,196 @@ def _get_extended_message(error): def _get_vendor(self): # If we got the vendor info once, don't get it again if self._vendor is not None: - return {'ret': 'True', 'Vendor': self._vendor} + return {"ret": "True", "Vendor": self._vendor} # Find the vendor info from the service root response = self.get_request(self.root_uri + self.service_root) - if response['ret'] is False: - return {'ret': False, 'Vendor': ''} - data = response['data'] + if response["ret"] is False: + return {"ret": False, "Vendor": ""} + data = response["data"] - if 'Vendor' in 
data: + if "Vendor" in data: # Extract the vendor string from the Vendor property self._vendor = data["Vendor"] - return {'ret': True, 'Vendor': data["Vendor"]} - elif 'Oem' in data and len(data['Oem']) > 0: + return {"ret": True, "Vendor": data["Vendor"]} + elif "Oem" in data and len(data["Oem"]) > 0: # Determine the vendor from the OEM object if needed - vendor = list(data['Oem'].keys())[0] - if vendor == 'Hpe' or vendor == 'Hp': + vendor = list(data["Oem"].keys())[0] + if vendor == "Hpe" or vendor == "Hp": # HPE uses Pascal-casing for their OEM object # Older systems reported 'Hp' (pre-split) - vendor = 'HPE' + vendor = "HPE" self._vendor = vendor - return {'ret': True, 'Vendor': vendor} + return {"ret": True, "Vendor": vendor} else: # Could not determine; use an empty string - self._vendor = '' - return {'ret': True, 'Vendor': ''} + self._vendor = "" + return {"ret": True, "Vendor": ""} def _find_accountservice_resource(self): response = self.get_request(self.root_uri + self.service_root) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'AccountService' not in data: - return {'ret': False, 'msg': "AccountService resource not found"} + data = response["data"] + if "AccountService" not in data: + return {"ret": False, "msg": "AccountService resource not found"} else: account_service = data["AccountService"]["@odata.id"] response = self.get_request(self.root_uri + account_service) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - accounts = data['Accounts']['@odata.id'] - if accounts[-1:] == '/': + data = response["data"] + accounts = data["Accounts"]["@odata.id"] + if accounts[-1:] == "/": accounts = accounts[:-1] self.accounts_uri = accounts - return {'ret': True} + return {"ret": True} def _find_sessionservice_resource(self): # Get the service root response = self.get_request(self.root_uri + self.service_root) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] # Check for the session service and session collection. Well-known # defaults are provided in the constructor, but services that predate # Redfish 1.6.0 might contain different values. 
- self.session_service_uri = data.get('SessionService', {}).get('@odata.id') - self.sessions_uri = data.get('Links', {}).get('Sessions', {}).get('@odata.id') + self.session_service_uri = data.get("SessionService", {}).get("@odata.id") + self.sessions_uri = data.get("Links", {}).get("Sessions", {}).get("@odata.id") # If one isn't found, return an error if self.session_service_uri is None: - return {'ret': False, 'msg': "SessionService resource not found"} + return {"ret": False, "msg": "SessionService resource not found"} if self.sessions_uri is None: - return {'ret': False, 'msg': "SessionCollection resource not found"} - return {'ret': True} + return {"ret": False, "msg": "SessionCollection resource not found"} + return {"ret": True} def _get_resource_uri_by_id(self, uris, id_prop): for uri in uris: response = self.get_request(self.root_uri + uri) - if response['ret'] is False: + if response["ret"] is False: continue - data = response['data'] - if id_prop == data.get('Id'): + data = response["data"] + if id_prop == data.get("Id"): return uri return None def _find_systems_resource(self): response = self.get_request(self.root_uri + self.service_root) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'Systems' not in data: - return {'ret': False, 'msg': "Systems resource not found"} - response = self.get_request(self.root_uri + data['Systems']['@odata.id']) - if response['ret'] is False: + data = response["data"] + if "Systems" not in data: + return {"ret": False, "msg": "Systems resource not found"} + response = self.get_request(self.root_uri + data["Systems"]["@odata.id"]) + if response["ret"] is False: return response - self.systems_uris = [ - i['@odata.id'] for i in response['data'].get('Members', [])] + self.systems_uris = [i["@odata.id"] for i in response["data"].get("Members", [])] if not self.systems_uris: - return { - 'ret': False, - 'msg': "ComputerSystem's Members array is either empty or missing"} + return {"ret": False, "msg": "ComputerSystem's Members array is either empty or missing"} self.systems_uri = self.systems_uris[0] if self.data_modification: if self.resource_id: - self.systems_uri = self._get_resource_uri_by_id(self.systems_uris, - self.resource_id) + self.systems_uri = self._get_resource_uri_by_id(self.systems_uris, self.resource_id) if not self.systems_uri: - return { - 'ret': False, - 'msg': f"System resource {self.resource_id} not found"} + return {"ret": False, "msg": f"System resource {self.resource_id} not found"} elif len(self.systems_uris) > 1: - self.module.fail_json(msg=FAIL_MSG % {'resource': 'System'}) - return {'ret': True} + self.module.fail_json(msg=FAIL_MSG % {"resource": "System"}) + return {"ret": True} def _find_updateservice_resource(self): response = self.get_request(self.root_uri + self.service_root) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'UpdateService' not in data: - return {'ret': False, 'msg': "UpdateService resource not found"} + data = response["data"] + if "UpdateService" not in data: + return {"ret": False, "msg": "UpdateService resource not found"} else: update = data["UpdateService"]["@odata.id"] self.update_uri = update response = self.get_request(self.root_uri + update) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] self.firmware_uri = self.software_uri = None - if 'FirmwareInventory' in data: - self.firmware_uri = 
data['FirmwareInventory']['@odata.id'] - if 'SoftwareInventory' in data: - self.software_uri = data['SoftwareInventory']['@odata.id'] - return {'ret': True} + if "FirmwareInventory" in data: + self.firmware_uri = data["FirmwareInventory"]["@odata.id"] + if "SoftwareInventory" in data: + self.software_uri = data["SoftwareInventory"]["@odata.id"] + return {"ret": True} def _find_chassis_resource(self): response = self.get_request(self.root_uri + self.service_root) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'Chassis' not in data: - return {'ret': False, 'msg': "Chassis resource not found"} + data = response["data"] + if "Chassis" not in data: + return {"ret": False, "msg": "Chassis resource not found"} chassis = data["Chassis"]["@odata.id"] response = self.get_request(self.root_uri + chassis) - if response['ret'] is False: + if response["ret"] is False: return response - self.chassis_uris = [ - i['@odata.id'] for i in response['data'].get('Members', [])] + self.chassis_uris = [i["@odata.id"] for i in response["data"].get("Members", [])] if not self.chassis_uris: - return {'ret': False, - 'msg': "Chassis Members array is either empty or missing"} + return {"ret": False, "msg": "Chassis Members array is either empty or missing"} self.chassis_uri = self.chassis_uris[0] if self.data_modification: if self.resource_id: - self.chassis_uri = self._get_resource_uri_by_id(self.chassis_uris, - self.resource_id) + self.chassis_uri = self._get_resource_uri_by_id(self.chassis_uris, self.resource_id) if not self.chassis_uri: - return { - 'ret': False, - 'msg': f"Chassis resource {self.resource_id} not found"} + return {"ret": False, "msg": f"Chassis resource {self.resource_id} not found"} elif len(self.chassis_uris) > 1: - self.module.fail_json(msg=FAIL_MSG % {'resource': 'Chassis'}) - return {'ret': True} + self.module.fail_json(msg=FAIL_MSG % {"resource": "Chassis"}) + return {"ret": True} def _find_managers_resource(self): response = self.get_request(self.root_uri + self.service_root) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'Managers' not in data: - return {'ret': False, 'msg': "Manager resource not found"} + data = response["data"] + if "Managers" not in data: + return {"ret": False, "msg": "Manager resource not found"} manager = data["Managers"]["@odata.id"] response = self.get_request(self.root_uri + manager) - if response['ret'] is False: + if response["ret"] is False: return response - self.manager_uris = [ - i['@odata.id'] for i in response['data'].get('Members', [])] + self.manager_uris = [i["@odata.id"] for i in response["data"].get("Members", [])] if not self.manager_uris: - return {'ret': False, - 'msg': "Managers Members array is either empty or missing"} + return {"ret": False, "msg": "Managers Members array is either empty or missing"} self.manager_uri = self.manager_uris[0] if self.data_modification: if self.resource_id: - self.manager_uri = self._get_resource_uri_by_id(self.manager_uris, - self.resource_id) + self.manager_uri = self._get_resource_uri_by_id(self.manager_uris, self.resource_id) if not self.manager_uri: - return { - 'ret': False, - 'msg': f"Manager resource {self.resource_id} not found"} + return {"ret": False, "msg": f"Manager resource {self.resource_id} not found"} elif len(self.manager_uris) > 1: - self.module.fail_json(msg=FAIL_MSG % {'resource': 'Manager'}) - return {'ret': True} + self.module.fail_json(msg=FAIL_MSG % {"resource": 
"Manager"}) + return {"ret": True} def _get_all_action_info_values(self, action): """Retrieve all parameter values for an Action from ActionInfo. Fall back to AllowableValue annotations if no ActionInfo found. Return the result in an ActionInfo-like dictionary, keyed - by the name of the parameter. """ + by the name of the parameter.""" ai = {} - if '@Redfish.ActionInfo' in action: - ai_uri = action['@Redfish.ActionInfo'] + if "@Redfish.ActionInfo" in action: + ai_uri = action["@Redfish.ActionInfo"] response = self.get_request(self.root_uri + ai_uri) - if response['ret'] is True: - data = response['data'] - if 'Parameters' in data: - params = data['Parameters'] - ai = {p['Name']: p for p in params if 'Name' in p} + if response["ret"] is True: + data = response["data"] + if "Parameters" in data: + params = data["Parameters"] + ai = {p["Name"]: p for p in params if "Name" in p} if not ai: - ai = { - k[:-24]: {'AllowableValues': v} - for k, v in action.items() - if k.endswith('@Redfish.AllowableValues') - } + ai = {k[:-24]: {"AllowableValues": v} for k, v in action.items() if k.endswith("@Redfish.AllowableValues")} return ai def _get_allowable_values(self, action, name, default_values=None): if default_values is None: default_values = [] ai = self._get_all_action_info_values(action) - allowable_values = ai.get(name, {}).get('AllowableValues') + allowable_values = ai.get(name, {}).get("AllowableValues") # fallback to default values if allowable_values is None: allowable_values = default_values @@ -659,168 +660,176 @@ def check_service_availability(self): # Override the timeout since the service root is expected to be readily # available. service_root = self.get_request(self.root_uri + self.service_root, timeout=10) - if service_root['ret'] is False: + if service_root["ret"] is False: # Failed, either due to a timeout or HTTP error; not available - return {'ret': True, 'available': False} + return {"ret": True, "available": False} # Successfully accessed the service root; available - return {'ret': True, 'available': True} + return {"ret": True, "available": True} def get_logs(self): log_svcs_uri_list = [] list_of_logs = [] - properties = ['Severity', 'Created', 'EntryType', 'OemRecordFormat', - 'Message', 'MessageId', 'MessageArgs'] + properties = ["Severity", "Created", "EntryType", "OemRecordFormat", "Message", "MessageId", "MessageArgs"] # Find LogService response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'LogServices' not in data: - return {'ret': False, 'msg': "LogServices resource not found"} + data = response["data"] + if "LogServices" not in data: + return {"ret": False, "msg": "LogServices resource not found"} # Find all entries in LogServices logs_uri = data["LogServices"]["@odata.id"] response = self.get_request(self.root_uri + logs_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - for log_svcs_entry in data.get('Members', []): - response = self.get_request(self.root_uri + log_svcs_entry['@odata.id']) - if response['ret'] is False: + data = response["data"] + for log_svcs_entry in data.get("Members", []): + response = self.get_request(self.root_uri + log_svcs_entry["@odata.id"]) + if response["ret"] is False: return response - _data = response['data'] - if 'Entries' in _data: - log_svcs_uri_list.append(_data['Entries']['@odata.id']) + _data = response["data"] + if "Entries" in _data: + 
log_svcs_uri_list.append(_data["Entries"]["@odata.id"]) # For each entry in LogServices, get log name and all log entries for log_svcs_uri in log_svcs_uri_list: logs = {} list_of_log_entries = [] response = self.get_request(self.root_uri + log_svcs_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - logs['Description'] = data.get('Description', - 'Collection of log entries') + data = response["data"] + logs["Description"] = data.get("Description", "Collection of log entries") # Get all log entries for each type of log found - for logEntry in data.get('Members', []): + for logEntry in data.get("Members", []): entry = {} for prop in properties: if prop in logEntry: entry[prop] = logEntry.get(prop) if entry: list_of_log_entries.append(entry) - log_name = log_svcs_uri.rstrip('/').split('/')[-1] + log_name = log_svcs_uri.rstrip("/").split("/")[-1] logs[log_name] = list_of_log_entries list_of_logs.append(logs) # list_of_logs[logs{list_of_log_entries[entry{}]}] - return {'ret': True, 'entries': list_of_logs} + return {"ret": True, "entries": list_of_logs} def clear_logs(self): # Find LogService response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'LogServices' not in data: - return {'ret': False, 'msg': "LogServices resource not found"} + data = response["data"] + if "LogServices" not in data: + return {"ret": False, "msg": "LogServices resource not found"} # Find all entries in LogServices logs_uri = data["LogServices"]["@odata.id"] response = self.get_request(self.root_uri + logs_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] - for log_svcs_entry in data['Members']: + for log_svcs_entry in data["Members"]: response = self.get_request(self.root_uri + log_svcs_entry["@odata.id"]) - if response['ret'] is False: + if response["ret"] is False: return response - _data = response['data'] + _data = response["data"] # Check to make sure option is available, otherwise error is ugly if "Actions" in _data: if "#LogService.ClearLog" in _data["Actions"]: self.post_request(self.root_uri + _data["Actions"]["#LogService.ClearLog"]["target"], {}) - if response['ret'] is False: + if response["ret"] is False: return response - return {'ret': True} + return {"ret": True} def aggregate(self, func, uri_list, uri_name): ret = True entries = [] for uri in uri_list: inventory = func(uri) - ret = inventory.pop('ret') and ret - if 'entries' in inventory: - entries.append(({uri_name: uri}, - inventory['entries'])) + ret = inventory.pop("ret") and ret + if "entries" in inventory: + entries.append(({uri_name: uri}, inventory["entries"])) return dict(ret=ret, entries=entries) def aggregate_chassis(self, func): - return self.aggregate(func, self.chassis_uris, 'chassis_uri') + return self.aggregate(func, self.chassis_uris, "chassis_uri") def aggregate_managers(self, func): - return self.aggregate(func, self.manager_uris, 'manager_uri') + return self.aggregate(func, self.manager_uris, "manager_uri") def aggregate_systems(self, func): - return self.aggregate(func, self.systems_uris, 'system_uri') + return self.aggregate(func, self.systems_uris, "system_uri") def get_storage_controller_inventory(self, systems_uri): result = {} controller_list = [] controller_results = [] # Get these entries, but does not fail if not found - properties = ['CacheSummary', 
'FirmwareVersion', 'Identifiers', - 'Location', 'Manufacturer', 'Model', 'Name', 'Id', - 'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status'] + properties = [ + "CacheSummary", + "FirmwareVersion", + "Identifiers", + "Location", + "Manufacturer", + "Model", + "Name", + "Id", + "PartNumber", + "SerialNumber", + "SpeedGbps", + "Status", + ] key = "Controllers" deprecated_key = "StorageControllers" # Find Storage service response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] - if 'Storage' not in data: - return {'ret': False, 'msg': "Storage resource not found"} + if "Storage" not in data: + return {"ret": False, "msg": "Storage resource not found"} # Get a list of all storage controllers and build respective URIs - storage_uri = data['Storage']["@odata.id"] + storage_uri = data["Storage"]["@odata.id"] response = self.get_request(self.root_uri + storage_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] # Loop through Members and their StorageControllers # and gather properties from each StorageController - if data['Members']: - for storage_member in data['Members']: - storage_member_uri = storage_member['@odata.id'] + if data["Members"]: + for storage_member in data["Members"]: + storage_member_uri = storage_member["@odata.id"] response = self.get_request(self.root_uri + storage_member_uri) - data = response['data'] + data = response["data"] if key in data: - controllers_uri = data[key]['@odata.id'] + controllers_uri = data[key]["@odata.id"] response = self.get_request(self.root_uri + controllers_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] - if data['Members']: - for controller_member in data['Members']: - controller_member_uri = controller_member['@odata.id'] + if data["Members"]: + for controller_member in data["Members"]: + controller_member_uri = controller_member["@odata.id"] response = self.get_request(self.root_uri + controller_member_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] controller_result = {} for property in properties: @@ -835,90 +844,110 @@ def get_storage_controller_inventory(self, systems_uri): if property in controller: controller_result[property] = controller[property] controller_results.append(controller_result) - result['entries'] = controller_results + result["entries"] = controller_results return result else: - return {'ret': False, 'msg': "Storage resource not found"} + return {"ret": False, "msg": "Storage resource not found"} def get_multi_storage_controller_inventory(self): return self.aggregate_systems(self.get_storage_controller_inventory) def get_disk_inventory(self, systems_uri): - result = {'entries': []} + result = {"entries": []} controller_list = [] # Get these entries, but does not fail if not found - properties = ['BlockSizeBytes', 'CapableSpeedGbs', 'CapacityBytes', - 'EncryptionAbility', 'EncryptionStatus', - 'FailurePredicted', 'HotspareType', 'Id', 'Identifiers', - 'Links', 'Manufacturer', 'MediaType', 'Model', 'Name', - 'PartNumber', 'PhysicalLocation', 'Protocol', 'Revision', - 'RotationSpeedRPM', 'SerialNumber', 
'Status'] + properties = [ + "BlockSizeBytes", + "CapableSpeedGbs", + "CapacityBytes", + "EncryptionAbility", + "EncryptionStatus", + "FailurePredicted", + "HotspareType", + "Id", + "Identifiers", + "Links", + "Manufacturer", + "MediaType", + "Model", + "Name", + "PartNumber", + "PhysicalLocation", + "Protocol", + "Revision", + "RotationSpeedRPM", + "SerialNumber", + "Status", + ] # Find Storage service response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] - if 'SimpleStorage' not in data and 'Storage' not in data: - return {'ret': False, 'msg': "SimpleStorage and Storage resource \ - not found"} + if "SimpleStorage" not in data and "Storage" not in data: + return { + "ret": False, + "msg": "SimpleStorage and Storage resource \ + not found", + } - if 'Storage' in data: + if "Storage" in data: # Get a list of all storage controllers and build respective URIs - storage_uri = data['Storage']['@odata.id'] + storage_uri = data["Storage"]["@odata.id"] response = self.get_request(self.root_uri + storage_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] - if data['Members']: - for controller in data['Members']: - controller_list.append(controller['@odata.id']) + if data["Members"]: + for controller in data["Members"]: + controller_list.append(controller["@odata.id"]) for c in controller_list: uri = self.root_uri + c response = self.get_request(uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - controller_name = 'Controller 1' - storage_id = data['Id'] - if 'Controllers' in data: - controllers_uri = data['Controllers']['@odata.id'] + data = response["data"] + controller_name = "Controller 1" + storage_id = data["Id"] + if "Controllers" in data: + controllers_uri = data["Controllers"]["@odata.id"] response = self.get_request(self.root_uri + controllers_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - cdata = response['data'] + result["ret"] = True + cdata = response["data"] - if cdata['Members']: - controller_member_uri = cdata['Members'][0]['@odata.id'] + if cdata["Members"]: + controller_member_uri = cdata["Members"][0]["@odata.id"] response = self.get_request(self.root_uri + controller_member_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - cdata = response['data'] - controller_name = cdata['Name'] - elif 'StorageControllers' in data: - sc = data['StorageControllers'] + result["ret"] = True + cdata = response["data"] + controller_name = cdata["Name"] + elif "StorageControllers" in data: + sc = data["StorageControllers"] if sc: - if 'Name' in sc[0]: - controller_name = sc[0]['Name'] + if "Name" in sc[0]: + controller_name = sc[0]["Name"] else: - sc_id = sc[0].get('Id', '1') - controller_name = f'Controller {sc_id}' + sc_id = sc[0].get("Id", "1") + controller_name = f"Controller {sc_id}" drive_results = [] - if 'Drives' in data: - for device in data['Drives']: - disk_uri = self.root_uri + device['@odata.id'] + if "Drives" in data: + for device in data["Drives"]: + disk_uri = self.root_uri + device["@odata.id"] response = self.get_request(disk_uri) - data = response['data'] + data = response["data"] drive_result = {} - drive_result['RedfishURI'] = data['@odata.id'] 
+ drive_result["RedfishURI"] = data["@odata.id"] for property in properties: if property in data: if data[property] is not None: @@ -929,43 +958,40 @@ def get_disk_inventory(self, systems_uri): else: drive_result[property] = data[property] drive_results.append(drive_result) - drives = {'Controller': controller_name, - 'StorageId': storage_id, - 'Drives': drive_results} + drives = {"Controller": controller_name, "StorageId": storage_id, "Drives": drive_results} result["entries"].append(drives) - if 'SimpleStorage' in data: + if "SimpleStorage" in data: # Get a list of all storage controllers and build respective URIs storage_uri = data["SimpleStorage"]["@odata.id"] response = self.get_request(self.root_uri + storage_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] - for controller in data['Members']: - controller_list.append(controller['@odata.id']) + for controller in data["Members"]: + controller_list.append(controller["@odata.id"]) for c in controller_list: uri = self.root_uri + c response = self.get_request(uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'Name' in data: - controller_name = data['Name'] + data = response["data"] + if "Name" in data: + controller_name = data["Name"] else: - sc_id = data.get('Id', '1') - controller_name = f'Controller {sc_id}' + sc_id = data.get("Id", "1") + controller_name = f"Controller {sc_id}" drive_results = [] - for device in data['Devices']: + for device in data["Devices"]: drive_result = {} for property in properties: if property in device: drive_result[property] = device[property] drive_results.append(drive_result) - drives = {'Controller': controller_name, - 'Drives': drive_results} + drives = {"Controller": controller_name, "Drives": drive_results} result["entries"].append(drives) return result @@ -974,88 +1000,104 @@ def get_multi_disk_inventory(self): return self.aggregate_systems(self.get_disk_inventory) def get_volume_inventory(self, systems_uri): - result = {'entries': []} + result = {"entries": []} controller_list = [] volume_list = [] # Get these entries, but does not fail if not found - properties = ['Id', 'Name', 'RAIDType', 'VolumeType', 'BlockSizeBytes', - 'Capacity', 'CapacityBytes', 'CapacitySources', - 'Encrypted', 'EncryptionTypes', 'Identifiers', - 'Operations', 'OptimumIOSizeBytes', 'AccessCapabilities', - 'AllocatedPools', 'Status'] + properties = [ + "Id", + "Name", + "RAIDType", + "VolumeType", + "BlockSizeBytes", + "Capacity", + "CapacityBytes", + "CapacitySources", + "Encrypted", + "EncryptionTypes", + "Identifiers", + "Operations", + "OptimumIOSizeBytes", + "AccessCapabilities", + "AllocatedPools", + "Status", + ] # Find Storage service response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] - if 'SimpleStorage' not in data and 'Storage' not in data: - return {'ret': False, 'msg': "SimpleStorage and Storage resource \ - not found"} + if "SimpleStorage" not in data and "Storage" not in data: + return { + "ret": False, + "msg": "SimpleStorage and Storage resource \ + not found", + } - if 'Storage' in data: + if "Storage" in data: # Get a list of all storage controllers and build respective URIs - storage_uri = data['Storage']['@odata.id'] + storage_uri = data["Storage"]["@odata.id"] response = 
self.get_request(self.root_uri + storage_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] - if data.get('Members'): - for controller in data['Members']: - controller_list.append(controller['@odata.id']) + if data.get("Members"): + for controller in data["Members"]: + controller_list.append(controller["@odata.id"]) for idx, c in enumerate(controller_list): uri = self.root_uri + c response = self.get_request(uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - controller_name = f'Controller {idx}' - if 'Controllers' in data: - response = self.get_request(self.root_uri + data['Controllers']['@odata.id']) - if response['ret'] is False: + data = response["data"] + controller_name = f"Controller {idx}" + if "Controllers" in data: + response = self.get_request(self.root_uri + data["Controllers"]["@odata.id"]) + if response["ret"] is False: return response - c_data = response['data'] + c_data = response["data"] - if c_data.get('Members') and c_data['Members']: - response = self.get_request(self.root_uri + c_data['Members'][0]['@odata.id']) - if response['ret'] is False: + if c_data.get("Members") and c_data["Members"]: + response = self.get_request(self.root_uri + c_data["Members"][0]["@odata.id"]) + if response["ret"] is False: return response - member_data = response['data'] + member_data = response["data"] if member_data: - if 'Name' in member_data: - controller_name = member_data['Name'] + if "Name" in member_data: + controller_name = member_data["Name"] else: - controller_id = member_data.get('Id', '1') - controller_name = f'Controller {controller_id}' - elif 'StorageControllers' in data: - sc = data['StorageControllers'] + controller_id = member_data.get("Id", "1") + controller_name = f"Controller {controller_id}" + elif "StorageControllers" in data: + sc = data["StorageControllers"] if sc: - if 'Name' in sc[0]: - controller_name = sc[0]['Name'] + if "Name" in sc[0]: + controller_name = sc[0]["Name"] else: - sc_id = sc[0].get('Id', '1') - controller_name = f'Controller {sc_id}' + sc_id = sc[0].get("Id", "1") + controller_name = f"Controller {sc_id}" volume_results = [] volume_list = [] - if 'Volumes' in data: + if "Volumes" in data: # Get a list of all volumes and build respective URIs - volumes_uri = data['Volumes']['@odata.id'] + volumes_uri = data["Volumes"]["@odata.id"] response = self.get_request(self.root_uri + volumes_uri) - data = response['data'] + data = response["data"] - if data.get('Members'): - for volume in data['Members']: - volume_list.append(volume['@odata.id']) + if data.get("Members"): + for volume in data["Members"]: + volume_list.append(volume["@odata.id"]) for v in volume_list: uri = self.root_uri + v response = self.get_request(uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] volume_result = {} for property in properties: @@ -1065,19 +1107,18 @@ def get_volume_inventory(self, systems_uri): # Get related Drives Id drive_id_list = [] - if 'Links' in data: - if 'Drives' in data['Links']: - for link in data['Links']['Drives']: - drive_id_link = link['@odata.id'] - drive_id = drive_id_link.rstrip('/').split('/')[-1] - drive_id_list.append({'Id': drive_id}) - volume_result['Linked_drives'] = drive_id_list + if "Links" in data: + if "Drives" in data["Links"]: + for link in data["Links"]["Drives"]: + 
drive_id_link = link["@odata.id"] + drive_id = drive_id_link.rstrip("/").split("/")[-1] + drive_id_list.append({"Id": drive_id}) + volume_result["Linked_drives"] = drive_id_list volume_results.append(volume_result) - volumes = {'Controller': controller_name, - 'Volumes': volume_results} + volumes = {"Controller": controller_name, "Volumes": volume_results} result["entries"].append(volumes) else: - return {'ret': False, 'msg': "Storage resource not found"} + return {"ret": False, "msg": "Storage resource not found"} return result @@ -1096,23 +1137,23 @@ def manage_indicator_led(self, command, resource_uri=None): resource_uri = self.chassis_uri # Perform a PATCH on the IndicatorLED property based on the requested command - payloads = {'IndicatorLedOn': 'Lit', 'IndicatorLedOff': 'Off', "IndicatorLedBlink": 'Blinking'} + payloads = {"IndicatorLedOn": "Lit", "IndicatorLedOff": "Off", "IndicatorLedBlink": "Blinking"} if command not in payloads.keys(): - return {'ret': False, 'msg': f'Invalid command ({command})'} - payload = {'IndicatorLED': payloads[command]} + return {"ret": False, "msg": f"Invalid command ({command})"} + payload = {"IndicatorLED": payloads[command]} resp = self.patch_request(self.root_uri + resource_uri, payload, check_pyld=True) - if resp['ret'] and resp['changed']: - resp['msg'] = f'Set IndicatorLED to {payloads[command]}' + if resp["ret"] and resp["changed"]: + resp["msg"] = f"Set IndicatorLED to {payloads[command]}" return resp def _map_reset_type(self, reset_type, allowable_values): equiv_types = { - 'On': 'ForceOn', - 'ForceOn': 'On', - 'ForceOff': 'GracefulShutdown', - 'GracefulShutdown': 'ForceOff', - 'GracefulRestart': 'ForceRestart', - 'ForceRestart': 'GracefulRestart' + "On": "ForceOn", + "ForceOn": "On", + "ForceOff": "GracefulShutdown", + "GracefulShutdown": "ForceOff", + "GracefulRestart": "ForceRestart", + "ForceRestart": "GracefulRestart", } if reset_type in allowable_values: @@ -1125,74 +1166,78 @@ def _map_reset_type(self, reset_type, allowable_values): return reset_type def manage_system_power(self, command): - return self.manage_power(command, self.systems_uri, - '#ComputerSystem.Reset') + return self.manage_power(command, self.systems_uri, "#ComputerSystem.Reset") def manage_manager_power(self, command, wait=False, wait_timeout=120): - return self.manage_power(command, self.manager_uri, - '#Manager.Reset', wait, wait_timeout) + return self.manage_power(command, self.manager_uri, "#Manager.Reset", wait, wait_timeout) - def manage_power(self, command, resource_uri, action_name, wait=False, - wait_timeout=120): + def manage_power(self, command, resource_uri, action_name, wait=False, wait_timeout=120): key = "Actions" - reset_type_values = ['On', 'ForceOff', 'GracefulShutdown', - 'GracefulRestart', 'ForceRestart', 'Nmi', - 'ForceOn', 'PushPowerButton', 'PowerCycle', - 'FullPowerCycle'] + reset_type_values = [ + "On", + "ForceOff", + "GracefulShutdown", + "GracefulRestart", + "ForceRestart", + "Nmi", + "ForceOn", + "PushPowerButton", + "PowerCycle", + "FullPowerCycle", + ] # command should be PowerOn, PowerForceOff, etc. 
-        if not command.startswith('Power'):
-            return {'ret': False, 'msg': f'Invalid Command ({command})'}
+        if not command.startswith("Power"):
+            return {"ret": False, "msg": f"Invalid Command ({command})"}

         # Commands (except PowerCycle) will be stripped of the 'Power' prefix
-        if command == 'PowerCycle':
+        if command == "PowerCycle":
             reset_type = command
         else:
             reset_type = command[5:]

         # map Reboot to a ResetType that does a reboot
-        if reset_type == 'Reboot':
-            reset_type = 'GracefulRestart'
+        if reset_type == "Reboot":
+            reset_type = "GracefulRestart"

         if reset_type not in reset_type_values:
-            return {'ret': False, 'msg': f'Invalid Command ({command})'}
+            return {"ret": False, "msg": f"Invalid Command ({command})"}

         # read the resource and get the current power state
         response = self.get_request(self.root_uri + resource_uri)
-        if response['ret'] is False:
+        if response["ret"] is False:
             return response
-        data = response['data']
-        power_state = data.get('PowerState')
+        data = response["data"]
+        power_state = data.get("PowerState")

         # if power is already in target state, nothing to do
-        if power_state == "On" and reset_type in ['On', 'ForceOn']:
-            return {'ret': True, 'changed': False}
-        if power_state == "Off" and reset_type in ['GracefulShutdown', 'ForceOff']:
-            return {'ret': True, 'changed': False}
+        if power_state == "On" and reset_type in ["On", "ForceOn"]:
+            return {"ret": True, "changed": False}
+        if power_state == "Off" and reset_type in ["GracefulShutdown", "ForceOff"]:
+            return {"ret": True, "changed": False}

         # get the reset Action and target URI
         if key not in data or action_name not in data[key]:
-            return {'ret': False, 'msg': f'Action {action_name} not found'}
+            return {"ret": False, "msg": f"Action {action_name} not found"}
         reset_action = data[key][action_name]
-        if 'target' not in reset_action:
-            return {'ret': False,
-                    'msg': f'target URI missing from Action {action_name}'}
-        action_uri = reset_action['target']
+        if "target" not in reset_action:
+            return {"ret": False, "msg": f"target URI missing from Action {action_name}"}
+        action_uri = reset_action["target"]

         # get AllowableValues
         ai = self._get_all_action_info_values(reset_action)
-        allowable_values = ai.get('ResetType', {}).get('AllowableValues', [])
+        allowable_values = ai.get("ResetType", {}).get("AllowableValues", [])

         # map ResetType to an allowable value if needed
         if reset_type not in allowable_values:
             reset_type = self._map_reset_type(reset_type, allowable_values)

         # define payload
-        payload = {'ResetType': reset_type}
+        payload = {"ResetType": reset_type}

         # POST to Action URI
         response = self.post_request(self.root_uri + action_uri, payload)
-        if response['ret'] is False:
+        if response["ret"] is False:
             return response

         # If requested to wait for the service to be available again, block
@@ -1208,7 +1253,7 @@ def manage_power(self, command, resource_uri, action_name, wait=False,
         # Periodically check for the service's availability.
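# Illustration (not part of the patch): the command-to-ResetType translation that
# manage_power() performs above, as a standalone helper. The module returns an
# error dict for bad commands; a ValueError stands in for that here.

def to_reset_type(command):
    if not command.startswith("Power"):
        raise ValueError(f"Invalid Command ({command})")
    # PowerCycle passes through whole; other commands drop the "Power" prefix
    reset_type = command if command == "PowerCycle" else command[len("Power"):]
    # Reboot is mapped to a ResetType that performs a reboot
    return "GracefulRestart" if reset_type == "Reboot" else reset_type


assert to_reset_type("PowerForceOff") == "ForceOff"
assert to_reset_type("PowerReboot") == "GracefulRestart"
assert to_reset_type("PowerCycle") == "PowerCycle"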
while elapsed_time <= wait_timeout: status = self.check_service_availability() - if status['available']: + if status["available"]: # It is available; we are done break time.sleep(5) @@ -1216,126 +1261,117 @@ def manage_power(self, command, resource_uri, action_name, wait=False, if elapsed_time > wait_timeout: # Exhausted the wait timer; error - return {'ret': False, 'changed': True, - 'msg': f'The service did not become available after {int(wait_timeout)} seconds'} - return {'ret': True, 'changed': True} + return { + "ret": False, + "changed": True, + "msg": f"The service did not become available after {int(wait_timeout)} seconds", + } + return {"ret": True, "changed": True} def manager_reset_to_defaults(self, command): - return self.reset_to_defaults(command, self.manager_uri, - '#Manager.ResetToDefaults') + return self.reset_to_defaults(command, self.manager_uri, "#Manager.ResetToDefaults") def reset_to_defaults(self, command, resource_uri, action_name): key = "Actions" - reset_type_values = ['ResetAll', - 'PreserveNetworkAndUsers', - 'PreserveNetwork'] + reset_type_values = ["ResetAll", "PreserveNetworkAndUsers", "PreserveNetwork"] if command not in reset_type_values: - return {'ret': False, 'msg': f'Invalid Command ({command})'} + return {"ret": False, "msg": f"Invalid Command ({command})"} # read the resource and get the current power state response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] # get the reset Action and target URI if key not in data or action_name not in data[key]: - return {'ret': False, 'msg': f'Action {action_name} not found'} + return {"ret": False, "msg": f"Action {action_name} not found"} reset_action = data[key][action_name] - if 'target' not in reset_action: - return {'ret': False, - 'msg': f'target URI missing from Action {action_name}'} - action_uri = reset_action['target'] + if "target" not in reset_action: + return {"ret": False, "msg": f"target URI missing from Action {action_name}"} + action_uri = reset_action["target"] # get AllowableValues ai = self._get_all_action_info_values(reset_action) - allowable_values = ai.get('ResetType', {}).get('AllowableValues', []) + allowable_values = ai.get("ResetType", {}).get("AllowableValues", []) # map ResetType to an allowable value if needed if allowable_values and command not in allowable_values: - return {'ret': False, - 'msg': f'Specified reset type ({command}) not supported by service. Supported types: {allowable_values}'} + return { + "ret": False, + "msg": f"Specified reset type ({command}) not supported by service. 
Supported types: {allowable_values}", + } # define payload - payload = {'ResetType': command} + payload = {"ResetType": command} # POST to Action URI response = self.post_request(self.root_uri + action_uri, payload) - if response['ret'] is False: + if response["ret"] is False: return response - return {'ret': True, 'changed': True} + return {"ret": True, "changed": True} def _find_account_uri(self, username=None, acct_id=None, password_change_uri=None): if not any((username, acct_id)): - return {'ret': False, 'msg': - 'Must provide either account_id or account_username'} + return {"ret": False, "msg": "Must provide either account_id or account_username"} if password_change_uri: # Password change required; go directly to the specified URI response = self.get_request(self.root_uri + password_change_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - headers = response['headers'] + data = response["data"] + headers = response["headers"] if username: - if username == data.get('UserName'): - return {'ret': True, 'data': data, - 'headers': headers, 'uri': password_change_uri} + if username == data.get("UserName"): + return {"ret": True, "data": data, "headers": headers, "uri": password_change_uri} if acct_id: - if acct_id == data.get('Id'): - return {'ret': True, 'data': data, - 'headers': headers, 'uri': password_change_uri} + if acct_id == data.get("Id"): + return {"ret": True, "data": data, "headers": headers, "uri": password_change_uri} else: # Walk the accounts collection to find the desired user response = self.get_request(self.root_uri + self.accounts_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] - uris = [a.get('@odata.id') for a in data.get('Members', []) if - a.get('@odata.id')] + uris = [a.get("@odata.id") for a in data.get("Members", []) if a.get("@odata.id")] for uri in uris: response = self.get_request(self.root_uri + uri) - if response['ret'] is False: + if response["ret"] is False: continue - data = response['data'] - headers = response['headers'] + data = response["data"] + headers = response["headers"] if username: - if username == data.get('UserName'): - return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} + if username == data.get("UserName"): + return {"ret": True, "data": data, "headers": headers, "uri": uri} if acct_id: - if acct_id == data.get('Id'): - return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} + if acct_id == data.get("Id"): + return {"ret": True, "data": data, "headers": headers, "uri": uri} - return {'ret': False, 'no_match': True, 'msg': - 'No account with the given account_id or account_username found'} + return {"ret": False, "no_match": True, "msg": "No account with the given account_id or account_username found"} def _find_empty_account_slot(self): response = self.get_request(self.root_uri + self.accounts_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] - uris = [a.get('@odata.id') for a in data.get('Members', []) if - a.get('@odata.id')] + uris = [a.get("@odata.id") for a in data.get("Members", []) if a.get("@odata.id")] if uris: # first slot may be reserved, so move to end of list uris += [uris.pop(0)] for uri in uris: response = self.get_request(self.root_uri + uri) - if response['ret'] is False: + if response["ret"] is False: continue - data = response['data'] - headers = 
response['headers'] - if data.get('UserName') == "" and not data.get('Enabled', True): - return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} + data = response["data"] + headers = response["headers"] + if data.get("UserName") == "" and not data.get("Enabled", True): + return {"ret": True, "data": data, "headers": headers, "uri": uri} - return {'ret': False, 'no_match': True, 'msg': - 'No empty account slot found'} + return {"ret": False, "no_match": True, "msg": "No empty account slot found"} def list_users(self): result = {} @@ -1343,25 +1379,24 @@ def list_users(self): user_list = [] users_results = [] # Get these entries, but does not fail if not found - properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled', - 'AccountTypes', 'OEMAccountTypes'] + properties = ["Id", "Name", "UserName", "RoleId", "Locked", "Enabled", "AccountTypes", "OEMAccountTypes"] response = self.get_request(self.root_uri + self.accounts_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] - for users in data.get('Members', []): - user_list.append(users['@odata.id']) # user_list[] are URIs + for users in data.get("Members", []): + user_list.append(users["@odata.id"]) # user_list[] are URIs # for each user, get details for uri in user_list: user = {} response = self.get_request(self.root_uri + uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] for property in properties: if property in data: @@ -1370,7 +1405,7 @@ def list_users(self): # Filter out empty account slots # An empty account slot can be detected if the username is an empty # string and if the account is disabled - if user.get('UserName', '') == '' and not user.get('Enabled', False): + if user.get("UserName", "") == "" and not user.get("Enabled", False): continue users_results.append(user) @@ -1378,219 +1413,211 @@ def list_users(self): return result def add_user_via_patch(self, user): - if user.get('account_id'): + if user.get("account_id"): # If Id slot specified, use it - response = self._find_account_uri(acct_id=user.get('account_id')) + response = self._find_account_uri(acct_id=user.get("account_id")) else: # Otherwise find first empty slot response = self._find_empty_account_slot() - if not response['ret']: + if not response["ret"]: return response - uri = response['uri'] + uri = response["uri"] payload = {} - if user.get('account_username'): - payload['UserName'] = user.get('account_username') - if user.get('account_password'): - payload['Password'] = user.get('account_password') - if user.get('account_roleid'): - payload['RoleId'] = user.get('account_roleid') - if user.get('account_accounttypes'): - payload['AccountTypes'] = user.get('account_accounttypes') - if user.get('account_oemaccounttypes'): - payload['OEMAccountTypes'] = user.get('account_oemaccounttypes') + if user.get("account_username"): + payload["UserName"] = user.get("account_username") + if user.get("account_password"): + payload["Password"] = user.get("account_password") + if user.get("account_roleid"): + payload["RoleId"] = user.get("account_roleid") + if user.get("account_accounttypes"): + payload["AccountTypes"] = user.get("account_accounttypes") + if user.get("account_oemaccounttypes"): + payload["OEMAccountTypes"] = user.get("account_oemaccounttypes") return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def 
add_user(self, user): - if not user.get('account_username'): - return {'ret': False, 'msg': - 'Must provide account_username for AddUser command'} + if not user.get("account_username"): + return {"ret": False, "msg": "Must provide account_username for AddUser command"} - response = self._find_account_uri(username=user.get('account_username')) - if response['ret']: + response = self._find_account_uri(username=user.get("account_username")) + if response["ret"]: # account_username already exists, nothing to do - return {'ret': True, 'changed': False} + return {"ret": True, "changed": False} response = self.get_request(self.root_uri + self.accounts_uri) - if not response['ret']: + if not response["ret"]: return response - headers = response['headers'] + headers = response["headers"] - if 'allow' in headers: - methods = [m.strip() for m in headers.get('allow').split(',')] - if 'POST' not in methods: + if "allow" in headers: + methods = [m.strip() for m in headers.get("allow").split(",")] + if "POST" not in methods: # if Allow header present and POST not listed, add via PATCH return self.add_user_via_patch(user) payload = {} - if user.get('account_username'): - payload['UserName'] = user.get('account_username') - if user.get('account_password'): - payload['Password'] = user.get('account_password') - if user.get('account_roleid'): - payload['RoleId'] = user.get('account_roleid') - if user.get('account_accounttypes'): - payload['AccountTypes'] = user.get('account_accounttypes') - if user.get('account_oemaccounttypes'): - payload['OEMAccountTypes'] = user.get('account_oemaccounttypes') - if user.get('account_id'): - payload['Id'] = user.get('account_id') + if user.get("account_username"): + payload["UserName"] = user.get("account_username") + if user.get("account_password"): + payload["Password"] = user.get("account_password") + if user.get("account_roleid"): + payload["RoleId"] = user.get("account_roleid") + if user.get("account_accounttypes"): + payload["AccountTypes"] = user.get("account_accounttypes") + if user.get("account_oemaccounttypes"): + payload["OEMAccountTypes"] = user.get("account_oemaccounttypes") + if user.get("account_id"): + payload["Id"] = user.get("account_id") response = self.post_request(self.root_uri + self.accounts_uri, payload) - if not response['ret']: - if response.get('status') == 405: + if not response["ret"]: + if response.get("status") == 405: # if POST returned a 405, try to add via PATCH return self.add_user_via_patch(user) else: return response - return {'ret': True} + return {"ret": True} def enable_user(self, user): - response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) - if not response['ret']: + response = self._find_account_uri(username=user.get("account_username"), acct_id=user.get("account_id")) + if not response["ret"]: return response - uri = response['uri'] + uri = response["uri"] - payload = {'Enabled': True} + payload = {"Enabled": True} return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def delete_user_via_patch(self, user, uri=None, data=None): if not uri: - response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) - if not response['ret']: + response = self._find_account_uri(username=user.get("account_username"), acct_id=user.get("account_id")) + if not response["ret"]: return response - uri = response['uri'] - data = response['data'] + uri = response["uri"] + data = response["data"] - payload = {'UserName': ''} - if 
data.get('Enabled', False): - payload['Enabled'] = False + payload = {"UserName": ""} + if data.get("Enabled", False): + payload["Enabled"] = False return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def delete_user(self, user): - response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) - if not response['ret']: - if response.get('no_match'): + response = self._find_account_uri(username=user.get("account_username"), acct_id=user.get("account_id")) + if not response["ret"]: + if response.get("no_match"): # account does not exist, nothing to do - return {'ret': True, 'changed': False} + return {"ret": True, "changed": False} else: # some error encountered return response - uri = response['uri'] - headers = response['headers'] - data = response['data'] + uri = response["uri"] + headers = response["headers"] + data = response["data"] - if 'allow' in headers: - methods = [m.strip() for m in headers.get('allow').split(',')] - if 'DELETE' not in methods: + if "allow" in headers: + methods = [m.strip() for m in headers.get("allow").split(",")] + if "DELETE" not in methods: # if Allow header present and DELETE not listed, del via PATCH return self.delete_user_via_patch(user, uri=uri, data=data) response = self.delete_request(self.root_uri + uri) - if not response['ret']: - if response.get('status') == 405: + if not response["ret"]: + if response.get("status") == 405: # if DELETE returned a 405, try to delete via PATCH return self.delete_user_via_patch(user, uri=uri, data=data) else: return response - return {'ret': True} + return {"ret": True} def disable_user(self, user): - response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) - if not response['ret']: + response = self._find_account_uri(username=user.get("account_username"), acct_id=user.get("account_id")) + if not response["ret"]: return response - uri = response['uri'] - payload = {'Enabled': False} + uri = response["uri"] + payload = {"Enabled": False} return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def update_user_role(self, user): - if not user.get('account_roleid'): - return {'ret': False, 'msg': - 'Must provide account_roleid for UpdateUserRole command'} + if not user.get("account_roleid"): + return {"ret": False, "msg": "Must provide account_roleid for UpdateUserRole command"} - response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) - if not response['ret']: + response = self._find_account_uri(username=user.get("account_username"), acct_id=user.get("account_id")) + if not response["ret"]: return response - uri = response['uri'] - payload = {'RoleId': user['account_roleid']} + uri = response["uri"] + payload = {"RoleId": user["account_roleid"]} return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def update_user_password(self, user): - if not user.get('account_password'): - return {'ret': False, 'msg': - 'Must provide account_password for UpdateUserPassword command'} - - response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id'), - password_change_uri=user.get('account_passwordchangerequired')) - if not response['ret']: + if not user.get("account_password"): + return {"ret": False, "msg": "Must provide account_password for UpdateUserPassword command"} + + response = self._find_account_uri( + username=user.get("account_username"), + acct_id=user.get("account_id"), + 
password_change_uri=user.get("account_passwordchangerequired"), + ) + if not response["ret"]: return response - uri = response['uri'] - payload = {'Password': user['account_password']} + uri = response["uri"] + payload = {"Password": user["account_password"]} return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def update_user_name(self, user): - if not user.get('account_updatename'): - return {'ret': False, 'msg': - 'Must provide account_updatename for UpdateUserName command'} + if not user.get("account_updatename"): + return {"ret": False, "msg": "Must provide account_updatename for UpdateUserName command"} - response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) - if not response['ret']: + response = self._find_account_uri(username=user.get("account_username"), acct_id=user.get("account_id")) + if not response["ret"]: return response - uri = response['uri'] - payload = {'UserName': user['account_updatename']} + uri = response["uri"] + payload = {"UserName": user["account_updatename"]} return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def update_accountservice_properties(self, user): - account_properties = user.get('account_properties') + account_properties = user.get("account_properties") if account_properties is None: - return {'ret': False, 'msg': - 'Must provide account_properties for UpdateAccountServiceProperties command'} + return {"ret": False, "msg": "Must provide account_properties for UpdateAccountServiceProperties command"} # Find the AccountService resource response = self.get_request(self.root_uri + self.service_root) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] accountservice_uri = data.get("AccountService", {}).get("@odata.id") if accountservice_uri is None: - return {'ret': False, 'msg': "AccountService resource not found"} + return {"ret": False, "msg": "AccountService resource not found"} # Perform a PATCH on the AccountService resource with the requested properties resp = self.patch_request(self.root_uri + accountservice_uri, account_properties, check_pyld=True) - if resp['ret'] and resp['changed']: - resp['msg'] = 'Modified account service' + if resp["ret"] and resp["changed"]: + resp["msg"] = "Modified account service" return resp def update_user_accounttypes(self, user): - account_types = user.get('account_accounttypes') - oemaccount_types = user.get('account_oemaccounttypes') + account_types = user.get("account_accounttypes") + oemaccount_types = user.get("account_oemaccounttypes") if account_types is None and oemaccount_types is None: - return {'ret': False, 'msg': - 'Must provide account_accounttypes or account_oemaccounttypes for UpdateUserAccountTypes command'} + return { + "ret": False, + "msg": "Must provide account_accounttypes or account_oemaccounttypes for UpdateUserAccountTypes command", + } - response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) - if not response['ret']: + response = self._find_account_uri(username=user.get("account_username"), acct_id=user.get("account_id")) + if not response["ret"]: return response - uri = response['uri'] + uri = response["uri"] payload = {} - if user.get('account_accounttypes'): - payload['AccountTypes'] = user.get('account_accounttypes') - if user.get('account_oemaccounttypes'): - payload['OEMAccountTypes'] = user.get('account_oemaccounttypes') + if user.get("account_accounttypes"): + 
payload["AccountTypes"] = user.get("account_accounttypes") + if user.get("account_oemaccounttypes"): + payload["OEMAccountTypes"] = user.get("account_oemaccounttypes") return self.patch_request(self.root_uri + uri, payload, check_pyld=True) @@ -1602,20 +1629,20 @@ def check_password_change_required(self, return_data): :return: None or the URI of the account to update """ uri = None - if 'data' in return_data: + if "data" in return_data: # Find the extended messages in the response payload - extended_messages = return_data['data'].get('error', {}).get('@Message.ExtendedInfo', []) + extended_messages = return_data["data"].get("error", {}).get("@Message.ExtendedInfo", []) if len(extended_messages) == 0: - extended_messages = return_data['data'].get('@Message.ExtendedInfo', []) + extended_messages = return_data["data"].get("@Message.ExtendedInfo", []) # Go through each message and look for Base.1.X.PasswordChangeRequired for message in extended_messages: - message_id = message.get('MessageId') + message_id = message.get("MessageId") if message_id is None: # While this is invalid, treat the lack of a MessageId as "no message" continue - if message_id.startswith('Base.1.') and message_id.endswith('.PasswordChangeRequired'): + if message_id.startswith("Base.1.") and message_id.endswith(".PasswordChangeRequired"): # Password change required; get the URI of the user account - uri = message['MessageArgs'][0] + uri = message["MessageArgs"][0] break return uri @@ -1625,24 +1652,24 @@ def get_sessions(self): session_list = [] sessions_results = [] # Get these entries, but does not fail if not found - properties = ['Description', 'Id', 'Name', 'UserName'] + properties = ["Description", "Id", "Name", "UserName"] response = self.get_request(self.root_uri + self.sessions_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] - for sessions in data['Members']: - session_list.append(sessions['@odata.id']) # session_list[] are URIs + for sessions in data["Members"]: + session_list.append(sessions["@odata.id"]) # session_list[] are URIs # for each session, get details for uri in session_list: session = {} response = self.get_request(self.root_uri + uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] for property in properties: if property in data: @@ -1654,145 +1681,152 @@ def get_sessions(self): def clear_sessions(self): response = self.get_request(self.root_uri + self.sessions_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] # if no active sessions, return as success - if data['Members@odata.count'] == 0: - return {'ret': True, 'changed': False, 'msg': "There are no active sessions"} + if data["Members@odata.count"] == 0: + return {"ret": True, "changed": False, "msg": "There are no active sessions"} # loop to delete every active session - for session in data['Members']: - response = self.delete_request(self.root_uri + session['@odata.id']) - if response['ret'] is False: + for session in data["Members"]: + response = self.delete_request(self.root_uri + session["@odata.id"]) + if response["ret"] is False: return response - return {'ret': True, 'changed': True, 'msg': "Cleared all sessions successfully"} + return {"ret": True, "changed": True, "msg": "Cleared all sessions successfully"} def create_session(self): - if not 
self.creds.get('user') or not self.creds.get('pswd'): - return {'ret': False, 'msg': - 'Must provide the username and password parameters for ' - 'the CreateSession command'} - - payload = { - 'UserName': self.creds['user'], - 'Password': self.creds['pswd'] - } + if not self.creds.get("user") or not self.creds.get("pswd"): + return { + "ret": False, + "msg": "Must provide the username and password parameters for the CreateSession command", + } + + payload = {"UserName": self.creds["user"], "Password": self.creds["pswd"]} response = self.post_request(self.root_uri + self.sessions_uri, payload) - if response['ret'] is False: + if response["ret"] is False: return response - headers = response['headers'] - if 'x-auth-token' not in headers: - return {'ret': False, 'msg': - 'The service did not return the X-Auth-Token header in ' - 'the response from the Sessions collection POST'} + headers = response["headers"] + if "x-auth-token" not in headers: + return { + "ret": False, + "msg": "The service did not return the X-Auth-Token header in " + "the response from the Sessions collection POST", + } - if 'location' not in headers: + if "location" not in headers: self.module.warn( - 'The service did not return the Location header for the ' - 'session URL in the response from the Sessions collection ' - 'POST') + "The service did not return the Location header for the " + "session URL in the response from the Sessions collection " + "POST" + ) session_uri = None else: - session_uri = urlparse(headers.get('location')).path + session_uri = urlparse(headers.get("location")).path session = dict() - session['token'] = headers.get('x-auth-token') - session['uri'] = session_uri - return {'ret': True, 'changed': True, 'session': session, - 'msg': 'Session created successfully'} + session["token"] = headers.get("x-auth-token") + session["uri"] = session_uri + return {"ret": True, "changed": True, "session": session, "msg": "Session created successfully"} def delete_session(self, session_uri): if not session_uri: - return {'ret': False, 'msg': - 'Must provide the session_uri parameter for the ' - 'DeleteSession command'} + return {"ret": False, "msg": "Must provide the session_uri parameter for the DeleteSession command"} response = self.delete_request(self.root_uri + session_uri) - if response['ret'] is False: + if response["ret"] is False: return response - return {'ret': True, 'changed': True, - 'msg': 'Session deleted successfully'} + return {"ret": True, "changed": True, "msg": "Session deleted successfully"} def get_firmware_update_capabilities(self): result = {} response = self.get_request(self.root_uri + self.update_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True + result["ret"] = True - result['entries'] = {} + result["entries"] = {} - data = response['data'] + data = response["data"] - result['multipart_supported'] = 'MultipartHttpPushUri' in data + result["multipart_supported"] = "MultipartHttpPushUri" in data if "Actions" in data: - actions = data['Actions'] + actions = data["Actions"] if len(actions) > 0: for key in actions.keys(): action = actions.get(key) - if 'title' in action: - title = action['title'] + if "title" in action: + title = action["title"] else: title = key - result['entries'][title] = action.get('TransferProtocol@Redfish.AllowableValues', - ["Key TransferProtocol@Redfish.AllowableValues not found"]) + result["entries"][title] = action.get( + "TransferProtocol@Redfish.AllowableValues", + ["Key 
TransferProtocol@Redfish.AllowableValues not found"], + ) else: - return {'ret': "False", 'msg': "Actions list is empty."} + return {"ret": "False", "msg": "Actions list is empty."} else: - return {'ret': "False", 'msg': "Key Actions not found."} + return {"ret": "False", "msg": "Key Actions not found."} return result def _software_inventory(self, uri): result = {} - result['entries'] = [] + result["entries"] = [] while uri: response = self.get_request(self.root_uri + uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True + result["ret"] = True - data = response['data'] - if data.get('Members@odata.nextLink'): - uri = data.get('Members@odata.nextLink') + data = response["data"] + if data.get("Members@odata.nextLink"): + uri = data.get("Members@odata.nextLink") else: uri = None - for member in data['Members']: - fw_uri = self.root_uri + member['@odata.id'] + for member in data["Members"]: + fw_uri = self.root_uri + member["@odata.id"] # Get details for each software or firmware member response = self.get_request(fw_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] software = {} # Get these standard properties if present - for key in ['Name', 'Id', 'Status', 'Version', 'Updateable', - 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer', - 'ReleaseDate']: + for key in [ + "Name", + "Id", + "Status", + "Version", + "Updateable", + "SoftwareId", + "LowestSupportedVersion", + "Manufacturer", + "ReleaseDate", + ]: if key in data: software[key] = data.get(key) - result['entries'].append(software) + result["entries"].append(software) return result def get_firmware_inventory(self): if self.firmware_uri is None: - return {'ret': False, 'msg': 'No FirmwareInventory resource found'} + return {"ret": False, "msg": "No FirmwareInventory resource found"} else: return self._software_inventory(self.firmware_uri) def get_software_inventory(self): if self.software_uri is None: - return {'ret': False, 'msg': 'No SoftwareInventory resource found'} + return {"ret": False, "msg": "No SoftwareInventory resource found"} else: return self._software_inventory(self.software_uri) @@ -1806,129 +1840,129 @@ def _operation_results(self, response, data, handle=None): :return: dict containing operation results """ - operation_results = {'status': None, 'messages': [], 'handle': None, 'ret': True, - 'resets_requested': []} + operation_results = {"status": None, "messages": [], "handle": None, "ret": True, "resets_requested": []} if response.status == 204: # No content; successful, but nothing to return # Use the Redfish "Completed" enum from TaskState for the operation status - operation_results['status'] = 'Completed' + operation_results["status"] = "Completed" else: # Parse the response body for details # Determine the next handle, if any - operation_results['handle'] = handle + operation_results["handle"] = handle if response.status == 202: # Task generated; get the task monitor URI - operation_results['handle'] = response.getheader('Location', handle) + operation_results["handle"] = response.getheader("Location", handle) # Pull out the status and messages based on the body format if data is not None: - response_type = data.get('@odata.type', '') - if response_type.startswith('#Task.') or response_type.startswith('#Job.'): + response_type = data.get("@odata.type", "") + if response_type.startswith("#Task.") or 
response_type.startswith("#Job."): # Task and Job have similar enough structures to treat the same - operation_results['status'] = data.get('TaskState', data.get('JobState')) - operation_results['messages'] = data.get('Messages', []) + operation_results["status"] = data.get("TaskState", data.get("JobState")) + operation_results["messages"] = data.get("Messages", []) else: # Error response body, which is a bit of a misnomer since it is used in successful action responses - operation_results['status'] = 'Completed' + operation_results["status"] = "Completed" if response.status >= 400: - operation_results['status'] = 'Exception' - operation_results['messages'] = data.get('error', {}).get('@Message.ExtendedInfo', []) + operation_results["status"] = "Exception" + operation_results["messages"] = data.get("error", {}).get("@Message.ExtendedInfo", []) else: # No response body (or malformed); build based on status code - operation_results['status'] = 'Completed' + operation_results["status"] = "Completed" if response.status == 202: - operation_results['status'] = 'New' + operation_results["status"] = "New" elif response.status >= 400: - operation_results['status'] = 'Exception' + operation_results["status"] = "Exception" # Clear out the handle if the operation is complete - if operation_results['status'] in ['Completed', 'Cancelled', 'Exception', 'Killed']: - operation_results['handle'] = None + if operation_results["status"] in ["Completed", "Cancelled", "Exception", "Killed"]: + operation_results["handle"] = None # Scan the messages to see if next steps are needed - for message in operation_results['messages']: - message_id = message.get('MessageId') + for message in operation_results["messages"]: + message_id = message.get("MessageId") if message_id is None: # While this is invalid, treat the lack of a MessageId as "no message" continue - if message_id.startswith('Update.1.') and message_id.endswith('.OperationTransitionedToJob'): + if message_id.startswith("Update.1.") and message_id.endswith(".OperationTransitionedToJob"): # Operation rerouted to a job; update the status and handle - operation_results['status'] = 'New' - operation_results['handle'] = message['MessageArgs'][0] - operation_results['resets_requested'] = [] + operation_results["status"] = "New" + operation_results["handle"] = message["MessageArgs"][0] + operation_results["resets_requested"] = [] # No need to process other messages in this case break - if message_id.startswith('Base.1.') and message_id.endswith('.ResetRequired'): + if message_id.startswith("Base.1.") and message_id.endswith(".ResetRequired"): # A reset to some device is needed to continue the update - reset = {'uri': message['MessageArgs'][0], 'type': message['MessageArgs'][1]} - operation_results['resets_requested'].append(reset) + reset = {"uri": message["MessageArgs"][0], "type": message["MessageArgs"][1]} + operation_results["resets_requested"].append(reset) return operation_results def simple_update(self, update_opts): - image_uri = update_opts.get('update_image_uri') - protocol = update_opts.get('update_protocol') - targets = update_opts.get('update_targets') - creds = update_opts.get('update_creds') - apply_time = update_opts.get('update_apply_time') + image_uri = update_opts.get("update_image_uri") + protocol = update_opts.get("update_protocol") + targets = update_opts.get("update_targets") + creds = update_opts.get("update_creds") + apply_time = update_opts.get("update_apply_time") if not image_uri: - return {'ret': False, 'msg': - 'Must specify 
update_image_uri for the SimpleUpdate command'} + return {"ret": False, "msg": "Must specify update_image_uri for the SimpleUpdate command"} response = self.get_request(self.root_uri + self.update_uri) - if response['ret'] is False: - return response - data = response['data'] - if 'Actions' not in data: - return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} - if '#UpdateService.SimpleUpdate' not in data['Actions']: - return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} - action = data['Actions']['#UpdateService.SimpleUpdate'] - if 'target' not in action: - return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} - update_uri = action['target'] + if response["ret"] is False: + return response + data = response["data"] + if "Actions" not in data: + return {"ret": False, "msg": "Service does not support SimpleUpdate"} + if "#UpdateService.SimpleUpdate" not in data["Actions"]: + return {"ret": False, "msg": "Service does not support SimpleUpdate"} + action = data["Actions"]["#UpdateService.SimpleUpdate"] + if "target" not in action: + return {"ret": False, "msg": "Service does not support SimpleUpdate"} + update_uri = action["target"] if protocol: - default_values = ['CIFS', 'FTP', 'SFTP', 'HTTP', 'HTTPS', 'NSF', - 'SCP', 'TFTP', 'OEM', 'NFS'] - allowable_values = self._get_allowable_values(action, - 'TransferProtocol', - default_values) + default_values = ["CIFS", "FTP", "SFTP", "HTTP", "HTTPS", "NSF", "SCP", "TFTP", "OEM", "NFS"] + allowable_values = self._get_allowable_values(action, "TransferProtocol", default_values) if protocol not in allowable_values: - return {'ret': False, - 'msg': f'Specified update_protocol ({protocol}) not supported by service. Supported protocols: {allowable_values}'} + return { + "ret": False, + "msg": f"Specified update_protocol ({protocol}) not supported by service. Supported protocols: {allowable_values}", + } if targets: - allowable_values = self._get_allowable_values(action, 'Targets') + allowable_values = self._get_allowable_values(action, "Targets") if allowable_values: for target in targets: if target not in allowable_values: - return {'ret': False, - 'msg': f'Specified target ({target}) not supported by service. Supported targets: {allowable_values}'} + return { + "ret": False, + "msg": f"Specified target ({target}) not supported by service. 
Supported targets: {allowable_values}", + } - payload = { - 'ImageURI': image_uri - } + payload = {"ImageURI": image_uri} if protocol: payload["TransferProtocol"] = protocol if targets: payload["Targets"] = targets if creds: - if creds.get('username'): - payload["Username"] = creds.get('username') - if creds.get('password'): - payload["Password"] = creds.get('password') + if creds.get("username"): + payload["Username"] = creds.get("username") + if creds.get("password"): + payload["Password"] = creds.get("password") if apply_time: payload["@Redfish.OperationApplyTime"] = apply_time response = self.post_request(self.root_uri + update_uri, payload) - if response['ret'] is False: + if response["ret"] is False: return response - return {'ret': True, 'changed': True, - 'msg': "SimpleUpdate requested", - 'update_status': self._operation_results(response['resp'], response['data'])} + return { + "ret": True, + "changed": True, + "msg": "SimpleUpdate requested", + "update_status": self._operation_results(response["resp"], response["data"]), + } def multipath_http_push_update(self, update_opts): """ @@ -1941,35 +1975,33 @@ def multipath_http_push_update(self, update_opts): :param update_opts: The parameters for the update operation :return: dict containing the response of the update request """ - image_file = update_opts.get('update_image_file') - targets = update_opts.get('update_targets') - apply_time = update_opts.get('update_apply_time') - oem_params = update_opts.get('update_oem_params') - custom_oem_header = update_opts.get('update_custom_oem_header') - custom_oem_mime_type = update_opts.get('update_custom_oem_mime_type') - custom_oem_params = update_opts.get('update_custom_oem_params') + image_file = update_opts.get("update_image_file") + targets = update_opts.get("update_targets") + apply_time = update_opts.get("update_apply_time") + oem_params = update_opts.get("update_oem_params") + custom_oem_header = update_opts.get("update_custom_oem_header") + custom_oem_mime_type = update_opts.get("update_custom_oem_mime_type") + custom_oem_params = update_opts.get("update_custom_oem_params") # Ensure the image file is provided if not image_file: - return {'ret': False, 'msg': - 'Must specify update_image_file for the MultipartHTTPPushUpdate command'} + return {"ret": False, "msg": "Must specify update_image_file for the MultipartHTTPPushUpdate command"} if not os.path.isfile(image_file): - return {'ret': False, 'msg': - 'Must specify a valid file for the MultipartHTTPPushUpdate command'} + return {"ret": False, "msg": "Must specify a valid file for the MultipartHTTPPushUpdate command"} try: - with open(image_file, 'rb') as f: + with open(image_file, "rb") as f: image_payload = f.read() except Exception as e: - return {'ret': False, 'msg': f'Could not read file {image_file}'} + return {"ret": False, "msg": f"Could not read file {image_file}"} # Check that multipart HTTP push updates are supported response = self.get_request(self.root_uri + self.update_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'MultipartHttpPushUri' not in data: - return {'ret': False, 'msg': 'Service does not support MultipartHttpPushUri'} - update_uri = data['MultipartHttpPushUri'] + data = response["data"] + if "MultipartHttpPushUri" not in data: + return {"ret": False, "msg": "Service does not support MultipartHttpPushUri"} + update_uri = data["MultipartHttpPushUri"] # Assemble the JSON payload portion of the request payload = {} @@ -1980,20 +2012,23 @@ def 
multipath_http_push_update(self, update_opts): if oem_params: payload["Oem"] = oem_params multipart_payload = { - 'UpdateParameters': {'content': json.dumps(payload), 'mime_type': 'application/json'}, - 'UpdateFile': {'filename': image_file, 'content': image_payload, 'mime_type': 'application/octet-stream'} + "UpdateParameters": {"content": json.dumps(payload), "mime_type": "application/json"}, + "UpdateFile": {"filename": image_file, "content": image_payload, "mime_type": "application/octet-stream"}, } if custom_oem_params: - multipart_payload[custom_oem_header] = {'content': custom_oem_params} + multipart_payload[custom_oem_header] = {"content": custom_oem_params} if custom_oem_mime_type: - multipart_payload[custom_oem_header]['mime_type'] = custom_oem_mime_type + multipart_payload[custom_oem_header]["mime_type"] = custom_oem_mime_type response = self.post_request(self.root_uri + update_uri, multipart_payload, multipart=True) - if response['ret'] is False: + if response["ret"] is False: return response - return {'ret': True, 'changed': True, - 'msg': "MultipartHTTPPushUpdate requested", - 'update_status': self._operation_results(response['resp'], response['data'])} + return { + "ret": True, + "changed": True, + "msg": "MultipartHTTPPushUpdate requested", + "update_status": self._operation_results(response["resp"], response["data"]), + } def get_update_status(self, update_handle): """ @@ -2004,15 +2039,15 @@ def get_update_status(self, update_handle): """ if not update_handle: - return {'ret': False, 'msg': 'Must provide a handle tracking the update.'} + return {"ret": False, "msg": "Must provide a handle tracking the update."} # Get the task or job tracking the update response = self.get_request(self.root_uri + update_handle, allow_no_resp=True) - if response['ret'] is False: + if response["ret"] is False: return response # Inspect the response to build the update status - return self._operation_results(response['resp'], response['data'], update_handle) + return self._operation_results(response["resp"], response["data"], update_handle) def perform_requested_update_operations(self, update_handle): """ @@ -2024,27 +2059,27 @@ def perform_requested_update_operations(self, update_handle): # Get the current update status update_status = self.get_update_status(update_handle) - if update_status['ret'] is False: + if update_status["ret"] is False: return update_status changed = False # Perform any requested updates - for reset in update_status['resets_requested']: - resp = self.post_request(self.root_uri + reset['uri'], {'ResetType': reset['type']}) - if resp['ret'] is False: + for reset in update_status["resets_requested"]: + resp = self.post_request(self.root_uri + reset["uri"], {"ResetType": reset["type"]}) + if resp["ret"] is False: # Override the 'changed' indicator since other resets may have # been successful - resp['changed'] = changed + resp["changed"] = changed return resp changed = True - msg = 'No operations required for the update' + msg = "No operations required for the update" if changed: # Will need to consider finetuning this message if the scope of the # requested operations grow over time - msg = 'One or more components reset to continue the update' - return {'ret': True, 'changed': changed, 'msg': msg} + msg = "One or more components reset to continue the update" + return {"ret": True, "changed": changed, "msg": msg} def get_bios_attributes(self, systems_uri): result = {} @@ -2053,22 +2088,22 @@ def get_bios_attributes(self, systems_uri): # Search for 'key' entry and extract 
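The multipart body posted to `MultipartHttpPushUri` has the shape below; a sketch with a hypothetical file name and target, matching the `multipart_payload` construction in this hunk:

```python
# Sketch: 'UpdateParameters' carries the JSON options part, 'UpdateFile' the
# raw image bytes (normally read from update_image_file).
import json

payload = {"Targets": ["/redfish/v1/UpdateService/FirmwareInventory/BMC"]}
image_payload = b"\x00firmware-bytes\x00"  # placeholder for the file contents
multipart_payload = {
    "UpdateParameters": {"content": json.dumps(payload), "mime_type": "application/json"},
    "UpdateFile": {"filename": "bmc.bin", "content": image_payload, "mime_type": "application/octet-stream"},
}
```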
URI from it response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] if key not in data: - return {'ret': False, 'msg': f"Key {key} not found"} + return {"ret": False, "msg": f"Key {key} not found"} bios_uri = data[key]["@odata.id"] response = self.get_request(self.root_uri + bios_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] - for attribute in data['Attributes'].items(): + result["ret"] = True + data = response["data"] + for attribute in data["Attributes"].items(): bios_attributes[attribute[0]] = attribute[1] result["entries"] = bios_attributes return result @@ -2078,37 +2113,37 @@ def get_multi_bios_attributes(self): def _get_boot_options_dict(self, boot): # Get these entries from BootOption, if present - properties = ['DisplayName', 'BootOptionReference'] + properties = ["DisplayName", "BootOptionReference"] # Retrieve BootOptions if present - if 'BootOptions' in boot and '@odata.id' in boot['BootOptions']: - boot_options_uri = boot['BootOptions']["@odata.id"] + if "BootOptions" in boot and "@odata.id" in boot["BootOptions"]: + boot_options_uri = boot["BootOptions"]["@odata.id"] # Get BootOptions resource response = self.get_request(self.root_uri + boot_options_uri) - if response['ret'] is False: + if response["ret"] is False: return {} - data = response['data'] + data = response["data"] # Retrieve Members array - if 'Members' not in data: + if "Members" not in data: return {} - members = data['Members'] + members = data["Members"] else: members = [] # Build dict of BootOptions keyed by BootOptionReference boot_options_dict = {} for member in members: - if '@odata.id' not in member: + if "@odata.id" not in member: return {} - boot_option_uri = member['@odata.id'] + boot_option_uri = member["@odata.id"] response = self.get_request(self.root_uri + boot_option_uri) - if response['ret'] is False: + if response["ret"] is False: return {} - data = response['data'] - if 'BootOptionReference' not in data: + data = response["data"] + if "BootOptionReference" not in data: return {} - boot_option_ref = data['BootOptionReference'] + boot_option_ref = data["BootOptionReference"] # fetch the props to display for this boot device boot_props = {} @@ -2125,24 +2160,23 @@ def get_boot_order(self, systems_uri): # Retrieve System resource response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] # Confirm needed Boot properties are present - if 'Boot' not in data or 'BootOrder' not in data['Boot']: - return {'ret': False, 'msg': "Key BootOrder not found"} + if "Boot" not in data or "BootOrder" not in data["Boot"]: + return {"ret": False, "msg": "Key BootOrder not found"} - boot = data['Boot'] - boot_order = boot['BootOrder'] + boot = data["Boot"] + boot_order = boot["BootOrder"] boot_options_dict = self._get_boot_options_dict(boot) # Build boot device list boot_device_list = [] for ref in boot_order: - boot_device_list.append( - boot_options_dict.get(ref, {'BootOptionReference': ref})) + boot_device_list.append(boot_options_dict.get(ref, {"BootOptionReference": ref})) result["entries"] = boot_device_list return result @@ -2153,19 +2187,24 @@ def get_multi_boot_order(self): def 
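The boot-order getter above resolves each `BootOrder` reference through the dict built by `_get_boot_options_dict()`, degrading gracefully when a reference has no matching BootOption; a small sketch with illustrative values:

```python
# Sketch: unresolvable references fall back to a stub that carries only the
# reference itself, exactly as in get_boot_order() above.
boot_options_dict = {
    "Boot0001": {"DisplayName": "Hard Drive", "BootOptionReference": "Boot0001"},
}
boot_order = ["Boot0001", "Boot0007"]
boot_device_list = [boot_options_dict.get(ref, {"BootOptionReference": ref}) for ref in boot_order]
assert boot_device_list[1] == {"BootOptionReference": "Boot0007"}
```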
get_boot_override(self, systems_uri): result = {} - properties = ["BootSourceOverrideEnabled", "BootSourceOverrideTarget", - "BootSourceOverrideMode", "UefiTargetBootSourceOverride", "BootSourceOverrideTarget@Redfish.AllowableValues"] + properties = [ + "BootSourceOverrideEnabled", + "BootSourceOverrideTarget", + "BootSourceOverrideMode", + "UefiTargetBootSourceOverride", + "BootSourceOverrideTarget@Redfish.AllowableValues", + ] response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] - if 'Boot' not in data: - return {'ret': False, 'msg': "Key Boot not found"} + if "Boot" not in data: + return {"ret": False, "msg": "Key Boot not found"} - boot = data['Boot'] + boot = data["Boot"] boot_overrides = {} if "BootSourceOverrideEnabled" in boot: @@ -2175,9 +2214,9 @@ def get_boot_override(self, systems_uri): if boot[property] is not None: boot_overrides[property] = boot[property] else: - return {'ret': False, 'msg': "No boot override is enabled."} + return {"ret": False, "msg": "No boot override is enabled."} - result['entries'] = boot_overrides + result["entries"] = boot_overrides return result def get_multi_boot_override(self): @@ -2186,134 +2225,123 @@ def get_multi_boot_override(self): def set_bios_default_settings(self): # Find the Bios resource from the requested ComputerSystem resource response = self.get_request(self.root_uri + self.systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - bios_uri = data.get('Bios', {}).get('@odata.id') + data = response["data"] + bios_uri = data.get("Bios", {}).get("@odata.id") if bios_uri is None: - return {'ret': False, 'msg': 'Bios resource not found'} + return {"ret": False, "msg": "Bios resource not found"} # Find the URI of the ResetBios action response = self.get_request(self.root_uri + bios_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - reset_bios_uri = data.get('Actions', {}).get('#Bios.ResetBios', {}).get('target') + data = response["data"] + reset_bios_uri = data.get("Actions", {}).get("#Bios.ResetBios", {}).get("target") if reset_bios_uri is None: - return {'ret': False, 'msg': 'ResetBios action not found'} + return {"ret": False, "msg": "ResetBios action not found"} # Perform the ResetBios action response = self.post_request(self.root_uri + reset_bios_uri, {}) - if response['ret'] is False: + if response["ret"] is False: return response - return {'ret': True, 'changed': True, 'msg': "BIOS set to default settings"} + return {"ret": True, "changed": True, "msg": "BIOS set to default settings"} def set_boot_override(self, boot_opts): # Extract the requested boot override options - bootdevice = boot_opts.get('bootdevice') - uefi_target = boot_opts.get('uefi_target') - boot_next = boot_opts.get('boot_next') - override_enabled = boot_opts.get('override_enabled') - boot_override_mode = boot_opts.get('boot_override_mode') - if not bootdevice and override_enabled != 'Disabled': - return {'ret': False, - 'msg': "bootdevice option required for temporary boot override"} + bootdevice = boot_opts.get("bootdevice") + uefi_target = boot_opts.get("uefi_target") + boot_next = boot_opts.get("boot_next") + override_enabled = boot_opts.get("override_enabled") + boot_override_mode = boot_opts.get("boot_override_mode") + if not bootdevice and override_enabled != 
"Disabled": + return {"ret": False, "msg": "bootdevice option required for temporary boot override"} # Get the current boot override options from the Boot property response = self.get_request(self.root_uri + self.systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - boot = data.get('Boot') + data = response["data"] + boot = data.get("Boot") if boot is None: - return {'ret': False, 'msg': "Boot property not found"} - cur_override_mode = boot.get('BootSourceOverrideMode') + return {"ret": False, "msg": "Boot property not found"} + cur_override_mode = boot.get("BootSourceOverrideMode") # Check if the requested target is supported by the system - if override_enabled != 'Disabled': - annotation = 'BootSourceOverrideTarget@Redfish.AllowableValues' + if override_enabled != "Disabled": + annotation = "BootSourceOverrideTarget@Redfish.AllowableValues" if annotation in boot: allowable_values = boot[annotation] if isinstance(allowable_values, list) and bootdevice not in allowable_values: - return {'ret': False, - 'msg': f"Boot device {bootdevice} not in list of allowable values ({allowable_values})"} + return { + "ret": False, + "msg": f"Boot device {bootdevice} not in list of allowable values ({allowable_values})", + } # Build the request payload based on the desired boot override options - if override_enabled == 'Disabled': - payload = { - 'Boot': { - 'BootSourceOverrideEnabled': override_enabled, - 'BootSourceOverrideTarget': 'None' - } - } - elif bootdevice == 'UefiTarget': + if override_enabled == "Disabled": + payload = {"Boot": {"BootSourceOverrideEnabled": override_enabled, "BootSourceOverrideTarget": "None"}} + elif bootdevice == "UefiTarget": if not uefi_target: - return {'ret': False, - 'msg': "uefi_target option required to SetOneTimeBoot for UefiTarget"} + return {"ret": False, "msg": "uefi_target option required to SetOneTimeBoot for UefiTarget"} payload = { - 'Boot': { - 'BootSourceOverrideEnabled': override_enabled, - 'BootSourceOverrideTarget': bootdevice, - 'UefiTargetBootSourceOverride': uefi_target + "Boot": { + "BootSourceOverrideEnabled": override_enabled, + "BootSourceOverrideTarget": bootdevice, + "UefiTargetBootSourceOverride": uefi_target, } } # If needed, also specify UEFI mode - if cur_override_mode == 'Legacy': - payload['Boot']['BootSourceOverrideMode'] = 'UEFI' - elif bootdevice == 'UefiBootNext': + if cur_override_mode == "Legacy": + payload["Boot"]["BootSourceOverrideMode"] = "UEFI" + elif bootdevice == "UefiBootNext": if not boot_next: - return {'ret': False, - 'msg': "boot_next option required to SetOneTimeBoot for UefiBootNext"} + return {"ret": False, "msg": "boot_next option required to SetOneTimeBoot for UefiBootNext"} payload = { - 'Boot': { - 'BootSourceOverrideEnabled': override_enabled, - 'BootSourceOverrideTarget': bootdevice, - 'BootNext': boot_next + "Boot": { + "BootSourceOverrideEnabled": override_enabled, + "BootSourceOverrideTarget": bootdevice, + "BootNext": boot_next, } } # If needed, also specify UEFI mode - if cur_override_mode == 'Legacy': - payload['Boot']['BootSourceOverrideMode'] = 'UEFI' + if cur_override_mode == "Legacy": + payload["Boot"]["BootSourceOverrideMode"] = "UEFI" else: - payload = { - 'Boot': { - 'BootSourceOverrideEnabled': override_enabled, - 'BootSourceOverrideTarget': bootdevice - } - } + payload = {"Boot": {"BootSourceOverrideEnabled": override_enabled, "BootSourceOverrideTarget": bootdevice}} if boot_override_mode: - payload['Boot']['BootSourceOverrideMode'] = 
boot_override_mode + payload["Boot"]["BootSourceOverrideMode"] = boot_override_mode # Apply the requested boot override request resp = self.patch_request(self.root_uri + self.systems_uri, payload, check_pyld=True) - if resp['ret'] is False: + if resp["ret"] is False: # WORKAROUND # Older Dell systems do not allow BootSourceOverrideEnabled to be # specified with UefiTarget as the target device - vendor = self._get_vendor()['Vendor'] - if vendor == 'Dell': - if bootdevice == 'UefiTarget' and override_enabled != 'Disabled': - payload['Boot'].pop('BootSourceOverrideEnabled', None) + vendor = self._get_vendor()["Vendor"] + if vendor == "Dell": + if bootdevice == "UefiTarget" and override_enabled != "Disabled": + payload["Boot"].pop("BootSourceOverrideEnabled", None) resp = self.patch_request(self.root_uri + self.systems_uri, payload, check_pyld=True) - if resp['ret'] and resp['changed']: - resp['msg'] = 'Updated the boot override settings' + if resp["ret"] and resp["changed"]: + resp["msg"] = "Updated the boot override settings" return resp def set_bios_attributes(self, attributes): # Find the Bios resource from the requested ComputerSystem resource response = self.get_request(self.root_uri + self.systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - bios_uri = data.get('Bios', {}).get('@odata.id') + data = response["data"] + bios_uri = data.get("Bios", {}).get("@odata.id") if bios_uri is None: - return {'ret': False, 'msg': 'Bios resource not found'} + return {"ret": False, "msg": "Bios resource not found"} # Get the current BIOS settings response = self.get_request(self.root_uri + bios_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] # Make a copy of the attributes dict attrs_to_patch = dict(attributes) @@ -2323,14 +2351,14 @@ def set_bios_attributes(self, attributes): # Check the attributes for attr_name, attr_value in attributes.items(): # Check if attribute exists - if attr_name not in data['Attributes']: + if attr_name not in data["Attributes"]: # Remove and proceed to next attribute if this isn't valid attrs_bad.update({attr_name: attr_value}) del attrs_to_patch[attr_name] continue # If already set to requested value, remove it from PATCH payload - if data['Attributes'][attr_name] == attr_value: + if data["Attributes"][attr_name] == attr_value: del attrs_to_patch[attr_name] warning = "" @@ -2339,14 +2367,12 @@ def set_bios_attributes(self, attributes): # Return success w/ changed=False if no attrs need to be changed if not attrs_to_patch: - return {'ret': True, 'changed': False, - 'msg': "BIOS attributes already set", - 'warning': warning} + return {"ret": True, "changed": False, "msg": "BIOS attributes already set", "warning": warning} # Get the SettingsObject URI to apply the attributes set_bios_attr_uri = data.get("@Redfish.Settings", {}).get("SettingsObject", {}).get("@odata.id") if set_bios_attr_uri is None: - return {'ret': False, 'msg': "Settings resource for BIOS attributes not found."} + return {"ret": False, "msg": "Settings resource for BIOS attributes not found."} # Construct payload and issue PATCH command payload = {"Attributes": attrs_to_patch} @@ -2354,34 +2380,36 @@ def set_bios_attributes(self, attributes): # WORKAROUND # Dell systems require manually setting the apply time to "OnReset" # to spawn a proprietary job to apply the BIOS settings - vendor = self._get_vendor()['Vendor'] - if vendor == 'Dell': + vendor = 
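The pre-PATCH filtering in `set_bios_attributes()` keeps the payload minimal: unknown attributes are reported back, and attributes already at the requested value are dropped. A runnable sketch with illustrative attribute names:

```python
# Sketch: mirror of the attrs_to_patch / attrs_bad filtering above.
current = {"Attributes": {"BootMode": "Uefi", "NumLock": "On"}}
requested = {"BootMode": "Uefi", "NumLock": "Off", "NoSuchKnob": 1}
attrs_to_patch, attrs_bad = dict(requested), {}
for name, value in requested.items():
    if name not in current["Attributes"]:
        attrs_bad[name] = value       # invalid attribute, report it
        del attrs_to_patch[name]
    elif current["Attributes"][name] == value:
        del attrs_to_patch[name]      # already set, nothing to PATCH
assert attrs_to_patch == {"NumLock": "Off"} and "NoSuchKnob" in attrs_bad
```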
self._get_vendor()["Vendor"] + if vendor == "Dell": payload.update({"@Redfish.SettingsApplyTime": {"ApplyTime": "OnReset"}}) response = self.patch_request(self.root_uri + set_bios_attr_uri, payload) - if response['ret'] is False: + if response["ret"] is False: return response - return {'ret': True, 'changed': True, - 'msg': f"Modified BIOS attributes {attrs_to_patch}. A reboot is required", - 'warning': warning} + return { + "ret": True, + "changed": True, + "msg": f"Modified BIOS attributes {attrs_to_patch}. A reboot is required", + "warning": warning, + } def set_boot_order(self, boot_list): if not boot_list: - return {'ret': False, - 'msg': "boot_order list required for SetBootOrder command"} + return {"ret": False, "msg": "boot_order list required for SetBootOrder command"} systems_uri = self.systems_uri response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] # Confirm needed Boot properties are present - if 'Boot' not in data or 'BootOrder' not in data['Boot']: - return {'ret': False, 'msg': "Key BootOrder not found"} + if "Boot" not in data or "BootOrder" not in data["Boot"]: + return {"ret": False, "msg": "Key BootOrder not found"} - boot = data['Boot'] - boot_order = boot['BootOrder'] + boot = data["Boot"] + boot_order = boot["BootOrder"] boot_options_dict = self._get_boot_options_dict(boot) # Verify the requested boot options are valid @@ -2389,58 +2417,61 @@ def set_boot_order(self, boot_list): boot_option_references = boot_options_dict.keys() for ref in boot_list: if ref not in boot_option_references: - return {'ret': False, - 'msg': f"BootOptionReference {ref} not found in BootOptions"} + return {"ret": False, "msg": f"BootOptionReference {ref} not found in BootOptions"} # Apply the boot order - payload = { - 'Boot': { - 'BootOrder': boot_list - } - } + payload = {"Boot": {"BootOrder": boot_list}} resp = self.patch_request(self.root_uri + systems_uri, payload, check_pyld=True) - if resp['ret'] and resp['changed']: - resp['msg'] = 'Modified the boot order' + if resp["ret"] and resp["changed"]: + resp["msg"] = "Modified the boot order" return resp def set_default_boot_order(self): systems_uri = self.systems_uri response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] # get the #ComputerSystem.SetDefaultBootOrder Action and target URI - action = '#ComputerSystem.SetDefaultBootOrder' - if 'Actions' not in data or action not in data['Actions']: - return {'ret': False, 'msg': f'Action {action} not found'} - if 'target' not in data['Actions'][action]: - return {'ret': False, 'msg': f'target URI missing from Action {action}'} - action_uri = data['Actions'][action]['target'] + action = "#ComputerSystem.SetDefaultBootOrder" + if "Actions" not in data or action not in data["Actions"]: + return {"ret": False, "msg": f"Action {action} not found"} + if "target" not in data["Actions"][action]: + return {"ret": False, "msg": f"target URI missing from Action {action}"} + action_uri = data["Actions"][action]["target"] # POST to Action URI payload = {} response = self.post_request(self.root_uri + action_uri, payload) - if response['ret'] is False: + if response["ret"] is False: return response - return {'ret': True, 'changed': True, - 'msg': "BootOrder set to default"} + return {"ret": True, "changed": True, "msg": "BootOrder set to 
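The action-discovery pattern used by `set_default_boot_order()` above recurs throughout this file: find the action under `Actions`, require its `target`, then POST an empty payload to that URI. A sketch with illustrative data:

```python
# Sketch: locate the #ComputerSystem.SetDefaultBootOrder action target.
data = {
    "Actions": {
        "#ComputerSystem.SetDefaultBootOrder": {
            "target": "/redfish/v1/Systems/1/Actions/ComputerSystem.SetDefaultBootOrder"
        }
    }
}
action = "#ComputerSystem.SetDefaultBootOrder"
if "Actions" not in data or action not in data["Actions"]:
    raise LookupError(f"Action {action} not found")
if "target" not in data["Actions"][action]:
    raise LookupError(f"target URI missing from Action {action}")
action_uri = data["Actions"][action]["target"]
assert action_uri.endswith("SetDefaultBootOrder")
```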
default"} def get_chassis_inventory(self): result = {} chassis_results = [] # Get these entries, but does not fail if not found - properties = ['Name', 'Id', 'ChassisType', 'PartNumber', 'AssetTag', - 'Manufacturer', 'IndicatorLED', 'SerialNumber', 'Model'] + properties = [ + "Name", + "Id", + "ChassisType", + "PartNumber", + "AssetTag", + "Manufacturer", + "IndicatorLED", + "SerialNumber", + "Model", + ] # Go through list for chassis_uri in self.chassis_uris: response = self.get_request(self.root_uri + chassis_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] chassis_result = {} for property in properties: if property in data: @@ -2455,34 +2486,34 @@ def get_fan_inventory(self): fan_results = [] key = "Thermal" # Get these entries, but does not fail if not found - properties = ['Name', 'FanName', 'Reading', 'ReadingUnits', 'Status'] + properties = ["Name", "FanName", "Reading", "ReadingUnits", "Status"] # Go through list for chassis_uri in self.chassis_uris: response = self.get_request(self.root_uri + chassis_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] if key in data: # match: found an entry for "Thermal" information = fans thermal_uri = data[key]["@odata.id"] response = self.get_request(self.root_uri + thermal_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] # Checking if fans are present - if 'Fans' in data: - for device in data['Fans']: + if "Fans" in data: + for device in data["Fans"]: fan = {} for property in properties: if property in device: fan[property] = device[property] fan_results.append(fan) else: - return {'ret': False, 'msg': "No Fans present"} + return {"ret": False, "msg": "No Fans present"} result["entries"] = fan_results return result @@ -2491,36 +2522,43 @@ def get_chassis_power(self): key = "Power" # Get these entries, but does not fail if not found - properties = ['Name', 'PowerAllocatedWatts', - 'PowerAvailableWatts', 'PowerCapacityWatts', - 'PowerConsumedWatts', 'PowerMetrics', - 'PowerRequestedWatts', 'RelatedItem', 'Status'] + properties = [ + "Name", + "PowerAllocatedWatts", + "PowerAvailableWatts", + "PowerCapacityWatts", + "PowerConsumedWatts", + "PowerMetrics", + "PowerRequestedWatts", + "RelatedItem", + "Status", + ] chassis_power_results = [] # Go through list for chassis_uri in self.chassis_uris: chassis_power_result = {} response = self.get_request(self.root_uri + chassis_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] if key in data: - response = self.get_request(self.root_uri + data[key]['@odata.id']) - data = response['data'] - if 'PowerControl' in data: - if len(data['PowerControl']) > 0: - data = data['PowerControl'][0] + response = self.get_request(self.root_uri + data[key]["@odata.id"]) + data = response["data"] + if "PowerControl" in data: + if len(data["PowerControl"]) > 0: + data = data["PowerControl"][0] for property in properties: if property in data: chassis_power_result[property] = data[property] chassis_power_results.append(chassis_power_result) if len(chassis_power_results) > 0: - result['entries'] = 
chassis_power_results + result["entries"] = chassis_power_results return result else: - return {'ret': False, 'msg': 'Power information not found.'} + return {"ret": False, "msg": "Power information not found."} def get_chassis_thermals(self): result = {} @@ -2528,29 +2566,39 @@ def get_chassis_thermals(self): key = "Thermal" # Get these entries, but does not fail if not found - properties = ['Name', 'PhysicalContext', 'UpperThresholdCritical', - 'UpperThresholdFatal', 'UpperThresholdNonCritical', - 'LowerThresholdCritical', 'LowerThresholdFatal', - 'LowerThresholdNonCritical', 'MaxReadingRangeTemp', - 'MinReadingRangeTemp', 'ReadingCelsius', 'RelatedItem', - 'SensorNumber', 'Status'] + properties = [ + "Name", + "PhysicalContext", + "UpperThresholdCritical", + "UpperThresholdFatal", + "UpperThresholdNonCritical", + "LowerThresholdCritical", + "LowerThresholdFatal", + "LowerThresholdNonCritical", + "MaxReadingRangeTemp", + "MinReadingRangeTemp", + "ReadingCelsius", + "RelatedItem", + "SensorNumber", + "Status", + ] # Go through list for chassis_uri in self.chassis_uris: response = self.get_request(self.root_uri + chassis_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] if key in data: thermal_uri = data[key]["@odata.id"] response = self.get_request(self.root_uri + thermal_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] if "Temperatures" in data: - for sensor in data['Temperatures']: + for sensor in data["Temperatures"]: sensor_result = {} for property in properties: if property in sensor: @@ -2559,9 +2607,9 @@ def get_chassis_thermals(self): sensors.append(sensor_result) if sensors is None: - return {'ret': False, 'msg': 'Key Temperatures was not found.'} + return {"ret": False, "msg": "Key Temperatures was not found."} - result['entries'] = sensors + result["entries"] = sensors return result def get_cpu_inventory(self, systems_uri): @@ -2570,38 +2618,47 @@ def get_cpu_inventory(self, systems_uri): cpu_results = [] key = "Processors" # Get these entries, but does not fail if not found - properties = ['Id', 'Name', 'Manufacturer', 'Model', 'MaxSpeedMHz', - 'ProcessorArchitecture', 'TotalCores', 'TotalThreads', 'Status'] + properties = [ + "Id", + "Name", + "Manufacturer", + "Model", + "MaxSpeedMHz", + "ProcessorArchitecture", + "TotalCores", + "TotalThreads", + "Status", + ] # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] if key not in data: - return {'ret': False, 'msg': f"Key {key} not found"} + return {"ret": False, "msg": f"Key {key} not found"} processors_uri = data[key]["@odata.id"] # Get a list of all CPUs and build respective URIs response = self.get_request(self.root_uri + processors_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] - for cpu in data['Members']: - cpu_list.append(cpu['@odata.id']) + for cpu in data["Members"]: + cpu_list.append(cpu["@odata.id"]) for c in cpu_list: cpu = {} uri = self.root_uri + c response = self.get_request(uri) - if 
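The inventory getters in this region all follow the same two-step collection walk: read the collection, gather the `Members` URIs, then GET each member and copy a fixed subset of properties. A sketch of that pattern with illustrative data:

```python
# Sketch: collection walk plus property-subset copy, as in get_cpu_inventory().
collection = {"Members": [{"@odata.id": "/redfish/v1/Systems/1/Processors/CPU1"}]}
member_uris = [m["@odata.id"] for m in collection["Members"]]

properties = ["Id", "Model", "TotalCores", "Status"]
member_data = {"Id": "CPU1", "Model": "Generic CPU", "TotalCores": 8, "Vendor": "X"}
cpu = {p: member_data[p] for p in properties if p in member_data}
assert cpu == {"Id": "CPU1", "Model": "Generic CPU", "TotalCores": 8}
```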
response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] for property in properties: if property in data: @@ -2620,38 +2677,49 @@ def get_memory_inventory(self, systems_uri): memory_results = [] key = "Memory" # Get these entries, but does not fail if not found - properties = ['Id', 'SerialNumber', 'MemoryDeviceType', 'PartNumber', - 'MemoryLocation', 'RankCount', 'CapacityMiB', 'OperatingMemoryModes', 'Status', 'Manufacturer', 'Name'] + properties = [ + "Id", + "SerialNumber", + "MemoryDeviceType", + "PartNumber", + "MemoryLocation", + "RankCount", + "CapacityMiB", + "OperatingMemoryModes", + "Status", + "Manufacturer", + "Name", + ] # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] if key not in data: - return {'ret': False, 'msg': f"Key {key} not found"} + return {"ret": False, "msg": f"Key {key} not found"} memory_uri = data[key]["@odata.id"] # Get a list of all DIMMs and build respective URIs response = self.get_request(self.root_uri + memory_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] - for dimm in data['Members']: - memory_list.append(dimm['@odata.id']) + for dimm in data["Members"]: + memory_list.append(dimm["@odata.id"]) for m in memory_list: dimm = {} uri = self.root_uri + m response = self.get_request(uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] if "Status" in data: if "State" in data["Status"]: @@ -2673,19 +2741,32 @@ def get_multi_memory_inventory(self): def get_nic(self, resource_uri): result = {} - properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses', - 'NameServers', 'MACAddress', 'PermanentMACAddress', - 'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status', 'LinkStatus'] + properties = [ + "Name", + "Id", + "Description", + "FQDN", + "IPv4Addresses", + "IPv6Addresses", + "NameServers", + "MACAddress", + "PermanentMACAddress", + "SpeedMbps", + "MTUSize", + "AutoNeg", + "Status", + "LinkStatus", + ] response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] nic = {} for property in properties: if property in data: nic[property] = data[property] - result['entries'] = nic + result["entries"] = nic return result def get_nic_inventory(self, resource_uri): @@ -2695,30 +2776,30 @@ def get_nic_inventory(self, resource_uri): key = "EthernetInterfaces" response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] if key not in data: - return {'ret': False, 'msg': f"Key {key} not found"} + return {"ret": False, "msg": f"Key {key} not found"} ethernetinterfaces_uri = data[key]["@odata.id"] # Get a list of all network controllers and build respective URIs response = self.get_request(self.root_uri + ethernetinterfaces_uri) - if response['ret'] is False: + if response["ret"] is False: return response - 
result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] - for nic in data['Members']: - nic_list.append(nic['@odata.id']) + for nic in data["Members"]: + nic_list.append(nic["@odata.id"]) for n in nic_list: nic = self.get_nic(n) - if nic['ret']: - nic_results.append(nic['entries']) + if nic["ret"]: + nic_results.append(nic["entries"]) result["entries"] = nic_results return result @@ -2727,17 +2808,16 @@ def get_multi_nic_inventory(self, resource_type): entries = [] # Given resource_type, use the proper URI - if resource_type == 'Systems': + if resource_type == "Systems": resource_uris = self.systems_uris - elif resource_type == 'Manager': + elif resource_type == "Manager": resource_uris = self.manager_uris for resource_uri in resource_uris: inventory = self.get_nic_inventory(resource_uri) - ret = inventory.pop('ret') and ret - if 'entries' in inventory: - entries.append(({'resource_uri': resource_uri}, - inventory['entries'])) + ret = inventory.pop("ret") and ret + if "entries" in inventory: + entries.append(({"resource_uri": resource_uri}, inventory["entries"])) return dict(ret=ret, entries=entries) def get_virtualmedia(self, resource_uri): @@ -2746,38 +2826,47 @@ def get_virtualmedia(self, resource_uri): virtualmedia_results = [] key = "VirtualMedia" # Get these entries, but does not fail if not found - properties = ['Description', 'ConnectedVia', 'Id', 'MediaTypes', - 'Image', 'ImageName', 'Name', 'WriteProtected', - 'TransferMethod', 'TransferProtocolType'] + properties = [ + "Description", + "ConnectedVia", + "Id", + "MediaTypes", + "Image", + "ImageName", + "Name", + "WriteProtected", + "TransferMethod", + "TransferProtocolType", + ] response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] if key not in data: - return {'ret': False, 'msg': f"Key {key} not found"} + return {"ret": False, "msg": f"Key {key} not found"} virtualmedia_uri = data[key]["@odata.id"] # Get a list of all virtual media and build respective URIs response = self.get_request(self.root_uri + virtualmedia_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] - for virtualmedia in data['Members']: - virtualmedia_list.append(virtualmedia['@odata.id']) + for virtualmedia in data["Members"]: + virtualmedia_list.append(virtualmedia["@odata.id"]) for n in virtualmedia_list: virtualmedia = {} uri = self.root_uri + n response = self.get_request(uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] for property in properties: if property in data: @@ -2787,50 +2876,47 @@ def get_virtualmedia(self, resource_uri): result["entries"] = virtualmedia_results return result - def get_multi_virtualmedia(self, resource_type='Manager'): + def get_multi_virtualmedia(self, resource_type="Manager"): ret = True entries = [] # Given resource_type, use the proper URI - if resource_type == 'Systems': + if resource_type == "Systems": resource_uris = self.systems_uris - elif resource_type == 'Manager': + elif resource_type == "Manager": resource_uris = self.manager_uris for resource_uri in resource_uris: virtualmedia = self.get_virtualmedia(resource_uri) - ret = virtualmedia.pop('ret') and ret - if 'entries' in virtualmedia: - 
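The multi-resource getters fold each per-resource `ret` into one overall flag while tagging entries with their originating URI, as `get_multi_nic_inventory()` does above; a sketch with illustrative inputs:

```python
# Sketch: the ret-folding aggregation idiom.
per_resource = {
    "/redfish/v1/Systems/1": {"ret": True, "entries": [{"Id": "NIC1"}]},
    "/redfish/v1/Systems/2": {"ret": False},
}
ret, entries = True, []
for uri, inventory in per_resource.items():
    ret = inventory.pop("ret") and ret
    if "entries" in inventory:
        entries.append(({"resource_uri": uri}, inventory["entries"]))
assert ret is False and len(entries) == 1
```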
entries.append(({'resource_uri': resource_uri}, - virtualmedia['entries'])) + ret = virtualmedia.pop("ret") and ret + if "entries" in virtualmedia: + entries.append(({"resource_uri": resource_uri}, virtualmedia["entries"])) return dict(ret=ret, entries=entries) @staticmethod - def _find_empty_virt_media_slot(resources, media_types, - media_match_strict=True, vendor=''): + def _find_empty_virt_media_slot(resources, media_types, media_match_strict=True, vendor=""): for uri, data in resources.items(): # check MediaTypes - if 'MediaTypes' in data and media_types: - if not set(media_types).intersection(set(data['MediaTypes'])): + if "MediaTypes" in data and media_types: + if not set(media_types).intersection(set(data["MediaTypes"])): continue else: if media_match_strict: continue # Base on current Lenovo server capability, filter out slot RDOC1/2 and Remote1/2/3/4 which are not supported to Insert/Eject. - if vendor == 'Lenovo' and ('RDOC' in uri or 'Remote' in uri): + if vendor == "Lenovo" and ("RDOC" in uri or "Remote" in uri): continue # if ejected, 'Inserted' should be False and 'ImageName' cleared - if (not data.get('Inserted', False) and - not data.get('ImageName')): + if not data.get("Inserted", False) and not data.get("ImageName"): return uri, data return None, None @staticmethod def _virt_media_image_inserted(resources, image_url): for uri, data in resources.items(): - if data.get('Image'): - if urlparse(image_url) == urlparse(data.get('Image')): - if data.get('Inserted', False) and data.get('ImageName'): + if data.get("Image"): + if urlparse(image_url) == urlparse(data.get("Image")): + if data.get("Inserted", False) and data.get("ImageName"): return True return False @@ -2838,10 +2924,10 @@ def _virt_media_image_inserted(resources, image_url): def _find_virt_media_to_eject(resources, image_url): matched_uri, matched_data = None, None for uri, data in resources.items(): - if data.get('Image'): - if urlparse(image_url) == urlparse(data.get('Image')): + if data.get("Image"): + if urlparse(image_url) == urlparse(data.get("Image")): matched_uri, matched_data = uri, data - if data.get('Inserted', True) and data.get('ImageName', 'x'): + if data.get("Inserted", True) and data.get("ImageName", "x"): return uri, data, True return matched_uri, matched_data, False @@ -2850,243 +2936,226 @@ def _read_virt_media_resources(self, uri_list): headers = {} for uri in uri_list: response = self.get_request(self.root_uri + uri) - if response['ret'] is False: + if response["ret"] is False: continue - resources[uri] = response['data'] - headers[uri] = response['headers'] + resources[uri] = response["data"] + headers[uri] = response["headers"] return resources, headers @staticmethod def _insert_virt_media_payload(options, param_map, data, ai): - payload = { - 'Image': options.get('image_url') - } + payload = {"Image": options.get("image_url")} for param, option in param_map.items(): if options.get(option) is not None and param in data: - allowable = ai.get(param, {}).get('AllowableValues', []) + allowable = ai.get(param, {}).get("AllowableValues", []) if allowable and options.get(option) not in allowable: - return {'ret': False, - 'msg': f"Value '{options.get(option)}' specified for option '{option}' not in list of AllowableValues {allowable}"} + return { + "ret": False, + "msg": f"Value '{options.get(option)}' specified for option '{option}' not in list of AllowableValues {allowable}", + } payload[param] = options.get(option) return payload def virtual_media_insert_via_patch(self, options, param_map, uri, 
data, image_only=False): # get AllowableValues - ai = { - k[:-24]: {'AllowableValues': v} - for k, v in data.items() - if k.endswith('@Redfish.AllowableValues') - } + ai = {k[:-24]: {"AllowableValues": v} for k, v in data.items() if k.endswith("@Redfish.AllowableValues")} # construct payload payload = self._insert_virt_media_payload(options, param_map, data, ai) - if 'Inserted' not in payload and not image_only: + if "Inserted" not in payload and not image_only: # Add Inserted to the payload if needed - payload['Inserted'] = True + payload["Inserted"] = True # PATCH the resource resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True) - if resp['ret'] is False: + if resp["ret"] is False: # WORKAROUND # Older HPE systems with iLO 4 and Supermicro do not support # specifying Inserted or WriteProtected - vendor = self._get_vendor()['Vendor'] - if vendor == 'HPE' or vendor == 'Supermicro': - payload.pop('Inserted', None) - payload.pop('WriteProtected', None) + vendor = self._get_vendor()["Vendor"] + if vendor == "HPE" or vendor == "Supermicro": + payload.pop("Inserted", None) + payload.pop("WriteProtected", None) resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True) - if resp['ret'] and resp['changed']: - resp['msg'] = 'VirtualMedia inserted' + if resp["ret"] and resp["changed"]: + resp["msg"] = "VirtualMedia inserted" return resp - def virtual_media_insert(self, options, resource_type='Manager'): + def virtual_media_insert(self, options, resource_type="Manager"): param_map = { - 'Inserted': 'inserted', - 'WriteProtected': 'write_protected', - 'UserName': 'username', - 'Password': 'password', - 'TransferProtocolType': 'transfer_protocol_type', - 'TransferMethod': 'transfer_method' + "Inserted": "inserted", + "WriteProtected": "write_protected", + "UserName": "username", + "Password": "password", + "TransferProtocolType": "transfer_protocol_type", + "TransferMethod": "transfer_method", } - image_url = options.get('image_url') + image_url = options.get("image_url") if not image_url: - return {'ret': False, - 'msg': "image_url option required for VirtualMediaInsert"} - media_types = options.get('media_types') + return {"ret": False, "msg": "image_url option required for VirtualMediaInsert"} + media_types = options.get("media_types") # locate and read the VirtualMedia resources # Given resource_type, use the proper URI - if resource_type == 'Systems': + if resource_type == "Systems": resource_uri = self.systems_uri - elif resource_type == 'Manager': + elif resource_type == "Manager": resource_uri = self.manager_uri response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'VirtualMedia' not in data: - return {'ret': False, 'msg': "VirtualMedia resource not found"} + data = response["data"] + if "VirtualMedia" not in data: + return {"ret": False, "msg": "VirtualMedia resource not found"} virt_media_uri = data["VirtualMedia"]["@odata.id"] response = self.get_request(self.root_uri + virt_media_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] virt_media_list = [] - for member in data['Members']: - virt_media_list.append(member['@odata.id']) + for member in data["Members"]: + virt_media_list.append(member["@odata.id"]) resources, headers = self._read_virt_media_resources(virt_media_list) # see if image already inserted; if so, nothing to do if 
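`_insert_virt_media_payload()` above maps module options onto Redfish property names, but only for properties the resource actually exposes, validating against AllowableValues when present. A runnable sketch; the option values and exposed properties are illustrative:

```python
# Sketch: option-to-property mapping with AllowableValues validation.
options = {"image_url": "https://example.com/install.iso", "transfer_method": "Stream"}
param_map = {"TransferMethod": "transfer_method", "WriteProtected": "write_protected"}
data = {"TransferMethod": None, "Image": None}  # properties the resource exposes
ai = {"TransferMethod": {"AllowableValues": ["Stream", "Upload"]}}

payload = {"Image": options.get("image_url")}
for param, option in param_map.items():
    if options.get(option) is not None and param in data:
        allowable = ai.get(param, {}).get("AllowableValues", [])
        if allowable and options.get(option) not in allowable:
            raise ValueError(f"{options.get(option)} not in {allowable}")
        payload[param] = options.get(option)
assert payload == {"Image": "https://example.com/install.iso", "TransferMethod": "Stream"}
```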
self._virt_media_image_inserted(resources, image_url): - return {'ret': True, 'changed': False, - 'msg': f"VirtualMedia '{image_url}' already inserted"} + return {"ret": True, "changed": False, "msg": f"VirtualMedia '{image_url}' already inserted"} # find an empty slot to insert the media # try first with strict media_type matching - vendor = self._get_vendor()['Vendor'] - uri, data = self._find_empty_virt_media_slot( - resources, media_types, media_match_strict=True, vendor=vendor) + vendor = self._get_vendor()["Vendor"] + uri, data = self._find_empty_virt_media_slot(resources, media_types, media_match_strict=True, vendor=vendor) if not uri: # if not found, try without strict media_type matching uri, data = self._find_empty_virt_media_slot( - resources, media_types, media_match_strict=False, vendor=vendor) + resources, media_types, media_match_strict=False, vendor=vendor + ) if not uri: - return {'ret': False, - 'msg': f"Unable to find an available VirtualMedia resource {f'supporting {media_types}' if media_types else ''}"} + return { + "ret": False, + "msg": f"Unable to find an available VirtualMedia resource {f'supporting {media_types}' if media_types else ''}", + } # confirm InsertMedia action found - if ('Actions' not in data or - '#VirtualMedia.InsertMedia' not in data['Actions']): + if "Actions" not in data or "#VirtualMedia.InsertMedia" not in data["Actions"]: # try to insert via PATCH if no InsertMedia action found h = headers[uri] - if 'allow' in h: - methods = [m.strip() for m in h.get('allow').split(',')] - if 'PATCH' not in methods: + if "allow" in h: + methods = [m.strip() for m in h.get("allow").split(",")] + if "PATCH" not in methods: # if Allow header present and PATCH missing, return error - return {'ret': False, - 'msg': "#VirtualMedia.InsertMedia action not found and PATCH not allowed"} - return self.virtual_media_insert_via_patch(options, param_map, - uri, data) + return {"ret": False, "msg": "#VirtualMedia.InsertMedia action not found and PATCH not allowed"} + return self.virtual_media_insert_via_patch(options, param_map, uri, data) # get the action property - action = data['Actions']['#VirtualMedia.InsertMedia'] - if 'target' not in action: - return {'ret': False, - 'msg': "target URI missing from Action " - "#VirtualMedia.InsertMedia"} - action_uri = action['target'] + action = data["Actions"]["#VirtualMedia.InsertMedia"] + if "target" not in action: + return {"ret": False, "msg": "target URI missing from Action #VirtualMedia.InsertMedia"} + action_uri = action["target"] # get ActionInfo or AllowableValues ai = self._get_all_action_info_values(action) # construct payload payload = self._insert_virt_media_payload(options, param_map, data, ai) # POST to action response = self.post_request(self.root_uri + action_uri, payload) - if response['ret'] is False and ('Inserted' in payload or 'WriteProtected' in payload): + if response["ret"] is False and ("Inserted" in payload or "WriteProtected" in payload): # WORKAROUND # Older HPE systems with iLO 4 and Supermicro do not support # specifying Inserted or WriteProtected - vendor = self._get_vendor()['Vendor'] - if vendor == 'HPE' or vendor == 'Supermicro': - payload.pop('Inserted', None) - payload.pop('WriteProtected', None) + vendor = self._get_vendor()["Vendor"] + if vendor == "HPE" or vendor == "Supermicro": + payload.pop("Inserted", None) + payload.pop("WriteProtected", None) response = self.post_request(self.root_uri + action_uri, payload) - if response['ret'] is False: + if response["ret"] is False: return response - 
return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"} + return {"ret": True, "changed": True, "msg": "VirtualMedia inserted"} def virtual_media_eject_via_patch(self, uri, image_only=False): # construct payload - payload = { - 'Inserted': False, - 'Image': None - } + payload = {"Inserted": False, "Image": None} # Inserted is not writable if image_only: - del payload['Inserted'] + del payload["Inserted"] # PATCH resource resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True) - if resp['ret'] is False and 'Inserted' in payload: + if resp["ret"] is False and "Inserted" in payload: # WORKAROUND # Older HPE systems with iLO 4 and Supermicro do not support # specifying Inserted - vendor = self._get_vendor()['Vendor'] - if vendor == 'HPE' or vendor == 'Supermicro': - payload.pop('Inserted', None) + vendor = self._get_vendor()["Vendor"] + if vendor == "HPE" or vendor == "Supermicro": + payload.pop("Inserted", None) resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True) - if resp['ret'] and resp['changed']: - resp['msg'] = 'VirtualMedia ejected' + if resp["ret"] and resp["changed"]: + resp["msg"] = "VirtualMedia ejected" return resp - def virtual_media_eject(self, options, resource_type='Manager'): - image_url = options.get('image_url') + def virtual_media_eject(self, options, resource_type="Manager"): + image_url = options.get("image_url") if not image_url: - return {'ret': False, - 'msg': "image_url option required for VirtualMediaEject"} + return {"ret": False, "msg": "image_url option required for VirtualMediaEject"} # locate and read the VirtualMedia resources # Given resource_type, use the proper URI - if resource_type == 'Systems': + if resource_type == "Systems": resource_uri = self.systems_uri - elif resource_type == 'Manager': + elif resource_type == "Manager": resource_uri = self.manager_uri response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'VirtualMedia' not in data: - return {'ret': False, 'msg': "VirtualMedia resource not found"} + data = response["data"] + if "VirtualMedia" not in data: + return {"ret": False, "msg": "VirtualMedia resource not found"} virt_media_uri = data["VirtualMedia"]["@odata.id"] response = self.get_request(self.root_uri + virt_media_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] virt_media_list = [] - for member in data['Members']: - virt_media_list.append(member['@odata.id']) + for member in data["Members"]: + virt_media_list.append(member["@odata.id"]) resources, headers = self._read_virt_media_resources(virt_media_list) # find the VirtualMedia resource to eject uri, data, eject = self._find_virt_media_to_eject(resources, image_url) if uri and eject: - if ('Actions' not in data or - '#VirtualMedia.EjectMedia' not in data['Actions']): + if "Actions" not in data or "#VirtualMedia.EjectMedia" not in data["Actions"]: # try to eject via PATCH if no EjectMedia action found h = headers[uri] - if 'allow' in h: - methods = [m.strip() for m in h.get('allow').split(',')] - if 'PATCH' not in methods: + if "allow" in h: + methods = [m.strip() for m in h.get("allow").split(",")] + if "PATCH" not in methods: # if Allow header present and PATCH missing, return error - return {'ret': False, - 'msg': "#VirtualMedia.EjectMedia action not found and PATCH not allowed"} + return {"ret": False, "msg": "#VirtualMedia.EjectMedia 
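The eject-by-PATCH fallback above clears `Image` and marks the slot not inserted; on firmware that rejects `Inserted` (older HPE iLO 4, Supermicro) the key is dropped and the PATCH retried. A condensed sketch of that workaround:

```python
# Sketch: eject payload plus the vendor retry path.
payload = {"Inserted": False, "Image": None}
vendor = "HPE"  # illustrative; normally self._get_vendor()["Vendor"]
first_patch_failed = True  # stand-in for resp["ret"] is False
if first_patch_failed and vendor in ("HPE", "Supermicro"):
    payload.pop("Inserted", None)  # retry without the unsupported key
assert payload == {"Image": None}
```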
action not found and PATCH not allowed"} return self.virtual_media_eject_via_patch(uri) else: # POST to the EjectMedia Action - action = data['Actions']['#VirtualMedia.EjectMedia'] - if 'target' not in action: - return {'ret': False, - 'msg': "target URI property missing from Action " - "#VirtualMedia.EjectMedia"} - action_uri = action['target'] + action = data["Actions"]["#VirtualMedia.EjectMedia"] + if "target" not in action: + return {"ret": False, "msg": "target URI property missing from Action #VirtualMedia.EjectMedia"} + action_uri = action["target"] # empty payload for Eject action payload = {} # POST to action - response = self.post_request(self.root_uri + action_uri, - payload) - if response['ret'] is False: + response = self.post_request(self.root_uri + action_uri, payload) + if response["ret"] is False: return response - return {'ret': True, 'changed': True, - 'msg': "VirtualMedia ejected"} + return {"ret": True, "changed": True, "msg": "VirtualMedia ejected"} elif uri and not eject: # already ejected: return success but changed=False - return {'ret': True, 'changed': False, - 'msg': f"VirtualMedia image '{image_url}' already ejected"} + return {"ret": True, "changed": False, "msg": f"VirtualMedia image '{image_url}' already ejected"} else: # return failure (no resources matching image_url found) - return {'ret': False, 'changed': False, - 'msg': f"No VirtualMedia resource found with image '{image_url}' inserted"} + return { + "ret": False, + "changed": False, + "msg": f"No VirtualMedia resource found with image '{image_url}' inserted", + } def get_psu_inventory(self): result = {} @@ -3094,30 +3163,38 @@ def get_psu_inventory(self): psu_results = [] key = "PowerSupplies" # Get these entries, but does not fail if not found - properties = ['Name', 'Model', 'SerialNumber', 'PartNumber', 'Manufacturer', - 'FirmwareVersion', 'PowerCapacityWatts', 'PowerSupplyType', - 'Status'] + properties = [ + "Name", + "Model", + "SerialNumber", + "PartNumber", + "Manufacturer", + "FirmwareVersion", + "PowerCapacityWatts", + "PowerSupplyType", + "Status", + ] # Get a list of all Chassis and build URIs, then get all PowerSupplies # from each Power entry in the Chassis for chassis_uri in self.chassis_uris: response = self.get_request(self.root_uri + chassis_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] - if 'Power' in data: - power_uri = data['Power']['@odata.id'] + if "Power" in data: + power_uri = data["Power"]["@odata.id"] else: continue response = self.get_request(self.root_uri + power_uri) - data = response['data'] + data = response["data"] if key not in data: - return {'ret': False, 'msg': f"Key {key} not found"} + return {"ret": False, "msg": f"Key {key} not found"} psu_list = data[key] for psu in psu_list: @@ -3126,9 +3203,9 @@ def get_psu_inventory(self): for property in properties: if property in psu: if psu[property] is not None: - if property == 'Status': - if 'State' in psu[property]: - if psu[property]['State'] == 'Absent': + if property == "Status": + if "State" in psu[property]: + if psu[property]["State"] == "Absent": psu_not_present = True psu_data[property] = psu[property] if psu_not_present: @@ -3137,7 +3214,7 @@ def get_psu_inventory(self): result["entries"] = psu_results if not result["entries"]: - return {'ret': False, 'msg': "No PowerSupply objects found"} + return {"ret": False, "msg": "No PowerSupply objects found"} return result def 
get_multi_psu_inventory(self): @@ -3147,16 +3224,32 @@ def get_system_inventory(self, systems_uri): result = {} inventory = {} # Get these entries, but does not fail if not found - properties = ['Status', 'HostName', 'PowerState', 'BootProgress', 'Model', 'Manufacturer', - 'PartNumber', 'SystemType', 'AssetTag', 'ServiceTag', - 'SerialNumber', 'SKU', 'BiosVersion', 'MemorySummary', - 'ProcessorSummary', 'TrustedModules', 'Name', 'Id'] + properties = [ + "Status", + "HostName", + "PowerState", + "BootProgress", + "Model", + "Manufacturer", + "PartNumber", + "SystemType", + "AssetTag", + "ServiceTag", + "SerialNumber", + "SKU", + "BiosVersion", + "MemorySummary", + "ProcessorSummary", + "TrustedModules", + "Name", + "Id", + ] response = self.get_request(self.root_uri + systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] for property in properties: if property in data: @@ -3173,109 +3266,134 @@ def get_network_protocols(self): service_result = {} # Find NetworkProtocol response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'NetworkProtocol' not in data: - return {'ret': False, 'msg': "NetworkProtocol resource not found"} + data = response["data"] + if "NetworkProtocol" not in data: + return {"ret": False, "msg": "NetworkProtocol resource not found"} networkprotocol_uri = data["NetworkProtocol"]["@odata.id"] response = self.get_request(self.root_uri + networkprotocol_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - protocol_services = ['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH', - 'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP', - 'RFB'] + data = response["data"] + protocol_services = [ + "SNMP", + "VirtualMedia", + "Telnet", + "SSDP", + "IPMI", + "SSH", + "KVMIP", + "NTP", + "HTTP", + "HTTPS", + "DHCP", + "DHCPv6", + "RDP", + "RFB", + ] for protocol_service in protocol_services: if protocol_service in data.keys(): service_result[protocol_service] = data[protocol_service] - result['ret'] = True + result["ret"] = True result["entries"] = service_result return result def set_network_protocols(self, manager_services): # Check input data validity - protocol_services = ['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH', - 'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP', - 'RFB'] - protocol_state_onlist = ['true', 'True', True, 'on', 1] - protocol_state_offlist = ['false', 'False', False, 'off', 0] + protocol_services = [ + "SNMP", + "VirtualMedia", + "Telnet", + "SSDP", + "IPMI", + "SSH", + "KVMIP", + "NTP", + "HTTP", + "HTTPS", + "DHCP", + "DHCPv6", + "RDP", + "RFB", + ] + protocol_state_onlist = ["true", "True", True, "on", 1] + protocol_state_offlist = ["false", "False", False, "off", 0] payload = {} for service_name in manager_services.keys(): if service_name not in protocol_services: - return {'ret': False, 'msg': f"Service name {service_name} is invalid"} + return {"ret": False, "msg": f"Service name {service_name} is invalid"} payload[service_name] = {} for service_property in manager_services[service_name].keys(): value = manager_services[service_name][service_property] - if service_property in ['ProtocolEnabled', 'protocolenabled']: + if service_property in ["ProtocolEnabled", "protocolenabled"]: if value in protocol_state_onlist: - 
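`set_network_protocols()` above normalizes loosely-typed input before PATCHing: several truthy/falsy spellings are accepted for `ProtocolEnabled`, and `Port` is coerced to an int. A runnable sketch of that normalization:

```python
# Sketch: input normalization for manager network protocol settings.
protocol_state_onlist = ["true", "True", True, "on", 1]
protocol_state_offlist = ["false", "False", False, "off", 0]

def normalize(service_property, value):
    if service_property in ("ProtocolEnabled", "protocolenabled"):
        if value in protocol_state_onlist:
            return {"ProtocolEnabled": True}
        if value in protocol_state_offlist:
            return {"ProtocolEnabled": False}
        raise ValueError(f"Value of property {service_property} is invalid")
    if service_property in ("port", "Port"):
        if isinstance(value, int):
            return {"Port": value}
        if isinstance(value, str) and value.isdigit():
            return {"Port": int(value)}
        raise ValueError(f"Value of property {service_property} is invalid")
    return {service_property: value}

assert normalize("ProtocolEnabled", "on") == {"ProtocolEnabled": True}
assert normalize("Port", "443") == {"Port": 443}
```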
payload[service_name]['ProtocolEnabled'] = True + payload[service_name]["ProtocolEnabled"] = True elif value in protocol_state_offlist: - payload[service_name]['ProtocolEnabled'] = False + payload[service_name]["ProtocolEnabled"] = False else: - return {'ret': False, 'msg': f"Value of property {service_property} is invalid"} - elif service_property in ['port', 'Port']: + return {"ret": False, "msg": f"Value of property {service_property} is invalid"} + elif service_property in ["port", "Port"]: if isinstance(value, int): - payload[service_name]['Port'] = value + payload[service_name]["Port"] = value elif isinstance(value, str) and value.isdigit(): - payload[service_name]['Port'] = int(value) + payload[service_name]["Port"] = int(value) else: - return {'ret': False, 'msg': f"Value of property {service_property} is invalid"} + return {"ret": False, "msg": f"Value of property {service_property} is invalid"} else: payload[service_name][service_property] = value # Find the ManagerNetworkProtocol resource response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] networkprotocol_uri = data.get("NetworkProtocol", {}).get("@odata.id") if networkprotocol_uri is None: - return {'ret': False, 'msg': "NetworkProtocol resource not found"} + return {"ret": False, "msg": "NetworkProtocol resource not found"} # Modify the ManagerNetworkProtocol resource resp = self.patch_request(self.root_uri + networkprotocol_uri, payload, check_pyld=True) - if resp['ret'] and resp['changed']: - resp['msg'] = 'Modified manager network protocol settings' + if resp["ret"] and resp["changed"]: + resp["msg"] = "Modified manager network protocol settings" return resp @staticmethod def to_singular(resource_name): - if resource_name.endswith('ies'): + if resource_name.endswith("ies"): resource_name = f"{resource_name[:-3]}y" - elif resource_name.endswith('s'): + elif resource_name.endswith("s"): resource_name = resource_name[:-1] return resource_name def get_health_resource(self, subsystem, uri, health, expanded): - status = 'Status' + status = "Status" if expanded: d = expanded else: r = self.get_request(self.root_uri + uri) - if r.get('ret'): - d = r.get('data') + if r.get("ret"): + d = r.get("data") else: return - if 'Members' in d: # collections case - for m in d.get('Members'): - u = m.get('@odata.id') + if "Members" in d: # collections case + for m in d.get("Members"): + u = m.get("@odata.id") r = self.get_request(self.root_uri + u) - if r.get('ret'): - p = r.get('data') + if r.get("ret"): + p = r.get("data") if p: - e = {f"{self.to_singular(subsystem.lower())}_uri": u, - status: p.get(status, - "Status not available")} + e = { + f"{self.to_singular(subsystem.lower())}_uri": u, + status: p.get(status, "Status not available"), + } health[subsystem].append(e) else: # non-collections case - e = {f"{self.to_singular(subsystem.lower())}_uri": uri, - status: d.get(status, - "Status not available")} + e = {f"{self.to_singular(subsystem.lower())}_uri": uri, status: d.get(status, "Status not available")} health[subsystem].append(e) def get_health_subsystem(self, subsystem, data, health): @@ -3283,50 +3401,50 @@ def get_health_subsystem(self, subsystem, data, health): sub = data.get(subsystem) if isinstance(sub, list): for r in sub: - if '@odata.id' in r: - uri = r.get('@odata.id') + if "@odata.id" in r: + uri = r.get("@odata.id") expanded = None - if '#' in uri and len(r) > 1: + if "#" in uri and 
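The `to_singular()` helper reformatted above derives per-entry key names such as `processor_uri` from subsystem names; restated standalone:

```python
# Sketch: naive singularization used for health-report entry keys.
def to_singular(resource_name):
    if resource_name.endswith("ies"):
        return f"{resource_name[:-3]}y"
    if resource_name.endswith("s"):
        return resource_name[:-1]
    return resource_name

assert to_singular("Processors") == "Processor"
assert to_singular("Memory") == "Memory"
```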
len(r) > 1: expanded = r self.get_health_resource(subsystem, uri, health, expanded) elif isinstance(sub, dict): - if '@odata.id' in sub: - uri = sub.get('@odata.id') + if "@odata.id" in sub: + uri = sub.get("@odata.id") self.get_health_resource(subsystem, uri, health, None) - elif 'Members' in data: - for m in data.get('Members'): - u = m.get('@odata.id') + elif "Members" in data: + for m in data.get("Members"): + u = m.get("@odata.id") r = self.get_request(self.root_uri + u) - if r.get('ret'): - d = r.get('data') + if r.get("ret"): + d = r.get("data") self.get_health_subsystem(subsystem, d, health) def get_health_report(self, category, uri, subsystems): result = {} health = {} - status = 'Status' + status = "Status" # Get health status of top level resource response = self.get_request(self.root_uri + uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] health[category] = {status: data.get(status, "Status not available")} # Get health status of subsystems for sub in subsystems: d = None - if sub.startswith('Links.'): # ex: Links.PCIeDevices - sub = sub[len('Links.'):] - d = data.get('Links', {}) - elif '.' in sub: # ex: Thermal.Fans - p, sub = sub.split('.') - u = data.get(p, {}).get('@odata.id') + if sub.startswith("Links."): # ex: Links.PCIeDevices + sub = sub[len("Links.") :] + d = data.get("Links", {}) + elif "." in sub: # ex: Thermal.Fans + p, sub = sub.split(".") + u = data.get(p, {}).get("@odata.id") if u: r = self.get_request(self.root_uri + u) - if r['ret']: - d = r['data'] + if r["ret"]: + d = r["data"] if not d: continue else: # ex: Memory @@ -3340,25 +3458,30 @@ def get_health_report(self, category, uri, subsystems): return result def get_system_health_report(self, systems_uri): - subsystems = ['Processors', 'Memory', 'SimpleStorage', 'Storage', - 'EthernetInterfaces', 'NetworkInterfaces.NetworkPorts', - 'NetworkInterfaces.NetworkDeviceFunctions'] - return self.get_health_report('System', systems_uri, subsystems) + subsystems = [ + "Processors", + "Memory", + "SimpleStorage", + "Storage", + "EthernetInterfaces", + "NetworkInterfaces.NetworkPorts", + "NetworkInterfaces.NetworkDeviceFunctions", + ] + return self.get_health_report("System", systems_uri, subsystems) def get_multi_system_health_report(self): return self.aggregate_systems(self.get_system_health_report) def get_chassis_health_report(self, chassis_uri): - subsystems = ['Power.PowerSupplies', 'Thermal.Fans', - 'Links.PCIeDevices'] - return self.get_health_report('Chassis', chassis_uri, subsystems) + subsystems = ["Power.PowerSupplies", "Thermal.Fans", "Links.PCIeDevices"] + return self.get_health_report("Chassis", chassis_uri, subsystems) def get_multi_chassis_health_report(self): return self.aggregate_chassis(self.get_chassis_health_report) def get_manager_health_report(self, manager_uri): subsystems = [] - return self.get_health_report('Manager', manager_uri, subsystems) + return self.get_health_report("Manager", manager_uri, subsystems) def get_multi_manager_health_report(self): return self.aggregate_managers(self.get_manager_health_report) @@ -3367,11 +3490,11 @@ def set_manager_nic(self, nic_addr, nic_config): # Get the manager ethernet interface uri nic_info = self.get_manager_ethernet_uri(nic_addr) - if nic_info.get('nic_addr') is None: + if nic_info.get("nic_addr") is None: return nic_info else: - target_ethernet_uri = nic_info['nic_addr'] - target_ethernet_current_setting = 
+            target_ethernet_uri = nic_info["nic_addr"]
+            target_ethernet_current_setting = nic_info["ethernet_setting"]

         # Convert input to payload and check validity
         # Note: Some properties in the EthernetInterface resource are arrays of
@@ -3384,7 +3507,11 @@ def set_manager_nic(self, nic_addr, nic_config):
         payload = {}
         for property in nic_config.keys():
             value = nic_config[property]
-            if property in target_ethernet_current_setting and isinstance(value, dict) and isinstance(target_ethernet_current_setting[property], list):
+            if (
+                property in target_ethernet_current_setting
+                and isinstance(value, dict)
+                and isinstance(target_ethernet_current_setting[property], list)
+            ):
                 payload[property] = list()
                 payload[property].append(value)
             else:
@@ -3392,39 +3519,38 @@ def set_manager_nic(self, nic_addr, nic_config):

         # Modify the EthernetInterface resource
         resp = self.patch_request(self.root_uri + target_ethernet_uri, payload, check_pyld=True)
-        if resp['ret'] and resp['changed']:
-            resp['msg'] = 'Modified manager NIC'
+        if resp["ret"] and resp["changed"]:
+            resp["msg"] = "Modified manager NIC"
         return resp

     # A helper function to get the EthernetInterface URI
-    def get_manager_ethernet_uri(self, nic_addr='null'):
+    def get_manager_ethernet_uri(self, nic_addr="null"):
         # Get EthernetInterface collection
         response = self.get_request(self.root_uri + self.manager_uri)
-        if not response['ret']:
+        if not response["ret"]:
             return response
-        data = response['data']
-        if 'EthernetInterfaces' not in data:
-            return {'ret': False, 'msg': "EthernetInterfaces resource not found"}
+        data = response["data"]
+        if "EthernetInterfaces" not in data:
+            return {"ret": False, "msg": "EthernetInterfaces resource not found"}
         ethernetinterfaces_uri = data["EthernetInterfaces"]["@odata.id"]
         response = self.get_request(self.root_uri + ethernetinterfaces_uri)
-        if not response['ret']:
+        if not response["ret"]:
             return response
-        data = response['data']
-        uris = [a.get('@odata.id') for a in data.get('Members', []) if
-                a.get('@odata.id')]
+        data = response["data"]
+        uris = [a.get("@odata.id") for a in data.get("Members", []) if a.get("@odata.id")]

         # Find target EthernetInterface
         target_ethernet_uri = None
         target_ethernet_current_setting = None
-        if nic_addr == 'null':
+        if nic_addr == "null":
             # Find root_uri matched EthernetInterface when nic_addr is not specified
-            nic_addr = (self.root_uri).split('/')[-1]
-            nic_addr = nic_addr.split(':')[0]  # split port if existing
+            nic_addr = (self.root_uri).split("/")[-1]
+            nic_addr = nic_addr.split(":")[0]  # split port if existing
         for uri in uris:
             response = self.get_request(self.root_uri + uri)
-            if not response['ret']:
+            if not response["ret"]:
                 return response
-            data = response['data']
+            data = response["data"]
             data_string = json.dumps(data)
             if nic_addr.lower() in data_string.lower():
                 target_ethernet_uri = uri
@@ -3432,8 +3558,8 @@ def get_manager_ethernet_uri(self, nic_addr='null'):
                 break

         nic_info = {}
-        nic_info['nic_addr'] = target_ethernet_uri
-        nic_info['ethernet_setting'] = target_ethernet_current_setting
+        nic_info["nic_addr"] = target_ethernet_uri
+        nic_info["ethernet_setting"] = target_ethernet_current_setting

         if target_ethernet_uri is None:
             return {}
@@ -3442,71 +3568,79 @@ def get_manager_ethernet_uri(self, nic_addr='null'):

     def set_hostinterface_attributes(self, hostinterface_config, hostinterface_id=None):
         if hostinterface_config is None:
-            return {'ret': False, 'msg':
-                    'Must provide hostinterface_config for SetHostInterface command'}
+            return {"ret": False, "msg": "Must provide hostinterface_config for SetHostInterface command"}

         # Find the HostInterfaceCollection resource
         response = self.get_request(self.root_uri + self.manager_uri)
-        if response['ret'] is False:
+        if response["ret"] is False:
             return response
-        data = response['data']
+        data = response["data"]
         hostinterfaces_uri = data.get("HostInterfaces", {}).get("@odata.id")
         if hostinterfaces_uri is None:
-            return {'ret': False, 'msg': "HostInterface resource not found"}
+            return {"ret": False, "msg": "HostInterface resource not found"}
         response = self.get_request(self.root_uri + hostinterfaces_uri)
-        if response['ret'] is False:
+        if response["ret"] is False:
             return response
-        data = response['data']
-        uris = [a.get('@odata.id') for a in data.get('Members', []) if a.get('@odata.id')]
+        data = response["data"]
+        uris = [a.get("@odata.id") for a in data.get("Members", []) if a.get("@odata.id")]

         # Capture list of URIs that match a specified HostInterface resource Id
         if hostinterface_id:
-            matching_hostinterface_uris = [uri for uri in uris if hostinterface_id in uri.rstrip('/').split('/')[-1]]
+            matching_hostinterface_uris = [uri for uri in uris if hostinterface_id in uri.rstrip("/").split("/")[-1]]
         if hostinterface_id and matching_hostinterface_uris:
             hostinterface_uri = list.pop(matching_hostinterface_uris)
         elif hostinterface_id and not matching_hostinterface_uris:
-            return {'ret': False, 'msg': f"HostInterface ID {hostinterface_id} not present."}
+            return {"ret": False, "msg": f"HostInterface ID {hostinterface_id} not present."}
         elif len(uris) == 1:
             hostinterface_uri = list.pop(uris)
         else:
-            return {'ret': False, 'msg': "HostInterface ID not defined and multiple interfaces detected."}
+            return {"ret": False, "msg": "HostInterface ID not defined and multiple interfaces detected."}

         # Modify the HostInterface resource
         resp = self.patch_request(self.root_uri + hostinterface_uri, hostinterface_config, check_pyld=True)
-        if resp['ret'] and resp['changed']:
-            resp['msg'] = 'Modified host interface'
+        if resp["ret"] and resp["changed"]:
+            resp["msg"] = "Modified host interface"
         return resp

     def get_hostinterfaces(self):
         result = {}
         hostinterface_results = []
-        properties = ['Id', 'Name', 'Description', 'HostInterfaceType', 'Status',
-                      'InterfaceEnabled', 'ExternallyAccessible', 'AuthenticationModes',
-                      'AuthNoneRoleId', 'CredentialBootstrapping']
+        properties = [
+            "Id",
+            "Name",
+            "Description",
+            "HostInterfaceType",
+            "Status",
+            "InterfaceEnabled",
+            "ExternallyAccessible",
+            "AuthenticationModes",
+            "AuthNoneRoleId",
+            "CredentialBootstrapping",
+        ]
         manager_uri_list = self.manager_uris
         for manager_uri in manager_uri_list:
             response = self.get_request(self.root_uri + manager_uri)
-            if response['ret'] is False:
+            if response["ret"] is False:
                 return response

-            result['ret'] = True
-            data = response['data']
+            result["ret"] = True
+            data = response["data"]
             hostinterfaces_uri = data.get("HostInterfaces", {}).get("@odata.id")

             if hostinterfaces_uri is None:
                 continue

             response = self.get_request(self.root_uri + hostinterfaces_uri)
-            data = response['data']
+            data = response["data"]

-            if 'Members' in data:
-                for hostinterface in data['Members']:
-                    hostinterface_uri = hostinterface['@odata.id']
+            if "Members" in data:
+                for hostinterface in data["Members"]:
+                    hostinterface_uri = hostinterface["@odata.id"]
                     hostinterface_response = self.get_request(self.root_uri + hostinterface_uri)

                     # dictionary for capturing individual HostInterface properties
                     hostinterface_data_temp = {}
-                    if hostinterface_response['ret'] is False:
+                    if hostinterface_response["ret"] is False:
                         return hostinterface_response
-                    hostinterface_data = hostinterface_response['data']
+                    hostinterface_data = hostinterface_response["data"]
                     for property in properties:
                         if property in hostinterface_data:
                             if hostinterface_data[property] is not None:
@@ -3514,57 +3648,67 @@ def get_hostinterfaces(self):
                     # Check for the presence of a ManagerEthernetInterface
                     # object, a link to a _single_ EthernetInterface that the
                     # BMC uses to communicate with the host.
-                    if 'ManagerEthernetInterface' in hostinterface_data:
-                        interface_uri = hostinterface_data['ManagerEthernetInterface']['@odata.id']
+                    if "ManagerEthernetInterface" in hostinterface_data:
+                        interface_uri = hostinterface_data["ManagerEthernetInterface"]["@odata.id"]
                         interface_response = self.get_nic(interface_uri)
-                        if interface_response['ret'] is False:
+                        if interface_response["ret"] is False:
                             return interface_response
-                        hostinterface_data_temp['ManagerEthernetInterface'] = interface_response['entries']
+                        hostinterface_data_temp["ManagerEthernetInterface"] = interface_response["entries"]

                     # Check for the presence of a HostEthernetInterfaces
                     # object, a link to a _collection_ of EthernetInterfaces
                     # that the host uses to communicate with the BMC.
-                    if 'HostEthernetInterfaces' in hostinterface_data:
-                        interfaces_uri = hostinterface_data['HostEthernetInterfaces']['@odata.id']
+                    if "HostEthernetInterfaces" in hostinterface_data:
+                        interfaces_uri = hostinterface_data["HostEthernetInterfaces"]["@odata.id"]
                         interfaces_response = self.get_request(self.root_uri + interfaces_uri)
-                        if interfaces_response['ret'] is False:
+                        if interfaces_response["ret"] is False:
                             return interfaces_response
-                        interfaces_data = interfaces_response['data']
-                        if 'Members' in interfaces_data:
-                            for interface in interfaces_data['Members']:
-                                interface_uri = interface['@odata.id']
+                        interfaces_data = interfaces_response["data"]
+                        if "Members" in interfaces_data:
+                            for interface in interfaces_data["Members"]:
+                                interface_uri = interface["@odata.id"]
                                 interface_response = self.get_nic(interface_uri)
-                                if interface_response['ret'] is False:
+                                if interface_response["ret"] is False:
                                     return interface_response

                                 # Check if this is the first
                                 # HostEthernetInterfaces item and create empty
                                 # list if so.
-                                if 'HostEthernetInterfaces' not in hostinterface_data_temp:
-                                    hostinterface_data_temp['HostEthernetInterfaces'] = []
+                                if "HostEthernetInterfaces" not in hostinterface_data_temp:
+                                    hostinterface_data_temp["HostEthernetInterfaces"] = []

-                                hostinterface_data_temp['HostEthernetInterfaces'].append(interface_response['entries'])
+                                hostinterface_data_temp["HostEthernetInterfaces"].append(interface_response["entries"])

                     hostinterface_results.append(hostinterface_data_temp)
             else:
                 continue
         result["entries"] = hostinterface_results
         if not result["entries"]:
-            return {'ret': False, 'msg': "No HostInterface objects found"}
+            return {"ret": False, "msg": "No HostInterface objects found"}
         return result

     def get_manager_inventory(self, manager_uri):
         result = {}
         inventory = {}
         # Get these entries, but do not fail if not found
-        properties = ['Id', 'FirmwareVersion', 'ManagerType', 'Manufacturer', 'Model',
-                      'PartNumber', 'PowerState', 'SerialNumber', 'ServiceIdentification',
-                      'Status', 'UUID']
+        properties = [
+            "Id",
+            "FirmwareVersion",
+            "ManagerType",
+            "Manufacturer",
+            "Model",
+            "PartNumber",
+            "PowerState",
+            "SerialNumber",
+            "ServiceIdentification",
+            "Status",
+            "UUID",
+        ]

         response = self.get_request(self.root_uri + manager_uri)
-        if response['ret'] is False:
+        if response["ret"] is False:
             return response
-        result['ret'] = True
-        data = response['data']
+        result["ret"] = True
+        data = response["data"]

         for property in properties:
             if property in data:
@@ -3580,24 +3724,27 @@ def get_service_identification(self, manager):
         result = {}
         if manager is None:
             if len(self.manager_uris) == 1:
-                manager = self.manager_uris[0].rstrip('/').split('/')[-1]
+                manager = self.manager_uris[0].rstrip("/").split("/")[-1]
             elif len(self.manager_uris) > 1:
-                entries = self.get_multi_manager_inventory()['entries']
-                managers = [m[0]['manager_uri'] for m in entries if m[1].get('ServiceIdentification')]
+                entries = self.get_multi_manager_inventory()["entries"]
+                managers = [m[0]["manager_uri"] for m in entries if m[1].get("ServiceIdentification")]
                 if len(managers) == 1:
-                    manager = managers[0].rstrip('/').split('/')[-1]
+                    manager = managers[0].rstrip("/").split("/")[-1]
                 else:
-                    self.module.fail_json(msg=[
-                        f"Multiple managers with ServiceIdentification were found: {managers}",
-                        "Please specify by using the 'manager' parameter in your playbook"])
+                    self.module.fail_json(
+                        msg=[
+                            f"Multiple managers with ServiceIdentification were found: {managers}",
+                            "Please specify by using the 'manager' parameter in your playbook",
+                        ]
+                    )
             elif len(self.manager_uris) == 0:
                 self.module.fail_json(msg="No manager identities were found")
         response = self.get_request(f"{self.root_uri}/redfish/v1/Managers/{manager}", override_headers=None)
         try:
-            result['service_identification'] = response['data']['ServiceIdentification']
+            result["service_identification"] = response["data"]["ServiceIdentification"]
         except Exception as e:
             self.module.fail_json(msg=f"Service ID not found for manager {manager}")
-        result['ret'] = True
+        result["ret"] = True
         return result

     def set_service_identification(self, service_id):
@@ -3607,12 +3754,11 @@ def set_session_service(self, sessions_config):
         if sessions_config is None:
-            return {'ret': False, 'msg':
-                    'Must provide sessions_config for SetSessionService command'}
+            return {"ret": False, "msg": "Must provide sessions_config for SetSessionService command"}

         resp = self.patch_request(self.root_uri + self.session_service_uri, sessions_config, check_pyld=True)
-        if resp['ret'] and resp['changed']:
-            resp['msg'] = 'Modified session service'
+        if resp["ret"] and resp["changed"]:
+            resp["msg"] = "Modified session service"
         return resp

     def verify_bios_attributes(self, bios_attributes):
@@ -3633,22 +3779,12 @@ def verify_bios_attributes(self, bios_attributes):
                 wrong_param.update({key: value})

         if wrong_param:
-            return {
-                "ret": False,
-                "msg": f"Wrong parameters are provided: {wrong_param}"
-            }
+            return {"ret": False, "msg": f"Wrong parameters are provided: {wrong_param}"}

         if bios_dict:
-            return {
-                "ret": False,
-                "msg": f"BIOS parameters are not matching: {bios_dict}"
-            }
+            return {"ret": False, "msg": f"BIOS parameters are not matching: {bios_dict}"}

-        return {
-            "ret": True,
-            "changed": False,
-            "msg": "BIOS verification completed"
-        }
+        return {"ret": True, "changed": False, "msg": "BIOS verification completed"}

     def enable_secure_boot(self):
         # This function enables Secure Boot on an OOB controller
@@ -3694,11 +3830,11 @@ def get_hpe_thermal_config(self):
         # Go through list
         for chassis_uri in self.chassis_uris:
             response = self.get_request(self.root_uri + chassis_uri)
-            if response['ret'] is False:
+            if response["ret"] is False:
                 return response
-            result['ret'] = True
-            data = response['data']
-            val = data.get('Oem', {}).get('Hpe', {}).get('ThermalConfiguration')
+            result["ret"] = True
+            data = response["data"]
+            val = data.get("Oem", {}).get("Hpe", {}).get("ThermalConfiguration")
             if val is not None:
                 return {"ret": True, "current_thermal_config": val}
         return {"ret": False}
@@ -3709,10 +3845,10 @@ def get_hpe_fan_percent_min(self):
         # Go through list
         for chassis_uri in self.chassis_uris:
             response = self.get_request(self.root_uri + chassis_uri)
-            if response['ret'] is False:
+            if response["ret"] is False:
                 return response
-            data = response['data']
-            val = data.get('Oem', {}).get('Hpe', {}).get('FanPercentMinimum')
+            data = response["data"]
+            val = data.get("Oem", {}).get("Hpe", {}).get("FanPercentMinimum")
             if val is not None:
                 return {"ret": True, "fan_percent_min": val}
         return {"ret": False}
@@ -3720,133 +3856,135 @@ def get_hpe_fan_percent_min(self):

     def delete_volumes(self, storage_subsystem_id, volume_ids):
         # Find the Storage resource from the requested ComputerSystem resource
         response = self.get_request(self.root_uri + self.systems_uri)
-        if response['ret'] is False:
+        if response["ret"] is False:
             return response
-        data = response['data']
-        storage_uri = data.get('Storage', {}).get('@odata.id')
+        data = response["data"]
+        storage_uri = data.get("Storage", {}).get("@odata.id")
         if storage_uri is None:
-            return {'ret': False, 'msg': 'Storage resource not found'}
+            return {"ret": False, "msg": "Storage resource not found"}

         # Get Storage Collection
         response = self.get_request(self.root_uri + storage_uri)
-        if response['ret'] is False:
+        if response["ret"] is False:
             return response
-        data = response['data']
+        data = response["data"]

         # Collect Storage Subsystems
-        self.storage_subsystems_uris = [i['@odata.id'] for i in response['data'].get('Members', [])]
+        self.storage_subsystems_uris = [i["@odata.id"] for i in response["data"].get("Members", [])]
         if not self.storage_subsystems_uris:
-            return {
-                'ret': False,
-                'msg': "StorageCollection's Members array is either empty or missing"}
+            return {"ret": False, "msg": "StorageCollection's Members array is either empty or missing"}

         # Matching Storage Subsystem ID with user input
         self.storage_subsystem_uri = ""
         for storage_subsystem_uri in self.storage_subsystems_uris:
-            if storage_subsystem_uri.rstrip('/').split('/')[-1] == storage_subsystem_id:
+            if storage_subsystem_uri.rstrip("/").split("/")[-1] == storage_subsystem_id:
                 self.storage_subsystem_uri = storage_subsystem_uri

         if not self.storage_subsystem_uri:
             return {
-                'ret': False,
-                'msg': f"Provided Storage Subsystem ID {storage_subsystem_id} does not exist on the server"}
+                "ret": False,
+                "msg": f"Provided Storage Subsystem ID {storage_subsystem_id} does not exist on the server",
+            }

         # Get Volume Collection
         response = self.get_request(self.root_uri + self.storage_subsystem_uri)
-        if response['ret'] is False:
+        if response["ret"] is False:
             return response
-        data = response['data']
+        data = response["data"]

-        response = self.get_request(self.root_uri + data['Volumes']['@odata.id'])
-        if response['ret'] is False:
+        response = self.get_request(self.root_uri + data["Volumes"]["@odata.id"])
+        if response["ret"] is False:
             return response
-        data = response['data']
+        data = response["data"]

         # Collect Volumes
-        self.volume_uris = [i['@odata.id'] for i in response['data'].get('Members', [])]
+        self.volume_uris = [i["@odata.id"] for i in response["data"].get("Members", [])]
         if not self.volume_uris:
-            return {
-                'ret': True, 'changed': False,
-                'msg': "VolumeCollection's Members array is either empty or missing"}
+            return {"ret": True, "changed": False, "msg": "VolumeCollection's Members array is either empty or missing"}

         # Delete each volume
         for volume in self.volume_uris:
-            if volume.rstrip('/').split('/')[-1] in volume_ids:
+            if volume.rstrip("/").split("/")[-1] in volume_ids:
                 response = self.delete_request(self.root_uri + volume)
-                if response['ret'] is False:
+                if response["ret"] is False:
                     return response

-        return {'ret': True, 'changed': True,
-                'msg': f"The following volumes were deleted: {volume_ids}"}
+        return {"ret": True, "changed": True, "msg": f"The following volumes were deleted: {volume_ids}"}

     def create_volume(self, volume_details, storage_subsystem_id, storage_none_volume_deletion=False):
         # Find the Storage resource from the requested ComputerSystem resource
         response = self.get_request(self.root_uri + self.systems_uri)
-        if response['ret'] is False:
+        if response["ret"] is False:
             return response
-        data = response['data']
-        storage_uri = data.get('Storage', {}).get('@odata.id')
+        data = response["data"]
+        storage_uri = data.get("Storage", {}).get("@odata.id")
         if storage_uri is None:
-            return {'ret': False, 'msg': 'Storage resource not found'}
+            return {"ret": False, "msg": "Storage resource not found"}

         # Get Storage Collection
         response = self.get_request(self.root_uri + storage_uri)
-        if response['ret'] is False:
+        if response["ret"] is False:
             return response
-        data = response['data']
+        data = response["data"]

         # Collect Storage Subsystems
-        self.storage_subsystems_uris = [i['@odata.id'] for i in response['data'].get('Members', [])]
+        self.storage_subsystems_uris = [i["@odata.id"] for i in response["data"].get("Members", [])]
         if not self.storage_subsystems_uris:
-            return {
-                'ret': False,
-                'msg': "StorageCollection's Members array is either empty or missing"}
+            return {"ret": False, "msg": "StorageCollection's Members array is either empty or missing"}

         # Matching Storage Subsystem ID with user input
         self.storage_subsystem_uri = ""
         for storage_subsystem_uri in self.storage_subsystems_uris:
-            if storage_subsystem_uri.rstrip('/').split('/')[-1] == storage_subsystem_id:
+            if storage_subsystem_uri.rstrip("/").split("/")[-1] == storage_subsystem_id:
                 self.storage_subsystem_uri = storage_subsystem_uri

         if not self.storage_subsystem_uri:
             return {
-                'ret': False,
-                'msg': f"Provided Storage Subsystem ID {storage_subsystem_id} does not exist on the server"}
+                "ret": False,
+                "msg": f"Provided Storage Subsystem ID {storage_subsystem_id} does not exist on the server",
+            }

         # Validate input parameters
-        required_parameters = ['RAIDType', 'Drives']
-        allowed_parameters = ['CapacityBytes', 'DisplayName', 'InitializeMethod', 'MediaSpanCount',
-                              'Name', 'ReadCachePolicy', 'StripSizeBytes', 'VolumeUsage', 'WriteCachePolicy']
+        required_parameters = ["RAIDType", "Drives"]
+        allowed_parameters = [
+            "CapacityBytes",
+            "DisplayName",
+            "InitializeMethod",
+            "MediaSpanCount",
+            "Name",
+            "ReadCachePolicy",
+            "StripSizeBytes",
+            "VolumeUsage",
+            "WriteCachePolicy",
+        ]

         for parameter in required_parameters:
             if not volume_details.get(parameter):
-                return {
-                    'ret': False,
-                    'msg': f"{required_parameters} are required parameter to create a volume"}
+                return {"ret": False, "msg": f"{required_parameters} are required parameters to create a volume"}

         # Navigate to the volume uri of the correct storage subsystem
         response = self.get_request(self.root_uri + self.storage_subsystem_uri)
-        if response['ret'] is False:
+        if response["ret"] is False:
             return response
-        data = response['data']
+        data = response["data"]

         # Deleting any volumes of RAIDType None present on the Storage Subsystem
         if storage_none_volume_deletion:
-            response = self.get_request(self.root_uri + data['Volumes']['@odata.id'])
-            if response['ret'] is False:
+            response = self.get_request(self.root_uri + data["Volumes"]["@odata.id"])
+            if response["ret"] is False:
                 return response
-            volume_data = response['data']
+            volume_data = response["data"]

             if "Members" in volume_data:
                 for member in volume_data["Members"]:
-                    response = self.get_request(self.root_uri + member['@odata.id'])
-                    if response['ret'] is False:
+                    response = self.get_request(self.root_uri + member["@odata.id"])
+                    if response["ret"] is False:
                         return response
-                    member_data = response['data']
+                    member_data = response["data"]

                     if member_data["RAIDType"] == "None":
-                        response = self.delete_request(self.root_uri + member['@odata.id'])
-                        if response['ret'] is False:
+                        response = self.delete_request(self.root_uri + member["@odata.id"])
+                        if response["ret"] is False:
                             return response

         # Construct payload and issue POST command to create volume
@@ -3856,12 +3994,11 @@ def create_volume(self, volume_details, storage_subsystem_id, storage_none_volum
             volume_details["Links"]["Drives"].append({"@odata.id": drive})
         del volume_details["Drives"]
         payload = volume_details
-        response = self.post_request(self.root_uri + data['Volumes']['@odata.id'], payload)
-        if response['ret'] is False:
+        response = self.post_request(self.root_uri + data["Volumes"]["@odata.id"], payload)
+        if response["ret"] is False:
             return response

-        return {'ret': True, 'changed': True,
-                'msg': "Volume Created"}
+        return {"ret": True, "changed": True, "msg": "Volume Created"}

     def get_bios_registries(self):
         # Get /redfish/v1
@@ -3874,10 +4011,7 @@ def get_bios_registries(self):
         # Get Registries URI
         if "Bios" not in server_details:
             msg = "Getting BIOS URI failed, Key 'Bios' not found in /redfish/v1/Systems/1/ response: %s"
-            return {
-                "ret": False,
-                "msg": msg % str(server_details)
-            }
+            return {"ret": False, "msg": msg % str(server_details)}

         bios_uri = server_details["Bios"]["@odata.id"]
         bios_resp = self.get_request(self.root_uri + bios_uri)
@@ -3905,85 +4039,65 @@ def get_bios_registries(self):
         rsp_data, rsp_uri = response["rsp_data"], response["rsp_uri"]

         if "RegistryEntries" not in rsp_data:
-            return {
-                "msg": f"'RegistryEntries' not present in {rsp_uri} response, {rsp_data}",
-                "ret": False
-            }
+            return {"msg": f"'RegistryEntries' not present in {rsp_uri} response, {rsp_data}", "ret": False}

-        return {
-            "bios_registry": rsp_data,
-            "bios_registry_uri": rsp_uri,
-            "ret": True
-        }
+        return {"bios_registry": rsp_data, "bios_registry_uri": rsp_uri, "ret": True}

     def check_location_uri(self, resp_data, resp_uri):
         # Get the location URI response
         # return {"msg": self.creds, "ret": False}
-        vendor = self._get_vendor()['Vendor']
+        vendor = self._get_vendor()["Vendor"]
         rsp_uri = ""
-        for loc in resp_data['Location']:
-            if loc['Language'].startswith("en"):
-                rsp_uri = loc['Uri']
-                if vendor == 'HPE':
+        for loc in resp_data["Location"]:
+            if loc["Language"].startswith("en"):
+                rsp_uri = loc["Uri"]
+                if vendor == "HPE":
                     # WORKAROUND
                     # HPE systems with iLO 4 will have BIOS Attribute Registries location URI as a dictionary with key 'extref'
                     # Hence adding condition to fetch the Uri
-                    if isinstance(loc['Uri'], dict) and "extref" in loc['Uri'].keys():
-                        rsp_uri = loc['Uri']['extref']
+                    if isinstance(loc["Uri"], dict) and "extref" in loc["Uri"].keys():
+                        rsp_uri = loc["Uri"]["extref"]
         if not rsp_uri:
             msg = "Language 'en' not found in BIOS Attribute Registries location, URI: %s, response: %s"
-            return {
-                "ret": False,
-                "msg": msg % (resp_uri, str(resp_data))
-            }
+            return {"ret": False, "msg": msg % (resp_uri, str(resp_data))}

         res = self.get_request(self.root_uri + rsp_uri)
-        if res['ret'] is False:
+        if res["ret"] is False:
             # WORKAROUND
             # HPE systems with iLO 4 or iLO 5 compress (gzip) responses for some URIs
             # Hence adding encoding to the header
-            if vendor == 'HPE':
+            if vendor == "HPE":
                 override_headers = {"Accept-Encoding": "gzip"}
                 res = self.get_request(self.root_uri + rsp_uri, override_headers=override_headers)
-        if res['ret']:
-            return {
-                "ret": True,
-                "rsp_data": res["data"],
-                "rsp_uri": rsp_uri
-            }
+        if res["ret"]:
+            return {"ret": True, "rsp_data": res["data"], "rsp_uri": rsp_uri}
         return res

     def get_accountservice_properties(self):
         # Find the AccountService resource
         response = self.get_request(self.root_uri + self.service_root)
-        if response['ret'] is False:
+        if response["ret"] is False:
             return response
-        data = response['data']
+        data = response["data"]
         accountservice_uri = data.get("AccountService", {}).get("@odata.id")
         if accountservice_uri is None:
-            return {'ret': False, 'msg': "AccountService resource not found"}
+            return {"ret": False, "msg": "AccountService resource not found"}

         response = self.get_request(self.root_uri + accountservice_uri)
-        if response['ret'] is False:
+        if response["ret"] is False:
             return response
-        return {
-            'ret': True,
-            'entries': response['data']
-        }
+        return {"ret": True, "entries": response["data"]}

     def get_power_restore_policy(self, systems_uri):
         # Retrieve System resource
         response = self.get_request(self.root_uri + systems_uri)
-        if response['ret'] is False:
+        if response["ret"] is False:
             return response
-        return {
-            'ret': True,
-            'entries': response['data']['PowerRestorePolicy']
-        }
+        return {"ret": True, "entries": response["data"]["PowerRestorePolicy"]}

     def get_multi_power_restore_policy(self):
         return self.aggregate_systems(self.get_power_restore_policy)

     def set_power_restore_policy(self, policy):
-        body = {'PowerRestorePolicy': policy}
+        body = {"PowerRestorePolicy": policy}
         return self.patch_request(self.root_uri + self.systems_uri, body, check_pyld=True)
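All of the RedfishUtils helpers reformatted above share one return convention: a dict carrying a boolean "ret" plus either "msg" (on failure) or payload keys such as "entries". A minimal consumer sketch, assuming an already-initialized RedfishUtils instance `rf_utils` and an AnsibleModule `module` (neither is part of this diff):

    result = rf_utils.get_network_protocols()
    if result["ret"] is False:
        # propagate the helper's error message to the playbook run
        module.fail_json(msg=result.get("msg", "Redfish request failed"))
    module.exit_json(changed=False, redfish_facts=result["entries"])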
diff --git a/plugins/module_utils/redis.py b/plugins/module_utils/redis.py
index d09ca0242fa..dc00330b90c 100644
--- a/plugins/module_utils/redis.py
+++ b/plugins/module_utils/redis.py
@@ -13,6 +13,7 @@
 try:
     from redis import Redis
     from redis import __version__ as redis_version
+
     HAS_REDIS_PACKAGE = True
     REDIS_IMP_ERR = None
 except ImportError:
@@ -21,6 +22,7 @@

 try:
     import certifi
+
     HAS_CERTIFI_PACKAGE = True
     CERTIFI_IMPORT_ERROR = None
 except ImportError:
@@ -32,65 +34,63 @@ def fail_imports(module, needs_certifi=True):
     errors = []
     traceback = []
     if not HAS_REDIS_PACKAGE:
-        errors.append(missing_required_lib('redis'))
+        errors.append(missing_required_lib("redis"))
         traceback.append(REDIS_IMP_ERR)
     if not HAS_CERTIFI_PACKAGE and needs_certifi:
-        errors.append(missing_required_lib('certifi'))
+        errors.append(missing_required_lib("certifi"))
         traceback.append(CERTIFI_IMPORT_ERROR)
     if errors:
-        module.fail_json(msg='\n'.join(errors), traceback='\n'.join(traceback))
+        module.fail_json(msg="\n".join(errors), traceback="\n".join(traceback))


 def redis_auth_argument_spec(tls_default=True):
     return dict(
-        login_host=dict(type='str',
-                        default='localhost',),
-        login_user=dict(type='str'),
-        login_password=dict(type='str',
-                            no_log=True
-                            ),
-        login_port=dict(type='int', default=6379),
-        tls=dict(type='bool',
-                 default=tls_default),
-        validate_certs=dict(type='bool',
-                            default=True
-                            ),
-        ca_certs=dict(type='str'),
-        client_cert_file=dict(type='str'),
-        client_key_file=dict(type='str'),
+        login_host=dict(
+            type="str",
+            default="localhost",
+        ),
+        login_user=dict(type="str"),
+        login_password=dict(type="str", no_log=True),
+        login_port=dict(type="int", default=6379),
+        tls=dict(type="bool", default=tls_default),
+        validate_certs=dict(type="bool", default=True),
+        ca_certs=dict(type="str"),
+        client_cert_file=dict(type="str"),
+        client_key_file=dict(type="str"),
     )


 def redis_auth_params(module):
-    login_host = module.params['login_host']
-    login_user = module.params['login_user']
-    login_password = module.params['login_password']
-    login_port = module.params['login_port']
-    tls = module.params['tls']
-    validate_certs = 'required' if module.params['validate_certs'] else None
-    ca_certs = module.params['ca_certs']
+    login_host = module.params["login_host"]
+    login_user = module.params["login_user"]
+    login_password = module.params["login_password"]
+    login_port = module.params["login_port"]
+    tls = module.params["tls"]
+    validate_certs = "required" if module.params["validate_certs"] else None
+    ca_certs = module.params["ca_certs"]
     if tls and ca_certs is None:
         ca_certs = str(certifi.where())
-    client_cert_file = module.params['client_cert_file']
-    client_key_file = module.params['client_key_file']
-    if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None:
-        module.fail_json(
-            msg='The option `username` in only supported with redis >= 3.4.0.')
-    params = {'host': login_host,
-              'port': login_port,
-              'password': login_password,
-              'ssl_ca_certs': ca_certs,
-              'ssl_certfile': client_cert_file,
-              'ssl_keyfile': client_key_file,
-              'ssl_cert_reqs': validate_certs,
-              'ssl': tls}
+    client_cert_file = module.params["client_cert_file"]
+    client_key_file = module.params["client_key_file"]
+    if tuple(map(int, redis_version.split("."))) < (3, 4, 0) and login_user is not None:
+        module.fail_json(msg="The option `username` is only supported with redis >= 3.4.0.")
+    params = {
+        "host": login_host,
+        "port": login_port,
+        "password": login_password,
+        "ssl_ca_certs": ca_certs,
+        "ssl_certfile": client_cert_file,
+        "ssl_keyfile": client_key_file,
+        "ssl_cert_reqs": validate_certs,
+        "ssl": tls,
+    }
     if login_user is not None:
-        params['username'] = login_user
+        params["username"] = login_user
     return params


 class RedisAnsible:
-    '''Base class for Redis module'''
+    """Base class for Redis module"""

     def __init__(self, module):
         self.module = module
@@ -100,5 +100,5 @@ def _connect(self):
         try:
             return Redis(**redis_auth_params(self.module))
         except Exception as e:
-            self.module.fail_json(msg=f'{e}')
+            self.module.fail_json(msg=f"{e}")
         return None
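The redis.py helpers above are meant to be composed: redis_auth_argument_spec() declares the module options and redis_auth_params() maps them to redis-py keyword arguments. A hedged sketch of the intended wiring (module code like this is not part of the diff; the import path assumes the community.general collection layout):

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils.redis import (
        fail_imports,
        redis_auth_argument_spec,
        redis_auth_params,
    )

    module = AnsibleModule(argument_spec=redis_auth_argument_spec(), supports_check_mode=True)
    fail_imports(module)  # bails out with missing_required_lib() messages if redis/certifi are absent

    from redis import Redis  # safe here: fail_imports() has verified the library is importable
    conn = Redis(**redis_auth_params(module))
    module.exit_json(changed=False, ping=conn.ping())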
diff --git a/plugins/module_utils/remote_management/lxca/common.py b/plugins/module_utils/remote_management/lxca/common.py
index 1f06839d391..ce3efc2a1d4 100644
--- a/plugins/module_utils/remote_management/lxca/common.py
+++ b/plugins/module_utils/remote_management/lxca/common.py
@@ -14,8 +14,10 @@
 from __future__ import annotations

 import traceback
+
 try:
     from pylxca import connect, disconnect
+
     HAS_PYLXCA = True
 except ImportError:
     HAS_PYLXCA = False
@@ -59,12 +61,11 @@ def setup_conn(module):
     """
     lxca_con = None
     try:
-        lxca_con = connect(module.params['auth_url'],
-                           module.params['login_user'],
-                           module.params['login_password'],
-                           "True")
+        lxca_con = connect(
+            module.params["auth_url"], module.params["login_user"], module.params["login_password"], "True"
+        )
     except Exception as exception:
-        error_msg = '; '.join(exception.args)
+        error_msg = "; ".join(exception.args)
         module.fail_json(msg=error_msg, exception=traceback.format_exc())
     return lxca_con
diff --git a/plugins/module_utils/rundeck.py b/plugins/module_utils/rundeck.py
index 7b9f56339af..dca79639bec 100644
--- a/plugins/module_utils/rundeck.py
+++ b/plugins/module_utils/rundeck.py
@@ -1,4 +1,3 @@
-
 # Copyright (c) 2021, Phillipe Smith
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
@@ -12,16 +11,18 @@ def api_argument_spec():
-    '''
+    """
     Creates an argument spec that can be used with
     any module that will be requesting content via Rundeck API
-    '''
+    """
     api_argument_spec = url_argument_spec()
-    api_argument_spec.update(dict(
-        url=dict(required=True, type="str"),
-        api_version=dict(type="int", default=39),
-        api_token=dict(required=True, type="str", no_log=True)
-    ))
+    api_argument_spec.update(
+        dict(
+            url=dict(required=True, type="str"),
+            api_version=dict(type="int", default=39),
+            api_token=dict(required=True, type="str", no_log=True),
+        )
+    )

     return api_argument_spec
@@ -59,21 +60,18 @@ def api_request(module, endpoint, data=None, method="GET", content_type="applica
         headers={
             "Content-Type": content_type,
             "Accept": "application/json",
-            "X-Rundeck-Auth-Token": module.params["api_token"]
-        }
+            "X-Rundeck-Auth-Token": module.params["api_token"],
+        },
     )

     if info["status"] == 403:
-        module.fail_json(msg="Token authorization failed",
-                         execution_info=json.loads(info["body"]))
+        module.fail_json(msg="Token authorization failed", execution_info=json.loads(info["body"]))
     elif info["status"] == 404:
         return None, info
     elif info["status"] == 409:
-        module.fail_json(msg="Job executions limit reached",
-                         execution_info=json.loads(info["body"]))
+        module.fail_json(msg="Job executions limit reached", execution_info=json.loads(info["body"]))
     elif info["status"] >= 500:
-        module.fail_json(msg="Rundeck API error",
-                         execution_info=json.loads(info["body"]))
+        module.fail_json(msg="Rundeck API error", execution_info=json.loads(info["body"]))

     try:
         content = response.read()
@@ -84,14 +82,6 @@ def api_request(module, endpoint, data=None, method="GET", content_type="applica
         json_response = json.loads(content)
         return json_response, info
     except AttributeError as error:
-        module.fail_json(
-            msg="Rundeck API request error",
-            exception=to_native(error),
-            execution_info=info
-        )
+        module.fail_json(msg="Rundeck API request error", exception=to_native(error), execution_info=info)
     except ValueError as error:
-        module.fail_json(
-            msg="No valid JSON response",
-            exception=to_native(error),
-            execution_info=content
-        )
+        module.fail_json(msg="No valid JSON response", exception=to_native(error), execution_info=content)
diff --git a/plugins/module_utils/saslprep.py b/plugins/module_utils/saslprep.py
index b02cedd8748..48ddfff5383 100644
--- a/plugins/module_utils/saslprep.py
+++ b/plugins/module_utils/saslprep.py
@@ -1,4 +1,3 @@
-
 # This code is part of Ansible, but is an independent component.
 # This particular file snippet, and this file snippet only, is BSD licensed.
 # Modules you write using this snippet, which is embedded dynamically by Ansible
@@ -50,7 +49,7 @@ def mapping_profile(string):
         if in_table_c12(c):
             # map non-ASCII space characters
             # (that can be mapped) to Unicode space
-            tmp.append(' ')
+            tmp.append(" ")
         else:
             tmp.append(c)

@@ -67,7 +66,7 @@ def is_ral_string(string):
     # RandALCat character MUST be the last character of the string.
     if in_table_d1(string[0]):
         if not in_table_d1(string[-1]):
-            raise ValueError('RFC3454: incorrect bidirectional RandALCat string.')
+            raise ValueError("RFC3454: incorrect bidirectional RandALCat string.")
         return True
     return False
@@ -95,41 +94,41 @@ def prohibited_output_profile(string):
         # If a string contains any RandALCat characters,
         # The string MUST NOT contain any LCat character:
         is_prohibited_bidi_ch = in_table_d2
-        bidi_table = 'D.2'
+        bidi_table = "D.2"
     else:
         # Forbid RandALCat characters in LCat string:
         is_prohibited_bidi_ch = in_table_d1
-        bidi_table = 'D.1'
+        bidi_table = "D.1"

-    RFC = 'RFC4013'
+    RFC = "RFC4013"
     for c in string:
         # RFC4013 2.3. Prohibited Output:
         if in_table_c12(c):
-            raise ValueError(f'{RFC}: prohibited non-ASCII space characters that cannot be replaced (C.1.2).')
+            raise ValueError(f"{RFC}: prohibited non-ASCII space characters that cannot be replaced (C.1.2).")
         if in_table_c21_c22(c):
-            raise ValueError(f'{RFC}: prohibited control characters (C.2.1).')
+            raise ValueError(f"{RFC}: prohibited control characters (C.2.1).")
         if in_table_c3(c):
-            raise ValueError(f'{RFC}: prohibited private Use characters (C.3).')
+            raise ValueError(f"{RFC}: prohibited private Use characters (C.3).")
         if in_table_c4(c):
-            raise ValueError(f'{RFC}: prohibited non-character code points (C.4).')
+            raise ValueError(f"{RFC}: prohibited non-character code points (C.4).")
         if in_table_c5(c):
-            raise ValueError(f'{RFC}: prohibited surrogate code points (C.5).')
+            raise ValueError(f"{RFC}: prohibited surrogate code points (C.5).")
         if in_table_c6(c):
-            raise ValueError(f'{RFC}: prohibited inappropriate for plain text characters (C.6).')
+            raise ValueError(f"{RFC}: prohibited inappropriate for plain text characters (C.6).")
         if in_table_c7(c):
-            raise ValueError(f'{RFC}: prohibited inappropriate for canonical representation characters (C.7).')
+            raise ValueError(f"{RFC}: prohibited inappropriate for canonical representation characters (C.7).")
         if in_table_c8(c):
-            raise ValueError(f'{RFC}: prohibited change display properties / deprecated characters (C.8).')
+            raise ValueError(f"{RFC}: prohibited change display properties / deprecated characters (C.8).")
         if in_table_c9(c):
-            raise ValueError(f'{RFC}: prohibited tagging characters (C.9).')
+            raise ValueError(f"{RFC}: prohibited tagging characters (C.9).")

         # RFC4013, 2.4. Bidirectional Characters:
         if is_prohibited_bidi_ch(c):
-            raise ValueError(f'{RFC}: prohibited bidi characters ({bidi_table}).')
+            raise ValueError(f"{RFC}: prohibited bidi characters ({bidi_table}).")

         # RFC4013, 2.5. Unassigned Code Points:
         if in_table_a1(c):
-            raise ValueError(f'{RFC}: prohibited unassigned code points (A.1).')
+            raise ValueError(f"{RFC}: prohibited unassigned code points (A.1).")


 def saslprep(string):
@@ -151,16 +150,16 @@ def saslprep(string):
     # comprised of characters from the Unicode [Unicode] character set."
     # Validate the string is a Unicode string
     if not is_unicode_str(string):
-        raise TypeError(f'input must be of type str, not {type(string)}')
+        raise TypeError(f"input must be of type str, not {type(string)}")

     # RFC4013: 2.1. Mapping.
     string = mapping_profile(string)

     # RFC4013: 2.2. Normalization.
     # "This profile specifies using Unicode normalization form KC."
-    string = normalize('NFKC', string)
+    string = normalize("NFKC", string)
     if not string:
-        return ''
+        return ""

     # RFC4013: 2.3. Prohibited Output.
     # RFC4013: 2.4. Bidirectional Characters.
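saslprep() is the entry point modules call before feeding a password into SCRAM or similar hashing. A small usage sketch (illustrative only, not part of the diff):

    # "\u00a0" is a non-ASCII space: mapping_profile() replaces it with " ",
    # so this succeeds and returns "secret password"
    prepared = saslprep("secret\u00a0password")

    try:
        saslprep("tagged\U000e0001text")  # U+E0001 is a prohibited tagging character
    except ValueError as exc:
        print(exc)  # RFC4013: prohibited tagging characters (C.9).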
diff --git a/plugins/module_utils/scaleway.py b/plugins/module_utils/scaleway.py
index 6e6e317d64d..3d472064f0e 100644
--- a/plugins/module_utils/scaleway.py
+++ b/plugins/module_utils/scaleway.py
@@ -22,6 +22,7 @@
 SCALEWAY_SECRET_IMP_ERR: str | None = None
 try:
     from passlib.hash import argon2
+
     HAS_SCALEWAY_SECRET_PACKAGE = True
 except Exception:
     SCALEWAY_SECRET_IMP_ERR = traceback.format_exc()
@@ -30,12 +31,18 @@ def scaleway_argument_spec():
     return dict(
-        api_token=dict(required=True, fallback=(env_fallback, ['SCW_TOKEN', 'SCW_API_KEY', 'SCW_OAUTH_TOKEN', 'SCW_API_TOKEN']),
-                       no_log=True, aliases=['oauth_token']),
-        api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://api.scaleway.com', aliases=['base_url']),
-        api_timeout=dict(type='int', default=30, aliases=['timeout']),
-        query_parameters=dict(type='dict', default={}),
-        validate_certs=dict(default=True, type='bool'),
+        api_token=dict(
+            required=True,
+            fallback=(env_fallback, ["SCW_TOKEN", "SCW_API_KEY", "SCW_OAUTH_TOKEN", "SCW_API_TOKEN"]),
+            no_log=True,
+            aliases=["oauth_token"],
+        ),
+        api_url=dict(
+            fallback=(env_fallback, ["SCW_API_URL"]), default="https://api.scaleway.com", aliases=["base_url"]
+        ),
+        api_timeout=dict(type="int", default=30, aliases=["timeout"]),
+        query_parameters=dict(type="dict", default={}),
+        validate_certs=dict(default=True, type="bool"),
     )
@@ -48,47 +55,42 @@ def scaleway_waitable_resource_argument_spec():


 def payload_from_object(scw_object):
-    return {
-        k: v
-        for k, v in scw_object.items()
-        if k != 'id' and v is not None
-    }
+    return {k: v for k, v in scw_object.items() if k != "id" and v is not None}


 class ScalewayException(Exception):
-
     def __init__(self, message):
         self.message = message


 # Specify a complete Link header, for validation purposes
-R_LINK_HEADER = r'''<[^>]+>;\srel="(first|previous|next|last)"
-    (,<[^>]+>;\srel="(first|previous|next|last)")*'''
+R_LINK_HEADER = r"""<[^>]+>;\srel="(first|previous|next|last)"
+    (,<[^>]+>;\srel="(first|previous|next|last)")*"""
 # Specify a single relation, for iteration and string extraction purposes
 R_RELATION = r'<(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'


 def parse_pagination_link(header):
     if not re.match(R_LINK_HEADER, header, re.VERBOSE):
-        raise ScalewayException('Scaleway API answered with an invalid Link pagination header')
+        raise ScalewayException("Scaleway API answered with an invalid Link pagination header")
     else:
-        relations = header.split(',')
+        relations = header.split(",")
         parsed_relations = {}
         rc_relation = re.compile(R_RELATION)
         for relation in relations:
             match = rc_relation.match(relation)
             if not match:
-                raise ScalewayException('Scaleway API answered with an invalid relation in the Link pagination header')
+                raise ScalewayException("Scaleway API answered with an invalid relation in the Link pagination header")
             data = match.groupdict()
-            parsed_relations[data['relation']] = data['target_IRI']
+            parsed_relations[data["relation"]] = data["target_IRI"]
         return parsed_relations
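For reference, the Link header shape that parse_pagination_link() accepts, and what it returns (values are illustrative):

    header = '<https://api.scaleway.com/instance/v1/zones/fr-par-1/servers?page=2>; rel="next"'
    print(parse_pagination_link(header))
    # -> {'next': 'https://api.scaleway.com/instance/v1/zones/fr-par-1/servers?page=2'}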


 def filter_sensitive_attributes(container, attributes):
-    '''
+    """
     WARNING: This function is effectively private, **do not use it**!
     It will be removed or renamed once changing its name no longer triggers a pylint bug.
-    '''
+    """
     for attr in attributes:
         container[attr] = "SENSITIVE_VALUE"

@@ -100,8 +102,8 @@ class SecretVariables:
     def ensure_scaleway_secret_package(module):
         if not HAS_SCALEWAY_SECRET_PACKAGE:
             module.fail_json(
-                msg=missing_required_lib("passlib[argon2]", url='https://passlib.readthedocs.io/en/stable/'),
-                exception=SCALEWAY_SECRET_IMP_ERR
+                msg=missing_required_lib("passlib[argon2]", url="https://passlib.readthedocs.io/en/stable/"),
+                exception=SCALEWAY_SECRET_IMP_ERR,
             )

     @staticmethod
@@ -110,8 +112,8 @@ def dict_to_list(source_dict):

     @staticmethod
     def list_to_dict(source_list, hashed=False):
-        key_value = 'hashed_value' if hashed else 'value'
-        return {var['key']: var[key_value] for var in source_list}
+        key_value = "hashed_value" if hashed else "value"
+        return {var["key"]: var[key_value] for var in source_list}

     @classmethod
     def decode(cls, secrets_list, values_list):
@@ -140,7 +142,6 @@ def resource_attributes_should_be_changed(target, wished, verifiable_mutable_att


 class Response:
-
     def __init__(self, resp, info):
         self.body = None
         if resp:
@@ -168,32 +169,32 @@ def ok(self):


 class Scaleway:
-
     def __init__(self, module):
         self.module = module
         self.headers = {
-            'X-Auth-Token': self.module.params.get('api_token'),
-            'User-Agent': self.get_user_agent_string(module),
-            'Content-Type': 'application/json',
+            "X-Auth-Token": self.module.params.get("api_token"),
+            "User-Agent": self.get_user_agent_string(module),
+            "Content-Type": "application/json",
         }
         self.name = None

     def get_resources(self):
-        results = self.get(f'/{self.name}')
+        results = self.get(f"/{self.name}")

         if not results.ok:
             raise ScalewayException(
-                f"Error fetching {self.name} ({self.module.params.get('api_url')}/{self.name}) [{results.status_code}: {results.json['message']}]")
+                f"Error fetching {self.name} ({self.module.params.get('api_url')}/{self.name}) [{results.status_code}: {results.json['message']}]"
+            )

         return results.json.get(self.name)

     def _url_builder(self, path, params):
-        d = self.module.params.get('query_parameters')
+        d = self.module.params.get("query_parameters")
         if params is not None:
             d.update(params)
         query_string = urlencode(d, doseq=True)

-        if path[0] == '/':
+        if path[0] == "/":
             path = path[1:]
         return f"{self.module.params.get('api_url')}/{path}?{query_string}"

@@ -204,17 +205,21 @@ def send(self, method, path, data=None, headers=None, params=None):
         if headers is not None:
             self.headers.update(headers)

-        if self.headers['Content-Type'] == "application/json":
+        if self.headers["Content-Type"] == "application/json":
             data = self.module.jsonify(data)

         resp, info = fetch_url(
-            self.module, url, data=data, headers=self.headers, method=method,
-            timeout=self.module.params.get('api_timeout')
+            self.module,
+            url,
+            data=data,
+            headers=self.headers,
+            method=method,
+            timeout=self.module.params.get("api_timeout"),
         )

         # Exceptions in fetch_url may result in a status -1, this ensures a proper error to the user in all cases
-        if info['status'] == -1:
-            self.module.fail_json(msg=info['msg'])
+        if info["status"] == -1:
+            self.module.fail_json(msg=info["msg"])

         return Response(resp, info)

@@ -223,16 +228,16 @@ def get_user_agent_string(module):
         return f"ansible {module.ansible_version} Python {sys.version.split(' ', 1)[0]}"

     def get(self, path, data=None, headers=None, params=None):
-        return self.send(method='GET', path=path, data=data, headers=headers, params=params)
+        return self.send(method="GET", path=path, data=data, headers=headers, params=params)

     def put(self, path, data=None, headers=None, params=None):
-        return self.send(method='PUT', path=path, data=data, headers=headers, params=params)
+        return self.send(method="PUT", path=path, data=data, headers=headers, params=params)

     def post(self, path, data=None, headers=None, params=None):
-        return self.send(method='POST', path=path, data=data, headers=headers, params=params)
+        return self.send(method="POST", path=path, data=data, headers=headers, params=params)

     def delete(self, path, data=None, headers=None, params=None):
-        return self.send(method='DELETE', path=path, data=data, headers=headers, params=params)
+        return self.send(method="DELETE", path=path, data=data, headers=headers, params=params)

     def patch(self, path, data=None, headers=None, params=None):
         return self.send(method="PATCH", path=path, data=data, headers=headers, params=params)
@@ -251,7 +256,7 @@ def fetch_state(self, resource):
             return "absent"

         if not response.ok:
-            msg = f'Error during state fetching: ({response.status_code}) {response.json}'
+            msg = f"Error during state fetching: ({response.status_code}) {response.json}"
             self.module.fail_json(msg=msg)

         try:
@@ -261,13 +266,13 @@ def fetch_state(self, resource):
             self.module.fail_json(msg=f"Could not fetch state in {response.json}")

     def fetch_paginated_resources(self, resource_key, **pagination_kwargs):
-        response = self.get(
-            path=self.api_path,
-            params=pagination_kwargs)
+        response = self.get(path=self.api_path, params=pagination_kwargs)

         status_code = response.status_code
         if not response.ok:
-            self.module.fail_json(msg=f"Error getting {resource_key} [{response.status_code}: {response.json['message']}]")
+            self.module.fail_json(
+                msg=f"Error getting {resource_key} [{response.status_code}: {response.json['message']}]"
+            )

         return response.json[resource_key]

@@ -278,10 +283,10 @@ def fetch_all_resources(self, resource_key, **pagination_kwargs):
         while len(result) != 0:
             result = self.fetch_paginated_resources(resource_key, **pagination_kwargs)
             resources += result
-            if 'page' in pagination_kwargs:
-                pagination_kwargs['page'] += 1
+            if "page" in pagination_kwargs:
+                pagination_kwargs["page"] += 1
             else:
-                pagination_kwargs['page'] = 2
+                pagination_kwargs["page"] = 2

         return resources

@@ -315,95 +320,83 @@ def wait_to_complete_state_transition(self, resource, stable_states, force_wait=


 SCALEWAY_LOCATION = {
-    'par1': {
-        'name': 'Paris 1',
-        'country': 'FR',
-        'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-1',
-        'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-1'
+    "par1": {
+        "name": "Paris 1",
+        "country": "FR",
+        "api_endpoint": "https://api.scaleway.com/instance/v1/zones/fr-par-1",
+        "api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/fr-par-1",
     },
-
-    'EMEA-FR-PAR1': {
-        'name': 'Paris 1',
-        'country': 'FR',
-        'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-1',
-        'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-1'
+    "EMEA-FR-PAR1": {
+        "name": "Paris 1",
+        "country": "FR",
+        "api_endpoint": "https://api.scaleway.com/instance/v1/zones/fr-par-1",
+        "api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/fr-par-1",
     },
-
-    'par2': {
-        'name': 'Paris 2',
-        'country': 'FR',
-        'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-2',
-        'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-2'
+    "par2": {
+        "name": "Paris 2",
+        "country": "FR",
+        "api_endpoint": "https://api.scaleway.com/instance/v1/zones/fr-par-2",
+        "api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/fr-par-2",
     },
-
-    'EMEA-FR-PAR2': {
-        'name': 'Paris 2',
-        'country': 'FR',
-        'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-2',
-        'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-2'
+    "EMEA-FR-PAR2": {
+        "name": "Paris 2",
+        "country": "FR",
+        "api_endpoint": "https://api.scaleway.com/instance/v1/zones/fr-par-2",
+        "api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/fr-par-2",
     },
-
-    'par3': {
-        'name': 'Paris 3',
-        'country': 'FR',
-        'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-3',
-        'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-3'
+    "par3": {
+        "name": "Paris 3",
+        "country": "FR",
+        "api_endpoint": "https://api.scaleway.com/instance/v1/zones/fr-par-3",
+        "api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/fr-par-3",
     },
-
-    'ams1': {
-        'name': 'Amsterdam 1',
-        'country': 'NL',
-        'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-1',
-        'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-1'
+    "ams1": {
+        "name": "Amsterdam 1",
+        "country": "NL",
+        "api_endpoint": "https://api.scaleway.com/instance/v1/zones/nl-ams-1",
+        "api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/nl-ams-1",
     },
-
-    'EMEA-NL-EVS': {
-        'name': 'Amsterdam 1',
-        'country': 'NL',
-        'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-1',
-        'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-1'
+    "EMEA-NL-EVS": {
+        "name": "Amsterdam 1",
+        "country": "NL",
+        "api_endpoint": "https://api.scaleway.com/instance/v1/zones/nl-ams-1",
+        "api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/nl-ams-1",
     },
-
-    'ams2': {
-        'name': 'Amsterdam 2',
-        'country': 'NL',
-        'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-2',
-        'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-2'
+    "ams2": {
+        "name": "Amsterdam 2",
+        "country": "NL",
+        "api_endpoint": "https://api.scaleway.com/instance/v1/zones/nl-ams-2",
+        "api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/nl-ams-2",
    },
-
-    'ams3': {
-        'name': 'Amsterdam 3',
-        'country': 'NL',
-        'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-3',
-        'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-3'
+    "ams3": {
+        "name": "Amsterdam 3",
+        "country": "NL",
+        "api_endpoint": "https://api.scaleway.com/instance/v1/zones/nl-ams-3",
+        "api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/nl-ams-3",
     },
-
-    'waw1': {
-        'name': 'Warsaw 1',
-        'country': 'PL',
-        'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-1',
-        'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-1'
+    "waw1": {
+        "name": "Warsaw 1",
+        "country": "PL",
+        "api_endpoint": "https://api.scaleway.com/instance/v1/zones/pl-waw-1",
+        "api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/pl-waw-1",
     },
-
-    'EMEA-PL-WAW1': {
-        'name': 'Warsaw 1',
-        'country': 'PL',
-        'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-1',
-        'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-1'
+    "EMEA-PL-WAW1": {
+        "name": "Warsaw 1",
+        "country": "PL",
+        "api_endpoint": "https://api.scaleway.com/instance/v1/zones/pl-waw-1",
+        "api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/pl-waw-1",
     },
-
-    'waw2': {
-        'name': 'Warsaw 2',
-        'country': 'PL',
-        'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-2',
-        'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-2'
+    "waw2": {
+        "name": "Warsaw 2",
+        "country": "PL",
+        "api_endpoint": "https://api.scaleway.com/instance/v1/zones/pl-waw-2",
+        "api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/pl-waw-2",
     },
-
-    'waw3': {
-        'name': 'Warsaw 3',
-        'country': 'PL',
-        'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-3',
-        'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-3'
+    "waw3": {
+        "name": "Warsaw 3",
+        "country": "PL",
+        "api_endpoint": "https://api.scaleway.com/instance/v1/zones/pl-waw-3",
+        "api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/pl-waw-3",
     },
 }
diff --git a/plugins/module_utils/snap.py b/plugins/module_utils/snap.py
index d672a7b5195..b0b41c7bd3e 100644
--- a/plugins/module_utils/snap.py
+++ b/plugins/module_utils/snap.py
@@ -8,17 +8,17 @@

 _alias_state_map = dict(
-    present='alias',
-    absent='unalias',
-    info='aliases',
+    present="alias",
+    absent="unalias",
+    info="aliases",
 )

 _state_map = dict(
-    present='install',
-    absent='remove',
-    enabled='enable',
-    disabled='disable',
-    refresh='refresh',
+    present="install",
+    absent="remove",
+    enabled="enable",
+    disabled="disable",
+    refresh="refresh",
 )

@@ -29,20 +29,20 @@ def snap_runner(module, **kwargs):
         arg_formats=dict(
             state_alias=cmd_runner_fmt.as_map(_alias_state_map),  # snap_alias only
             name=cmd_runner_fmt.as_list(),
-            alias=cmd_runner_fmt.as_list(),   # snap_alias only
+            alias=cmd_runner_fmt.as_list(),  # snap_alias only
             state=cmd_runner_fmt.as_map(_state_map),
             _list=cmd_runner_fmt.as_fixed("list"),
             _set=cmd_runner_fmt.as_fixed("set"),
             get=cmd_runner_fmt.as_fixed(["get", "-d"]),
             classic=cmd_runner_fmt.as_bool("--classic"),
-            channel=cmd_runner_fmt.as_func(lambda v: [] if v == 'stable' else ['--channel', f'{v}']),
+            channel=cmd_runner_fmt.as_func(lambda v: [] if v == "stable" else ["--channel", f"{v}"]),
             options=cmd_runner_fmt.as_list(),
             info=cmd_runner_fmt.as_fixed("info"),
             dangerous=cmd_runner_fmt.as_bool("--dangerous"),
             version=cmd_runner_fmt.as_fixed("version"),
         ),
         check_rc=False,
-        **kwargs
+        **kwargs,
     )
     return runner
convert_to_binary_multiple(size_with_unit): if size_with_unit is None: return -1 - valid_units = ['MiB', 'GiB', 'TiB'] + valid_units = ["MiB", "GiB", "TiB"] valid_unit = False for unit in valid_units: if size_with_unit.strip().endswith(unit): @@ -22,47 +22,24 @@ def convert_to_binary_multiple(size_with_unit): if not valid_unit: raise ValueError(f"{size_with_unit} does not have a valid unit. The unit must be one of {valid_units}") - size = size_with_unit.replace(" ", "").split('iB')[0] + size = size_with_unit.replace(" ", "").split("iB")[0] size_kib = basic.human_to_bytes(size) return int(size_kib / (1024 * 1024)) storage_system_spec = { - "storage_system_ip": { - "required": True, - "type": "str" - }, - "storage_system_username": { - "required": True, - "type": "str", - "no_log": True - }, - "storage_system_password": { - "required": True, - "type": "str", - "no_log": True - }, - "secure": { - "type": "bool", - "default": False - } + "storage_system_ip": {"required": True, "type": "str"}, + "storage_system_username": {"required": True, "type": "str", "no_log": True}, + "storage_system_password": {"required": True, "type": "str", "no_log": True}, + "secure": {"type": "bool", "default": False}, } def cpg_argument_spec(): spec = { - "state": { - "required": True, - "choices": ['present', 'absent'], - "type": 'str' - }, - "cpg_name": { - "required": True, - "type": "str" - }, - "domain": { - "type": "str" - }, + "state": {"required": True, "choices": ["present", "absent"], "type": "str"}, + "cpg_name": {"required": True, "type": "str"}, + "domain": {"type": "str"}, "growth_increment": { "type": "str", }, @@ -72,23 +49,10 @@ def cpg_argument_spec(): "growth_warning": { "type": "str", }, - "raid_type": { - "required": False, - "type": "str", - "choices": ['R0', 'R1', 'R5', 'R6'] - }, - "set_size": { - "required": False, - "type": "int" - }, - "high_availability": { - "type": "str", - "choices": ['PORT', 'CAGE', 'MAG'] - }, - "disk_type": { - "type": "str", - "choices": ['FC', 'NL', 'SSD'] - } + "raid_type": {"required": False, "type": "str", "choices": ["R0", "R1", "R5", "R6"]}, + "set_size": {"required": False, "type": "int"}, + "high_availability": {"type": "str", "choices": ["PORT", "CAGE", "MAG"]}, + "disk_type": {"type": "str", "choices": ["FC", "NL", "SSD"]}, } spec.update(storage_system_spec) return spec diff --git a/plugins/module_utils/systemd.py b/plugins/module_utils/systemd.py index 00ce292febf..533ce6e729a 100644 --- a/plugins/module_utils/systemd.py +++ b/plugins/module_utils/systemd.py @@ -22,11 +22,5 @@ def systemd_runner(module, command, **kwargs): unit=cmd_runner_fmt.as_list(), ) - runner = CmdRunner( - module, - command=command, - arg_formats=arg_formats, - check_rc=True, - **kwargs - ) + runner = CmdRunner(module, command=command, arg_formats=arg_formats, check_rc=True, **kwargs) return runner diff --git a/plugins/module_utils/univention_umc.py b/plugins/module_utils/univention_umc.py index 5f72927db1c..7f8f58d8eca 100644 --- a/plugins/module_utils/univention_umc.py +++ b/plugins/module_utils/univention_umc.py @@ -1,4 +1,3 @@ - # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible @@ -44,12 +43,12 @@ __all__ = [ - 'ldap_search', - 'config_registry', - 'base_dn', - 'uldap', - 'umc_module_for_add', - 'umc_module_for_edit', + "ldap_search", + "config_registry", + "base_dn", + "uldap", + "umc_module_for_add", + "umc_module_for_edit", ] @@ -58,6 +57,7 @@ def ldap_module(): import ldap as orig_ldap + return orig_ldap @@ -69,18 +69,18 @@ def _singleton(name: str, constructor): def config_registry(): - def construct(): import univention.config_registry + ucr = univention.config_registry.ConfigRegistry() ucr.load() return ucr - return _singleton('config_registry', construct) + return _singleton("config_registry", construct) def base_dn(): - return config_registry()['ldap/base'] + return config_registry()["ldap/base"] def uldap(): @@ -88,52 +88,59 @@ def uldap(): def construct(): try: - secret_file = open('/etc/ldap.secret', 'r') - bind_dn = f'cn=admin,{base_dn()}' + secret_file = open("/etc/ldap.secret", "r") + bind_dn = f"cn=admin,{base_dn()}" except IOError: # pragma: no cover - secret_file = open('/etc/machine.secret', 'r') + secret_file = open("/etc/machine.secret", "r") bind_dn = config_registry()["ldap/hostdn"] pwd_line = secret_file.readline() - pwd = re.sub('\n', '', pwd_line) + pwd = re.sub("\n", "", pwd_line) import univention.admin.uldap + return univention.admin.uldap.access( - host=config_registry()['ldap/master'], + host=config_registry()["ldap/master"], base=base_dn(), binddn=bind_dn, bindpw=pwd, start_tls=1, ) - return _singleton('uldap', construct) + return _singleton("uldap", construct) def config(): def construct(): import univention.admin.config + return univention.admin.config.config() - return _singleton('config', construct) + + return _singleton("config", construct) def init_modules(): def construct(): import univention.admin.modules + univention.admin.modules.update() return True - return _singleton('modules_initialized', construct) + + return _singleton("modules_initialized", construct) def position_base_dn(): def construct(): import univention.admin.uldap + return univention.admin.uldap.position(base_dn()) - return _singleton('position_base_dn', construct) + + return _singleton("position_base_dn", construct) def ldap_dn_tree_parent(dn, count=1): - dn_array = dn.split(',') + dn_array = dn.split(",") dn_array[0:count] = [] - return ','.join(dn_array) + return ",".join(dn_array) def ldap_search(filter, base=None, attr=None): @@ -142,12 +149,7 @@ def ldap_search(filter, base=None, attr=None): if base is None: base = base_dn() - msgid = uldap().lo.lo.search( - base, - ldap_module().SCOPE_SUBTREE, - filterstr=filter, - attrlist=attr - ) + msgid = uldap().lo.lo.search(base, ldap_module().SCOPE_SUBTREE, filterstr=filter, attrlist=attr) # I used to have a try: finally: here but there seems to be a bug in python # which swallows the KeyboardInterrupt # The abandon now doesn't make too much sense @@ -181,12 +183,13 @@ def module_by_name(module_name_): def construct(): import univention.admin.modules + init_modules() module = univention.admin.modules.get(module_name_) univention.admin.modules.init(uldap(), position_base_dn(), module) return module - return _singleton(f'module/{module_name_}', construct) + return _singleton(f"module/{module_name_}", construct) def get_umc_admin_objects(): @@ -196,6 +199,7 @@ def get_umc_admin_objects(): are not loaded until this function is called. 
""" import univention.admin + return univention.admin.objects @@ -226,14 +230,14 @@ def umc_module_for_add(module, container_dn, superordinate=None): def umc_module_for_edit(module, object_dn, superordinate=None): """Returns an UMC module object prepared for editing an existing entry. - The module is a module specification according to the udm commandline. - Example values are: - * users/user - * shares/share - * groups/group + The module is a module specification according to the udm commandline. + Example values are: + * users/user + * shares/share + * groups/group - The object_dn MUST be the dn of the object itself, not the container! - """ + The object_dn MUST be the dn of the object itself, not the container! + """ mod = module_by_name(module) objects = get_umc_admin_objects() @@ -241,14 +245,7 @@ def umc_module_for_edit(module, object_dn, superordinate=None): position = position_base_dn() position.setDn(ldap_dn_tree_parent(object_dn)) - obj = objects.get( - mod, - config(), - uldap(), - position=position, - superordinate=superordinate, - dn=object_dn - ) + obj = objects.get(mod, config(), uldap(), position=position, superordinate=superordinate, dn=object_dn) obj.open() return obj @@ -257,21 +254,16 @@ def umc_module_for_edit(module, object_dn, superordinate=None): def create_containers_and_parents(container_dn): """Create a container and if needed the parents containers""" import univention.admin.uexceptions as uexcp + if not container_dn.startswith("cn="): raise AssertionError() try: parent = ldap_dn_tree_parent(container_dn) - obj = umc_module_for_add( - 'container/cn', - parent - ) - obj['name'] = container_dn.split(',')[0].split('=')[1] - obj['description'] = "container created by import" + obj = umc_module_for_add("container/cn", parent) + obj["name"] = container_dn.split(",")[0].split("=")[1] + obj["description"] = "container created by import" except uexcp.ldapError: create_containers_and_parents(parent) - obj = umc_module_for_add( - 'container/cn', - parent - ) - obj['name'] = container_dn.split(',')[0].split('=')[1] - obj['description'] = "container created by import" + obj = umc_module_for_add("container/cn", parent) + obj["name"] = container_dn.split(",")[0].split("=")[1] + obj["description"] = "container created by import" diff --git a/plugins/module_utils/utm_utils.py b/plugins/module_utils/utm_utils.py index 89ae06693a4..496d05840d1 100644 --- a/plugins/module_utils/utm_utils.py +++ b/plugins/module_utils/utm_utils.py @@ -19,7 +19,6 @@ class UTMModuleConfigurationError(Exception): - def __init__(self, msg, **args): super().__init__(self, msg) self.msg = msg @@ -37,21 +36,38 @@ class UTMModule(AnsibleModule): See the other modules like utm_aaa_group for example. 
""" - def __init__(self, argument_spec, bypass_checks=False, no_log=False, - mutually_exclusive=None, required_together=None, required_one_of=None, add_file_common_args=False, - supports_check_mode=False, required_if=None): + def __init__( + self, + argument_spec, + bypass_checks=False, + no_log=False, + mutually_exclusive=None, + required_together=None, + required_one_of=None, + add_file_common_args=False, + supports_check_mode=False, + required_if=None, + ): default_specs = dict( - headers=dict(type='dict', required=False, default={}), - utm_host=dict(type='str', required=True), - utm_port=dict(type='int', default=4444), - utm_token=dict(type='str', required=True, no_log=True), - utm_protocol=dict(type='str', required=False, default="https", choices=["https", "http"]), - validate_certs=dict(type='bool', required=False, default=True), - state=dict(default='present', choices=['present', 'absent']) + headers=dict(type="dict", required=False, default={}), + utm_host=dict(type="str", required=True), + utm_port=dict(type="int", default=4444), + utm_token=dict(type="str", required=True, no_log=True), + utm_protocol=dict(type="str", required=False, default="https", choices=["https", "http"]), + validate_certs=dict(type="bool", required=False, default=True), + state=dict(default="present", choices=["present", "absent"]), + ) + super().__init__( + self._merge_specs(default_specs, argument_spec), + bypass_checks, + no_log, + mutually_exclusive, + required_together, + required_one_of, + add_file_common_args, + supports_check_mode, + required_if, ) - super().__init__(self._merge_specs(default_specs, argument_spec), bypass_checks, no_log, - mutually_exclusive, required_together, required_one_of, - add_file_common_args, supports_check_mode, required_if) def _merge_specs(self, default_specs, custom_specs): result = default_specs.copy() @@ -60,7 +76,6 @@ def _merge_specs(self, default_specs, custom_specs): class UTM: - def __init__(self, module, endpoint, change_relevant_keys, info_only=False): """ Initialize UTM Class @@ -71,16 +86,14 @@ def __init__(self, module, endpoint, change_relevant_keys, info_only=False): """ self.info_only = info_only self.module = module - self.request_url = ( - f"{module.params.get('utm_protocol')}://{module.params.get('utm_host')}:{module.params.get('utm_port')}/api/objects/{endpoint}/" - ) + self.request_url = f"{module.params.get('utm_protocol')}://{module.params.get('utm_host')}:{module.params.get('utm_port')}/api/objects/{endpoint}/" """ The change_relevant_keys will be checked for changes to determine whether the object needs to be updated """ self.change_relevant_keys = change_relevant_keys - self.module.params['url_username'] = 'token' - self.module.params['url_password'] = module.params.get('utm_token') + self.module.params["url_username"] = "token" + self.module.params["url_password"] = module.params.get("utm_token") if all(elem in self.change_relevant_keys for elem in module.params.keys()): raise UTMModuleConfigurationError( f"The keys {self.change_relevant_keys} to check are not in the modules keys:\n{list(module.params.keys())}" @@ -89,9 +102,9 @@ def __init__(self, module, endpoint, change_relevant_keys, info_only=False): def execute(self): try: if not self.info_only: - if self.module.params.get('state') == 'present': + if self.module.params.get("state") == "present": self._add() - elif self.module.params.get('state') == 'absent': + elif self.module.params.get("state") == "absent": self._remove() else: self._info() @@ -125,19 +138,23 @@ def _add(self): else: 
data_as_json_string = self.module.jsonify(self.module.params) if result is None: - response, info = fetch_url(self.module, self.request_url, method="POST", - headers=combined_headers, - data=data_as_json_string) + response, info = fetch_url( + self.module, self.request_url, method="POST", headers=combined_headers, data=data_as_json_string + ) if info["status"] >= 400: self.module.fail_json(msg=json.loads(info["body"])) is_changed = True result = self._clean_result(json.loads(response.read())) else: if self._is_object_changed(self.change_relevant_keys, self.module, result): - response, info = fetch_url(self.module, self.request_url + result['_ref'], method="PUT", - headers=combined_headers, - data=data_as_json_string) - if info['status'] >= 400: + response, info = fetch_url( + self.module, + self.request_url + result["_ref"], + method="PUT", + headers=combined_headers, + data=data_as_json_string, + ) + if info["status"] >= 400: self.module.fail_json(msg=json.loads(info["body"])) is_changed = True result = self._clean_result(json.loads(response.read())) @@ -149,9 +166,9 @@ def _combine_headers(self): :return: A combined headers dict """ default_headers = {"Accept": "application/json", "Content-type": "application/json"} - if self.module.params.get('headers') is not None: + if self.module.params.get("headers") is not None: result = default_headers.copy() - result.update(self.module.params.get('headers')) + result.update(self.module.params.get("headers")) else: result = default_headers return result @@ -163,9 +180,13 @@ def _remove(self): is_changed = False info, result = self._lookup_entry(self.module, self.request_url) if result is not None: - response, info = fetch_url(self.module, self.request_url + result['_ref'], method="DELETE", - headers={"Accept": "application/json", "X-Restd-Err-Ack": "all"}, - data=self.module.jsonify(self.module.params)) + response, info = fetch_url( + self.module, + self.request_url + result["_ref"], + method="DELETE", + headers={"Accept": "application/json", "X-Restd-Err-Ack": "all"}, + data=self.module.jsonify(self.module.params), + ) if info["status"] >= 400: self.module.fail_json(msg=json.loads(info["body"])) else: @@ -183,7 +204,7 @@ def _lookup_entry(self, module, request_url): result = None if response is not None: results = json.loads(response.read()) - result = next((d for d in results if d['name'] == module.params.get('name')), None) + result = next((d for d in results if d["name"] == module.params.get("name")), None) return info, result def _clean_result(self, result): @@ -192,14 +213,14 @@ def _clean_result(self, result): :param result: The result from the query :return: The modified result """ - del result['utm_host'] - del result['utm_port'] - del result['utm_token'] - del result['utm_protocol'] - del result['validate_certs'] - del result['url_username'] - del result['url_password'] - del result['state'] + del result["utm_host"] + del result["utm_port"] + del result["utm_token"] + del result["utm_protocol"] + del result["validate_certs"] + del result["url_username"] + del result["url_password"] + del result["state"] return result def _is_object_changed(self, keys, module, result): diff --git a/plugins/module_utils/vardict.py b/plugins/module_utils/vardict.py index cb94408d48b..195ec4d8479 100644 --- a/plugins/module_utils/vardict.py +++ b/plugins/module_utils/vardict.py @@ -94,7 +94,7 @@ def has_changed(self): @property def diff_result(self): if self.diff and self.has_changed: - return {'before': self.initial_value, 'after': self.value} + return 
{"before": self.initial_value, "after": self.value} return def __str__(self): @@ -105,7 +105,19 @@ def __str__(self): class VarDict: - reserved_names = ('__vars__', '_var', 'var', 'set_meta', 'get_meta', 'set', 'output', 'diff', 'facts', 'has_changed', 'as_dict') + reserved_names = ( + "__vars__", + "_var", + "var", + "set_meta", + "get_meta", + "set", + "output", + "diff", + "facts", + "has_changed", + "as_dict", + ) def __init__(self): self.__vars__ = dict() @@ -123,7 +135,7 @@ def __getattr__(self, item): return getattr(super(), item) def __setattr__(self, key, value): - if key == '__vars__': + if key == "__vars__": super().__setattr__(key, value) else: self.set(key, value) @@ -177,11 +189,13 @@ def output(self, verbosity=0): return {n: v.value for n, v in self.__vars__.items() if v.output and v.is_visible(verbosity)} def diff(self, verbosity=0): - diff_results = [(n, v.diff_result) for n, v in self.__vars__.items() if v.diff_result and v.is_visible(verbosity)] + diff_results = [ + (n, v.diff_result) for n, v in self.__vars__.items() if v.diff_result and v.is_visible(verbosity) + ] if diff_results: - before = {n: dr['before'] for n, dr in diff_results} - after = {n: dr['after'] for n, dr in diff_results} - return {'before': before, 'after': after} + before = {n: dr["before"] for n, dr in diff_results} + after = {n: dr["after"] for n, dr in diff_results} + return {"before": before, "after": after} return None def facts(self, verbosity=0): diff --git a/plugins/module_utils/version.py b/plugins/module_utils/version.py index 18cd6d12fe8..496800057c8 100644 --- a/plugins/module_utils/version.py +++ b/plugins/module_utils/version.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Felix Fontein # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later diff --git a/plugins/module_utils/vexata.py b/plugins/module_utils/vexata.py index ed0b11480c2..1ea0ecf17c4 100644 --- a/plugins/module_utils/vexata.py +++ b/plugins/module_utils/vexata.py @@ -19,76 +19,71 @@ def get_version(iocs_json): if not iocs_json: - raise Exception('Invalid IOC json') - active = next((x for x in iocs_json if x['mgmtRole']), None) + raise Exception("Invalid IOC json") + active = next((x for x in iocs_json if x["mgmtRole"]), None) if active is None: - raise Exception('Unable to detect active IOC') - ver = active['swVersion'] - if ver[0] != 'v': - raise Exception('Illegal version string') - ver = ver[1:ver.find('-')] - ver = map(int, ver.split('.')) + raise Exception("Unable to detect active IOC") + ver = active["swVersion"] + if ver[0] != "v": + raise Exception("Illegal version string") + ver = ver[1 : ver.find("-")] + ver = map(int, ver.split(".")) return tuple(ver) def get_array(module): """Return storage array object or fail""" global VXOS_VERSION - array = module.params['array'] - user = module.params.get('user', None) - password = module.params.get('password', None) - validate = module.params.get('validate_certs') + array = module.params["array"] + user = module.params.get("user", None) + password = module.params.get("password", None) + validate = module.params.get("validate_certs") if not HAS_VEXATAPI: - module.fail_json(msg='vexatapi library is required for this module. ' - 'To install, use `pip install vexatapi`') + module.fail_json(msg="vexatapi library is required for this module. 
To install, use `pip install vexatapi`") if user and password: system = VexataAPIProxy(array, user, password, verify_cert=validate) else: - module.fail_json(msg='The user/password are required to be passed in to ' - 'the module as arguments or by setting the ' - 'VEXATA_USER and VEXATA_PASSWORD environment variables.') + module.fail_json( + msg="The user/password are required to be passed in to " + "the module as arguments or by setting the " + "VEXATA_USER and VEXATA_PASSWORD environment variables." + ) try: if system.test_connection(): VXOS_VERSION = get_version(system.iocs()) return system else: - module.fail_json(msg='Test connection to array failed.') + module.fail_json(msg="Test connection to array failed.") except Exception as e: - module.fail_json(msg=f'Vexata API access failed: {e}') + module.fail_json(msg=f"Vexata API access failed: {e}") def argument_spec(): """Return standard base dictionary used for the argument_spec argument in AnsibleModule""" return dict( - array=dict(type='str', - required=True), - user=dict(type='str', - fallback=(env_fallback, ['VEXATA_USER'])), - password=dict(type='str', - no_log=True, - fallback=(env_fallback, ['VEXATA_PASSWORD'])), - validate_certs=dict(type='bool', - required=False, - default=False), + array=dict(type="str", required=True), + user=dict(type="str", fallback=(env_fallback, ["VEXATA_USER"])), + password=dict(type="str", no_log=True, fallback=(env_fallback, ["VEXATA_PASSWORD"])), + validate_certs=dict(type="bool", required=False, default=False), ) def required_together(): """Return the default list used for the required_together argument to AnsibleModule""" - return [['user', 'password']] + return [["user", "password"]] def size_to_MiB(size): """Convert a '[MGT]' string to MiB, return -1 on error.""" quant = size[:-1] exponent = size[-1] - if not quant.isdigit() or exponent not in 'MGT': + if not quant.isdigit() or exponent not in "MGT": return -1 quant = int(quant) - if exponent == 'G': + if exponent == "G": quant <<= 10 - elif exponent == 'T': + elif exponent == "T": quant <<= 20 return quant diff --git a/plugins/module_utils/wdc_redfish_utils.py b/plugins/module_utils/wdc_redfish_utils.py index 2d5a0db7a81..f27102d61b1 100644 --- a/plugins/module_utils/wdc_redfish_utils.py +++ b/plugins/module_utils/wdc_redfish_utils.py @@ -1,4 +1,3 @@ - # Copyright (c) 2022 Western Digital Corporation # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -18,6 +17,7 @@ class WdcRedfishUtils(RedfishUtils): """Extension to RedfishUtils to support WDC enclosures.""" + # Status codes returned by WDC FW Update Status UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE = 0 UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS = 1 @@ -41,19 +41,15 @@ class WdcRedfishUtils(RedfishUtils): CHASSIS_LOCATE = "#Chassis.Locate" CHASSIS_POWER_MODE = "#Chassis.PowerMode" - def __init__(self, - creds, - root_uris, - timeout, - module, - resource_id, - data_modification): - super().__init__(creds=creds, - root_uri=root_uris[0], - timeout=timeout, - module=module, - resource_id=resource_id, - data_modification=data_modification) + def __init__(self, creds, root_uris, timeout, module, resource_id, data_modification): + super().__init__( + creds=creds, + root_uri=root_uris[0], + timeout=timeout, + module=module, + resource_id=resource_id, + data_modification=data_modification, + ) # Update the root URI if we cannot perform a Redfish GET to the first one self._set_root_uri(root_uris) @@ 
-66,14 +62,14 @@ def _set_root_uri(self, root_uris): for root_uri in root_uris: uri = f"{root_uri}/redfish/v1" response = self.get_request(uri) - if response['ret']: + if response["ret"]: self.root_uri = root_uri break def _find_updateservice_resource(self): """Find the update service resource as well as additional WDC-specific resources.""" response = super()._find_updateservice_resource() - if not response['ret']: + if not response["ret"]: return response return self._find_updateservice_additional_uris() @@ -87,47 +83,47 @@ def _is_enclosure_multi_tenant_and_fetch_gen(self): None if unable to determine. """ response = self.get_request(f"{self.root_uri}{self.service_root}Chassis/Enclosure") - if response['ret'] is False: + if response["ret"] is False: return None pattern = r".*-[A,B]" - data = response['data'] - if 'EnclVersion' not in data: - enc_version = 'G1' + data = response["data"] + if "EnclVersion" not in data: + enc_version = "G1" else: - enc_version = data['EnclVersion'] - return re.match(pattern, data['SerialNumber']) is not None, enc_version + enc_version = data["EnclVersion"] + return re.match(pattern, data["SerialNumber"]) is not None, enc_version def _find_updateservice_additional_uris(self): """Find & set WDC-specific update service URIs""" response = self.get_request(self.root_uri + self._update_uri()) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'Actions' not in data: - return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} - if '#UpdateService.SimpleUpdate' not in data['Actions']: - return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} - action = data['Actions']['#UpdateService.SimpleUpdate'] - if 'target' not in action: - return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} - self.simple_update_uri = action['target'] + data = response["data"] + if "Actions" not in data: + return {"ret": False, "msg": "Service does not support SimpleUpdate"} + if "#UpdateService.SimpleUpdate" not in data["Actions"]: + return {"ret": False, "msg": "Service does not support SimpleUpdate"} + action = data["Actions"]["#UpdateService.SimpleUpdate"] + if "target" not in action: + return {"ret": False, "msg": "Service does not support SimpleUpdate"} + self.simple_update_uri = action["target"] # Simple update status URI is not provided via GET /redfish/v1/UpdateService # So we have to hard code it. 
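# A concrete illustration of the hard-coded derivation (URI value assumed for
# illustration, not read from a live enclosure):
#
#     simple_update_uri        = "/redfish/v1/UpdateService/Actions/UpdateService.SimpleUpdate"
#     simple_update_status_uri = "/redfish/v1/UpdateService/Actions/UpdateService.SimpleUpdate/Status"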
self.simple_update_status_uri = f"{self.simple_update_uri}/Status" # FWActivate URI - if 'Oem' not in data['Actions']: - return {'ret': False, 'msg': 'Service does not support OEM operations'} - if 'WDC' not in data['Actions']['Oem']: - return {'ret': False, 'msg': 'Service does not support WDC operations'} - if '#UpdateService.FWActivate' not in data['Actions']['Oem']['WDC']: - return {'ret': False, 'msg': 'Service does not support FWActivate'} - action = data['Actions']['Oem']['WDC']['#UpdateService.FWActivate'] - if 'target' not in action: - return {'ret': False, 'msg': 'Service does not support FWActivate'} - self.firmware_activate_uri = action['target'] - return {'ret': True} + if "Oem" not in data["Actions"]: + return {"ret": False, "msg": "Service does not support OEM operations"} + if "WDC" not in data["Actions"]["Oem"]: + return {"ret": False, "msg": "Service does not support WDC operations"} + if "#UpdateService.FWActivate" not in data["Actions"]["Oem"]["WDC"]: + return {"ret": False, "msg": "Service does not support FWActivate"} + action = data["Actions"]["Oem"]["WDC"]["#UpdateService.FWActivate"] + if "target" not in action: + return {"ret": False, "msg": "Service does not support FWActivate"} + self.firmware_activate_uri = action["target"] + return {"ret": True} def _simple_update_status_uri(self): return self.simple_update_status_uri @@ -142,39 +138,37 @@ def get_simple_update_status(self): """Issue Redfish HTTP GET to return the simple update status""" result = {} response = self.get_request(self.root_uri + self._simple_update_status_uri()) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] - result['entries'] = data + result["ret"] = True + data = response["data"] + result["entries"] = data return result def firmware_activate(self, update_opts): """Perform FWActivate using Redfish HTTP API.""" - creds = update_opts.get('update_creds') + creds = update_opts.get("update_creds") payload = {} if creds: - if creds.get('username'): - payload["Username"] = creds.get('username') - if creds.get('password'): - payload["Password"] = creds.get('password') + if creds.get("username"): + payload["Username"] = creds.get("username") + if creds.get("password"): + payload["Password"] = creds.get("password") # Make sure the service supports FWActivate response = self.get_request(self.root_uri + self._update_uri()) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'Actions' not in data: - return {'ret': False, 'msg': 'Service does not support FWActivate'} + data = response["data"] + if "Actions" not in data: + return {"ret": False, "msg": "Service does not support FWActivate"} response = self.post_request(self.root_uri + self._firmware_activate_uri(), payload) - if response['ret'] is False: + if response["ret"] is False: return response - return {'ret': True, 'changed': True, - 'msg': "FWActivate requested"} + return {"ret": True, "changed": True, "msg": "FWActivate requested"} - def _get_bundle_version(self, - bundle_uri): + def _get_bundle_version(self, bundle_uri): """Get the firmware version from a bundle file, and whether or not it is multi-tenant. Only supports HTTP at this time. Assumes URI exists and is a tarfile. @@ -192,8 +186,7 @@ def _get_bundle_version(self, and bundle generation. Either value will be None if unable to determine. 
:rtype: str or None, bool or None """ - bundle_temp_filename = fetch_file(module=self.module, - url=bundle_uri) + bundle_temp_filename = fetch_file(module=self.module, url=bundle_uri) bundle_version = None is_multi_tenant = None gen = None @@ -210,9 +203,9 @@ # DP firmware bundles are expected to have the value "DPG2" # for cookie1 in the header if cookie1 and cookie1.decode("utf8") == "MMG2" or cookie1.decode("utf8") == "DPG2": - file_name, ext = os.path.splitext(str(bundle_uri.rsplit('/', 1)[1])) + file_name, ext = os.path.splitext(str(bundle_uri.rsplit("/", 1)[1])) # G2 bundle file name: Ultrastar-Data102_3000_SEP_1010-032_2.1.12 - parsedFileName = file_name.split('_') + parsedFileName = file_name.split("_") if len(parsedFileName) == 5: bundle_version = parsedFileName[4] # MM G2 is always single tenant @@ -237,7 +230,7 @@ bin_file = tf.extractfile(bin_filename) bin_file.seek(11) byte_11 = bin_file.read(1) - is_multi_tenant = byte_11 == b'\x80' + is_multi_tenant = byte_11 == b"\x80" gen = "G1" return bundle_version, is_multi_tenant, gen @@ -251,7 +244,7 @@ def uri_is_http(uri): :rtype: bool """ parsed_bundle_uri = urlparse(uri) - return parsed_bundle_uri.scheme.lower() in ['http', 'https'] + return parsed_bundle_uri.scheme.lower() in ["http", "https"] def update_and_activate(self, update_opts): """Update and activate the firmware in a single action. @@ -262,12 +255,18 @@ """ # Convert credentials to standard HTTP format - if update_opts.get("update_creds") is not None and "username" in update_opts["update_creds"] and "password" in update_opts["update_creds"]: + if ( + update_opts.get("update_creds") is not None + and "username" in update_opts["update_creds"] + and "password" in update_opts["update_creds"] + ): update_creds = update_opts["update_creds"] parsed_url = urlparse(update_opts["update_image_uri"]) if update_creds: original_netloc = parsed_url.netloc - parsed_url = parsed_url._replace(netloc=f"{update_creds.get('username')}:{update_creds.get('password')}@{original_netloc}") + parsed_url = parsed_url._replace( + netloc=f"{update_creds.get('username')}:{update_creds.get('password')}@{original_netloc}" + ) update_opts["update_image_uri"] = urlunparse(parsed_url) del update_opts["update_creds"] @@ -275,24 +274,19 @@ bundle_uri = update_opts["update_image_uri"] if not self.uri_is_http(bundle_uri): - return { - 'ret': False, - 'msg': 'Bundle URI must be HTTP or HTTPS' - } + return {"ret": False, "msg": "Bundle URI must be HTTP or HTTPS"} # Make sure IOM is ready for update result = self.get_simple_update_status() - if result['ret'] is False: + if result["ret"] is False: return result - update_status = result['entries'] - status_code = update_status['StatusCode'] - status_description = update_status['Description'] - if status_code not in [ - self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE, - self.UPDATE_STATUS_CODE_FW_UPDATE_FAILED - ]: + update_status = result["entries"] + status_code = update_status["StatusCode"] + status_description = update_status["Description"] + if status_code not in [self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE, self.UPDATE_STATUS_CODE_FW_UPDATE_FAILED]: return { - 'ret': False, - 'msg': f'Target is not ready for FW update. 
Current status: {status_code} ({status_description})", + } # Check the FW version in the bundle file, and compare it to what is already on the IOMs @@ -300,8 +294,8 @@ def update_and_activate(self, update_opts): bundle_firmware_version, is_bundle_multi_tenant, bundle_gen = self._get_bundle_version(bundle_uri) if bundle_firmware_version is None or is_bundle_multi_tenant is None or bundle_gen is None: return { - 'ret': False, - 'msg': 'Unable to extract bundle version or multi-tenant status or generation from update image file' + "ret": False, + "msg": "Unable to extract bundle version or multi-tenant status or generation from update image file", } is_enclosure_multi_tenant, enclosure_gen = self._is_enclosure_multi_tenant_and_fetch_gen() @@ -309,16 +303,13 @@ def update_and_activate(self, update_opts): # Verify that the bundle is correctly multi-tenant or not if is_enclosure_multi_tenant != is_bundle_multi_tenant: return { - 'ret': False, - 'msg': f'Enclosure multi-tenant is {is_enclosure_multi_tenant} but bundle multi-tenant is {is_bundle_multi_tenant}' + "ret": False, + "msg": f"Enclosure multi-tenant is {is_enclosure_multi_tenant} but bundle multi-tenant is {is_bundle_multi_tenant}", } # Verify that the bundle is compliant with the target enclosure if enclosure_gen != bundle_gen: - return { - 'ret': False, - 'msg': f'Enclosure generation is {enclosure_gen} but bundle is of {bundle_gen}' - } + return {"ret": False, "msg": f"Enclosure generation is {enclosure_gen} but bundle is of {bundle_gen}"} # Version number installed on IOMs firmware_inventory = self.get_firmware_inventory() @@ -334,27 +325,22 @@ def update_and_activate(self, update_opts): if is_enclosure_multi_tenant: # For multi-tenant, only one of the IOMs will be affected by the firmware update, # so see if that IOM already has the same firmware version as the bundle. - firmware_already_installed = bundle_firmware_version == self._get_installed_firmware_version_of_multi_tenant_system( - iom_a_firmware_version, - iom_b_firmware_version) + firmware_already_installed = ( + bundle_firmware_version + == self._get_installed_firmware_version_of_multi_tenant_system( + iom_a_firmware_version, iom_b_firmware_version + ) + ) else: # For single-tenant, see if both IOMs already have the same firmware version as the bundle. firmware_already_installed = bundle_firmware_version == iom_a_firmware_version == iom_b_firmware_version # If this FW already installed, return changed: False, and do not update the firmware. if firmware_already_installed: - return { - 'ret': True, - 'changed': False, - 'msg': f'Version {bundle_firmware_version} already installed' - } + return {"ret": True, "changed": False, "msg": f"Version {bundle_firmware_version} already installed"} # Version numbers don't match the bundle -- proceed with update (unless we are in check mode) if self.module.check_mode: - return { - 'ret': True, - 'changed': True, - 'msg': 'Update not performed in check mode.' - } + return {"ret": True, "changed": True, "msg": "Update not performed in check mode."} update_successful = False retry_interval_seconds = 5 max_number_of_retries = 5 @@ -365,14 +351,14 @@ def update_and_activate(self, update_opts): retry_number += 1 result = self.simple_update(update_opts) - if result['ret'] is not True: + if result["ret"] is not True: # Sometimes a timeout error is returned even though the update actually was requested. # Check the update status to see if the update is in progress. 
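# The retry pattern used below, reduced to a minimal standalone sketch
# (do_request stands in for simple_update and is illustrative only; the real
# loop also treats a reported in-progress status as success, since a timed-out
# POST may still have started the update):
#
#     for attempt in range(max_number_of_retries):
#         result = do_request()
#         if result["ret"]:
#             break
#         time.sleep(retry_interval_seconds)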
status_result = self.get_simple_update_status() - if status_result['ret'] is False: + if status_result["ret"] is False: continue - update_status = status_result['entries'] - status_code = update_status['StatusCode'] + update_status = status_result["entries"] + status_code = update_status["StatusCode"] if status_code != self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS: # Update is not in progress -- retry until max number of retries continue @@ -393,20 +379,22 @@ def update_and_activate(self, update_opts): # to "update in progress" status_codes_for_update_incomplete = [ self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS, - self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE + self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE, ] iteration = 0 - while status_code in status_codes_for_update_incomplete \ - and datetime.datetime.now() - start_time < datetime.timedelta(minutes=max_wait_minutes): + while ( + status_code in status_codes_for_update_incomplete + and datetime.datetime.now() - start_time < datetime.timedelta(minutes=max_wait_minutes) + ): if iteration != 0: time.sleep(polling_interval_seconds) iteration += 1 result = self.get_simple_update_status() - if result['ret'] is False: + if result["ret"] is False: continue # We may get timeouts, just keep trying until we give up - update_status = result['entries'] - status_code = update_status['StatusCode'] - status_description = update_status['Description'] + update_status = result["entries"] + status_code = update_status["StatusCode"] + status_description = update_status["Description"] if status_code == self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS: # Once it says update in progress, "ready for update" is no longer a valid status code status_codes_for_update_incomplete = [self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS] @@ -414,16 +402,14 @@ def update_and_activate(self, update_opts): # Update no longer in progress -- verify that it finished if status_code != self.UPDATE_STATUS_CODE_FW_UPDATE_COMPLETED_WAITING_FOR_ACTIVATION: return { - 'ret': False, - 'msg': f'Target is not ready for FW activation after update. Current status: {status_code} ({status_description})'} + "ret": False, + "msg": f"Target is not ready for FW activation after update. Current status: {status_code} ({status_description})", + } self.firmware_activate(update_opts) - return {'ret': True, 'changed': True, - 'msg': "Firmware updated and activation initiated."} + return {"ret": True, "changed": True, "msg": "Firmware updated and activation initiated."} - def _get_installed_firmware_version_of_multi_tenant_system(self, - iom_a_firmware_version, - iom_b_firmware_version): + def _get_installed_firmware_version_of_multi_tenant_system(self, iom_a_firmware_version, iom_b_firmware_version): """Return the version for the active IOM on a multi-tenant system. Only call this on a multi-tenant system. @@ -434,18 +420,18 @@ def _get_installed_firmware_version_of_multi_tenant_system(self, # The one we are on will return valid data. 
# The other will return an error with message "IOM Module A/B cannot be read" which_iom_is_this = None - for iom_letter in ['A', 'B']: + for iom_letter in ["A", "B"]: iom_uri = f"Chassis/IOModule{iom_letter}FRU" response = self.get_request(self.root_uri + self.service_root + iom_uri) - if response['ret'] is False: + if response["ret"] is False: continue - data = response['data'] + data = response["data"] if "Id" in data: # Assume if there is an "Id", it is valid which_iom_is_this = iom_letter break - if which_iom_is_this == 'A': + if which_iom_is_this == "A": return iom_a_firmware_version - elif which_iom_is_this == 'B': + elif which_iom_is_this == "B": return iom_b_firmware_version else: return None @@ -459,11 +445,21 @@ def _get_led_locate_uri(data): return None if WdcRedfishUtils.WDC not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM]: return None - if WdcRedfishUtils.CHASSIS_LOCATE not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]: + if ( + WdcRedfishUtils.CHASSIS_LOCATE + not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC] + ): return None - if WdcRedfishUtils.TARGET not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_LOCATE]: + if ( + WdcRedfishUtils.TARGET + not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][ + WdcRedfishUtils.CHASSIS_LOCATE + ] + ): return None - return data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_LOCATE][WdcRedfishUtils.TARGET] + return data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_LOCATE][ + WdcRedfishUtils.TARGET + ] @staticmethod def _get_power_mode_uri(data): @@ -474,41 +470,51 @@ def _get_power_mode_uri(data): return None if WdcRedfishUtils.WDC not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM]: return None - if WdcRedfishUtils.CHASSIS_POWER_MODE not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]: + if ( + WdcRedfishUtils.CHASSIS_POWER_MODE + not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC] + ): return None - if WdcRedfishUtils.TARGET not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_POWER_MODE]: + if ( + WdcRedfishUtils.TARGET + not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][ + WdcRedfishUtils.CHASSIS_POWER_MODE + ] + ): return None - return data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_POWER_MODE][WdcRedfishUtils.TARGET] + return data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][ + WdcRedfishUtils.CHASSIS_POWER_MODE + ][WdcRedfishUtils.TARGET] def manage_indicator_led(self, command, resource_uri): - key = 'IndicatorLED' + key = "IndicatorLED" - payloads = {'IndicatorLedOn': 'On', 'IndicatorLedOff': 'Off'} - current_led_status_map = {'IndicatorLedOn': 'Blinking', 'IndicatorLedOff': 'Off'} + payloads = {"IndicatorLedOn": "On", "IndicatorLedOff": "Off"} + current_led_status_map = {"IndicatorLedOn": "Blinking", "IndicatorLedOff": "Off"} result = {} response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] if key not in data: - return {'ret': False, 'msg': f"Key {key} not found"} + return {"ret": False, "msg": f"Key {key} not found"} 
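# How callers typically consume these {"ret": ..., "changed": ..., "msg": ...}
# result dicts, as a sketch (module and utils are illustrative names for an
# AnsibleModule and a WdcRedfishUtils instance, not taken from this diff):
#
#     result = utils.manage_indicator_led(command, resource_uri)
#     if result["ret"] is False:
#         module.fail_json(msg=result.get("msg", "request failed"))
#     module.exit_json(changed=result.get("changed", False))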
current_led_status = data[key] if current_led_status == current_led_status_map[command]: - return {'ret': True, 'changed': False} + return {"ret": True, "changed": False} led_locate_uri = self._get_led_locate_uri(data) if led_locate_uri is None: - return {'ret': False, 'msg': 'LED locate URI not found.'} + return {"ret": False, "msg": "LED locate URI not found."} if command in payloads.keys(): - payload = {'LocateState': payloads[command]} + payload = {"LocateState": payloads[command]} response = self.post_request(self.root_uri + led_locate_uri, payload) - if response['ret'] is False: + if response["ret"] is False: return response else: - return {'ret': False, 'msg': 'Invalid command'} + return {"ret": False, "msg": "Invalid command"} return result @@ -519,35 +525,38 @@ def manage_power_mode(self, command, resource_uri=None): if resource_uri is None: resource_uri = self.chassis_uri - payloads = {'PowerModeNormal': 'Normal', 'PowerModeLow': 'Low'} + payloads = {"PowerModeNormal": "Normal", "PowerModeLow": "Low"} requested_power_mode = payloads[command] result = {} response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] # Make sure the response includes Oem.WDC.PowerMode, and get current power mode - power_mode = 'PowerMode' - if WdcRedfishUtils.OEM not in data or WdcRedfishUtils.WDC not in data[WdcRedfishUtils.OEM] or\ - power_mode not in data[WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]: - return {'ret': False, 'msg': 'Resource does not support Oem.WDC.PowerMode'} + power_mode = "PowerMode" + if ( + WdcRedfishUtils.OEM not in data + or WdcRedfishUtils.WDC not in data[WdcRedfishUtils.OEM] + or power_mode not in data[WdcRedfishUtils.OEM][WdcRedfishUtils.WDC] + ): + return {"ret": False, "msg": "Resource does not support Oem.WDC.PowerMode"} current_power_mode = data[WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][power_mode] if current_power_mode == requested_power_mode: - return {'ret': True, 'changed': False} + return {"ret": True, "changed": False} power_mode_uri = self._get_power_mode_uri(data) if power_mode_uri is None: - return {'ret': False, 'msg': 'Power Mode URI not found.'} + return {"ret": False, "msg": "Power Mode URI not found."} if command in payloads.keys(): - payload = {'PowerMode': payloads[command]} + payload = {"PowerMode": payloads[command]} response = self.post_request(self.root_uri + power_mode_uri, payload) - if response['ret'] is False: + if response["ret"] is False: return response else: - return {'ret': False, 'msg': 'Invalid command'} + return {"ret": False, "msg": "Invalid command"} return result diff --git a/plugins/module_utils/xdg_mime.py b/plugins/module_utils/xdg_mime.py index d02002737bb..220d9f9391a 100644 --- a/plugins/module_utils/xdg_mime.py +++ b/plugins/module_utils/xdg_mime.py @@ -11,15 +11,15 @@ def xdg_mime_runner(module, **kwargs): return CmdRunner( module, - command=['xdg-mime'], + command=["xdg-mime"], arg_formats=dict( - default=cmd_runner_fmt.as_fixed('default'), - query=cmd_runner_fmt.as_fixed('query'), + default=cmd_runner_fmt.as_fixed("default"), + query=cmd_runner_fmt.as_fixed("query"), mime_types=cmd_runner_fmt.as_list(), handler=cmd_runner_fmt.as_list(), - version=cmd_runner_fmt.as_fixed('--version'), + version=cmd_runner_fmt.as_fixed("--version"), ), - **kwargs + **kwargs, ) diff --git a/plugins/module_utils/xenserver.py b/plugins/module_utils/xenserver.py index 
becc9bb1097..363fef0fbeb 100644 --- a/plugins/module_utils/xenserver.py +++ b/plugins/module_utils/xenserver.py @@ -13,6 +13,7 @@ XENAPI_IMP_ERR = None try: import XenAPI + HAS_XENAPI = True except ImportError: HAS_XENAPI = False @@ -24,22 +25,19 @@ def xenserver_common_argument_spec(): return dict( - hostname=dict(type='str', - aliases=['host', 'pool'], - default='localhost', - fallback=(env_fallback, ['XENSERVER_HOST']), - ), - username=dict(type='str', - aliases=['user', 'admin'], - default='root', - fallback=(env_fallback, ['XENSERVER_USER'])), - password=dict(type='str', - aliases=['pass', 'pwd'], - no_log=True, - fallback=(env_fallback, ['XENSERVER_PASSWORD'])), - validate_certs=dict(type='bool', - default=True, - fallback=(env_fallback, ['XENSERVER_VALIDATE_CERTS'])), + hostname=dict( + type="str", + aliases=["host", "pool"], + default="localhost", + fallback=(env_fallback, ["XENSERVER_HOST"]), + ), + username=dict( + type="str", aliases=["user", "admin"], default="root", fallback=(env_fallback, ["XENSERVER_USER"]) + ), + password=dict( + type="str", aliases=["pass", "pwd"], no_log=True, fallback=(env_fallback, ["XENSERVER_PASSWORD"]) + ), + validate_certs=dict(type="bool", default=True, fallback=(env_fallback, ["XENSERVER_VALIDATE_CERTS"])), ) @@ -49,7 +47,7 @@ def xapi_to_module_vm_power_state(power_state): "running": "poweredon", "halted": "poweredoff", "suspended": "suspended", - "paused": "paused" + "paused": "paused", } return module_power_state_map.get(power_state) @@ -78,7 +76,7 @@ def is_valid_ip_addr(ip_addr): Returns: bool: True if string is valid IPv4 address, else False. """ - ip_addr_split = ip_addr.split('.') + ip_addr_split = ip_addr.split(".") if len(ip_addr_split) != 4: return False @@ -104,22 +102,24 @@ def is_valid_ip_netmask(ip_netmask): Returns: bool: True if string is valid IPv4 netmask, else False. """ - ip_netmask_split = ip_netmask.split('.') + ip_netmask_split = ip_netmask.split(".") if len(ip_netmask_split) != 4: return False - valid_octet_values = ['0', '128', '192', '224', '240', '248', '252', '254', '255'] + valid_octet_values = ["0", "128", "192", "224", "240", "248", "252", "254", "255"] for ip_netmask_octet in ip_netmask_split: if ip_netmask_octet not in valid_octet_values: return False - if ip_netmask_split[0] != '255' and (ip_netmask_split[1] != '0' or ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'): + if ip_netmask_split[0] != "255" and ( + ip_netmask_split[1] != "0" or ip_netmask_split[2] != "0" or ip_netmask_split[3] != "0" + ): return False - elif ip_netmask_split[1] != '255' and (ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'): + elif ip_netmask_split[1] != "255" and (ip_netmask_split[2] != "0" or ip_netmask_split[3] != "0"): return False - elif ip_netmask_split[2] != '255' and ip_netmask_split[3] != '0': + elif ip_netmask_split[2] != "255" and ip_netmask_split[3] != "0": return False return True @@ -163,7 +163,7 @@ def ip_prefix_to_netmask(ip_prefix, skip_check=False): ip_prefix_valid = is_valid_ip_prefix(ip_prefix) if ip_prefix_valid: - return '.'.join([str((0xffffffff << (32 - int(ip_prefix)) >> i) & 0xff) for i in [24, 16, 8, 0]]) + return ".".join([str((0xFFFFFFFF << (32 - int(ip_prefix)) >> i) & 0xFF) for i in [24, 16, 8, 0]]) else: return "" @@ -201,7 +201,7 @@ def is_valid_ip6_addr(ip6_addr): bool: True if string is valid IPv6 address, else False. 
""" ip6_addr = ip6_addr.lower() - ip6_addr_split = ip6_addr.split(':') + ip6_addr_split = ip6_addr.split(":") if ip6_addr_split[0] == "": ip6_addr_split.pop(0) @@ -220,7 +220,7 @@ def is_valid_ip6_addr(ip6_addr): if len(ip6_addr_split) != 8: return False - ip6_addr_hextet_regex = re.compile('^[0-9a-f]{1,4}$') + ip6_addr_hextet_regex = re.compile("^[0-9a-f]{1,4}$") for ip6_addr_hextet in ip6_addr_split: if not bool(ip6_addr_hextet_regex.match(ip6_addr_hextet)): @@ -337,63 +337,67 @@ def gather_vm_params(module, vm_ref): # We need some params like affinity, VBDs, VIFs, VDIs etc. dereferenced. # Affinity. - if vm_params['affinity'] != "OpaqueRef:NULL": - vm_affinity = xapi_session.xenapi.host.get_record(vm_params['affinity']) - vm_params['affinity'] = vm_affinity + if vm_params["affinity"] != "OpaqueRef:NULL": + vm_affinity = xapi_session.xenapi.host.get_record(vm_params["affinity"]) + vm_params["affinity"] = vm_affinity else: - vm_params['affinity'] = {} + vm_params["affinity"] = {} # VBDs. - vm_vbd_params_list = [xapi_session.xenapi.VBD.get_record(vm_vbd_ref) for vm_vbd_ref in vm_params['VBDs']] + vm_vbd_params_list = [xapi_session.xenapi.VBD.get_record(vm_vbd_ref) for vm_vbd_ref in vm_params["VBDs"]] # List of VBDs is usually sorted by userdevice but we sort just # in case. We need this list sorted by userdevice so that we can # make positional pairing with module.params['disks']. - vm_vbd_params_list = sorted(vm_vbd_params_list, key=lambda vm_vbd_params: int(vm_vbd_params['userdevice'])) - vm_params['VBDs'] = vm_vbd_params_list + vm_vbd_params_list = sorted(vm_vbd_params_list, key=lambda vm_vbd_params: int(vm_vbd_params["userdevice"])) + vm_params["VBDs"] = vm_vbd_params_list # VDIs. - for vm_vbd_params in vm_params['VBDs']: - if vm_vbd_params['VDI'] != "OpaqueRef:NULL": - vm_vdi_params = xapi_session.xenapi.VDI.get_record(vm_vbd_params['VDI']) + for vm_vbd_params in vm_params["VBDs"]: + if vm_vbd_params["VDI"] != "OpaqueRef:NULL": + vm_vdi_params = xapi_session.xenapi.VDI.get_record(vm_vbd_params["VDI"]) else: vm_vdi_params = {} - vm_vbd_params['VDI'] = vm_vdi_params + vm_vbd_params["VDI"] = vm_vdi_params # VIFs. - vm_vif_params_list = [xapi_session.xenapi.VIF.get_record(vm_vif_ref) for vm_vif_ref in vm_params['VIFs']] + vm_vif_params_list = [xapi_session.xenapi.VIF.get_record(vm_vif_ref) for vm_vif_ref in vm_params["VIFs"]] # List of VIFs is usually sorted by device but we sort just # in case. We need this list sorted by device so that we can # make positional pairing with module.params['networks']. - vm_vif_params_list = sorted(vm_vif_params_list, key=lambda vm_vif_params: int(vm_vif_params['device'])) - vm_params['VIFs'] = vm_vif_params_list + vm_vif_params_list = sorted(vm_vif_params_list, key=lambda vm_vif_params: int(vm_vif_params["device"])) + vm_params["VIFs"] = vm_vif_params_list # Networks. - for vm_vif_params in vm_params['VIFs']: - if vm_vif_params['network'] != "OpaqueRef:NULL": - vm_network_params = xapi_session.xenapi.network.get_record(vm_vif_params['network']) + for vm_vif_params in vm_params["VIFs"]: + if vm_vif_params["network"] != "OpaqueRef:NULL": + vm_network_params = xapi_session.xenapi.network.get_record(vm_vif_params["network"]) else: vm_network_params = {} - vm_vif_params['network'] = vm_network_params + vm_vif_params["network"] = vm_network_params # Guest metrics. 
- if vm_params['guest_metrics'] != "OpaqueRef:NULL": - vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_params['guest_metrics']) - vm_params['guest_metrics'] = vm_guest_metrics + if vm_params["guest_metrics"] != "OpaqueRef:NULL": + vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_params["guest_metrics"]) + vm_params["guest_metrics"] = vm_guest_metrics else: - vm_params['guest_metrics'] = {} + vm_params["guest_metrics"] = {} # Detect customization agent. xenserver_version = get_xenserver_version(module) - if (xenserver_version[0] >= 7 and xenserver_version[1] >= 0 and vm_params.get('guest_metrics') and - "feature-static-ip-setting" in vm_params['guest_metrics']['other']): - vm_params['customization_agent'] = "native" + if ( + xenserver_version[0] >= 7 + and xenserver_version[1] >= 0 + and vm_params.get("guest_metrics") + and "feature-static-ip-setting" in vm_params["guest_metrics"]["other"] + ): + vm_params["customization_agent"] = "native" else: - vm_params['customization_agent'] = "custom" + vm_params["customization_agent"] = "custom" except XenAPI.Failure as f: module.fail_json(msg=f"XAPI ERROR: {f.details}") @@ -420,88 +424,90 @@ def gather_vm_facts(module, vm_params): # Gather facts. vm_facts = { - "state": xapi_to_module_vm_power_state(vm_params['power_state'].lower()), - "name": vm_params['name_label'], - "name_desc": vm_params['name_description'], - "uuid": vm_params['uuid'], - "is_template": vm_params['is_a_template'], - "folder": vm_params['other_config'].get('folder', ''), + "state": xapi_to_module_vm_power_state(vm_params["power_state"].lower()), + "name": vm_params["name_label"], + "name_desc": vm_params["name_description"], + "uuid": vm_params["uuid"], + "is_template": vm_params["is_a_template"], + "folder": vm_params["other_config"].get("folder", ""), "hardware": { - "num_cpus": int(vm_params['VCPUs_max']), - "num_cpu_cores_per_socket": int(vm_params['platform'].get('cores-per-socket', '1')), - "memory_mb": int(int(vm_params['memory_dynamic_max']) / 1048576), + "num_cpus": int(vm_params["VCPUs_max"]), + "num_cpu_cores_per_socket": int(vm_params["platform"].get("cores-per-socket", "1")), + "memory_mb": int(int(vm_params["memory_dynamic_max"]) / 1048576), }, "disks": [], "cdrom": {}, "networks": [], - "home_server": vm_params['affinity'].get('name_label', ''), - "domid": vm_params['domid'], - "platform": vm_params['platform'], - "other_config": vm_params['other_config'], - "xenstore_data": vm_params['xenstore_data'], - "customization_agent": vm_params['customization_agent'], + "home_server": vm_params["affinity"].get("name_label", ""), + "domid": vm_params["domid"], + "platform": vm_params["platform"], + "other_config": vm_params["other_config"], + "xenstore_data": vm_params["xenstore_data"], + "customization_agent": vm_params["customization_agent"], } - for vm_vbd_params in vm_params['VBDs']: - if vm_vbd_params['type'] == "Disk": - vm_disk_sr_params = xapi_session.xenapi.SR.get_record(vm_vbd_params['VDI']['SR']) + for vm_vbd_params in vm_params["VBDs"]: + if vm_vbd_params["type"] == "Disk": + vm_disk_sr_params = xapi_session.xenapi.SR.get_record(vm_vbd_params["VDI"]["SR"]) vm_disk_params = { - "size": int(vm_vbd_params['VDI']['virtual_size']), - "name": vm_vbd_params['VDI']['name_label'], - "name_desc": vm_vbd_params['VDI']['name_description'], - "sr": vm_disk_sr_params['name_label'], - "sr_uuid": vm_disk_sr_params['uuid'], - "os_device": vm_vbd_params['device'], - "vbd_userdevice": vm_vbd_params['userdevice'], + "size": 
int(vm_vbd_params["VDI"]["virtual_size"]), + "name": vm_vbd_params["VDI"]["name_label"], + "name_desc": vm_vbd_params["VDI"]["name_description"], + "sr": vm_disk_sr_params["name_label"], + "sr_uuid": vm_disk_sr_params["uuid"], + "os_device": vm_vbd_params["device"], + "vbd_userdevice": vm_vbd_params["userdevice"], } - vm_facts['disks'].append(vm_disk_params) - elif vm_vbd_params['type'] == "CD": - if vm_vbd_params['empty']: - vm_facts['cdrom'].update(type="none") + vm_facts["disks"].append(vm_disk_params) + elif vm_vbd_params["type"] == "CD": + if vm_vbd_params["empty"]: + vm_facts["cdrom"].update(type="none") else: - vm_facts['cdrom'].update(type="iso") - vm_facts['cdrom'].update(iso_name=vm_vbd_params['VDI']['name_label']) + vm_facts["cdrom"].update(type="iso") + vm_facts["cdrom"].update(iso_name=vm_vbd_params["VDI"]["name_label"]) - for vm_vif_params in vm_params['VIFs']: - vm_guest_metrics_networks = vm_params['guest_metrics'].get('networks', {}) + for vm_vif_params in vm_params["VIFs"]: + vm_guest_metrics_networks = vm_params["guest_metrics"].get("networks", {}) vm_network_params = { - "name": vm_vif_params['network']['name_label'], - "mac": vm_vif_params['MAC'], - "vif_device": vm_vif_params['device'], - "mtu": vm_vif_params['MTU'], - "ip": vm_guest_metrics_networks.get(f"{vm_vif_params['device']}/ip", ''), + "name": vm_vif_params["network"]["name_label"], + "mac": vm_vif_params["MAC"], + "vif_device": vm_vif_params["device"], + "mtu": vm_vif_params["MTU"], + "ip": vm_guest_metrics_networks.get(f"{vm_vif_params['device']}/ip", ""), "prefix": "", "netmask": "", "gateway": "", - "ip6": [vm_guest_metrics_networks[ipv6] - for ipv6 in sorted(vm_guest_metrics_networks.keys()) - if ipv6.startswith(f"{vm_vif_params['device']}/ipv6/")], + "ip6": [ + vm_guest_metrics_networks[ipv6] + for ipv6 in sorted(vm_guest_metrics_networks.keys()) + if ipv6.startswith(f"{vm_vif_params['device']}/ipv6/") + ], "prefix6": "", "gateway6": "", } - if vm_params['customization_agent'] == "native": - if vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]: - vm_network_params['prefix'] = vm_vif_params['ipv4_addresses'][0].split('/')[1] - vm_network_params['netmask'] = ip_prefix_to_netmask(vm_network_params['prefix']) + if vm_params["customization_agent"] == "native": + if vm_vif_params["ipv4_addresses"] and vm_vif_params["ipv4_addresses"][0]: + vm_network_params["prefix"] = vm_vif_params["ipv4_addresses"][0].split("/")[1] + vm_network_params["netmask"] = ip_prefix_to_netmask(vm_network_params["prefix"]) - vm_network_params['gateway'] = vm_vif_params['ipv4_gateway'] + vm_network_params["gateway"] = vm_vif_params["ipv4_gateway"] - if vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]: - vm_network_params['prefix6'] = vm_vif_params['ipv6_addresses'][0].split('/')[1] + if vm_vif_params["ipv6_addresses"] and vm_vif_params["ipv6_addresses"][0]: + vm_network_params["prefix6"] = vm_vif_params["ipv6_addresses"][0].split("/")[1] - vm_network_params['gateway6'] = vm_vif_params['ipv6_gateway'] + vm_network_params["gateway6"] = vm_vif_params["ipv6_gateway"] - elif vm_params['customization_agent'] == "custom": - vm_xenstore_data = vm_params['xenstore_data'] + elif vm_params["customization_agent"] == "custom": + vm_xenstore_data = vm_params["xenstore_data"] - for f in ['prefix', 'netmask', 'gateway', 'prefix6', 'gateway6']: + for f in ["prefix", "netmask", "gateway", "prefix6", "gateway6"]: vm_network_params[f] = vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/{f}", "") - 
vm_facts['networks'].append(vm_network_params) + vm_facts["networks"].append(vm_network_params) return vm_facts @@ -535,7 +541,7 @@ def set_vm_power_state(module, vm_ref, power_state, timeout=300): xapi_session = XAPI.connect(module) - power_state = power_state.replace('_', '').replace('-', '').lower() + power_state = power_state.replace("_", "").replace("-", "").lower() vm_power_state_resulting = module_to_xapi_vm_power_state(power_state) state_changed = False @@ -697,7 +703,7 @@ def wait_for_vm_ip_address(module, vm_ref, timeout=300): # consistent with module VM power states. vm_power_state = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower()) - if vm_power_state != 'poweredon': + if vm_power_state != "poweredon": module.fail_json(msg=f"Cannot wait for VM IP address when VM is in state '{vm_power_state}'!") interval = 2 @@ -714,7 +720,7 @@ def wait_for_vm_ip_address(module, vm_ref, timeout=300): if vm_guest_metrics_ref != "OpaqueRef:NULL": vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_guest_metrics_ref) - vm_ips = vm_guest_metrics['networks'] + vm_ips = vm_guest_metrics["networks"] if "0/ip" in vm_ips: break @@ -749,7 +755,10 @@ def get_xenserver_version(module): host_ref = xapi_session.xenapi.session.get_this_host(xapi_session._session) try: - xenserver_version = [int(version_number) for version_number in xapi_session.xenapi.host.get_software_version(host_ref)['product_version'].split('.')] + xenserver_version = [ + int(version_number) + for version_number in xapi_session.xenapi.host.get_software_version(host_ref)["product_version"].split(".") + ] except ValueError: xenserver_version = [0, 0, 0] @@ -758,6 +767,7 @@ def get_xenserver_version(module): class XAPI: """Class for XAPI session management.""" + _xapi_session = None @classmethod @@ -779,15 +789,15 @@ def connect(cls, module, disconnect_atexit=True): if cls._xapi_session is not None: return cls._xapi_session - hostname = module.params['hostname'] - username = module.params['username'] - password = module.params['password'] - ignore_ssl = not module.params['validate_certs'] + hostname = module.params["hostname"] + username = module.params["username"] + password = module.params["password"] + ignore_ssl = not module.params["validate_certs"] - if hostname == 'localhost': + if hostname == "localhost": cls._xapi_session = XenAPI.xapi_local() - username = '' - password = '' + username = "" + password = "" else: # If scheme is not specified we default to http:// because https:// # is problematic in most setups. 
@@ -806,10 +816,10 @@ def connect(cls, module, disconnect_atexit=True): cls._xapi_session = XenAPI.Session(hostname) if not password: - password = '' + password = "" try: - cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, 'Ansible') + cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, "Ansible") except XenAPI.Failure as f: module.fail_json(msg=f"Unable to log on to XenServer at {hostname} as {username}: {f.details}") diff --git a/plugins/module_utils/xfconf.py b/plugins/module_utils/xfconf.py index 8febbf450d3..2903af62cf1 100644 --- a/plugins/module_utils/xfconf.py +++ b/plugins/module_utils/xfconf.py @@ -12,16 +12,16 @@ def _values_fmt(values, value_types): result = [] for value, value_type in zip(values, value_types): - if value_type == 'bool': - value = 'true' if boolean(value) else 'false' - result.extend(['--type', f'{value_type}', '--set', f'{value}']) + if value_type == "bool": + value = "true" if boolean(value) else "false" + result.extend(["--type", f"{value_type}", "--set", f"{value}"]) return result def xfconf_runner(module, **kwargs): runner = CmdRunner( module, - command='xfconf-query', + command="xfconf-query", arg_formats=dict( channel=cmd_runner_fmt.as_opt_val("--channel"), property=cmd_runner_fmt.as_opt_val("--property"), @@ -32,7 +32,7 @@ def xfconf_runner(module, **kwargs): values_and_types=_values_fmt, version=cmd_runner_fmt.as_fixed("--version"), ), - **kwargs + **kwargs, ) return runner diff --git a/plugins/modules/aerospike_migrations.py b/plugins/modules/aerospike_migrations.py index 5258f89c78f..c474a27a474 100644 --- a/plugins/modules/aerospike_migrations.py +++ b/plugins/modules/aerospike_migrations.py @@ -177,42 +177,34 @@ def run_module(): """run ansible module""" module_args = dict( - host=dict(type='str', default='localhost'), - port=dict(type='int', default=3000), - connect_timeout=dict(type='int', default=1000), - consecutive_good_checks=dict(type='int', default=3), - sleep_between_checks=dict(type='int', default=60), - tries_limit=dict(type='int', default=300), - local_only=dict(type='bool', required=True), - min_cluster_size=dict(type='int', default=1), - target_cluster_size=dict(type='int'), - fail_on_cluster_change=dict(type='bool', default=True), - migrate_tx_key=dict(type='str', no_log=False, - default="migrate_tx_partitions_remaining"), - migrate_rx_key=dict(type='str', no_log=False, - default="migrate_rx_partitions_remaining") + host=dict(type="str", default="localhost"), + port=dict(type="int", default=3000), + connect_timeout=dict(type="int", default=1000), + consecutive_good_checks=dict(type="int", default=3), + sleep_between_checks=dict(type="int", default=60), + tries_limit=dict(type="int", default=300), + local_only=dict(type="bool", required=True), + min_cluster_size=dict(type="int", default=1), + target_cluster_size=dict(type="int"), + fail_on_cluster_change=dict(type="bool", default=True), + migrate_tx_key=dict(type="str", no_log=False, default="migrate_tx_partitions_remaining"), + migrate_rx_key=dict(type="str", no_log=False, default="migrate_rx_partitions_remaining"), ) result = dict( changed=False, ) - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) if not LIB_FOUND: - module.fail_json(msg=missing_required_lib('aerospike'), - exception=LIB_FOUND_ERR) + module.fail_json(msg=missing_required_lib("aerospike"), exception=LIB_FOUND_ERR) try: if module.check_mode: has_migrations, 
skip_reason = False, None else: migrations = Migrations(module) - has_migrations, skip_reason = migrations.has_migs( - module.params['local_only'] - ) + has_migrations, skip_reason = migrations.has_migs(module.params["local_only"]) if has_migrations: module.fail_json(msg="Failed.", skip_reason=skip_reason) @@ -223,7 +215,7 @@ def run_module(): class Migrations: - """ Check or wait for migrations between nodes """ + """Check or wait for migrations between nodes""" def __init__(self, module): self.module = module @@ -236,25 +228,20 @@ def __init__(self, module): self._update_cluster_namespace_list() self._build_list = set() self._update_build_list() - self._start_cluster_key = \ - self._cluster_statistics[self._nodes[0]]['cluster_key'] + self._start_cluster_key = self._cluster_statistics[self._nodes[0]]["cluster_key"] def _create_client(self): - """ TODO: add support for auth, tls, and other special features - I won't use those features, so I'll wait until somebody complains - or does it for me (Cross fingers) - create the client object""" + """TODO: add support for auth, tls, and other special features + I won't use those features, so I'll wait until somebody complains + or does it for me (Cross fingers) + create the client object""" config = { - 'hosts': [ - (self.module.params['host'], self.module.params['port']) - ], - 'policies': { - 'timeout': self.module.params['connect_timeout'] - } + "hosts": [(self.module.params["host"], self.module.params["port"])], + "policies": {"timeout": self.module.params["connect_timeout"]}, } return aerospike.client(config) - def _info_cmd_helper(self, cmd, node=None, delimiter=';'): + def _info_cmd_helper(self, cmd, node=None, delimiter=";"): """delimiter is for separate stats that come back, NOT for kv separation which is =""" if node is None: # If no node passed, use the first one (local) @@ -262,9 +249,7 @@ def _info_cmd_helper(self, cmd, node=None, delimiter=';'): data = self._client.info_node(cmd, node) data = data.split("\t") if len(data) != 1 and len(data) != 2: - self.module.fail_json( - msg=f"Unexpected number of values returned in info command: {len(data)}" - ) + self.module.fail_json(msg=f"Unexpected number of values returned in info command: {len(data)}") # data will be in format 'command\touput' data = data[-1] data = data.rstrip("\n\r") @@ -272,10 +257,8 @@ def _info_cmd_helper(self, cmd, node=None, delimiter=';'): # some commands don't return in kv format # so we dont want a dict from those. - if '=' in data: - retval = dict( - metric.split("=", 1) for metric in data_arr - ) + if "=" in data: + retval = dict(metric.split("=", 1) for metric in data_arr) else: # if only 1 element found, and not kv, return just the value. 
if len(data_arr) == 1: @@ -289,7 +272,7 @@ def _update_build_list(self): of build versions.""" self._build_list = set() for node in self._nodes: - build = self._info_cmd_helper('build', node) + build = self._info_cmd_helper("build", node) self._build_list.add(build) # just checks to see if the version is 4.3 or greater @@ -297,26 +280,25 @@ def _can_use_cluster_stable(self): # if version <4.3 we can't use cluster-stable info cmd # regex hack to check for versions beginning with 0-3 or # beginning with 4.0,4.1,4.2 - if re.search(R'^([0-3]\.|4\.[0-2])', min(self._build_list)): + if re.search(R"^([0-3]\.|4\.[0-2])", min(self._build_list)): return False return True def _update_cluster_namespace_list(self): - """ make a unique list of namespaces + """make a unique list of namespaces TODO: does this work on a rolling namespace add/deletion? thankfully if it doesn't, we dont need this on builds >=4.3""" self._namespaces = set() for node in self._nodes: - namespaces = self._info_cmd_helper('namespaces', node) + namespaces = self._info_cmd_helper("namespaces", node) for namespace in namespaces: self._namespaces.add(namespace) def _update_cluster_statistics(self): - """create a dict of nodes with their related stats """ + """create a dict of nodes with their related stats""" self._cluster_statistics = {} for node in self._nodes: - self._cluster_statistics[node] = \ - self._info_cmd_helper('statistics', node) + self._cluster_statistics[node] = self._info_cmd_helper("statistics", node) def _update_nodes_list(self): """get a fresh list of all the nodes""" @@ -330,10 +312,8 @@ def _namespace_has_migs(self, namespace, node=None): If no node passed, uses the local node or the first one in the list""" namespace_stats = self._info_cmd_helper(f"namespace/{namespace}", node) try: - namespace_tx = \ - int(namespace_stats[self.module.params['migrate_tx_key']]) - namespace_rx = \ - int(namespace_stats[self.module.params['migrate_rx_key']]) + namespace_tx = int(namespace_stats[self.module.params["migrate_tx_key"]]) + namespace_rx = int(namespace_stats[self.module.params["migrate_rx_key"]]) except KeyError: self.module.fail_json( msg=( @@ -342,9 +322,7 @@ def _namespace_has_migs(self, namespace, node=None): ) ) except TypeError: - self.module.fail_json( - msg="namespace stat returned was not numerical" - ) + self.module.fail_json(msg="namespace stat returned was not numerical") return namespace_tx != 0 or namespace_rx != 0 def _node_has_migs(self, node=None): @@ -363,22 +341,20 @@ def _cluster_key_consistent(self): with the key being the cluster key.""" cluster_keys = {} for node in self._nodes: - cluster_key = self._cluster_statistics[node][ - 'cluster_key'] + cluster_key = self._cluster_statistics[node]["cluster_key"] if cluster_key not in cluster_keys: cluster_keys[cluster_key] = 1 else: cluster_keys[cluster_key] += 1 - if len(cluster_keys.keys()) == 1 and \ - self._start_cluster_key in cluster_keys: + if len(cluster_keys.keys()) == 1 and self._start_cluster_key in cluster_keys: return True return False def _cluster_migrates_allowed(self): """ensure all nodes have 'migrate_allowed' in their stats output""" for node in self._nodes: - node_stats = self._info_cmd_helper('statistics', node) - allowed = node_stats['migrate_allowed'] + node_stats = self._info_cmd_helper("statistics", node) + allowed = node_stats["migrate_allowed"] if allowed == "false": return False return True @@ -406,11 +382,11 @@ def _is_min_cluster_size(self): minimum cluster size specified in their statistics output""" sizes = set() for node in 
self._cluster_statistics: - sizes.add(int(self._cluster_statistics[node]['cluster_size'])) + sizes.add(int(self._cluster_statistics[node]["cluster_size"])) if (len(sizes)) > 1: # if we are getting more than 1 size, lets say no return False - if (min(sizes)) >= self.module.params['min_cluster_size']: + if (min(sizes)) >= self.module.params["min_cluster_size"]: return True return False @@ -425,16 +401,16 @@ def _cluster_stable(self): the target node's migrations counts must be zero for the provided 'namespace' or all namespaces if 'namespace' is not provided.""" cluster_key = set() - cluster_key.add(self._info_cmd_helper('statistics')['cluster_key']) + cluster_key.add(self._info_cmd_helper("statistics")["cluster_key"]) cmd = "cluster-stable:" - target_cluster_size = self.module.params['target_cluster_size'] + target_cluster_size = self.module.params["target_cluster_size"] if target_cluster_size is not None: cmd = f"{cmd}size={target_cluster_size};" for node in self._nodes: try: cluster_key.add(self._info_cmd_helper(cmd, node)) except aerospike.exception.ServerError as e: # unstable-cluster is returned in form of Exception - if 'unstable-cluster' in e.msg: + if "unstable-cluster" in e.msg: return False raise e if len(cluster_key) == 1: @@ -458,11 +434,9 @@ def has_migs(self, local=True): consecutive_good = 0 try_num = 0 skip_reason = list() - while \ - try_num < int(self.module.params['tries_limit']) and \ - consecutive_good < \ - int(self.module.params['consecutive_good_checks']): - + while try_num < int(self.module.params["tries_limit"]) and consecutive_good < int( + self.module.params["consecutive_good_checks"] + ): self._update_nodes_list() self._update_cluster_statistics() @@ -470,33 +444,26 @@ def has_migs(self, local=True): # we probably want to skip & sleep instead of failing entirely stable, reason = self._cluster_good_state() if stable is not True: - skip_reason.append( - f"Skipping on try#{try_num} for reason:{reason}" - ) + skip_reason.append(f"Skipping on try#{try_num} for reason:{reason}") else: if self._can_use_cluster_stable(): if self._cluster_stable(): consecutive_good += 1 else: consecutive_good = 0 - skip_reason.append( - f"Skipping on try#{try_num} for reason: cluster_stable" - ) + skip_reason.append(f"Skipping on try#{try_num} for reason: cluster_stable") elif self._has_migs(local): # print("_has_migs") - skip_reason.append( - f"Skipping on try#{try_num} for reason: migrations" - ) + skip_reason.append(f"Skipping on try#{try_num} for reason: migrations") consecutive_good = 0 else: consecutive_good += 1 - if consecutive_good == self.module.params[ - 'consecutive_good_checks']: + if consecutive_good == self.module.params["consecutive_good_checks"]: break try_num += 1 - sleep(self.module.params['sleep_between_checks']) + sleep(self.module.params["sleep_between_checks"]) # print(skip_reason) - if consecutive_good == self.module.params['consecutive_good_checks']: + if consecutive_good == self.module.params["consecutive_good_checks"]: return False, None return True, skip_reason @@ -506,5 +473,5 @@ def main(): run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/airbrake_deployment.py b/plugins/modules/airbrake_deployment.py index 745d3fce5d2..2a7bc4abfcf 100644 --- a/plugins/modules/airbrake_deployment.py +++ b/plugins/modules/airbrake_deployment.py @@ -105,19 +105,19 @@ # Module execution. 
# -def main(): +def main(): module = AnsibleModule( argument_spec=dict( - project_id=dict(required=True, no_log=True, type='str'), - project_key=dict(required=True, no_log=True, type='str'), - environment=dict(required=True, type='str'), - user=dict(type='str'), - repo=dict(type='str'), - revision=dict(type='str'), - version=dict(type='str'), - url=dict(default='https://api.airbrake.io/api/v4/projects/', type='str'), - validate_certs=dict(default=True, type='bool'), + project_id=dict(required=True, no_log=True, type="str"), + project_key=dict(required=True, no_log=True, type="str"), + environment=dict(required=True, type="str"), + user=dict(type="str"), + repo=dict(type="str"), + revision=dict(type="str"), + version=dict(type="str"), + url=dict(default="https://api.airbrake.io/api/v4/projects/", type="str"), + validate_certs=dict(default=True, type="bool"), ), supports_check_mode=True, ) @@ -150,17 +150,16 @@ def main(): json_body = module.jsonify(params) # Build header - headers = {'Content-Type': 'application/json'} + headers = {"Content-Type": "application/json"} # Notify Airbrake of deploy - response, info = fetch_url(module, url, data=json_body, - headers=headers, method='POST') + response, info = fetch_url(module, url, data=json_body, headers=headers, method="POST") - if info['status'] == 200 or info['status'] == 201: + if info["status"] == 200 or info["status"] == 201: module.exit_json(changed=True) else: module.fail_json(msg=f"HTTP result code: {info['status']} connecting to {url}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/aix_devices.py b/plugins/modules/aix_devices.py index 8176b740bb9..283bb2eb299 100644 --- a/plugins/modules/aix_devices.py +++ b/plugins/modules/aix_devices.py @@ -140,8 +140,8 @@ def _check_device(module, device): Returns: bool, device state """ - lsdev_cmd = module.get_bin_path('lsdev', True) - rc, lsdev_out, err = module.run_command([lsdev_cmd, '-C', '-l', device]) + lsdev_cmd = module.get_bin_path("lsdev", True) + rc, lsdev_out, err = module.run_command([lsdev_cmd, "-C", "-l", device]) if rc != 0: module.fail_json(msg="Failed to run lsdev", rc=rc, err=err) @@ -165,15 +165,14 @@ def _check_device_attr(module, device, attr): Returns: """ - lsattr_cmd = module.get_bin_path('lsattr', True) - rc, lsattr_out, err = module.run_command([lsattr_cmd, '-El', device, '-a', f"{attr}"]) + lsattr_cmd = module.get_bin_path("lsattr", True) + rc, lsattr_out, err = module.run_command([lsattr_cmd, "-El", device, "-a", f"{attr}"]) - hidden_attrs = ['delalias4', 'delalias6'] + hidden_attrs = ["delalias4", "delalias6"] if rc == 255: - if attr in hidden_attrs: - current_param = '' + current_param = "" else: current_param = None @@ -187,17 +186,17 @@ def _check_device_attr(module, device, attr): def discover_device(module, device): - """ Discover AIX devices.""" - cfgmgr_cmd = module.get_bin_path('cfgmgr', True) + """Discover AIX devices.""" + cfgmgr_cmd = module.get_bin_path("cfgmgr", True) if device is not None: device = f"-l {device}" else: - device = '' + device = "" changed = True - msg = '' + msg = "" if not module.check_mode: rc, cfgmgr_out, err = module.run_command([cfgmgr_cmd, device]) changed = True @@ -207,12 +206,12 @@ def discover_device(module, device): def change_device_attr(module, attributes, device, force): - """ Change AIX device attribute. 
""" + """Change AIX device attribute.""" attr_changed = [] attr_not_changed = [] attr_invalid = [] - chdev_cmd = module.get_bin_path('chdev', True) + chdev_cmd = module.get_bin_path("chdev", True) for attr in list(attributes.keys()): new_param = attributes[attr] @@ -223,9 +222,9 @@ def change_device_attr(module, attributes, device, force): elif current_param != new_param: if force: - cmd = [chdev_cmd, '-l', device, '-a', f"{attr}={attributes[attr]}", f"{force}"] + cmd = [chdev_cmd, "-l", device, "-a", f"{attr}={attributes[attr]}", f"{force}"] else: - cmd = [chdev_cmd, '-l', device, '-a', f"{attr}={attributes[attr]}"] + cmd = [chdev_cmd, "-l", device, "-a", f"{attr}={attributes[attr]}"] if not module.check_mode: rc, chdev_out, err = module.run_command(cmd) @@ -241,17 +240,17 @@ def change_device_attr(module, attributes, device, force): attr_changed_msg = f"Attributes changed: {','.join(attr_changed)}. " else: changed = False - attr_changed_msg = '' + attr_changed_msg = "" if len(attr_not_changed) > 0: attr_not_changed_msg = f"Attributes already set: {','.join(attr_not_changed)}. " else: - attr_not_changed_msg = '' + attr_not_changed_msg = "" if len(attr_invalid) > 0: attr_invalid_msg = f"Invalid attributes: {', '.join(attr_invalid)} " else: - attr_invalid_msg = '' + attr_invalid_msg = "" msg = f"{attr_changed_msg}{attr_not_changed_msg}{attr_invalid_msg}" @@ -259,25 +258,18 @@ def change_device_attr(module, attributes, device, force): def remove_device(module, device, force, recursive, state): - """ Puts device in defined state or removes device. """ + """Puts device in defined state or removes device.""" - state_opt = { - 'removed': '-d', - 'absent': '-d', - 'defined': '' - } + state_opt = {"removed": "-d", "absent": "-d", "defined": ""} - recursive_opt = { - True: '-R', - False: '' - } + recursive_opt = {True: "-R", False: ""} recursive = recursive_opt[recursive] state = state_opt[state] changed = True - msg = '' - rmdev_cmd = module.get_bin_path('rmdev', True) + msg = "" + rmdev_cmd = module.get_bin_path("rmdev", True) if not module.check_mode: if state: @@ -294,81 +286,80 @@ def remove_device(module, device, force, recursive, state): def main(): - module = AnsibleModule( argument_spec=dict( - attributes=dict(type='dict'), - device=dict(type='str'), - force=dict(type='bool', default=False), - recursive=dict(type='bool', default=False), - state=dict(type='str', default='available', choices=['available', 'defined', 'removed']), + attributes=dict(type="dict"), + device=dict(type="str"), + force=dict(type="bool", default=False), + recursive=dict(type="bool", default=False), + state=dict(type="str", default="available", choices=["available", "defined", "removed"]), ), supports_check_mode=True, ) force_opt = { - True: '-f', - False: '', + True: "-f", + False: "", } - attributes = module.params['attributes'] - device = module.params['device'] - force = force_opt[module.params['force']] - recursive = module.params['recursive'] - state = module.params['state'] + attributes = module.params["attributes"] + device = module.params["device"] + force = force_opt[module.params["force"]] + recursive = module.params["recursive"] + state = module.params["state"] result = dict( changed=False, - msg='', + msg="", ) - if state == 'available' or state == 'present': + if state == "available" or state == "present": if attributes: # change attributes on device device_status, device_state = _check_device(module, device) if device_status: - result['changed'], result['msg'] = change_device_attr(module, attributes, 
device, force)
+                result["changed"], result["msg"] = change_device_attr(module, attributes, device, force)
             else:
-                result['msg'] = f"Device {device} does not exist."
+                result["msg"] = f"Device {device} does not exist."
         else:
             # discovery devices (cfgmgr)
-            if device and device != 'all':
+            if device and device != "all":
                 device_status, device_state = _check_device(module, device)
                 if device_status:
                     # run cfgmgr on specific device
-                    result['changed'], result['msg'] = discover_device(module, device)
+                    result["changed"], result["msg"] = discover_device(module, device)
                 else:
-                    result['msg'] = f"Device {device} does not exist."
+                    result["msg"] = f"Device {device} does not exist."
             else:
-                result['changed'], result['msg'] = discover_device(module, device)
+                result["changed"], result["msg"] = discover_device(module, device)
 
-    elif state == 'removed' or state == 'absent' or state == 'defined':
+    elif state == "removed" or state == "absent" or state == "defined":
         if not device:
-            result['msg'] = "device is required to removed or defined state."
+            result["msg"] = "device is required for removed or defined state."
         else:
             # Remove device
             check_device, device_state = _check_device(module, device)
             if check_device:
-                if state == 'defined' and device_state == 'Defined':
-                    result['changed'] = False
-                    result['msg'] = f'Device {device} already in Defined'
+                if state == "defined" and device_state == "Defined":
+                    result["changed"] = False
+                    result["msg"] = f"Device {device} already in Defined"
                 else:
-                    result['changed'], result['msg'] = remove_device(module, device, force, recursive, state)
+                    result["changed"], result["msg"] = remove_device(module, device, force, recursive, state)
             else:
-                result['msg'] = f"Device {device} does not exist."
+                result["msg"] = f"Device {device} does not exist."
 
     else:
-        result['msg'] = f"Unexpected state {state}."
+        result["msg"] = f"Unexpected state {state}."
         module.fail_json(**result)
 
     module.exit_json(**result)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/aix_filesystem.py b/plugins/modules/aix_filesystem.py
index 3b90848fe60..2019e2bf834 100644
--- a/plugins/modules/aix_filesystem.py
+++ b/plugins/modules/aix_filesystem.py
@@ -177,7 +177,7 @@ def _fs_exists(module, filesystem):
     :param community.general.filesystem: filesystem name.
     :return: True or False.
     """
-    lsfs_cmd = module.get_bin_path('lsfs', True)
+    lsfs_cmd = module.get_bin_path("lsfs", True)
     rc, lsfs_out, err = module.run_command([lsfs_cmd, "-l", filesystem])
     if rc == 1:
         if re.findall("No record matching", err):
@@ -187,7 +187,6 @@ def _fs_exists(module, filesystem):
         module.fail_json(msg=f"Failed to run lsfs. Error message: {err}")
 
     else:
-
         
""" - lsvg_cmd = module.get_bin_path('lsvg', True) + lsvg_cmd = module.get_bin_path("lsvg", True) rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"]) if rc != 0: module.fail_json(msg=f"Failed executing {lsvg_cmd} command.") @@ -243,9 +242,9 @@ def _validate_vg(module, vg): def resize_fs(module, filesystem, size): - """ Resize LVM file system. """ + """Resize LVM file system.""" - chfs_cmd = module.get_bin_path('chfs', True) + chfs_cmd = module.get_bin_path("chfs", True) if not module.check_mode: rc, chfs_out, err = module.run_command([chfs_cmd, "-a", f"size={size}", filesystem]) @@ -253,14 +252,14 @@ def resize_fs(module, filesystem, size): changed = False return changed, chfs_out elif rc != 0: - if re.findall('Maximum allocation for logical', err): + if re.findall("Maximum allocation for logical", err): changed = False return changed, err else: module.fail_json(msg=f"Failed to run chfs. Error message: {err}") else: - if re.findall('The filesystem size is already', chfs_out): + if re.findall("The filesystem size is already", chfs_out): changed = False else: changed = True @@ -268,48 +267,50 @@ def resize_fs(module, filesystem, size): return changed, chfs_out else: changed = True - msg = '' + msg = "" return changed, msg def create_fs( - module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, - account_subsystem, permissions, nfs_server, attributes): - """ Create LVM file system or NFS remote mount point. """ - - attributes = ' -a '.join(attributes) + module, + fs_type, + filesystem, + vg, + device, + size, + mount_group, + auto_mount, + account_subsystem, + permissions, + nfs_server, + attributes, +): + """Create LVM file system or NFS remote mount point.""" + + attributes = " -a ".join(attributes) # Parameters definition. - account_subsys_opt = { - True: '-t yes', - False: '-t no' - } + account_subsys_opt = {True: "-t yes", False: "-t no"} if nfs_server is not None: - auto_mount_opt = { - True: '-A', - False: '-a' - } + auto_mount_opt = {True: "-A", False: "-a"} else: - auto_mount_opt = { - True: '-A yes', - False: '-A no' - } + auto_mount_opt = {True: "-A yes", False: "-A no"} if size is None: - size = '' + size = "" else: size = f"-a size={size}" if device is None: - device = '' + device = "" else: device = f"-d {device}" if vg is None: - vg = '' + vg = "" else: vg_state, msg = _validate_vg(module, vg) if vg_state: @@ -320,7 +321,7 @@ def create_fs( return changed, msg if mount_group is None: - mount_group = '' + mount_group = "" else: mount_group = f"-u {mount_group}" @@ -330,9 +331,11 @@ def create_fs( if nfs_server is not None: # Creates a NFS file system. - mknfsmnt_cmd = module.get_bin_path('mknfsmnt', True) + mknfsmnt_cmd = module.get_bin_path("mknfsmnt", True) if not module.check_mode: - rc, mknfsmnt_out, err = module.run_command([mknfsmnt_cmd, "-f", filesystem, device, "-h", nfs_server, "-t", permissions, auto_mount, "-w", "bg"]) + rc, mknfsmnt_out, err = module.run_command( + [mknfsmnt_cmd, "-f", filesystem, device, "-h", nfs_server, "-t", permissions, auto_mount, "-w", "bg"] + ) if rc != 0: module.fail_json(msg=f"Failed to run mknfsmnt. Error message: {err}") else: @@ -342,13 +345,13 @@ def create_fs( return changed, msg else: changed = True - msg = '' + msg = "" return changed, msg else: # Creates a LVM file system. 
-    crfs_cmd = module.get_bin_path('crfs', True)
+    crfs_cmd = module.get_bin_path("crfs", True)
     if not module.check_mode:
         cmd = [crfs_cmd]
@@ -401,7 +404,8 @@ def create_fs(
 
         if rc == 10:
             module.exit_json(
-                msg=f"Using a existent previously defined logical volume, volume group needs to be empty. {err}")
+                msg=f"Using an existing previously defined logical volume; the volume group needs to be empty. {err}"
+            )
         elif rc != 0:
             module.fail_json(msg=f"Failed to run {cmd}. Error message: {err}")
@@ -411,23 +415,20 @@ def create_fs(
             return changed, crfs_out
         else:
             changed = True
-            msg = ''
+            msg = ""
             return changed, msg
 
 
 def remove_fs(module, filesystem, rm_mount_point):
-    """ Remove an LVM file system or NFS entry. """
+    """Remove an LVM file system or NFS entry."""
 
     # Command parameters.
-    rm_mount_point_opt = {
-        True: '-r',
-        False: ''
-    }
+    rm_mount_point_opt = {True: "-r", False: ""}
 
     rm_mount_point = rm_mount_point_opt[rm_mount_point]
 
-    rmfs_cmd = module.get_bin_path('rmfs', True)
+    rmfs_cmd = module.get_bin_path("rmfs", True)
     if not module.check_mode:
         cmd = [rmfs_cmd, "-r", rm_mount_point, filesystem]
         rc, rmfs_out, err = module.run_command(cmd)
@@ -442,14 +443,14 @@ def remove_fs(module, filesystem, rm_mount_point):
             return changed, msg
     else:
         changed = True
-        msg = ''
+        msg = ""
         return changed, msg
 
 
 def mount_fs(module, filesystem):
-    """ Mount a file system. """
-    mount_cmd = module.get_bin_path('mount', True)
+    """Mount a file system."""
+    mount_cmd = module.get_bin_path("mount", True)
 
     if not module.check_mode:
         rc, mount_out, err = module.run_command([mount_cmd, filesystem])
@@ -462,14 +463,14 @@ def mount_fs(module, filesystem):
         return changed, msg
     else:
         changed = True
-        msg = ''
+        msg = ""
         return changed, msg
 
 
 def unmount_fs(module, filesystem):
-    """ Unmount a file system."""
-    unmount_cmd = module.get_bin_path('unmount', True)
+    """Unmount a file system."""
+    unmount_cmd = module.get_bin_path("unmount", True)
 
     if not module.check_mode:
         rc, unmount_out, err = module.run_command([unmount_cmd, filesystem])
@@ -482,7 +483,7 @@ def unmount_fs(module, filesystem):
         return changed, msg
     else:
         changed = True
-        msg = ''
+        msg = ""
         return changed, msg
 
 
@@ -490,114 +491,150 @@ def unmount_fs(module, filesystem):
 def main():
     module = AnsibleModule(
         argument_spec=dict(
-            account_subsystem=dict(type='bool', default=False),
-            attributes=dict(type='list', elements='str', default=["agblksize=4096", "isnapshot=no"]),
-            auto_mount=dict(type='bool', default=True),
-            device=dict(type='str'),
-            filesystem=dict(type='str', required=True),
-            fs_type=dict(type='str', default='jfs2'),
-            permissions=dict(type='str', default='rw', choices=['rw', 'ro']),
-            mount_group=dict(type='str'),
-            nfs_server=dict(type='str'),
-            rm_mount_point=dict(type='bool', default=False),
-            size=dict(type='str'),
-            state=dict(type='str', default='present', choices=['absent', 'mounted', 'present', 'unmounted']),
-            vg=dict(type='str'),
+            account_subsystem=dict(type="bool", default=False),
+            attributes=dict(type="list", elements="str", default=["agblksize=4096", "isnapshot=no"]),
+            auto_mount=dict(type="bool", default=True),
+            device=dict(type="str"),
+            filesystem=dict(type="str", required=True),
+            fs_type=dict(type="str", default="jfs2"),
+            permissions=dict(type="str", default="rw", choices=["rw", "ro"]),
+            mount_group=dict(type="str"),
+            nfs_server=dict(type="str"),
+            rm_mount_point=dict(type="bool", default=False),
+            size=dict(type="str"),
+            state=dict(type="str", default="present", choices=["absent", "mounted", "present", "unmounted"]),
+            
vg=dict(type="str"), ), supports_check_mode=True, ) - account_subsystem = module.params['account_subsystem'] - attributes = module.params['attributes'] - auto_mount = module.params['auto_mount'] - device = module.params['device'] - fs_type = module.params['fs_type'] - permissions = module.params['permissions'] - mount_group = module.params['mount_group'] - filesystem = module.params['filesystem'] - nfs_server = module.params['nfs_server'] - rm_mount_point = module.params['rm_mount_point'] - size = module.params['size'] - state = module.params['state'] - vg = module.params['vg'] + account_subsystem = module.params["account_subsystem"] + attributes = module.params["attributes"] + auto_mount = module.params["auto_mount"] + device = module.params["device"] + fs_type = module.params["fs_type"] + permissions = module.params["permissions"] + mount_group = module.params["mount_group"] + filesystem = module.params["filesystem"] + nfs_server = module.params["nfs_server"] + rm_mount_point = module.params["rm_mount_point"] + size = module.params["size"] + state = module.params["state"] + vg = module.params["vg"] result = dict( changed=False, - msg='', + msg="", ) - if state == 'present': + if state == "present": fs_mounted = ismount(filesystem) fs_exists = _fs_exists(module, filesystem) # Check if fs is mounted or exists. if fs_mounted or fs_exists: - result['msg'] = f"File system {filesystem} already exists." - result['changed'] = False + result["msg"] = f"File system {filesystem} already exists." + result["changed"] = False # If parameter size was passed, resize fs. if size is not None: - result['changed'], result['msg'] = resize_fs(module, filesystem, size) + result["changed"], result["msg"] = resize_fs(module, filesystem, size) # If fs doesn't exist, create it. else: # Check if fs will be a NFS device. if nfs_server is not None: if device is None: - result['msg'] = 'Parameter "device" is required when "nfs_server" is defined.' + result["msg"] = 'Parameter "device" is required when "nfs_server" is defined.' module.fail_json(**result) else: # Create a fs from NFS export. if _check_nfs_device(module, nfs_server, device): - result['changed'], result['msg'] = create_fs( - module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes) + result["changed"], result["msg"] = create_fs( + module, + fs_type, + filesystem, + vg, + device, + size, + mount_group, + auto_mount, + account_subsystem, + permissions, + nfs_server, + attributes, + ) if device is None: if vg is None: - result['msg'] = 'Required parameter "device" and/or "vg" is missing for filesystem creation.' + result["msg"] = 'Required parameter "device" and/or "vg" is missing for filesystem creation.' module.fail_json(**result) else: # Create a fs from - result['changed'], result['msg'] = create_fs( - module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes) + result["changed"], result["msg"] = create_fs( + module, + fs_type, + filesystem, + vg, + device, + size, + mount_group, + auto_mount, + account_subsystem, + permissions, + nfs_server, + attributes, + ) if device is not None and nfs_server is None: # Create a fs from a previously lv device. 
- result['changed'], result['msg'] = create_fs( - module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes) - - elif state == 'absent': + result["changed"], result["msg"] = create_fs( + module, + fs_type, + filesystem, + vg, + device, + size, + mount_group, + auto_mount, + account_subsystem, + permissions, + nfs_server, + attributes, + ) + + elif state == "absent": if ismount(filesystem): - result['msg'] = f"File system {filesystem} mounted." + result["msg"] = f"File system {filesystem} mounted." else: fs_status = _fs_exists(module, filesystem) if not fs_status: - result['msg'] = f"File system {filesystem} does not exist." + result["msg"] = f"File system {filesystem} does not exist." else: - result['changed'], result['msg'] = remove_fs(module, filesystem, rm_mount_point) + result["changed"], result["msg"] = remove_fs(module, filesystem, rm_mount_point) - elif state == 'mounted': + elif state == "mounted": if ismount(filesystem): - result['changed'] = False - result['msg'] = f"File system {filesystem} already mounted." + result["changed"] = False + result["msg"] = f"File system {filesystem} already mounted." else: - result['changed'], result['msg'] = mount_fs(module, filesystem) + result["changed"], result["msg"] = mount_fs(module, filesystem) - elif state == 'unmounted': + elif state == "unmounted": if not ismount(filesystem): - result['changed'] = False - result['msg'] = f"File system {filesystem} already unmounted." + result["changed"] = False + result["msg"] = f"File system {filesystem} already unmounted." else: - result['changed'], result['msg'] = unmount_fs(module, filesystem) + result["changed"], result["msg"] = unmount_fs(module, filesystem) else: # Unreachable codeblock - result['msg'] = f"Unexpected state {state}." + result["msg"] = f"Unexpected state {state}." 
module.fail_json(**result) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/aix_inittab.py b/plugins/modules/aix_inittab.py index f20f2e903d6..31897d29484 100644 --- a/plugins/modules/aix_inittab.py +++ b/plugins/modules/aix_inittab.py @@ -122,16 +122,16 @@ def check_current_entry(module): # Check if entry exists, if not return False in exists in return dict, # if true return True and the entry in return dict - existsdict = {'exist': False} - lsitab = module.get_bin_path('lsitab') - (rc, out, err) = module.run_command([lsitab, module.params['name']]) + existsdict = {"exist": False} + lsitab = module.get_bin_path("lsitab") + (rc, out, err) = module.run_command([lsitab, module.params["name"]]) if rc == 0: - keys = ('name', 'runlevel', 'action', 'command') + keys = ("name", "runlevel", "action", "command") values = out.split(":") # strip non readable characters as \n values = map(lambda s: s.strip(), values) existsdict = dict(zip(keys, values)) - existsdict.update({'exist': True}) + existsdict.update({"exist": True}) return existsdict @@ -139,39 +139,38 @@ def main(): # initialize module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True, aliases=['service']), - runlevel=dict(type='str', required=True), - action=dict(type='str', choices=[ - 'boot', - 'bootwait', - 'hold', - 'initdefault', - 'off', - 'once', - 'ondemand', - 'powerfail', - 'powerwait', - 'respawn', - 'sysinit', - 'wait', - ]), - command=dict(type='str', required=True), - insertafter=dict(type='str'), - state=dict(type='str', default='present', choices=['absent', 'present']), + name=dict(type="str", required=True, aliases=["service"]), + runlevel=dict(type="str", required=True), + action=dict( + type="str", + choices=[ + "boot", + "bootwait", + "hold", + "initdefault", + "off", + "once", + "ondemand", + "powerfail", + "powerwait", + "respawn", + "sysinit", + "wait", + ], + ), + command=dict(type="str", required=True), + insertafter=dict(type="str"), + state=dict(type="str", default="present", choices=["absent", "present"]), ), supports_check_mode=True, ) - result = { - 'name': module.params['name'], - 'changed': False, - 'msg': "" - } + result = {"name": module.params["name"], "changed": False, "msg": ""} # Find commandline strings - mkitab = module.get_bin_path('mkitab') - rmitab = module.get_bin_path('rmitab') - chitab = module.get_bin_path('chitab') + mkitab = module.get_bin_path("mkitab") + rmitab = module.get_bin_path("rmitab") + chitab = module.get_bin_path("chitab") rc = 0 err = None @@ -179,58 +178,54 @@ def main(): current_entry = check_current_entry(module) # if action is install or change, - if module.params['state'] == 'present': - + if module.params["state"] == "present": # create new entry string - new_entry = f"{module.params['name']}:{module.params['runlevel']}:{module.params['action']}:{module.params['command']}" + new_entry = ( + f"{module.params['name']}:{module.params['runlevel']}:{module.params['action']}:{module.params['command']}" + ) # If current entry exists or fields are different(if the entry does not # exists, then the entry will be created - if (not current_entry['exist']) or ( - module.params['runlevel'] != current_entry['runlevel'] or - module.params['action'] != current_entry['action'] or - module.params['command'] != current_entry['command']): - + if (not current_entry["exist"]) or ( + module.params["runlevel"] != current_entry["runlevel"] + or module.params["action"] != current_entry["action"] + or 
module.params["command"] != current_entry["command"]
+        ):
             # If the entry does exist then change the entry
-            if current_entry['exist']:
+            if current_entry["exist"]:
                 if not module.check_mode:
                     (rc, out, err) = module.run_command([chitab, new_entry])
                 if rc != 0:
-                    module.fail_json(
-                        msg="could not change inittab", rc=rc, err=err)
-            result['msg'] = f"changed inittab entry {current_entry['name']}"
-            result['changed'] = True
+                    module.fail_json(msg="could not change inittab", rc=rc, err=err)
+            result["msg"] = f"changed inittab entry {current_entry['name']}"
+            result["changed"] = True
 
             # If the entry does not exist create the entry
-            elif not current_entry['exist']:
-                if module.params['insertafter']:
+            elif not current_entry["exist"]:
+                if module.params["insertafter"]:
                     if not module.check_mode:
-                        (rc, out, err) = module.run_command(
-                            [mkitab, '-i', module.params['insertafter'], new_entry])
+                        (rc, out, err) = module.run_command([mkitab, "-i", module.params["insertafter"], new_entry])
                 else:
                     if not module.check_mode:
-                        (rc, out, err) = module.run_command(
-                            [mkitab, new_entry])
+                        (rc, out, err) = module.run_command([mkitab, new_entry])
                 if rc != 0:
                     module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
-                result['msg'] = f"add inittab entry {module.params['name']}"
-                result['changed'] = True
+                result["msg"] = f"added inittab entry {module.params['name']}"
+                result["changed"] = True
 
-    elif module.params['state'] == 'absent':
+    elif module.params["state"] == "absent":
         # If the action is remove and the entry exists then remove the entry
-        if current_entry['exist']:
+        if current_entry["exist"]:
             if not module.check_mode:
-                (rc, out, err) = module.run_command(
-                    [rmitab, module.params['name']])
+                (rc, out, err) = module.run_command([rmitab, module.params["name"]])
             if rc != 0:
-                module.fail_json(
-                    msg="could not remove entry from inittab)", rc=rc, err=err)
-            result['msg'] = f"removed inittab entry {current_entry['name']}"
-            result['changed'] = True
+                module.fail_json(msg="could not remove entry from inittab", rc=rc, err=err)
+            result["msg"] = f"removed inittab entry {current_entry['name']}"
+            result["changed"] = True
 
     module.exit_json(**result)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/aix_lvg.py b/plugins/modules/aix_lvg.py
index aa1a947e2fd..63b6e69cdd8 100644
--- a/plugins/modules/aix_lvg.py
+++ b/plugins/modules/aix_lvg.py
@@ -101,7 +101,7 @@ def _validate_pv(module, vg, pvs):
 
     :return: [bool, message] or module.fail_json for errors.
     """
-    lspv_cmd = module.get_bin_path('lspv', True)
+    lspv_cmd = module.get_bin_path("lspv", True)
     rc, current_lspv, stderr = module.run_command([lspv_cmd])
     if rc != 0:
         module.fail_json(msg="Failed executing 'lspv' command.", rc=rc, stdout=current_lspv, stderr=stderr)
@@ -117,15 +117,17 @@ def _validate_pv(module, vg, pvs):
         if pv not in lspv_list.keys():
             module.fail_json(msg=f"Physical volume '{pv}' doesn't exist.")
 
-        if lspv_list[pv] == 'None':
+        if lspv_list[pv] == "None":
             # Disk None, looks free.
             # Check if PV is not already in use by Oracle ASM.
- lquerypv_cmd = module.get_bin_path('lquerypv', True) + lquerypv_cmd = module.get_bin_path("lquerypv", True) rc, current_lquerypv, stderr = module.run_command([lquerypv_cmd, "-h", f"/dev/{pv}", "20", "10"]) if rc != 0: - module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr) + module.fail_json( + msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr + ) - if 'ORCLDISK' in current_lquerypv: + if "ORCLDISK" in current_lquerypv: module.fail_json(f"Physical volume '{pv}' is already used by Oracle ASM.") msg = f"Physical volume '{pv}' is ok to be used." @@ -148,7 +150,7 @@ def _validate_vg(module, vg): :return: True (VG in varyon state) or False (VG in varyoff state) or None (VG does not exist), message. """ - lsvg_cmd = module.get_bin_path('lsvg', True) + lsvg_cmd = module.get_bin_path("lsvg", True) rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"]) if rc != 0: module.fail_json(msg=f"Failed executing '{lsvg_cmd}' command.") @@ -170,18 +172,15 @@ def _validate_vg(module, vg): def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation): - """ Creates or extend a volume group. """ + """Creates or extend a volume group.""" # Command option parameters. - force_opt = { - True: '-f', - False: '' - } + force_opt = {True: "-f", False: ""} vg_opt = { - 'normal': '', - 'big': '-B', - 'scalable': '-S', + "normal": "", + "big": "-B", + "scalable": "-S", } # Validate if PV are not already in use. @@ -201,7 +200,7 @@ def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation): msg = "" if not module.check_mode: - extendvg_cmd = module.get_bin_path('extendvg', True) + extendvg_cmd = module.get_bin_path("extendvg", True) rc, output, err = module.run_command([extendvg_cmd, vg] + pvs) if rc != 0: changed = False @@ -214,10 +213,10 @@ def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation): elif vg_state is None: # Volume group creation. changed = True - msg = '' + msg = "" if not module.check_mode: - mkvg_cmd = module.get_bin_path('mkvg', True) + mkvg_cmd = module.get_bin_path("mkvg", True) rc, output, err = module.run_command([mkvg_cmd, vg_opt[vg_type], pp_size, force_opt[force], "-y", vg] + pvs) if rc != 0: changed = False @@ -243,7 +242,7 @@ def reduce_vg(module, vg, pvs, vg_validation): if pvs is None: # Remove VG if pvs are note informed. # Remark: AIX will permit remove only if the VG has not LVs. 
- lsvg_cmd = module.get_bin_path('lsvg', True) + lsvg_cmd = module.get_bin_path("lsvg", True) rc, current_pvs, err = module.run_command([lsvg_cmd, "-p", vg]) if rc != 0: module.fail_json(msg=f"Failing to execute '{lsvg_cmd}' command.") @@ -264,10 +263,10 @@ def reduce_vg(module, vg, pvs, vg_validation): return changed, msg changed = True - msg = '' + msg = "" if not module.check_mode: - reducevg_cmd = module.get_bin_path('reducevg', True) + reducevg_cmd = module.get_bin_path("reducevg", True) rc, stdout, stderr = module.run_command([reducevg_cmd, "-df", vg] + pvs_to_remove) if rc != 0: module.fail_json(msg=f"Unable to remove '{vg}'.", rc=rc, stdout=stdout, stderr=stderr) @@ -282,15 +281,15 @@ def state_vg(module, vg, state, vg_validation): if vg_state is None: module.fail_json(msg=msg) - if state == 'varyon': + if state == "varyon": if vg_state is True: changed = False return changed, msg changed = True - msg = '' + msg = "" if not module.check_mode: - varyonvg_cmd = module.get_bin_path('varyonvg', True) + varyonvg_cmd = module.get_bin_path("varyonvg", True) rc, varyonvg_out, err = module.run_command([varyonvg_cmd, vg]) if rc != 0: module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err) @@ -298,16 +297,16 @@ def state_vg(module, vg, state, vg_validation): msg = f"Varyon volume group {vg} completed." return changed, msg - elif state == 'varyoff': + elif state == "varyoff": if vg_state is False: changed = False return changed, msg changed = True - msg = '' + msg = "" if not module.check_mode: - varyonvg_cmd = module.get_bin_path('varyoffvg', True) + varyonvg_cmd = module.get_bin_path("varyoffvg", True) rc, varyonvg_out, stderr = module.run_command([varyonvg_cmd, vg]) if rc != 0: module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyonvg_out, stderr=stderr) @@ -319,31 +318,31 @@ def state_vg(module, vg, state, vg_validation): def main(): module = AnsibleModule( argument_spec=dict( - force=dict(type='bool', default=False), - pp_size=dict(type='int'), - pvs=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['absent', 'present', 'varyoff', 'varyon']), - vg=dict(type='str', required=True), - vg_type=dict(type='str', default='normal', choices=['big', 'normal', 'scalable']) + force=dict(type="bool", default=False), + pp_size=dict(type="int"), + pvs=dict(type="list", elements="str"), + state=dict(type="str", default="present", choices=["absent", "present", "varyoff", "varyon"]), + vg=dict(type="str", required=True), + vg_type=dict(type="str", default="normal", choices=["big", "normal", "scalable"]), ), supports_check_mode=True, ) - force = module.params['force'] - pp_size = module.params['pp_size'] - pvs = module.params['pvs'] - state = module.params['state'] - vg = module.params['vg'] - vg_type = module.params['vg_type'] + force = module.params["force"] + pp_size = module.params["pp_size"] + pvs = module.params["pvs"] + state = module.params["state"] + vg = module.params["vg"] + vg_type = module.params["vg_type"] if pp_size is None: - pp_size = '' + pp_size = "" else: pp_size = f"-s {pp_size}" vg_validation = _validate_vg(module, vg) - if state == 'present': + if state == "present": if not pvs: changed = False msg = "pvs is required to state 'present'." 
@@ -351,10 +350,10 @@ def main():
         else:
             changed, msg = create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation)

-    elif state == 'absent':
+    elif state == "absent":
         changed, msg = reduce_vg(module, vg, pvs, vg_validation)

-    elif state == 'varyon' or state == 'varyoff':
+    elif state == "varyon" or state == "varyoff":
         changed, msg = state_vg(module, vg, state, vg_validation)

     else:
@@ -364,5 +363,5 @@ def main():
     module.exit_json(changed=changed, msg=msg, state=state)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/aix_lvol.py b/plugins/modules/aix_lvol.py
index 5f02a91f63a..b5cb7a35123 100644
--- a/plugins/modules/aix_lvol.py
+++ b/plugins/modules/aix_lvol.py
@@ -138,7 +138,7 @@ def convert_size(module, size):
     unit = size[-1].upper()

-    units = ['M', 'G', 'T']
+    units = ["M", "G", "T"]
     try:
         multiplier = 1024 ** units.index(unit)
     except ValueError:
@@ -181,13 +181,11 @@ def parse_lv(data):

     size = lps * pp_size

-    return {'name': name, 'vg': vg, 'size': size, 'policy': policy}
+    return {"name": name, "vg": vg, "size": size, "policy": policy}


 def parse_vg(data):
-
     for line in data.splitlines():
-
         match = re.search(r"VOLUME GROUP:\s+(\w+)", line)
         if match is not None:
             name = match.group(1)
@@ -208,39 +206,39 @@ def parse_vg(data):
             free = int(match.group(1))
             continue

-    return {'name': name, 'size': size, 'free': free, 'pp_size': pp_size}
+    return {"name": name, "size": size, "free": free, "pp_size": pp_size}


 def main():
     module = AnsibleModule(
         argument_spec=dict(
-            vg=dict(type='str', required=True),
-            lv=dict(type='str', required=True),
-            lv_type=dict(type='str', default='jfs2'),
-            size=dict(type='str'),
-            opts=dict(type='str', default=''),
-            copies=dict(type='int', default=1),
-            state=dict(type='str', default='present', choices=['absent', 'present']),
-            policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']),
-            pvs=dict(type='list', elements='str', default=list())
+            vg=dict(type="str", required=True),
+            lv=dict(type="str", required=True),
+            lv_type=dict(type="str", default="jfs2"),
+            size=dict(type="str"),
+            opts=dict(type="str", default=""),
+            copies=dict(type="int", default=1),
+            state=dict(type="str", default="present", choices=["absent", "present"]),
+            policy=dict(type="str", default="maximum", choices=["maximum", "minimum"]),
+            pvs=dict(type="list", elements="str", default=list()),
         ),
         supports_check_mode=True,
     )

-    vg = module.params['vg']
-    lv = module.params['lv']
-    lv_type = module.params['lv_type']
-    size = module.params['size']
-    opts = module.params['opts']
-    copies = module.params['copies']
-    policy = module.params['policy']
-    state = module.params['state']
-    pvs = module.params['pvs']
-
-    if policy == 'maximum':
-        lv_policy = 'x'
+    vg = module.params["vg"]
+    lv = module.params["lv"]
+    lv_type = module.params["lv_type"]
+    size = module.params["size"]
+    opts = module.params["opts"]
+    copies = module.params["copies"]
+    policy = module.params["policy"]
+    state = module.params["state"]
+    pvs = module.params["pvs"]
+
+    if policy == "maximum":
+        lv_policy = "x"
     else:
-        lv_policy = 'm'
+        lv_policy = "m"

     # Add echo command when running in check-mode
     if module.check_mode:
@@ -256,7 +254,7 @@ def main():
     rc, vg_info, err = module.run_command([lsvg_cmd, vg])

     if rc != 0:
-        if state == 'absent':
+        if state == "absent":
             module.exit_json(changed=False, msg=f"Volume group {vg} does not exist.")
         else:
             module.fail_json(msg=f"Volume group {vg} does not exist.", rc=rc, out=vg_info, err=err)
@@ -265,57 +263,63 @@ def main():
     if size is not None:
         # Calculate pp size and round it up based on pp size.
-        lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])
+        lv_size = round_ppsize(convert_size(module, size), base=this_vg["pp_size"])

     # Get information on logical volume requested
     rc, lv_info, err = module.run_command([lslv_cmd, lv])

     if rc != 0:
-        if state == 'absent':
+        if state == "absent":
             module.exit_json(changed=False, msg=f"Logical Volume {lv} does not exist.")

     changed = False

     this_lv = parse_lv(lv_info)

-    if state == 'present' and not size:
+    if state == "present" and not size:
         if this_lv is None:
             module.fail_json(msg="No size given.")

     if this_lv is None:
-        if state == 'present':
-            if lv_size > this_vg['free']:
-                module.fail_json(msg=f"Not enough free space in volume group {this_vg['name']}: {this_vg['free']} MB free.")
+        if state == "present":
+            if lv_size > this_vg["free"]:
+                module.fail_json(
+                    msg=f"Not enough free space in volume group {this_vg['name']}: {this_vg['free']} MB free."
+                )

             # create LV
             mklv_cmd = module.get_bin_path("mklv", required=True)

-            cmd = test_opt + [mklv_cmd, "-t", lv_type, "-y", lv, "-c", copies, "-e", lv_policy, opts, vg, f"{lv_size}M"] + pvs
+            cmd = (
+                test_opt
+                + [mklv_cmd, "-t", lv_type, "-y", lv, "-c", copies, "-e", lv_policy, opts, vg, f"{lv_size}M"]
+                + pvs
+            )
             rc, out, err = module.run_command(cmd)
             if rc == 0:
                 module.exit_json(changed=True, msg=f"Logical volume {lv} created.")
             else:
                 module.fail_json(msg=f"Creating logical volume {lv} failed.", rc=rc, out=out, err=err)
     else:
-        if state == 'absent':
+        if state == "absent":
             # remove LV
             rmlv_cmd = module.get_bin_path("rmlv", required=True)
-            rc, out, err = module.run_command(test_opt + [rmlv_cmd, "-f", this_lv['name']])
+            rc, out, err = module.run_command(test_opt + [rmlv_cmd, "-f", this_lv["name"]])
             if rc == 0:
                 module.exit_json(changed=True, msg=f"Logical volume {lv} deleted.")
             else:
                 module.fail_json(msg=f"Failed to remove logical volume {lv}.", rc=rc, out=out, err=err)
         else:
-            if this_lv['policy'] != policy:
+            if this_lv["policy"] != policy:
                 # change lv allocation policy
                 chlv_cmd = module.get_bin_path("chlv", required=True)
-                rc, out, err = module.run_command(test_opt + [chlv_cmd, "-e", lv_policy, this_lv['name']])
+                rc, out, err = module.run_command(test_opt + [chlv_cmd, "-e", lv_policy, this_lv["name"]])
                 if rc == 0:
                     module.exit_json(changed=True, msg=f"Logical volume {lv} policy changed: {policy}.")
                 else:
                     module.fail_json(msg=f"Failed to change logical volume {lv} policy.", rc=rc, out=out, err=err)

-            if vg != this_lv['vg']:
+            if vg != this_lv["vg"]:
                 module.fail_json(msg=f"Logical volume {lv} already exist in volume group {this_lv['vg']}")

             # from here the last remaining action is to resize it, if no size parameter is passed we do nothing.
@@ -323,7 +327,7 @@ def main():
                 module.exit_json(changed=False, msg=f"Logical volume {lv} already exist.")

             # resize LV based on absolute values
-            if int(lv_size) > this_lv['size']:
+            if int(lv_size) > this_lv["size"]:
                 extendlv_cmd = module.get_bin_path("extendlv", required=True)
                 cmd = test_opt + [extendlv_cmd, lv, f"{lv_size - this_lv['size']}M"]
                 rc, out, err = module.run_command(cmd)
@@ -331,11 +335,13 @@ def main():
                     module.exit_json(changed=True, msg=f"Logical volume {lv} size extended to {lv_size}MB.")
                 else:
                     module.fail_json(msg=f"Unable to resize {lv} to {lv_size}MB.", rc=rc, out=out, err=err)
-            elif lv_size < this_lv['size']:
-                module.fail_json(msg=f"No shrinking of Logical Volume {lv} permitted. Current size: {this_lv['size']} MB")
+            elif lv_size < this_lv["size"]:
+                module.fail_json(
+                    msg=f"No shrinking of Logical Volume {lv} permitted. Current size: {this_lv['size']} MB"
+                )
             else:
                 module.exit_json(changed=False, msg=f"Logical volume {lv} size is already {lv_size}MB.")


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/alerta_customer.py b/plugins/modules/alerta_customer.py
index 6b4cfb47cd0..a85be173234 100644
--- a/plugins/modules/alerta_customer.py
+++ b/plugins/modules/alerta_customer.py
@@ -99,19 +99,20 @@


 class AlertaInterface:
-
     def __init__(self, module):
         self.module = module
-        self.state = module.params['state']
-        self.customer = module.params['customer']
-        self.match = module.params['match']
-        self.alerta_url = module.params['alerta_url']
+        self.state = module.params["state"]
+        self.customer = module.params["customer"]
+        self.match = module.params["match"]
+        self.alerta_url = module.params["alerta_url"]
         self.headers = {"Content-Type": "application/json"}

-        if module.params.get('api_key', None):
+        if module.params.get("api_key", None):
             self.headers["Authorization"] = f"Key {module.params['api_key']}"
         else:
-            self.headers["Authorization"] = basic_auth_header(module.params['api_username'], module.params['api_password'])
+            self.headers["Authorization"] = basic_auth_header(
+                module.params["api_username"], module.params["api_password"]
+            )

     def send_request(self, url, data=None, method="GET"):
         response, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method)
@@ -142,46 +143,46 @@ def create_customer(self):
         url = f"{self.alerta_url}/api/customer"

         payload = {
-            'customer': self.customer,
-            'match': self.match,
+            "customer": self.customer,
+            "match": self.match,
         }

         payload = self.module.jsonify(payload)
-        response = self.send_request(url, payload, 'POST')
+        response = self.send_request(url, payload, "POST")
         return response

     def delete_customer(self, id):
         url = f"{self.alerta_url}/api/customer/{id}"

-        response = self.send_request(url, None, 'DELETE')
+        response = self.send_request(url, None, "DELETE")
         return response

     def find_customer_id(self, customer):
-        for i in customer['customers']:
-            if self.customer == i['customer'] and self.match == i['match']:
-                return i['id']
+        for i in customer["customers"]:
+            if self.customer == i["customer"] and self.match == i["match"]:
+                return i["id"]
         return None


 def main():
     module = AnsibleModule(
         argument_spec=dict(
-            state=dict(choices=['present', 'absent'], default='present'),
-            customer=dict(type='str', required=True),
-            match=dict(type='str', required=True),
-            alerta_url=dict(type='str', required=True),
-            api_username=dict(type='str'),
-            api_password=dict(type='str', no_log=True),
-            api_key=dict(type='str', no_log=True),
+            state=dict(choices=["present", "absent"], default="present"),
+            customer=dict(type="str", required=True),
+            match=dict(type="str", required=True),
+            alerta_url=dict(type="str", required=True),
+            api_username=dict(type="str"),
+            api_password=dict(type="str", no_log=True),
+            api_key=dict(type="str", no_log=True),
         ),
-        required_together=[['api_username', 'api_password']],
-        mutually_exclusive=[['api_username', 'api_key']],
-        supports_check_mode=True
+        required_together=[["api_username", "api_password"]],
+        mutually_exclusive=[["api_username", "api_key"]],
+        supports_check_mode=True,
     )

     alerta_iface = AlertaInterface(module)

-    if alerta_iface.state == 'present':
+    if alerta_iface.state == "present":
         response = alerta_iface.get_customers()
         if alerta_iface.find_customer_id(response):
             module.exit_json(changed=False, response=response, msg=f"Customer {alerta_iface.customer} already exists")
@@ -195,7 +196,9 @@ def main():
         if id:
             if not module.check_mode:
                 alerta_iface.delete_customer(id)
-            module.exit_json(changed=True, response=response, msg=f"Customer {alerta_iface.customer} with id {id} deleted")
+            module.exit_json(
+                changed=True, response=response, msg=f"Customer {alerta_iface.customer} with id {id} deleted"
+            )
         else:
             module.exit_json(changed=False, response=response, msg=f"Customer {alerta_iface.customer} does not exists")

diff --git a/plugins/modules/ali_instance.py b/plugins/modules/ali_instance.py
index 37b2f067e09..3d82d6b6e63 100644
--- a/plugins/modules/ali_instance.py
+++ b/plugins/modules/ali_instance.py
@@ -618,7 +618,10 @@
 import time

 from ansible.module_utils.basic import AnsibleModule, missing_required_lib
 from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import (
-    ecs_argument_spec, ecs_connect, FOOTMARK_IMP_ERR, HAS_FOOTMARK
+    ecs_argument_spec,
+    ecs_connect,
+    FOOTMARK_IMP_ERR,
+    HAS_FOOTMARK,
 )


@@ -628,8 +631,8 @@ def get_instances_info(connection, ids):
     if len(instances) > 0:
         for inst in instances:
             volumes = connection.describe_disks(instance_id=inst.id)
-            setattr(inst, 'block_device_mappings', volumes)
-            setattr(inst, 'user_data', inst.describe_user_data())
+            setattr(inst, "block_device_mappings", volumes)
+            setattr(inst, "user_data", inst.describe_user_data())
             result.append(inst.read())
     return result

@@ -637,61 +640,84 @@ def get_instances_info(connection, ids):
 def run_instance(module, ecs, exact_count):
     if exact_count <= 0:
         return None
-    zone_id = module.params['availability_zone']
-    image_id = module.params['image_id']
-    instance_type = module.params['instance_type']
-    security_groups = module.params['security_groups']
-    vswitch_id = module.params['vswitch_id']
-    instance_name = module.params['instance_name']
-    description = module.params['description']
-    internet_charge_type = module.params['internet_charge_type']
-    max_bandwidth_out = module.params['max_bandwidth_out']
-    max_bandwidth_in = module.params['max_bandwidth_in']
-    host_name = module.params['host_name']
-    password = module.params['password']
-    system_disk_category = module.params['system_disk_category']
-    system_disk_size = module.params['system_disk_size']
-    system_disk_name = module.params['system_disk_name']
-    system_disk_description = module.params['system_disk_description']
-    allocate_public_ip = module.params['allocate_public_ip']
-    period = module.params['period']
-    auto_renew = module.params['auto_renew']
-    instance_charge_type = module.params['instance_charge_type']
-    auto_renew_period = module.params['auto_renew_period']
-    user_data = module.params['user_data']
-    key_name = module.params['key_name']
-    ram_role_name = module.params['ram_role_name']
-    spot_price_limit = module.params['spot_price_limit']
-    spot_strategy = module.params['spot_strategy']
-    unique_suffix = module.params['unique_suffix']
+    zone_id = module.params["availability_zone"]
+    image_id = module.params["image_id"]
+    instance_type = module.params["instance_type"]
+    security_groups = module.params["security_groups"]
+    vswitch_id = module.params["vswitch_id"]
+    instance_name = module.params["instance_name"]
+    description = module.params["description"]
+    internet_charge_type = module.params["internet_charge_type"]
+    max_bandwidth_out = module.params["max_bandwidth_out"]
+    max_bandwidth_in = module.params["max_bandwidth_in"]
+    host_name = module.params["host_name"]
+    password = module.params["password"]
+    system_disk_category = module.params["system_disk_category"]
+    system_disk_size = module.params["system_disk_size"]
+    system_disk_name = module.params["system_disk_name"]
+    system_disk_description = module.params["system_disk_description"]
+    allocate_public_ip = module.params["allocate_public_ip"]
+    period = module.params["period"]
+    auto_renew = module.params["auto_renew"]
+    instance_charge_type = module.params["instance_charge_type"]
+    auto_renew_period = module.params["auto_renew_period"]
+    user_data = module.params["user_data"]
+    key_name = module.params["key_name"]
+    ram_role_name = module.params["ram_role_name"]
+    spot_price_limit = module.params["spot_price_limit"]
+    spot_strategy = module.params["spot_strategy"]
+    unique_suffix = module.params["unique_suffix"]

     # check whether the required parameter passed or not
     if not image_id:
-        module.fail_json(msg='image_id is required for new instance')
+        module.fail_json(msg="image_id is required for new instance")
     if not instance_type:
-        module.fail_json(msg='instance_type is required for new instance')
+        module.fail_json(msg="instance_type is required for new instance")
     if not isinstance(security_groups, list):
-        module.fail_json(msg='The parameter security_groups should be a list, aborting')
+        module.fail_json(msg="The parameter security_groups should be a list, aborting")
     if len(security_groups) <= 0:
-        module.fail_json(msg='Expected the parameter security_groups is non-empty when create new ECS instances, aborting')
+        module.fail_json(
+            msg="Expected the parameter security_groups is non-empty when create new ECS instances, aborting"
+        )

     client_token = f"Ansible-Alicloud-{hash(str(module.params))}-{time.time()}"

     try:
         # call to create_instance method from footmark
-        instances = ecs.run_instances(image_id=image_id, instance_type=instance_type, security_group_id=security_groups[0],
-                                      zone_id=zone_id, instance_name=instance_name, description=description,
-                                      internet_charge_type=internet_charge_type, internet_max_bandwidth_out=max_bandwidth_out,
-                                      internet_max_bandwidth_in=max_bandwidth_in, host_name=host_name, password=password,
-                                      io_optimized='optimized', system_disk_category=system_disk_category,
-                                      system_disk_size=system_disk_size, system_disk_disk_name=system_disk_name,
-                                      system_disk_description=system_disk_description, vswitch_id=vswitch_id,
-                                      amount=exact_count, instance_charge_type=instance_charge_type, period=period, period_unit="Month",
-                                      auto_renew=auto_renew, auto_renew_period=auto_renew_period, key_pair_name=key_name,
-                                      user_data=user_data, client_token=client_token, ram_role_name=ram_role_name,
-                                      spot_price_limit=spot_price_limit, spot_strategy=spot_strategy, unique_suffix=unique_suffix)
+        instances = ecs.run_instances(
+            image_id=image_id,
+            instance_type=instance_type,
+            security_group_id=security_groups[0],
+            zone_id=zone_id,
+            instance_name=instance_name,
+            description=description,
+            internet_charge_type=internet_charge_type,
+            internet_max_bandwidth_out=max_bandwidth_out,
+            internet_max_bandwidth_in=max_bandwidth_in,
+            host_name=host_name,
+            password=password,
+            io_optimized="optimized",
+            system_disk_category=system_disk_category,
+            system_disk_size=system_disk_size,
+            system_disk_disk_name=system_disk_name,
+            system_disk_description=system_disk_description,
+            vswitch_id=vswitch_id,
+            amount=exact_count,
+            instance_charge_type=instance_charge_type,
+            period=period,
+            period_unit="Month",
+            auto_renew=auto_renew,
+            auto_renew_period=auto_renew_period,
+            key_pair_name=key_name,
+            user_data=user_data,
+            client_token=client_token,
+            ram_role_name=ram_role_name,
+            spot_price_limit=spot_price_limit,
+            spot_strategy=spot_strategy,
+            unique_suffix=unique_suffix,
+        )
     except Exception as e:
-        module.fail_json(msg=f'Unable to create instance, error: {e}')
+        module.fail_json(msg=f"Unable to create instance, error: {e}")

     return instances

@@ -699,16 +725,16 @@ def run_instance(module, ecs, exact_count):
 def modify_instance(module, instance):
     # According to state to modify instance's some special attribute
     state = module.params["state"]
-    name = module.params['instance_name']
-    unique_suffix = module.params['unique_suffix']
+    name = module.params["instance_name"]
+    unique_suffix = module.params["unique_suffix"]
     if not name:
         name = instance.name

-    description = module.params['description']
+    description = module.params["description"]
     if not description:
         description = instance.description

-    host_name = module.params['host_name']
+    host_name = module.params["host_name"]
     if unique_suffix and host_name:
         suffix = instance.host_name[-3:]
         host_name = host_name + suffix
@@ -719,16 +745,18 @@ def modify_instance(module, instance):

     # password can be modified only when restart instance
     password = ""
     if state == "restarted":
-        password = module.params['password']
+        password = module.params["password"]

     # userdata can be modified only when instance is stopped
     setattr(instance, "user_data", instance.describe_user_data())
     user_data = instance.user_data
     if state == "stopped":
-        user_data = module.params['user_data'].encode()
+        user_data = module.params["user_data"].encode()

     try:
-        return instance.modify(name=name, description=description, host_name=host_name, password=password, user_data=user_data)
+        return instance.modify(
+            name=name, description=description, host_name=host_name, password=password, user_data=user_data
+        )
     except Exception as e:
         module.fail_json(msg=f"Modify instance {instance.id} attribute got an error: {e}")

@@ -756,89 +784,96 @@ def wait_for_instance_modify_charge(ecs, instance_ids, charge_type, delay=10, ti
 def main():
     argument_spec = ecs_argument_spec()
-    argument_spec.update(dict(
-        security_groups=dict(type='list', elements='str', aliases=['group_ids']),
-        availability_zone=dict(type='str', aliases=['alicloud_zone', 'zone_id']),
-        instance_type=dict(type='str', aliases=['type']),
-        image_id=dict(type='str', aliases=['image']),
-        count=dict(type='int', default=1),
-        count_tag=dict(type='str'),
-        vswitch_id=dict(type='str', aliases=['subnet_id']),
-        instance_name=dict(type='str', aliases=['name']),
-        host_name=dict(type='str'),
-        password=dict(type='str', no_log=True),
-        internet_charge_type=dict(type='str', default='PayByBandwidth', choices=['PayByBandwidth', 'PayByTraffic']),
-        max_bandwidth_in=dict(type='int', default=200),
-        max_bandwidth_out=dict(type='int', default=0),
-        system_disk_category=dict(type='str', default='cloud_efficiency', choices=['cloud_efficiency', 'cloud_ssd']),
-        system_disk_size=dict(type='int', default=40),
-        system_disk_name=dict(type='str'),
-        system_disk_description=dict(type='str'),
-        force=dict(type='bool', default=False),
-        tags=dict(type='dict', aliases=['instance_tags']),
-        purge_tags=dict(type='bool', default=False),
-        state=dict(default='present', choices=['present', 'running', 'stopped', 'restarted', 'absent']),
-        description=dict(type='str'),
-        allocate_public_ip=dict(type='bool', aliases=['assign_public_ip'], default=False),
-        instance_charge_type=dict(type='str', default='PostPaid', choices=['PrePaid', 'PostPaid']),
-        period=dict(type='int', default=1),
-        auto_renew=dict(type='bool', default=False),
-        instance_ids=dict(type='list', elements='str'),
-        auto_renew_period=dict(type='int', choices=[1, 2, 3, 6, 12]),
-        key_name=dict(type='str', aliases=['keypair']),
-        user_data=dict(type='str'),
-        ram_role_name=dict(type='str'),
-        spot_price_limit=dict(type='float'),
-        spot_strategy=dict(type='str', default='NoSpot', choices=['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']),
-        unique_suffix=dict(type='bool', default=False),
-        period_unit=dict(type='str', default='Month', choices=['Month', 'Week']),
-        dry_run=dict(type='bool', default=False),
-        include_data_disks=dict(type='bool', default=True)
-    )
+    argument_spec.update(
+        dict(
+            security_groups=dict(type="list", elements="str", aliases=["group_ids"]),
+            availability_zone=dict(type="str", aliases=["alicloud_zone", "zone_id"]),
+            instance_type=dict(type="str", aliases=["type"]),
+            image_id=dict(type="str", aliases=["image"]),
+            count=dict(type="int", default=1),
+            count_tag=dict(type="str"),
+            vswitch_id=dict(type="str", aliases=["subnet_id"]),
+            instance_name=dict(type="str", aliases=["name"]),
+            host_name=dict(type="str"),
+            password=dict(type="str", no_log=True),
+            internet_charge_type=dict(type="str", default="PayByBandwidth", choices=["PayByBandwidth", "PayByTraffic"]),
+            max_bandwidth_in=dict(type="int", default=200),
+            max_bandwidth_out=dict(type="int", default=0),
+            system_disk_category=dict(
+                type="str", default="cloud_efficiency", choices=["cloud_efficiency", "cloud_ssd"]
+            ),
+            system_disk_size=dict(type="int", default=40),
+            system_disk_name=dict(type="str"),
+            system_disk_description=dict(type="str"),
+            force=dict(type="bool", default=False),
+            tags=dict(type="dict", aliases=["instance_tags"]),
+            purge_tags=dict(type="bool", default=False),
+            state=dict(default="present", choices=["present", "running", "stopped", "restarted", "absent"]),
+            description=dict(type="str"),
+            allocate_public_ip=dict(type="bool", aliases=["assign_public_ip"], default=False),
+            instance_charge_type=dict(type="str", default="PostPaid", choices=["PrePaid", "PostPaid"]),
+            period=dict(type="int", default=1),
+            auto_renew=dict(type="bool", default=False),
+            instance_ids=dict(type="list", elements="str"),
+            auto_renew_period=dict(type="int", choices=[1, 2, 3, 6, 12]),
+            key_name=dict(type="str", aliases=["keypair"]),
+            user_data=dict(type="str"),
+            ram_role_name=dict(type="str"),
+            spot_price_limit=dict(type="float"),
+            spot_strategy=dict(type="str", default="NoSpot", choices=["NoSpot", "SpotWithPriceLimit", "SpotAsPriceGo"]),
+            unique_suffix=dict(type="bool", default=False),
+            period_unit=dict(type="str", default="Month", choices=["Month", "Week"]),
+            dry_run=dict(type="bool", default=False),
+            include_data_disks=dict(type="bool", default=True),
+        )
     )

     module = AnsibleModule(argument_spec=argument_spec)

     if HAS_FOOTMARK is False:
-        module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+        module.fail_json(msg=missing_required_lib("footmark"), exception=FOOTMARK_IMP_ERR)

     ecs = ecs_connect(module)
-    host_name = module.params['host_name']
-    state = module.params['state']
-    instance_ids = module.params['instance_ids']
-    count_tag = module.params['count_tag']
-    count = module.params['count']
-    instance_name = module.params['instance_name']
-    force = module.params['force']
-    zone_id = module.params['availability_zone']
-    key_name = module.params['key_name']
-    tags = module.params['tags']
-    max_bandwidth_out = module.params['max_bandwidth_out']
-    instance_charge_type = module.params['instance_charge_type']
+    host_name = module.params["host_name"]
+    state = module.params["state"]
+    instance_ids = module.params["instance_ids"]
+    count_tag = module.params["count_tag"]
+    count = module.params["count"]
+    instance_name = module.params["instance_name"]
+    force = module.params["force"]
+    zone_id = module.params["availability_zone"]
+    key_name = module.params["key_name"]
+    tags = module.params["tags"]
+    max_bandwidth_out = module.params["max_bandwidth_out"]
+    instance_charge_type = module.params["instance_charge_type"]
     if instance_charge_type == "PrePaid":
-        module.params['spot_strategy'] = ''
+        module.params["spot_strategy"] = ""
     changed = False

     instances = []
     if instance_ids:
         if not isinstance(instance_ids, list):
-            module.fail_json(msg='The parameter instance_ids should be a list, aborting')
+            module.fail_json(msg="The parameter instance_ids should be a list, aborting")
         instances = ecs.describe_instances(zone_id=zone_id, instance_ids=instance_ids)
         if not instances:
-            module.fail_json(msg=f"There are no instances in our record based on instance_ids {instance_ids}. Please check it and try again.")
+            module.fail_json(
+                msg=f"There are no instances in our record based on instance_ids {instance_ids}. Please check it and try again."
+            )
     elif count_tag:
         instances = ecs.describe_instances(zone_id=zone_id, tags=eval(count_tag))
     elif instance_name:
         instances = ecs.describe_instances(zone_id=zone_id, instance_name=instance_name)

     ids = []
-    if state == 'absent':
+    if state == "absent":
         if len(instances) < 1:
-            module.fail_json(msg='Please specify ECS instances that you want to operate by using '
-                                 'parameters instance_ids, tags or instance_name, aborting')
+            module.fail_json(
+                msg="Please specify ECS instances that you want to operate by using "
+                "parameters instance_ids, tags or instance_name, aborting"
+            )
         try:
             targets = []
             for inst in instances:
-                if inst.status != 'stopped' and not force:
+                if inst.status != "stopped" and not force:
                     module.fail_json(msg="Instance is running, and please stop it or set 'force' as True.")
                 targets.append(inst.id)
             if ecs.delete_instances(instance_ids=targets, force=force):
@@ -847,21 +882,23 @@ def main():
                 module.exit_json(changed=changed, ids=ids, instances=[])
         except Exception as e:
-            module.fail_json(msg=f'Delete instance got an error: {e}')
+            module.fail_json(msg=f"Delete instance got an error: {e}")

-    if module.params['allocate_public_ip'] and max_bandwidth_out < 0:
+    if module.params["allocate_public_ip"] and max_bandwidth_out < 0:
         module.fail_json(msg="'max_bandwidth_out' should be greater than 0 when 'allocate_public_ip' is True.")
-    if not module.params['allocate_public_ip']:
-        module.params['max_bandwidth_out'] = 0
+    if not module.params["allocate_public_ip"]:
+        module.params["max_bandwidth_out"] = 0

-    if state == 'present':
+    if state == "present":
         if not instance_ids:
             if len(instances) > count:
                 for i in range(0, len(instances) - count):
                     inst = instances[len(instances) - 1]
-                    if inst.status != 'stopped' and not force:
-                        module.fail_json(msg=f"That to delete instance {inst.id} is failed results from it is running, "
-                                             "and please stop it or set 'force' as True.")
+                    if inst.status != "stopped" and not force:
+                        module.fail_json(
+                            msg=f"That to delete instance {inst.id} is failed results from it is running, "
+                            "and please stop it or set 'force' as True."
+                        )
                     try:
                         if inst.terminate(force=force):
                             changed = True
@@ -871,8 +908,10 @@ def main():
             else:
                 try:
                     if re.search(r"-\[\d+,\d+\]-", host_name):
-                        module.fail_json(msg='Ordered hostname is not supported, If you want to add an ordered '
-                                             'suffix to the hostname, you can set unique_suffix to True')
+                        module.fail_json(
+                            msg="Ordered hostname is not supported, If you want to add an ordered "
+                            "suffix to the hostname, you can set unique_suffix to True"
+                        )
                     new_instances = run_instance(module, ecs, count - len(instances))
                     if new_instances:
                         changed = True
@@ -881,12 +920,12 @@ def main():
                     module.fail_json(msg=f"Create new instances got an error: {e}")

         # Security Group join/leave begin
-        security_groups = module.params['security_groups']
+        security_groups = module.params["security_groups"]
         if security_groups:
             if not isinstance(security_groups, list):
-                module.fail_json(msg='The parameter security_groups should be a list, aborting')
+                module.fail_json(msg="The parameter security_groups should be a list, aborting")
             for inst in instances:
-                existing = inst.security_group_ids['security_group_id']
+                existing = inst.security_group_ids["security_group_id"]
                 remove = list(set(existing).difference(set(security_groups)))
                 add = list(set(security_groups).difference(set(existing)))
                 for sg in remove:
@@ -922,12 +961,16 @@ def main():
                 if inst.instance_charge_type != instance_charge_type:
                     ids.append(inst.id)
         if ids:
-            params = {"instance_ids": ids, "instance_charge_type": instance_charge_type,
-                      "include_data_disks": module.params['include_data_disks'], "dry_run": module.params['dry_run'],
-                      "auto_pay": True}
-            if instance_charge_type == 'PrePaid':
-                params['period'] = module.params['period']
-                params['period_unit'] = module.params['period_unit']
+            params = {
+                "instance_ids": ids,
+                "instance_charge_type": instance_charge_type,
+                "include_data_disks": module.params["include_data_disks"],
+                "dry_run": module.params["dry_run"],
+                "auto_pay": True,
+            }
+            if instance_charge_type == "PrePaid":
+                params["period"] = module.params["period"]
+                params["period_unit"] = module.params["period_unit"]

             if ecs.modify_instance_charge_type(**params):
                 changed = True
@@ -935,9 +978,11 @@ def main():

     else:
         if len(instances) < 1:
-            module.fail_json(msg='Please specify ECS instances that you want to operate by using '
-                                 'parameters instance_ids, tags or instance_name, aborting')
-        if state == 'running':
+            module.fail_json(
+                msg="Please specify ECS instances that you want to operate by using "
+                "parameters instance_ids, tags or instance_name, aborting"
+            )
+        if state == "running":
             try:
                 targets = []
                 for inst in instances:
@@ -950,8 +995,8 @@ def main():
                     changed = True
                 ids.extend(targets)
             except Exception as e:
-                module.fail_json(msg=f'Start instances got an error: {e}')
-        elif state == 'stopped':
+                module.fail_json(msg=f"Start instances got an error: {e}")
+        elif state == "stopped":
             try:
                 targets = []
                 for inst in instances:
@@ -964,22 +1009,22 @@ def main():
                     if modify_instance(module, inst):
                         changed = True
             except Exception as e:
-                module.fail_json(msg=f'Stop instances got an error: {e}')
-        elif state == 'restarted':
+                module.fail_json(msg=f"Stop instances got an error: {e}")
+        elif state == "restarted":
             try:
                 targets = []
                 for inst in instances:
                     if modify_instance(module, inst):
                         changed = True
                         targets.append(inst.id)
-                if ecs.reboot_instances(instance_ids=targets, force_stop=module.params['force']):
+                if ecs.reboot_instances(instance_ids=targets, force_stop=module.params["force"]):
                     changed = True
                     ids.extend(targets)
             except Exception as e:
-                module.fail_json(msg=f'Reboot instances got an error: {e}')
+                module.fail_json(msg=f"Reboot instances got an error: {e}")

-    tags = module.params['tags']
-    if module.params['purge_tags']:
+    tags = module.params["tags"]
+    if module.params["purge_tags"]:
         for inst in instances:
             if not tags:
                 tags = inst.tags
@@ -1000,5 +1045,5 @@ def main():
     module.exit_json(changed=changed, instances=get_instances_info(ecs, ids))


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/ali_instance_info.py b/plugins/modules/ali_instance_info.py
index 31550c4d0af..eb4d35ca59d 100644
--- a/plugins/modules/ali_instance_info.py
+++ b/plugins/modules/ali_instance_info.py
@@ -344,17 +344,17 @@

 from ansible.module_utils.basic import AnsibleModule, missing_required_lib
 from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import (
-    ecs_argument_spec, ecs_connect, FOOTMARK_IMP_ERR, HAS_FOOTMARK
+    ecs_argument_spec,
+    ecs_connect,
+    FOOTMARK_IMP_ERR,
+    HAS_FOOTMARK,
 )


 def main():
     argument_spec = ecs_argument_spec()
-    argument_spec.update(dict(
-        name_prefix=dict(type='str'),
-        tags=dict(type='dict', aliases=['instance_tags']),
-        filters=dict(type='dict')
-    )
+    argument_spec.update(
+        dict(name_prefix=dict(type="str"), tags=dict(type="dict", aliases=["instance_tags"]), filters=dict(type="dict"))
     )
     module = AnsibleModule(
         argument_spec=argument_spec,
@@ -362,16 +362,16 @@ def main():
     )

     if HAS_FOOTMARK is False:
-        module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+        module.fail_json(msg=missing_required_lib("footmark"), exception=FOOTMARK_IMP_ERR)

     ecs = ecs_connect(module)

     instances = []
     instance_ids = []
     ids = []
-    name_prefix = module.params['name_prefix']
+    name_prefix = module.params["name_prefix"]

-    filters = module.params['filters']
+    filters = module.params["filters"]
     if not filters:
         filters = {}
     for key, value in list(filters.items()):
@@ -380,22 +380,22 @@ def main():
             if id not in ids:
                 ids.append(value)
     if ids:
-        filters['instance_ids'] = ids
-    if module.params['tags']:
-        filters['tags'] = module.params['tags']
+        filters["instance_ids"] = ids
+    if module.params["tags"]:
+        filters["tags"] = module.params["tags"]

     for inst in ecs.describe_instances(**filters):
         if name_prefix:
             if not str(inst.instance_name).startswith(name_prefix):
                 continue
         volumes = ecs.describe_disks(instance_id=inst.id)
-        setattr(inst, 'block_device_mappings', volumes)
-        setattr(inst, 'user_data', inst.describe_user_data())
+        setattr(inst, "block_device_mappings", volumes)
+        setattr(inst, "user_data", inst.describe_user_data())
         instances.append(inst.read())
         instance_ids.append(inst.id)

     module.exit_json(changed=False, ids=instance_ids, instances=instances)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/alternatives.py b/plugins/modules/alternatives.py
index 9e7075d3b73..58036b92719 100644
--- a/plugins/modules/alternatives.py
+++ b/plugins/modules/alternatives.py
@@ -164,37 +164,49 @@ class AlternativesModule:
     def __init__(self, module):
         self.module = module
         self.result = dict(changed=False, diff=dict(before=dict(), after=dict()))
-        self.module.run_command_environ_update = {'LC_ALL': 'C'}
+        self.module.run_command_environ_update = {"LC_ALL": "C"}
         self.messages = []
         self.run()

     @property
     def mode_present(self):
-        return self.module.params.get('state') in [AlternativeState.PRESENT, AlternativeState.SELECTED, AlternativeState.AUTO]
+        return self.module.params.get("state") in [
+            AlternativeState.PRESENT,
+            AlternativeState.SELECTED,
+            AlternativeState.AUTO,
+        ]

     @property
     def mode_selected(self):
-        return self.module.params.get('state') == AlternativeState.SELECTED
+        return self.module.params.get("state") == AlternativeState.SELECTED

     @property
     def mode_auto(self):
-        return self.module.params.get('state') == AlternativeState.AUTO
+        return self.module.params.get("state") == AlternativeState.AUTO

     def run(self):
         self.parse()

         if self.mode_present:
             # Check if we need to (re)install
-            subcommands_parameter = self.module.params['subcommands']
-            priority_parameter = self.module.params['priority']
-            if (
-                self.path is not None and (
-                    self.path not in self.current_alternatives or
-                    (priority_parameter is not None and self.current_alternatives[self.path].get('priority') != priority_parameter) or
-                    (subcommands_parameter is not None and (
-                        not all(s in subcommands_parameter for s in self.current_alternatives[self.path].get('subcommands')) or
-                        not all(s in self.current_alternatives[self.path].get('subcommands') for s in subcommands_parameter)
-                    ))
+            subcommands_parameter = self.module.params["subcommands"]
+            priority_parameter = self.module.params["priority"]
+            if self.path is not None and (
+                self.path not in self.current_alternatives
+                or (
+                    priority_parameter is not None
+                    and self.current_alternatives[self.path].get("priority") != priority_parameter
+                )
+                or (
+                    subcommands_parameter is not None
+                    and (
+                        not all(
+                            s in subcommands_parameter for s in self.current_alternatives[self.path].get("subcommands")
+                        )
+                        or not all(
+                            s in self.current_alternatives[self.path].get("subcommands") for s in subcommands_parameter
+                        )
+                    )
                 )
             ):
                 self.install()
@@ -204,44 +216,46 @@ def run(self):
             is_same_family = False
             if self.current_path is not None and self.current_path in self.current_alternatives:
                 current_alternative = self.current_alternatives[self.current_path]
-                is_same_family = current_alternative.get('family') == self.family
+                is_same_family = current_alternative.get("family") == self.family

             if self.mode_selected and not (is_same_path or is_same_family):
                 self.set()

             # Check if we need to reset to auto
-            if self.mode_auto and self.current_mode == 'manual':
+            if self.mode_auto and self.current_mode == "manual":
                 self.auto()
         else:
             # Check if we need to uninstall
             if self.path in self.current_alternatives:
                 self.remove()

-        self.result['msg'] = ' '.join(self.messages)
+        self.result["msg"] = " ".join(self.messages)
         self.module.exit_json(**self.result)

     def install(self):
         if not os.path.exists(self.path):
             self.module.fail_json(msg=f"Specified path {self.path} does not exist")
         if not self.link:
-            self.module.fail_json(msg='Needed to install the alternative, but unable to do so as we are missing the link')
+            self.module.fail_json(
+                msg="Needed to install the alternative, but unable to do so as we are missing the link"
+            )

-        cmd = [self.UPDATE_ALTERNATIVES, '--install', self.link, self.name, self.path, str(self.priority)]
+        cmd = [self.UPDATE_ALTERNATIVES, "--install", self.link, self.name, self.path, str(self.priority)]

         if self.family is not None:
             cmd.extend(["--family", self.family])

-        if self.module.params['subcommands'] is not None:
-            subcommands = [['--slave', subcmd['link'], subcmd['name'], subcmd['path']] for subcmd in self.subcommands]
+        if self.module.params["subcommands"] is not None:
+            subcommands = [["--slave", subcmd["link"], subcmd["name"], subcmd["path"]] for subcmd in self.subcommands]
             cmd += [item for sublist in subcommands for item in sublist]

-        self.result['changed'] = True
+        self.result["changed"] = True
         self.messages.append(f"Install alternative '{self.path}' for '{self.name}'.")

         if not self.module.check_mode:
             self.module.run_command(cmd, check_rc=True)

         if self.module._diff:
-            self.result['diff']['after'] = dict(
+            self.result["diff"]["after"] = dict(
                 state=AlternativeState.PRESENT,
                 path=self.path,
                 family=self.family,
@@ -249,20 +263,18 @@ def install(self):
                 link=self.link,
             )
             if self.subcommands:
-                self.result['diff']['after'].update(dict(
-                    subcommands=self.subcommands
-                ))
+                self.result["diff"]["after"].update(dict(subcommands=self.subcommands))

     def remove(self):
-        cmd = [self.UPDATE_ALTERNATIVES, '--remove', self.name, self.path]
-        self.result['changed'] = True
+        cmd = [self.UPDATE_ALTERNATIVES, "--remove", self.name, self.path]
+        self.result["changed"] = True
         self.messages.append(f"Remove alternative '{self.path}' from '{self.name}'.")
         if not self.module.check_mode:
             self.module.run_command(cmd, check_rc=True)

         if self.module._diff:
-            self.result['diff']['after'] = dict(state=AlternativeState.ABSENT)
+            self.result["diff"]["after"] = dict(state=AlternativeState.ABSENT)

     def set(self):
         # Path takes precedence over family as it is more specific
@@ -271,61 +283,61 @@ def set(self):
         else:
             arg = self.path

-        cmd = [self.UPDATE_ALTERNATIVES, '--set', self.name, arg]
-        self.result['changed'] = True
+        cmd = [self.UPDATE_ALTERNATIVES, "--set", self.name, arg]
+        self.result["changed"] = True
         self.messages.append(f"Set alternative '{arg}' for '{self.name}'.")

         if not self.module.check_mode:
             self.module.run_command(cmd, check_rc=True)

         if self.module._diff:
-            self.result['diff']['after']['state'] = AlternativeState.SELECTED
+            self.result["diff"]["after"]["state"] = AlternativeState.SELECTED

     def auto(self):
-        cmd = [self.UPDATE_ALTERNATIVES, '--auto', self.name]
+        cmd = [self.UPDATE_ALTERNATIVES, "--auto", self.name]
         self.messages.append(f"Set alternative to auto for '{self.name}'.")
-        self.result['changed'] = True
+        self.result["changed"] = True

         if not self.module.check_mode:
             self.module.run_command(cmd, check_rc=True)

         if self.module._diff:
-            self.result['diff']['after']['state'] = AlternativeState.PRESENT
+            self.result["diff"]["after"]["state"] = AlternativeState.PRESENT

     @property
     def name(self):
-        return self.module.params.get('name')
+        return self.module.params.get("name")

     @property
     def path(self):
-        return self.module.params.get('path')
+        return self.module.params.get("path")

     @property
     def family(self):
-        return self.module.params.get('family')
+        return self.module.params.get("family")

     @property
     def link(self):
-        return self.module.params.get('link') or self.current_link
+        return self.module.params.get("link") or self.current_link

     @property
     def priority(self):
-        if self.module.params.get('priority') is not None:
-            return self.module.params.get('priority')
-        return self.current_alternatives.get(self.path, {}).get('priority', 50)
+        if self.module.params.get("priority") is not None:
+            return self.module.params.get("priority")
+        return self.current_alternatives.get(self.path, {}).get("priority", 50)

     @property
     def subcommands(self):
-        if self.module.params.get('subcommands') is not None:
-            return self.module.params.get('subcommands')
-        elif self.path in self.current_alternatives and self.current_alternatives[self.path].get('subcommands'):
-            return self.current_alternatives[self.path].get('subcommands')
+        if self.module.params.get("subcommands") is not None:
+            return self.module.params.get("subcommands")
+        elif self.path in self.current_alternatives and self.current_alternatives[self.path].get("subcommands"):
+            return self.current_alternatives[self.path].get("subcommands")
         return None

     @property
     def UPDATE_ALTERNATIVES(self):
         if self._UPDATE_ALTERNATIVES is None:
-            self._UPDATE_ALTERNATIVES = self.module.get_bin_path('update-alternatives', True)
+            self._UPDATE_ALTERNATIVES = self.module.get_bin_path("update-alternatives", True)
         return self._UPDATE_ALTERNATIVES

     def parse(self):
@@ -335,21 +347,21 @@ def parse(self):
         self.current_alternatives = {}

         # Run `update-alternatives --display <name>` to find existing alternatives
-        (rc, display_output, dummy) = self.module.run_command(
-            [self.UPDATE_ALTERNATIVES, '--display', self.name]
-        )
+        (rc, display_output, dummy) = self.module.run_command([self.UPDATE_ALTERNATIVES, "--display", self.name])

         if rc != 0:
             self.module.debug(f"No current alternative found. '{self.UPDATE_ALTERNATIVES}' exited with {rc}")
             return

-        current_mode_regex = re.compile(r'\s-\s(?:status\sis\s)?(\w*)(?:\smode|.)$', re.MULTILINE)
-        current_path_regex = re.compile(r'^\s*link currently points to (.*)$', re.MULTILINE)
-        current_link_regex = re.compile(r'^\s*link \w+ is (.*)$', re.MULTILINE)
-        subcmd_path_link_regex = re.compile(r'^\s*(?:slave|follower) (\S+) is (.*)$', re.MULTILINE)
+        current_mode_regex = re.compile(r"\s-\s(?:status\sis\s)?(\w*)(?:\smode|.)$", re.MULTILINE)
+        current_path_regex = re.compile(r"^\s*link currently points to (.*)$", re.MULTILINE)
+        current_link_regex = re.compile(r"^\s*link \w+ is (.*)$", re.MULTILINE)
+        subcmd_path_link_regex = re.compile(r"^\s*(?:slave|follower) (\S+) is (.*)$", re.MULTILINE)

-        alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s(\S+)\s)?priority\s(\d+)((?:\s+(?:slave|follower).*)*)', re.MULTILINE)
-        subcmd_regex = re.compile(r'^\s+(?:slave|follower) (.*): (.*)$', re.MULTILINE)
+        alternative_regex = re.compile(
+            r"^(\/.*)\s-\s(?:family\s(\S+)\s)?priority\s(\d+)((?:\s+(?:slave|follower).*)*)", re.MULTILINE
+        )
+        subcmd_regex = re.compile(r"^\s+(?:slave|follower) (.*): (.*)$", re.MULTILINE)

         match = current_mode_regex.search(display_output)
         if not match:
@@ -371,67 +383,69 @@ def parse(self):
         subcmd_path_map = dict(subcmd_path_link_regex.findall(display_output))

         if not subcmd_path_map and self.subcommands:
-            subcmd_path_map = {s['name']: s['link'] for s in self.subcommands}
+            subcmd_path_map = {s["name"]: s["link"] for s in self.subcommands}

         for path, family, prio, subcmd in alternative_regex.findall(display_output):
             self.current_alternatives[path] = dict(
                 priority=int(prio),
                 family=family,
-                subcommands=[dict(
-                    name=name,
-                    path=spath,
-                    link=subcmd_path_map.get(name)
-                ) for name, spath in subcmd_regex.findall(subcmd) if spath != '(null)']
+                subcommands=[
+                    dict(name=name, path=spath, link=subcmd_path_map.get(name))
+                    for name, spath in subcmd_regex.findall(subcmd)
+                    if spath != "(null)"
+                ],
             )

         if self.module._diff:
             if self.path in self.current_alternatives:
-                self.result['diff']['before'].update(dict(
-                    state=AlternativeState.PRESENT,
-                    path=self.path,
-                    priority=self.current_alternatives[self.path].get('priority'),
-                    link=self.current_link,
-                ))
-                if self.current_alternatives[self.path].get('subcommands'):
-                    self.result['diff']['before'].update(dict(
-                        subcommands=self.current_alternatives[self.path].get('subcommands')
-                    ))
-                if self.current_mode == 'manual' and self.current_path != self.path:
-                    self.result['diff']['before'].update(dict(
-                        state=AlternativeState.SELECTED
-                    ))
+                self.result["diff"]["before"].update(
+                    dict(
+                        state=AlternativeState.PRESENT,
+                        path=self.path,
+                        priority=self.current_alternatives[self.path].get("priority"),
+                        link=self.current_link,
+                    )
+                )
+                if self.current_alternatives[self.path].get("subcommands"):
+                    self.result["diff"]["before"].update(
+                        dict(subcommands=self.current_alternatives[self.path].get("subcommands"))
+                    )
+                if self.current_mode == "manual" and self.current_path != self.path:
+                    self.result["diff"]["before"].update(dict(state=AlternativeState.SELECTED))
             else:
-                self.result['diff']['before'].update(dict(
-                    state=AlternativeState.ABSENT
-                ))
+                self.result["diff"]["before"].update(dict(state=AlternativeState.ABSENT))


 def main():
-
     module = AnsibleModule(
         argument_spec=dict(
-            name=dict(type='str', required=True),
-            path=dict(type='path'),
-            family=dict(type='str'),
-            link=dict(type='path'),
-            priority=dict(type='int'),
+            name=dict(type="str", required=True),
+            path=dict(type="path"),
+            family=dict(type="str"),
+            link=dict(type="path"),
+            priority=dict(type="int"),
             state=dict(
-                type='str',
+                type="str",
                 choices=AlternativeState.to_list(),
                 default=AlternativeState.SELECTED,
             ),
-            subcommands=dict(type='list', elements='dict', aliases=['slaves'], options=dict(
-                name=dict(type='str', required=True),
-                path=dict(type='path', required=True),
-                link=dict(type='path', required=True),
-            )),
+            subcommands=dict(
+                type="list",
+                elements="dict",
+                aliases=["slaves"],
+                options=dict(
+                    name=dict(type="str", required=True),
+                    path=dict(type="path", required=True),
+                    link=dict(type="path", required=True),
+                ),
+            ),
         ),
         supports_check_mode=True,
-        required_one_of=[('path', 'family')]
+        required_one_of=[("path", "family")],
     )

     AlternativesModule(module)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/android_sdk.py b/plugins/modules/android_sdk.py
index 523ea2bbde6..76ef57a2487 100644
--- a/plugins/modules/android_sdk.py
+++ b/plugins/modules/android_sdk.py
@@ -140,19 +140,19 @@ class AndroidSdk(StateModuleHelper):
     module = dict(
         argument_spec=dict(
-            state=dict(type='str', default='present', choices=['present', 'absent', 'latest']),
-            package=dict(type='list', elements='str', aliases=['pkg', 'name']),
-            sdk_root=dict(type='path'),
-            channel=dict(type='str', default='stable', choices=['stable', 'beta', 'dev', 'canary']),
-            accept_licenses=dict(type='bool', default=False)
+            state=dict(type="str", default="present", choices=["present", "absent", "latest"]),
+            package=dict(type="list", elements="str", aliases=["pkg", "name"]),
+            sdk_root=dict(type="path"),
+            channel=dict(type="str", default="stable", choices=["stable", "beta", "dev", "canary"]),
+            accept_licenses=dict(type="bool", default=False),
         ),
-        supports_check_mode=True
+        supports_check_mode=True,
     )

     def __init_module__(self):
         self.sdkmanager = AndroidSdkManager(self.module)
-        self.vars.set('installed', [], change=True)
-        self.vars.set('removed', [], change=True)
+        self.vars.set("installed", [], change=True)
+        self.vars.set("removed", [], change=True)

     def _parse_packages(self):
         arg_pkgs = set(self.vars.package)
@@ -203,5 +203,5 @@ def main():
     AndroidSdk.execute()


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/ansible_galaxy_install.py b/plugins/modules/ansible_galaxy_install.py
index ab9a57afb9c..b2e371f4820 100644
--- a/plugins/modules/ansible_galaxy_install.py
+++ b/plugins/modules/ansible_galaxy_install.py
@@ -193,39 +193,39 @@


 class AnsibleGalaxyInstall(ModuleHelper):
-    _RE_GALAXY_VERSION = re.compile(r'^ansible-galaxy(?: \[core)? (?P<version>\d+\.\d+\.\d+)(?:\.\w+)?(?:\])?')
-    _RE_LIST_PATH = re.compile(r'^# (?P<path>.*)$')
-    _RE_LIST_COLL = re.compile(r'^(?P<elem>\w+\.\w+)\s+(?P<version>[\d\.]+)\s*$')
-    _RE_LIST_ROLE = re.compile(r'^- (?P<elem>\w+\.\w+),\s+(?P<version>[\d\.]+)\s*$')
+    _RE_GALAXY_VERSION = re.compile(r"^ansible-galaxy(?: \[core)? (?P<version>\d+\.\d+\.\d+)(?:\.\w+)?(?:\])?")
+    _RE_LIST_PATH = re.compile(r"^# (?P<path>.*)$")
+    _RE_LIST_COLL = re.compile(r"^(?P<elem>\w+\.\w+)\s+(?P<version>[\d\.]+)\s*$")
+    _RE_LIST_ROLE = re.compile(r"^- (?P<elem>\w+\.\w+),\s+(?P<version>[\d\.]+)\s*$")
     _RE_INSTALL_OUTPUT = re.compile(
-        r'^(?:(?P<collection>\w+\.\w+):(?P<cversion>[\d\.]+)|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\)) was installed successfully$'
+        r"^(?:(?P<collection>\w+\.\w+):(?P<cversion>[\d\.]+)|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\)) was installed successfully$"
     )
     ansible_version = None

-    output_params = ('type', 'name', 'dest', 'requirements_file', 'force', 'no_deps')
+    output_params = ("type", "name", "dest", "requirements_file", "force", "no_deps")

     module = dict(
         argument_spec=dict(
-            state=dict(type='str', choices=['present', 'latest'], default='present'),
-            type=dict(type='str', choices=('collection', 'role', 'both'), required=True),
-            name=dict(type='str'),
-            requirements_file=dict(type='path'),
-            dest=dict(type='path'),
-            force=dict(type='bool', default=False),
-            no_deps=dict(type='bool', default=False),
+            state=dict(type="str", choices=["present", "latest"], default="present"),
+            type=dict(type="str", choices=("collection", "role", "both"), required=True),
+            name=dict(type="str"),
+            requirements_file=dict(type="path"),
+            dest=dict(type="path"),
+            force=dict(type="bool", default=False),
+            no_deps=dict(type="bool", default=False),
         ),
-        mutually_exclusive=[('name', 'requirements_file')],
-        required_one_of=[('name', 'requirements_file')],
-        required_if=[('type', 'both', ['requirements_file'])],
+        mutually_exclusive=[("name", "requirements_file")],
+        required_one_of=[("name", "requirements_file")],
+        required_if=[("type", "both", ["requirements_file"])],
         supports_check_mode=False,
     )

-    command = 'ansible-galaxy'
+    command = "ansible-galaxy"
     command_args_formats = dict(
-        type=cmd_runner_fmt.as_func(lambda v: [] if v == 'both' else [v]),
+        type=cmd_runner_fmt.as_func(lambda v: [] if v == "both" else [v]),
         galaxy_cmd=cmd_runner_fmt.as_list(),
         upgrade=cmd_runner_fmt.as_bool("--upgrade"),
-        requirements_file=cmd_runner_fmt.as_opt_val('-r'),
-        dest=cmd_runner_fmt.as_opt_val('-p'),
+        requirements_file=cmd_runner_fmt.as_opt_val("-r"),
+        dest=cmd_runner_fmt.as_opt_val("-p"),
         force=cmd_runner_fmt.as_bool("--force"),
         no_deps=cmd_runner_fmt.as_bool("--no-deps"),
         version=cmd_runner_fmt.as_fixed("--version"),
@@ -233,7 +233,9 @@ class AnsibleGalaxyInstall(ModuleHelper):
     )

     def _make_runner(self, lang):
-        return CmdRunner(self.module, command=self.command, arg_formats=self.command_args_formats, force_lang=lang, check_rc=True)
+        return CmdRunner(
+            self.module, command=self.command, arg_formats=self.command_args_formats, force_lang=lang, check_rc=True
+        )

     def _get_ansible_galaxy_version(self):
         class UnsupportedLocale(ModuleHelperException):
@@ -260,7 +262,7 @@ def process(rc, out, err):

     def __init_module__(self):
         self.runner, self.vars.version = self._get_ansible_galaxy_version()
-        self.ansible_version = tuple(int(x) for x in self.vars.version.split('.')[:3])
+        self.ansible_version = tuple(int(x) for x in self.vars.version.split(".")[:3])
         if self.ansible_version < (2, 11):
             self.module.fail_json(msg="Support for Ansible 2.9 and ansible-base 2.10 has been removed.")
         self.vars.set("new_collections", {}, change=True)
@@ -274,8 +276,8 @@ def _list_element(self, _type, path_re, elem_re):
         def process(rc, out, err):
             return [] if "None of the provided paths were usable" in out else out.splitlines()

-        with self.runner('type galaxy_cmd dest', output_process=process, check_rc=False) as ctx:
-            elems = ctx.run(type=_type, galaxy_cmd='list')
+        with self.runner("type galaxy_cmd dest", output_process=process, check_rc=False) as ctx:
+            elems = ctx.run(type=_type, galaxy_cmd="list")

         elems_dict = {}
         current_path = None
@@ -284,27 +286,26 @@ def process(rc, out, err):
                 match = path_re.match(line)
                 if not match:
                     continue
-                if self.vars.dest is not None and match.group('path') != self.vars.dest:
+                if self.vars.dest is not None and match.group("path") != self.vars.dest:
                     current_path = None
                     continue
-                current_path = match.group('path') if match else None
+                current_path = match.group("path") if match else None
                 elems_dict[current_path] = {}
             elif current_path is not None:
                 match = elem_re.match(line)
-                if not match or (self.vars.name is not None and match.group('elem') != self.vars.name):
+                if not match or (self.vars.name is not None and match.group("elem") != self.vars.name):
                     continue
-                elems_dict[current_path][match.group('elem')] = match.group('version')
+                elems_dict[current_path][match.group("elem")] = match.group("version")
         return elems_dict

     def _list_collections(self):
-        return self._list_element('collection', self._RE_LIST_PATH, self._RE_LIST_COLL)
+        return self._list_element("collection", self._RE_LIST_PATH, self._RE_LIST_COLL)

     def _list_roles(self):
-        return self._list_element('role', self._RE_LIST_PATH, self._RE_LIST_ROLE)
+        return self._list_element("role", self._RE_LIST_PATH, self._RE_LIST_ROLE)

     def __run__(self):
-
         def process(rc, out, err):
             for line in out.splitlines():
                 match = self._RE_INSTALL_OUTPUT.match(line)
@@ -315,8 +316,10 @@ def process(rc, out, err):
                 elif match.group("role"):
                     self.vars.new_roles[match.group("role")] = match.group("rversion")

-        upgrade = (self.vars.type == "collection" and self.vars.state == "latest")
-        with self.runner("type galaxy_cmd upgrade force no_deps dest requirements_file name", output_process=process) as ctx:
+        upgrade = self.vars.type == "collection" and self.vars.state == "latest"
+        with self.runner(
+            "type galaxy_cmd upgrade force no_deps dest requirements_file name", output_process=process
+        ) as ctx:
             ctx.run(galaxy_cmd="install", upgrade=upgrade)
             if self.verbosity > 2:
                 self.vars.set("run_info", ctx.run_info)
@@ -326,5 +329,5 @@ def main():
     AnsibleGalaxyInstall.execute()


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/apache2_mod_proxy.py b/plugins/modules/apache2_mod_proxy.py
index 349d2c63a4b..a8dcc7c4a70 100644
--- a/plugins/modules/apache2_mod_proxy.py
+++ b/plugins/modules/apache2_mod_proxy.py
@@ -218,7 +218,9 @@
 from bs4 import BeautifulSoup

 # balancer member attributes extraction regexp:
-EXPRESSION = re.compile(to_text(r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"))
+EXPRESSION = re.compile(
+    to_text(r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)")
+)

 # Apache2 server version extraction regexp:
 APACHE_VERSION_EXPRESSION = re.compile(to_text(r"SERVER VERSION: APACHE/([\d.]+)"))
@@ -228,16 +230,16 @@ def find_all(where, what):


 def regexp_extraction(string, _regexp, groups=1):
-    """ Returns the capture group (default=1) specified in the regexp, applied to the string """
+    """Returns the capture group (default=1) specified in the regexp, applied to the string"""
     regexp_search = _regexp.search(string)
     if regexp_search:
-        if regexp_search.group(groups) != '':
+        if regexp_search.group(groups) != "":
             return regexp_search.group(groups)
     return None


 class BalancerMember:
-    """ Apache 2.4 mod_proxy LB balancer member.
+    """Apache 2.4 mod_proxy LB balancer member.
     attributes:
         read-only:
             host -> member host (string),
@@ -262,11 +264,11 @@ def __init__(self, management_url, balancer_url, module):
         self.module = module

     def get_member_attributes(self):
-        """ Returns a dictionary of a balancer member's attributes."""
+        """Returns a dictionary of a balancer member's attributes."""

-        resp, info = fetch_url(self.module, self.management_url, headers={'Referer': self.management_url})
+        resp, info = fetch_url(self.module, self.management_url, headers={"Referer": self.management_url})

-        if info['status'] != 200:
+        if info["status"] != 200:
             raise ModuleHelperException(f"Could not get balancer_member_page, check for connectivity! {info}")

         try:
@@ -274,36 +276,37 @@ def get_member_attributes(self):
         except TypeError as exc:
             raise ModuleHelperException(f"Cannot parse balancer_member_page HTML! {exc}") from exc

-        subsoup = find_all(find_all(soup, 'table')[1], 'tr')
-        keys = find_all(subsoup[0], 'th')
+        subsoup = find_all(find_all(soup, "table")[1], "tr")
+        keys = find_all(subsoup[0], "th")
         for valuesset in subsoup[1::1]:
             if re.search(pattern=self.host, string=str(valuesset)):
-                values = find_all(valuesset, 'td')
+                values = find_all(valuesset, "td")
                 return {keys[x].string: values[x].string for x in range(0, len(keys))}

     def get_member_status(self):
-        """ Returns a dictionary of a balancer member's status attributes."""
-        status_mapping = {'disabled': 'Dis',
-                          'drained': 'Drn',
-                          'hot_standby': 'Stby',
-                          'ignore_errors': 'Ign'}
-        actual_status = self.attributes['Status']
+        """Returns a dictionary of a balancer member's status attributes."""
+        status_mapping = {"disabled": "Dis", "drained": "Drn", "hot_standby": "Stby", "ignore_errors": "Ign"}
+        actual_status = self.attributes["Status"]
         status = {mode: patt in actual_status for mode, patt in status_mapping.items()}
         return status

     def set_member_status(self, values):
-        """ Sets a balancer member's status attributes amongst pre-mapped values."""
-        values_mapping = {'disabled': '&w_status_D',
-                          'drained': '&w_status_N',
-                          'hot_standby': '&w_status_H',
-                          'ignore_errors': '&w_status_I'}
+        """Sets a balancer member's status attributes amongst pre-mapped values."""
+        values_mapping = {
+            "disabled": "&w_status_D",
+            "drained": "&w_status_N",
+            "hot_standby": "&w_status_H",
+            "ignore_errors": "&w_status_I",
+        }

         request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
         values_url = "".join(f"{url_param}={1 if values[mode] else 0}" for mode, url_param in values_mapping.items())
         request_body = f"{request_body}{values_url}"

-        response, info = fetch_url(self.module, self.management_url, data=request_body, headers={'Referer': self.management_url})
-        if info['status'] != 200:
+        response, info = fetch_url(
+            self.module, self.management_url, data=request_body, headers={"Referer": self.management_url}
+        )
+        if info["status"] != 200:
             raise ModuleHelperException(f"Could not set the member status! {self.host} {info['status']}")

     attributes = property(get_member_attributes)
@@ -318,24 +321,24 @@ def as_dict(self):
             "path": self.path,
             "attributes": self.attributes,
             "management_url": self.management_url,
-            "balancer_url": self.balancer_url
+            "balancer_url": self.balancer_url,
         }


 class Balancer:
-    """ Apache httpd 2.4 mod_proxy balancer object"""
+    """Apache httpd 2.4 mod_proxy balancer object"""

     def __init__(self, module, host, suffix, tls=False):
         proto = "https" if tls else "http"
-        self.base_url = f'{proto}://{host}'
-        self.url = f'{proto}://{host}{suffix}'
+        self.base_url = f"{proto}://{host}"
+        self.url = f"{proto}://{host}{suffix}"
         self.module = module
         self.page = self.fetch_balancer_page()

     def fetch_balancer_page(self):
-        """ Returns the balancer management html page as a string for later parsing."""
+        """Returns the balancer management html page as a string for later parsing."""
         resp, info = fetch_url(self.module, self.url)
-        if info['status'] != 200:
+        if info["status"] != 200:
             raise ModuleHelperException(f"Could not get balancer page! HTTP status response: {info['status']}")

         content = to_text(resp.read())
@@ -344,19 +347,21 @@ def fetch_balancer_page(self):
             raise ModuleHelperException("Could not get the Apache server version from the balancer-manager")

         if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
-            raise ModuleHelperException(f"This module only acts on an Apache2 2.4+ instance, current Apache2 version: {apache_version}")
+            raise ModuleHelperException(
+                f"This module only acts on an Apache2 2.4+ instance, current Apache2 version: {apache_version}"
+            )

         return content

     def get_balancer_members(self):
-        """ Returns members of the balancer as a generator object for later iteration."""
+        """Returns members of the balancer as a generator object for later iteration."""
         try:
             soup = BeautifulSoup(self.page)
         except TypeError as e:
             raise ModuleHelperException(f"Cannot parse balancer page HTML! {self.page}") from e

-        elements = find_all(soup, 'a')
+        elements = find_all(soup, "a")
         for element in elements[1::1]:
-            balancer_member_suffix = element.get('href')
+            balancer_member_suffix = element.get("href")
             if not balancer_member_suffix:
                 raise ModuleHelperException("Argument 'balancer_member_suffix' is empty!")
@@ -366,17 +371,22 @@ def get_balancer_members(self):


 class ApacheModProxy(ModuleHelper):
-    """ Initiates module."""
+    """Initiates module."""
+
     module = dict(
         argument_spec=dict(
-            balancer_vhost=dict(required=True, type='str'),
-            balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
-            member_host=dict(type='str'),
-            state=dict(type='list', elements='str', choices=['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']),
-            tls=dict(default=False, type='bool'),
-            validate_certs=dict(default=True, type='bool')
+            balancer_vhost=dict(required=True, type="str"),
+            balancer_url_suffix=dict(default="/balancer-manager/", type="str"),
+            member_host=dict(type="str"),
+            state=dict(
+                type="list",
+                elements="str",
+                choices=["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"],
+            ),
+            tls=dict(default=False, type="bool"),
+            validate_certs=dict(default=True, type="bool"),
         ),
-        supports_check_mode=True
+        supports_check_mode=True,
     )

     def __init_module__(self):
@@ -385,19 +395,21 @@ def __init_module__(self):
         if len(self.vars.state or []) > 1 and ("present" in self.vars.state or "enabled" in self.vars.state):
             self.do_raise(msg="states present/enabled are mutually exclusive with other states!")

-        self.mybalancer = Balancer(self.module, self.vars.balancer_vhost, self.vars.balancer_url_suffix, tls=self.vars.tls)
+        self.mybalancer = Balancer(
+            self.module, self.vars.balancer_vhost, self.vars.balancer_url_suffix, tls=self.vars.tls
+        )

     def __run__(self):
         if self.vars.member_host is None:
             self.vars.members = [member.as_dict() for member in self.mybalancer.members]
         else:
             member_exists = False
-            member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False}
+            member_status = {"disabled": False, "drained": False, "hot_standby": False, "ignore_errors": False}
             for mode in member_status:
                 for state in self.vars.state or []:
                     if mode == state:
                         member_status[mode] = True
-                    elif mode == 'disabled' and state == 'absent':
+                    elif mode == "disabled" and state == "absent":
                         member_status[mode] = True

             for member in self.mybalancer.members:
@@ -409,16 +421,18 @@ def __run__(self):
                     member_status_after = member.status = member_status
                 else:
                     member_status_after = member_status
-                self.changed |= (member_status_before != member_status_after)
+                self.changed |= member_status_before != member_status_after
                 self.vars.member = member.as_dict()

             if not member_exists:
-                self.do_raise(msg=f'{self.vars.member_host} is not a member of the balancer {self.vars.balancer_vhost}!')
+                self.do_raise(
+                    msg=f"{self.vars.member_host} is not a member of the balancer {self.vars.balancer_vhost}!"
+                )


 def main():
     ApacheModProxy.execute()


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/apache2_module.py b/plugins/modules/apache2_module.py
index 6e3f10203c1..c71a8a2ba47 100644
--- a/plugins/modules/apache2_module.py
+++ b/plugins/modules/apache2_module.py
@@ -118,7 +118,7 @@
 # import module snippets
 from ansible.module_utils.basic import AnsibleModule

-_re_threaded = re.compile(r'threaded: *yes')
+_re_threaded = re.compile(r"threaded: *yes")


 def _run_threaded(module):
@@ -129,7 +129,7 @@ def _run_threaded(module):


 def _get_ctl_binary(module):
-    for command in ['apache2ctl', 'apachectl']:
+    for command in ["apache2ctl", "apachectl"]:
         ctl_binary = module.get_bin_path(command)
         if ctl_binary is not None:
             return ctl_binary
@@ -143,9 +143,9 @@ def _module_is_enabled(module):
     if result != 0:
         error_msg = f"Error executing {control_binary}: {stderr}"

-        if module.params['ignore_configcheck']:
-            if 'AH00534' in stderr and 'mpm_' in module.params['name']:
-                if module.params['warn_mpm_absent']:
+        if module.params["ignore_configcheck"]:
+            if "AH00534" in stderr and "mpm_" in module.params["name"]:
+                if module.params["warn_mpm_absent"]:
                     module.warn(
                         "No MPM module loaded! apache2 reload AND other module actions"
                         " will fail if no MPM module is loaded immediately."
@@ -169,15 +169,15 @@ def create_apache_identifier(name):

     # a2enmod name replacement to apache2ctl -M names
     text_workarounds = [
-        ('shib', 'mod_shib'),
-        ('shib2', 'mod_shib'),
-        ('evasive', 'evasive20_module'),
+        ("shib", "mod_shib"),
+        ("shib2", "mod_shib"),
+        ("evasive", "evasive20_module"),
     ]

     # re expressions to extract subparts of names
     re_workarounds = [
-        ('php8', re.compile(r'^(php)[\d\.]+')),
-        ('php', re.compile(r'^(php\d)\.')),
+        ("php8", re.compile(r"^(php)[\d\.]+")),
+        ("php", re.compile(r"^(php\d)\.")),
     ]

     for a2enmod_spelling, module_name in text_workarounds:
@@ -196,12 +196,12 @@ def create_apache_identifier(name):


 def _set_state(module, state):
-    name = module.params['name']
-    force = module.params['force']
+    name = module.params["name"]
+    force = module.params["force"]

-    want_enabled = state == 'present'
-    state_string = {'present': 'enabled', 'absent': 'disabled'}[state]
-    a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state]
+    want_enabled = state == "present"
+    state_string = {"present": "enabled", "absent": "disabled"}[state]
+    a2mod_binary = {"present": "a2enmod", "absent": "a2dismod"}[state]
     success_msg = f"Module {name} {state_string}"

     if _module_is_enabled(module) != want_enabled:
@@ -210,13 +210,15 @@ def _set_state(module, state):

         a2mod_binary_path = module.get_bin_path(a2mod_binary)
         if a2mod_binary_path is None:
-            module.fail_json(msg=f"{a2mod_binary} not found. Perhaps this system does not use {a2mod_binary} to manage apache")
+            module.fail_json(
+                msg=f"{a2mod_binary} not found. Perhaps this system does not use {a2mod_binary} to manage apache"
+            )

         a2mod_binary_cmd = [a2mod_binary_path]

         if not want_enabled and force:
             # force exists only for a2dismod on debian
-            a2mod_binary_cmd.append('-f')
+            a2mod_binary_cmd.append("-f")

         result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name])

@@ -224,15 +226,12 @@ def _set_state(module, state):
             module.exit_json(changed=True, result=success_msg)
         else:
             msg = (
-                f'Failed to set module {name} to {state_string}:\n'
-                f'{stdout}\n'
-                f'Maybe the module identifier ({module.params["identifier"]}) was guessed incorrectly.'
+ f"Failed to set module {name} to {state_string}:\n" + f"{stdout}\n" + f"Maybe the module identifier ({module.params['identifier']}) was guessed incorrectly." 'Consider setting the "identifier" option.' ) - module.fail_json(msg=msg, - rc=result, - stdout=stdout, - stderr=stderr) + module.fail_json(msg=msg, rc=result, stdout=stdout, stderr=stderr) else: module.exit_json(changed=False, result=success_msg) @@ -241,25 +240,25 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), - identifier=dict(type='str'), - force=dict(type='bool', default=False), - state=dict(default='present', choices=['absent', 'present']), - ignore_configcheck=dict(type='bool', default=False), - warn_mpm_absent=dict(type='bool', default=True), + identifier=dict(type="str"), + force=dict(type="bool", default=False), + state=dict(default="present", choices=["absent", "present"]), + ignore_configcheck=dict(type="bool", default=False), + warn_mpm_absent=dict(type="bool", default=True), ), supports_check_mode=True, ) - name = module.params['name'] - if name == 'cgi' and module.params['state'] == 'present' and _run_threaded(module): + name = module.params["name"] + if name == "cgi" and module.params["state"] == "present" and _run_threaded(module): module.fail_json(msg="Your MPM seems to be threaded, therefore enabling cgi module is not allowed.") - if not module.params['identifier']: - module.params['identifier'] = create_apache_identifier(module.params['name']) + if not module.params["identifier"]: + module.params["identifier"] = create_apache_identifier(module.params["name"]) - if module.params['state'] in ['present', 'absent']: - _set_state(module, module.params['state']) + if module.params["state"] in ["present", "absent"]: + _set_state(module, module.params["state"]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/apk.py b/plugins/modules/apk.py index b65326094be..19fd3c694e4 100644 --- a/plugins/modules/apk.py +++ b/plugins/modules/apk.py @@ -166,14 +166,15 @@ """ import re + # Import module snippets. 
from ansible.module_utils.basic import AnsibleModule def parse_for_packages(stdout): packages = [] - data = stdout.split('\n') - regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)') + data = stdout.split("\n") + regex = re.compile(r"^\(\d+/\d+\)\s+\S+\s+(\S+)") for l in data: p = regex.search(l) if p: @@ -187,7 +188,7 @@ def update_package_db(module, exit): if rc != 0: module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr) elif exit: - module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr) + module.exit_json(changed=True, msg="updated repository indexes", stdout=stdout, stderr=stderr) else: return True @@ -253,8 +254,10 @@ def upgrade_packages(module, available): packagelist = parse_for_packages(stdout) if rc != 0: module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist) - if re.search(r'^OK', stdout): - module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist) + if re.search(r"^OK", stdout): + module.exit_json( + changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist + ) module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist) @@ -268,12 +271,12 @@ def install_packages(module, names, state, world): # Get virtual package dependencies dependencies = get_dependencies(module, name) for dependency in dependencies: - if state == 'latest' and not query_latest(module, dependency): + if state == "latest" and not query_latest(module, dependency): to_upgrade.append(dependency) else: if not query_toplevel(module, name, world): to_install.append(name) - elif state == 'latest' and not query_latest(module, name): + elif state == "latest" and not query_latest(module, name): to_upgrade.append(name) if to_upgrade: upgrade = True @@ -294,7 +297,9 @@ def install_packages(module, names, state, world): packagelist = parse_for_packages(stdout) if rc != 0: module.fail_json(msg=f"failed to install {packages}", stdout=stdout, stderr=stderr, packages=packagelist) - module.exit_json(changed=True, msg=f"installed {packages} package(s)", stdout=stdout, stderr=stderr, packages=packagelist) + module.exit_json( + changed=True, msg=f"installed {packages} package(s)", stdout=stdout, stderr=stderr, packages=packagelist + ) def remove_packages(module, names): @@ -318,7 +323,10 @@ def remove_packages(module, names): break if rc != 0: module.fail_json(msg=f"failed to remove {names} package(s)", stdout=stdout, stderr=stderr, packages=packagelist) - module.exit_json(changed=True, msg=f"removed {names} package(s)", stdout=stdout, stderr=stderr, packages=packagelist) + module.exit_json( + changed=True, msg=f"removed {names} package(s)", stdout=stdout, stderr=stderr, packages=packagelist + ) + # ========================================== # Main control flow. 
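For readers skimming the apk.py hunks above: parse_for_packages() filters apk's stdout with the regex r"^\(\d+/\d+\)\s+\S+\s+(\S+)" to collect the names of the packages apk actually touched. Below is a minimal standalone sketch of that extraction, separate from the patch itself; the sample lines are invented for illustration, assuming apk's usual "(i/n) Installing <name> (<version>)" progress format.

import re

# Same pattern as parse_for_packages() above: "(i/n) <verb> <name> ..." lines.
regex = re.compile(r"^\(\d+/\d+\)\s+\S+\s+(\S+)")

sample_stdout = "\n".join(
    [
        "(1/2) Installing busybox (1.36.1-r5)",  # hypothetical apk progress line
        "(2/2) Installing curl (8.5.0-r0)",  # hypothetical apk progress line
        "OK: 12 MiB in 34 packages",  # summary line; yields no match
    ]
)

packages = []
for line in sample_stdout.split("\n"):
    match = regex.search(line)
    if match:
        packages.append(match.group(1))

print(packages)  # ['busybox', 'curl']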
@@ -327,56 +335,56 @@ def remove_packages(module, names): def main(): module = AnsibleModule( argument_spec=dict( - state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']), - name=dict(type='list', elements='str'), - no_cache=dict(default=False, type='bool'), - repository=dict(type='list', elements='str'), - update_cache=dict(default=False, type='bool'), - upgrade=dict(default=False, type='bool'), - available=dict(default=False, type='bool'), - world=dict(default='/etc/apk/world', type='str'), + state=dict(default="present", choices=["present", "installed", "absent", "removed", "latest"]), + name=dict(type="list", elements="str"), + no_cache=dict(default=False, type="bool"), + repository=dict(type="list", elements="str"), + update_cache=dict(default=False, type="bool"), + upgrade=dict(default=False, type="bool"), + available=dict(default=False, type="bool"), + world=dict(default="/etc/apk/world", type="str"), ), - required_one_of=[['name', 'update_cache', 'upgrade']], - mutually_exclusive=[['name', 'upgrade']], - supports_check_mode=True + required_one_of=[["name", "update_cache", "upgrade"]], + mutually_exclusive=[["name", "upgrade"]], + supports_check_mode=True, ) # Set LANG env since we parse stdout - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C") global APK_PATH - APK_PATH = [module.get_bin_path('apk', required=True)] + APK_PATH = [module.get_bin_path("apk", required=True)] p = module.params - if p['name'] and any(not name.strip() for name in p['name']): + if p["name"] and any(not name.strip() for name in p["name"]): module.fail_json(msg="Package name(s) cannot be empty or whitespace-only") - if p['no_cache']: + if p["no_cache"]: APK_PATH.append("--no-cache") # add repositories to the APK_PATH - if p['repository']: - for r in p['repository']: + if p["repository"]: + for r in p["repository"]: APK_PATH.extend(["--repository", r, "--repositories-file", "/dev/null"]) # normalize the state parameter - if p['state'] in ['present', 'installed']: - p['state'] = 'present' - if p['state'] in ['absent', 'removed']: - p['state'] = 'absent' + if p["state"] in ["present", "installed"]: + p["state"] = "present" + if p["state"] in ["absent", "removed"]: + p["state"] = "absent" - if p['update_cache']: - update_package_db(module, not p['name'] and not p['upgrade']) + if p["update_cache"]: + update_package_db(module, not p["name"] and not p["upgrade"]) - if p['upgrade']: - upgrade_packages(module, p['available']) + if p["upgrade"]: + upgrade_packages(module, p["available"]) - if p['state'] in ['present', 'latest']: - install_packages(module, p['name'], p['state'], p['world']) - elif p['state'] == 'absent': - remove_packages(module, p['name']) + if p["state"] in ["present", "latest"]: + install_packages(module, p["name"], p["state"], p["world"]) + elif p["state"] == "absent": + remove_packages(module, p["name"]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/apt_repo.py b/plugins/modules/apt_repo.py index f90631195d0..c1ce5b3db4b 100644 --- a/plugins/modules/apt_repo.py +++ b/plugins/modules/apt_repo.py @@ -92,54 +92,54 @@ def apt_repo(module, *args): def add_repo(module, repo): """add a repository""" - apt_repo(module, 'add', repo) + apt_repo(module, "add", repo) def rm_repo(module, repo): """remove a repository""" - apt_repo(module, 'rm', repo) + apt_repo(module, "rm", repo) def 
set_repo(module, repo): """add a repository and remove other repositories""" # first add to validate repository - apt_repo(module, 'add', repo) - apt_repo(module, 'rm', 'all') - apt_repo(module, 'add', repo) + apt_repo(module, "add", repo) + apt_repo(module, "rm", "all") + apt_repo(module, "add", repo) def update(module): """update package cache""" - apt_repo(module, 'update') + apt_repo(module, "update") def main(): module = AnsibleModule( argument_spec=dict( - repo=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), - remove_others=dict(type='bool', default=False), - update=dict(type='bool', default=False), + repo=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["absent", "present"]), + remove_others=dict(type="bool", default=False), + update=dict(type="bool", default=False), ), ) if not os.path.exists(APT_REPO_PATH): - module.fail_json(msg='cannot find /usr/bin/apt-repo') + module.fail_json(msg="cannot find /usr/bin/apt-repo") params = module.params - repo = params['repo'] - state = params['state'] + repo = params["repo"] + state = params["state"] old_repositories = apt_repo(module) - if state == 'present': - if params['remove_others']: + if state == "present": + if params["remove_others"]: set_repo(module, repo) else: add_repo(module, repo) - elif state == 'absent': + elif state == "absent": rm_repo(module, repo) - if params['update']: + if params["update"]: update(module) new_repositories = apt_repo(module) @@ -147,5 +147,5 @@ def main(): module.exit_json(changed=changed, repo=repo, state=state) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/apt_rpm.py b/plugins/modules/apt_rpm.py index 2b49f845f70..fd370fbba48 100644 --- a/plugins/modules/apt_rpm.py +++ b/plugins/modules/apt_rpm.py @@ -189,11 +189,11 @@ def check_package_version(module, name): def query_package_provides(module, name, allow_upgrade=False): # rpm -q returns 0 if the package is installed, # 1 if it is not installed - if name.endswith('.rpm'): + if name.endswith(".rpm"): # Likely a local RPM file if not HAS_RPM_PYTHON: module.fail_json( - msg=missing_required_lib('rpm'), + msg=missing_required_lib("rpm"), exception=RPM_PYTHON_IMPORT_ERROR, ) @@ -238,7 +238,6 @@ def update_kernel(module): def remove_packages(module, packages): - if packages is None: return (False, "Empty package list") @@ -263,7 +262,6 @@ def remove_packages(module, packages): def install_packages(module, pkgspec, allow_upgrade=False): - if pkgspec is None: return (False, "Empty package list") @@ -293,12 +291,16 @@ def install_packages(module, pkgspec, allow_upgrade=False): def main(): module = AnsibleModule( argument_spec=dict( - state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed', 'present_not_latest', 'latest']), - update_cache=dict(type='bool', default=False), - clean=dict(type='bool', default=False), - dist_upgrade=dict(type='bool', default=False), - update_kernel=dict(type='bool', default=False), - package=dict(type='list', elements='str', aliases=['name', 'pkg']), + state=dict( + type="str", + default="present", + choices=["absent", "installed", "present", "removed", "present_not_latest", "latest"], + ), + update_cache=dict(type="bool", default=False), + clean=dict(type="bool", default=False), + dist_upgrade=dict(type="bool", default=False), + update_kernel=dict(type="bool", default=False), + package=dict(type="list", elements="str", aliases=["name", "pkg"]), ), ) @@ 
-310,30 +312,30 @@ def main(): modified = False output = "" - if p['update_cache']: + if p["update_cache"]: update_package_db(module) - if p['clean']: + if p["clean"]: (m, out) = clean(module) modified = modified or m - if p['dist_upgrade']: + if p["dist_upgrade"]: (m, out) = dist_upgrade(module) modified = modified or m output += out - if p['update_kernel']: + if p["update_kernel"]: (m, out) = update_kernel(module) modified = modified or m output += out - packages = p['package'] - if p['state'] in ['installed', 'present', 'present_not_latest', 'latest']: - (m, out) = install_packages(module, packages, allow_upgrade=p['state'] == 'latest') + packages = p["package"] + if p["state"] in ["installed", "present", "present_not_latest", "latest"]: + (m, out) = install_packages(module, packages, allow_upgrade=p["state"] == "latest") modified = modified or m output += out - if p['state'] in ['absent', 'removed']: + if p["state"] in ["absent", "removed"]: (m, out) = remove_packages(module, packages) modified = modified or m output += out @@ -342,5 +344,5 @@ def main(): module.exit_json(changed=modified, msg=output) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/archive.py b/plugins/modules/archive.py index f931a925250..8161f441303 100644 --- a/plugins/modules/archive.py +++ b/plugins/modules/archive.py @@ -194,14 +194,14 @@ from ansible.module_utils.common.text.converters import to_bytes, to_native -STATE_ABSENT = 'absent' -STATE_ARCHIVED = 'archive' -STATE_COMPRESSED = 'compress' -STATE_INCOMPLETE = 'incomplete' +STATE_ABSENT = "absent" +STATE_ARCHIVED = "archive" +STATE_COMPRESSED = "compress" +STATE_INCOMPLETE = "incomplete" def common_path(paths): - empty = b'' if paths and isinstance(paths[0], bytes) else '' + empty = b"" if paths and isinstance(paths[0], bytes) else "" return os.path.join( os.path.dirname(os.path.commonprefix([os.path.join(os.path.dirname(p), empty) for p in paths])), empty @@ -213,7 +213,7 @@ def expand_paths(paths): is_globby = False for path in paths: b_path = _to_bytes(path) - if b'*' in b_path or b'?' in b_path: + if b"*" in b_path or b"?" 
in b_path: e_paths = glob.glob(b_path) is_globby = True else: @@ -227,34 +227,34 @@ def matches_exclusion_patterns(path, exclusion_patterns): def is_archive(path): - return re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(path), re.IGNORECASE) + return re.search(rb"\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$", os.path.basename(path), re.IGNORECASE) def strip_prefix(prefix, string): - return string[len(prefix):] if string.startswith(prefix) else string + return string[len(prefix) :] if string.startswith(prefix) else string def _to_bytes(s): - return to_bytes(s, errors='surrogate_or_strict') + return to_bytes(s, errors="surrogate_or_strict") def _to_native(s): - return to_native(s, errors='surrogate_or_strict') + return to_native(s, errors="surrogate_or_strict") def _to_native_ascii(s): - return to_native(s, errors='surrogate_or_strict', encoding='ascii') + return to_native(s, errors="surrogate_or_strict", encoding="ascii") class Archive(metaclass=abc.ABCMeta): def __init__(self, module): self.module = module - self.destination = _to_bytes(module.params['dest']) if module.params['dest'] else None - self.exclusion_patterns = module.params['exclusion_patterns'] or [] - self.format = module.params['format'] - self.must_archive = module.params['force_archive'] - self.remove = module.params['remove'] + self.destination = _to_bytes(module.params["dest"]) if module.params["dest"] else None + self.exclusion_patterns = module.params["exclusion_patterns"] or [] + self.format = module.params["format"] + self.must_archive = module.params["force_archive"] + self.remove = module.params["remove"] self.changed = False self.destination_state = STATE_ABSENT @@ -264,18 +264,18 @@ def __init__(self, module): self.targets = [] self.not_found = [] - paths = module.params['path'] + paths = module.params["path"] self.expanded_paths, has_globs = expand_paths(paths) - self.expanded_exclude_paths = expand_paths(module.params['exclude_path'])[0] + self.expanded_exclude_paths = expand_paths(module.params["exclude_path"])[0] self.paths = sorted(set(self.expanded_paths) - set(self.expanded_exclude_paths)) if not self.paths: module.fail_json( - path=', '.join(paths), - expanded_paths=_to_native(b', '.join(self.expanded_paths)), - expanded_exclude_paths=_to_native(b', '.join(self.expanded_exclude_paths)), - msg='Error, no source paths were found' + path=", ".join(paths), + expanded_paths=_to_native(b", ".join(self.expanded_paths)), + expanded_exclude_paths=_to_native(b", ".join(self.expanded_exclude_paths)), + msg="Error, no source paths were found", ) self.root = common_path(self.paths) @@ -284,13 +284,13 @@ def __init__(self, module): self.must_archive = any([has_globs, os.path.isdir(self.paths[0]), len(self.paths) > 1]) if not self.destination and not self.must_archive: - self.destination = b'%s.%s' % (self.paths[0], _to_bytes(self.format)) + self.destination = b"%s.%s" % (self.paths[0], _to_bytes(self.format)) if self.must_archive and not self.destination: module.fail_json( dest=_to_native(self.destination), - path=', '.join(paths), - msg='Error, must specify "dest" when archiving multiple files or trees' + path=", ".join(paths), + msg='Error, must specify "dest" when archiving multiple files or trees', ) if self.remove: @@ -305,18 +305,18 @@ def add(self, path, archive_name): if self.contains(_to_native(archive_name)): self.successes.append(path) except Exception as e: - self.errors.append(f'{_to_native_ascii(path)}: {e}') + self.errors.append(f"{_to_native_ascii(path)}: {e}") def 
add_single_target(self, path): - if self.format in ('zip', 'tar'): + if self.format in ("zip", "tar"): self.open() self.add(path, strip_prefix(self.root, path)) self.close() self.destination_state = STATE_ARCHIVED else: try: - f_out = self._open_compressed_file(_to_native_ascii(self.destination), 'wb') - with open(path, 'rb') as f_in: + f_out = self._open_compressed_file(_to_native_ascii(self.destination), "wb") + with open(path, "rb") as f_in: shutil.copyfileobj(f_in, f_out) f_out.close() self.successes.append(path) @@ -325,7 +325,8 @@ def add_single_target(self, path): self.module.fail_json( path=_to_native(path), dest=_to_native(self.destination), - msg=f'Unable to write to compressed file: {e}', exception=format_exc() + msg=f"Unable to write to compressed file: {e}", + exception=format_exc(), ) def add_targets(self): @@ -344,13 +345,13 @@ def add_targets(self): else: self.add(target, strip_prefix(self.root, target)) except Exception as e: - if self.format in ('zip', 'tar'): + if self.format in ("zip", "tar"): archive_format = self.format else: archive_format = f"tar.{self.format}" self.module.fail_json( - msg=f'Error when writing {archive_format} archive at {_to_native(self.destination)}: {e}', - exception=format_exc() + msg=f"Error when writing {archive_format} archive at {_to_native(self.destination)}: {e}", + exception=format_exc(), ) self.close() @@ -397,8 +398,7 @@ def remove_single_target(self, path): os.remove(path) except OSError as e: self.module.fail_json( - path=_to_native(path), - msg=f'Unable to remove source file: {e}', exception=format_exc() + path=_to_native(path), msg=f"Unable to remove source file: {e}", exception=format_exc() ) def remove_targets(self): @@ -420,7 +420,7 @@ def remove_targets(self): if self.errors: self.module.fail_json( - dest=_to_native(self.destination), msg='Error deleting some source files: ', files=self.errors + dest=_to_native(self.destination), msg="Error deleting some source files: ", files=self.errors ) def update_permissions(self): @@ -430,31 +430,31 @@ def update_permissions(self): @property def result(self): return { - 'archived': [_to_native(p) for p in self.successes], - 'dest': _to_native(self.destination), - 'dest_state': self.destination_state, - 'changed': self.changed, - 'arcroot': _to_native(self.root), - 'missing': [_to_native(p) for p in self.not_found], - 'expanded_paths': [_to_native(p) for p in self.expanded_paths], - 'expanded_exclude_paths': [_to_native(p) for p in self.expanded_exclude_paths], + "archived": [_to_native(p) for p in self.successes], + "dest": _to_native(self.destination), + "dest_state": self.destination_state, + "changed": self.changed, + "arcroot": _to_native(self.root), + "missing": [_to_native(p) for p in self.not_found], + "expanded_paths": [_to_native(p) for p in self.expanded_paths], + "expanded_exclude_paths": [_to_native(p) for p in self.expanded_exclude_paths], } def _check_removal_safety(self): for path in self.paths: - if os.path.isdir(path) and self.destination.startswith(os.path.join(path, b'')): + if os.path.isdir(path) and self.destination.startswith(os.path.join(path, b"")): self.module.fail_json( - path=b', '.join(self.paths), - msg='Error, created archive can not be contained in source paths when remove=true' + path=b", ".join(self.paths), + msg="Error, created archive can not be contained in source paths when remove=true", ) def _open_compressed_file(self, path, mode): f = None - if self.format == 'gz': + if self.format == "gz": f = gzip.open(path, mode) - elif self.format == 'bz2': + 
elif self.format == "bz2": f = bz2.BZ2File(path, mode) - elif self.format == 'xz': + elif self.format == "xz": f = lzma.LZMAFile(path, mode) else: self.module.fail_json(msg=f"{self.format} is not a valid format") @@ -497,7 +497,7 @@ def contains(self, name): return True def open(self): - self.file = zipfile.ZipFile(_to_native_ascii(self.destination), 'w', zipfile.ZIP_DEFLATED, True) + self.file = zipfile.ZipFile(_to_native_ascii(self.destination), "w", zipfile.ZIP_DEFLATED, True) def _add(self, path, archive_name): if not matches_exclusion_patterns(path, self.exclusion_patterns): @@ -505,7 +505,7 @@ def _add(self, path, archive_name): def _get_checksums(self, path): try: - archive = zipfile.ZipFile(_to_native_ascii(path), 'r') + archive = zipfile.ZipFile(_to_native_ascii(path), "r") checksums = set((info.filename, info.CRC) for info in archive.infolist()) archive.close() except BadZipFile: @@ -520,8 +520,8 @@ def __init__(self, module): def close(self): self.file.close() - if self.format == 'xz': - with lzma.open(_to_native(self.destination), 'wb') as f: + if self.format == "xz": + with lzma.open(_to_native(self.destination), "wb") as f: f.write(self.fileIO.getvalue()) self.fileIO.close() @@ -533,15 +533,15 @@ def contains(self, name): return True def open(self): - if self.format in ('gz', 'bz2'): + if self.format in ("gz", "bz2"): self.file = tarfile.open(_to_native_ascii(self.destination), f"w|{self.format}") # python3 tarfile module allows xz format but for python2 we have to create the tarfile # in memory and then compress it with lzma. - elif self.format == 'xz': + elif self.format == "xz": self.fileIO = io.BytesIO() - self.file = tarfile.open(fileobj=self.fileIO, mode='w') - elif self.format == 'tar': - self.file = tarfile.open(_to_native_ascii(self.destination), 'w') + self.file = tarfile.open(fileobj=self.fileIO, mode="w") + elif self.format == "tar": + self.file = tarfile.open(_to_native_ascii(self.destination), "w") else: self.module.fail_json(msg=f"{self.format} is not a valid archive format") @@ -555,8 +555,8 @@ def _get_checksums(self, path): LZMAError = lzma.LZMAError try: - if self.format == 'xz': - with lzma.open(_to_native_ascii(path), 'r') as f: + if self.format == "xz": + with lzma.open(_to_native_ascii(path), "r") as f: archive = tarfile.open(fileobj=f) checksums = set((info.name, info.chksum) for info in archive.getmembers()) archive.close() @@ -568,14 +568,14 @@ def _get_checksums(self, path): try: # The python implementations of gzip, bz2, and lzma do not support restoring compressed files # to their original names so only file checksum is returned - f = self._open_compressed_file(_to_native_ascii(path), 'r') + f = self._open_compressed_file(_to_native_ascii(path), "r") checksum = 0 while True: chunk = f.read(16 * 1024 * 1024) if not chunk: break checksum = crc32(chunk, checksum) - checksums = set([(b'', checksum)]) + checksums = set([(b"", checksum)]) f.close() except Exception: checksums = set() @@ -583,7 +583,7 @@ def _get_checksums(self, path): def get_archive(module): - if module.params['format'] == 'zip': + if module.params["format"] == "zip": return ZipArchive(module) else: return TarArchive(module) @@ -592,13 +592,13 @@ def get_archive(module): def main(): module = AnsibleModule( argument_spec=dict( - path=dict(type='list', elements='path', required=True), - format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']), - dest=dict(type='path'), - exclude_path=dict(type='list', elements='path', default=[]), - 
exclusion_patterns=dict(type='list', elements='path'), - force_archive=dict(type='bool', default=False), - remove=dict(type='bool', default=False), + path=dict(type="list", elements="path", required=True), + format=dict(type="str", default="gz", choices=["bz2", "gz", "tar", "xz", "zip"]), + dest=dict(type="path"), + exclude_path=dict(type="list", elements="path", default=[]), + exclusion_patterns=dict(type="list", elements="path"), + force_archive=dict(type="bool", default=False), + remove=dict(type="bool", default=False), ), add_file_common_args=True, supports_check_mode=True, @@ -638,5 +638,5 @@ def main(): module.exit_json(**archive.result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/atomic_container.py b/plugins/modules/atomic_container.py index 9051705f120..c22377c45ea 100644 --- a/plugins/modules/atomic_container.py +++ b/plugins/modules/atomic_container.py @@ -107,11 +107,18 @@ def do_install(module, mode, rootfs, container, image, values_list, backend): - system_list = ["--system"] if mode == 'system' else [] - user_list = ["--user"] if mode == 'user' else [] + system_list = ["--system"] if mode == "system" else [] + user_list = ["--user"] if mode == "user" else [] rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else [] - atomic_bin = module.get_bin_path('atomic') - args = [atomic_bin, 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image] + atomic_bin = module.get_bin_path("atomic") + args = ( + [atomic_bin, "install", "--storage=%s" % backend, "--name=%s" % container] + + system_list + + user_list + + rootfs_list + + values_list + + [image] + ) rc, out, err = module.run_command(args, check_rc=False) if rc != 0: module.fail_json(rc=rc, msg=err) @@ -121,8 +128,8 @@ def do_install(module, mode, rootfs, container, image, values_list, backend): def do_update(module, container, image, values_list): - atomic_bin = module.get_bin_path('atomic') - args = [atomic_bin, 'containers', 'update', "--rebase=%s" % image] + values_list + [container] + atomic_bin = module.get_bin_path("atomic") + args = [atomic_bin, "containers", "update", "--rebase=%s" % image] + values_list + [container] rc, out, err = module.run_command(args, check_rc=False) if rc != 0: module.fail_json(rc=rc, msg=err) @@ -132,8 +139,8 @@ def do_update(module, container, image, values_list): def do_uninstall(module, name, backend): - atomic_bin = module.get_bin_path('atomic') - args = [atomic_bin, 'uninstall', "--storage=%s" % backend, name] + atomic_bin = module.get_bin_path("atomic") + args = [atomic_bin, "uninstall", "--storage=%s" % backend, name] rc, out, err = module.run_command(args, check_rc=False) if rc != 0: module.fail_json(rc=rc, msg=err) @@ -141,8 +148,8 @@ def do_uninstall(module, name, backend): def do_rollback(module, name): - atomic_bin = module.get_bin_path('atomic') - args = [atomic_bin, 'containers', 'rollback', name] + atomic_bin = module.get_bin_path("atomic") + args = [atomic_bin, "containers", "rollback", name] rc, out, err = module.run_command(args, check_rc=False) if rc != 0: module.fail_json(rc=rc, msg=err) @@ -152,65 +159,76 @@ def do_rollback(module, name): def core(module): - mode = module.params['mode'] - name = module.params['name'] - image = module.params['image'] - rootfs = module.params['rootfs'] - values = module.params['values'] - backend = module.params['backend'] - state = module.params['state'] + mode = module.params["mode"] + name = module.params["name"] + image = 
module.params["image"] + rootfs = module.params["rootfs"] + values = module.params["values"] + backend = module.params["backend"] + state = module.params["state"] - atomic_bin = module.get_bin_path('atomic') - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + atomic_bin = module.get_bin_path("atomic") + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C") values_list = ["--set=%s" % x for x in values] if values else [] - args = [atomic_bin, 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name] + args = [ + atomic_bin, + "containers", + "list", + "--no-trunc", + "-n", + "--all", + "-f", + "backend=%s" % backend, + "-f", + "container=%s" % name, + ] rc, out, err = module.run_command(args, check_rc=False) if rc != 0: module.fail_json(rc=rc, msg=err) return present = name in out - if state == 'present' and present: + if state == "present" and present: module.exit_json(msg=out, changed=False) - elif (state in ['latest', 'present']) and not present: + elif (state in ["latest", "present"]) and not present: do_install(module, mode, rootfs, name, image, values_list, backend) - elif state == 'latest': + elif state == "latest": do_update(module, name, image, values_list) - elif state == 'absent': + elif state == "absent": if not present: module.exit_json(msg="The container is not present", changed=False) else: do_uninstall(module, name, backend) - elif state == 'rollback': + elif state == "rollback": do_rollback(module, name) def main(): module = AnsibleModule( argument_spec=dict( - mode=dict(choices=['user', 'system']), + mode=dict(choices=["user", "system"]), name=dict(required=True), image=dict(required=True), rootfs=dict(), - state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']), - backend=dict(required=True, choices=['docker', 'ostree']), - values=dict(type='list', default=[], elements='str'), + state=dict(default="latest", choices=["present", "absent", "latest", "rollback"]), + backend=dict(required=True, choices=["docker", "ostree"]), + values=dict(type="list", default=[], elements="str"), ), ) - if module.params['values'] is not None and module.params['mode'] == 'default': + if module.params["values"] is not None and module.params["mode"] == "default": module.fail_json(msg="values is supported only with user or system mode") # Verify that the platform supports atomic command - dummy = module.get_bin_path('atomic', required=True) + dummy = module.get_bin_path("atomic", required=True) try: core(module) except Exception as e: - module.fail_json(msg='Unanticipated error running atomic: %s' % to_native(e), exception=traceback.format_exc()) + module.fail_json(msg="Unanticipated error running atomic: %s" % to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/atomic_host.py b/plugins/modules/atomic_host.py index 470e65c919c..eabdc80de73 100644 --- a/plugins/modules/atomic_host.py +++ b/plugins/modules/atomic_host.py @@ -65,19 +65,19 @@ def core(module): - revision = module.params['revision'] - atomic_bin = module.get_bin_path('atomic', required=True) + revision = module.params["revision"] + atomic_bin = module.get_bin_path("atomic", required=True) - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C") - if revision == 'latest': - args = [atomic_bin, 'host', 'upgrade'] 
+ if revision == "latest": + args = [atomic_bin, "host", "upgrade"] else: - args = [atomic_bin, 'host', 'deploy', revision] + args = [atomic_bin, "host", "deploy", revision] rc, out, err = module.run_command(args, check_rc=False) - if rc == 77 and revision == 'latest': + if rc == 77 and revision == "latest": module.exit_json(msg="Already on latest", changed=False) elif rc != 0: module.fail_json(rc=rc, msg=err) @@ -88,7 +88,7 @@ def core(module): def main(): module = AnsibleModule( argument_spec=dict( - revision=dict(type='str', default='latest', aliases=["version"]), + revision=dict(type="str", default="latest", aliases=["version"]), ), ) @@ -102,5 +102,5 @@ def main(): module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/atomic_image.py b/plugins/modules/atomic_image.py index 0c3025b75fd..8b31d1eae95 100644 --- a/plugins/modules/atomic_image.py +++ b/plugins/modules/atomic_image.py @@ -82,48 +82,48 @@ def do_upgrade(module, image): - atomic_bin = module.get_bin_path('atomic') - args = [atomic_bin, 'update', '--force', image] + atomic_bin = module.get_bin_path("atomic") + args = [atomic_bin, "update", "--force", image] rc, out, err = module.run_command(args, check_rc=False) if rc != 0: # something went wrong emit the msg module.fail_json(rc=rc, msg=err) - elif 'Image is up to date' in out: + elif "Image is up to date" in out: return False return True def core(module): - image = module.params['name'] - state = module.params['state'] - started = module.params['started'] - backend = module.params['backend'] + image = module.params["name"] + state = module.params["state"] + started = module.params["started"] + backend = module.params["backend"] is_upgraded = False - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - atomic_bin = module.get_bin_path('atomic') + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C") + atomic_bin = module.get_bin_path("atomic") out = {} err = {} rc = 0 if backend: - if state == 'present' or state == 'latest': - args = [atomic_bin, 'pull', "--storage=%s" % backend, image] + if state == "present" or state == "latest": + args = [atomic_bin, "pull", "--storage=%s" % backend, image] rc, out, err = module.run_command(args, check_rc=False) if rc < 0: module.fail_json(rc=rc, msg=err) else: out_run = "" if started: - args = [atomic_bin, 'run', "--storage=%s" % backend, image] + args = [atomic_bin, "run", "--storage=%s" % backend, image] rc, out_run, err = module.run_command(args, check_rc=False) if rc < 0: module.fail_json(rc=rc, msg=err) changed = "Extracting" in out or "Copying blob" in out module.exit_json(msg=(out + out_run), changed=changed) - elif state == 'absent': - args = [atomic_bin, 'images', 'delete', "--storage=%s" % backend, image] + elif state == "absent": + args = [atomic_bin, "images", "delete", "--storage=%s" % backend, image] rc, out, err = module.run_command(args, check_rc=False) if rc < 0: module.fail_json(rc=rc, msg=err) @@ -132,24 +132,24 @@ def core(module): module.exit_json(msg=out, changed=changed) return - if state == 'present' or state == 'latest': - if state == 'latest': + if state == "present" or state == "latest": + if state == "latest": is_upgraded = do_upgrade(module, image) if started: - args = [atomic_bin, 'run', image] + args = [atomic_bin, "run", image] else: - args = [atomic_bin, 'install', image] - elif state == 'absent': - args = [atomic_bin, 'uninstall', image] + args = 
[atomic_bin, "install", image] + elif state == "absent": + args = [atomic_bin, "uninstall", image] rc, out, err = module.run_command(args, check_rc=False) if rc < 0: module.fail_json(rc=rc, msg=err) - elif rc == 1 and 'already present' in err: + elif rc == 1 and "already present" in err: module.exit_json(restult=err, changed=is_upgraded) - elif started and 'Container is running' in out: + elif started and "Container is running" in out: module.exit_json(result=out, changed=is_upgraded) else: module.exit_json(msg=out, changed=True) @@ -158,15 +158,15 @@ def core(module): def main(): module = AnsibleModule( argument_spec=dict( - backend=dict(type='str', choices=['docker', 'ostree']), - name=dict(type='str', required=True), - state=dict(type='str', default='latest', choices=['absent', 'latest', 'present']), - started=dict(type='bool', default=True), + backend=dict(type="str", choices=["docker", "ostree"]), + name=dict(type="str", required=True), + state=dict(type="str", default="latest", choices=["absent", "latest", "present"]), + started=dict(type="bool", default=True), ), ) # Verify that the platform supports atomic command - dummy = module.get_bin_path('atomic', required=True) + dummy = module.get_bin_path("atomic", required=True) try: core(module) @@ -174,5 +174,5 @@ def main(): module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/awall.py b/plugins/modules/awall.py index 6e5b5b17750..c90e00990c3 100644 --- a/plugins/modules/awall.py +++ b/plugins/modules/awall.py @@ -130,26 +130,26 @@ def disable_policy(module, names, act): def main(): module = AnsibleModule( argument_spec=dict( - state=dict(type='str', default='enabled', choices=['disabled', 'enabled']), - name=dict(type='list', elements='str'), - activate=dict(type='bool', default=False), + state=dict(type="str", default="enabled", choices=["disabled", "enabled"]), + name=dict(type="list", elements="str"), + activate=dict(type="bool", default=False), ), - required_one_of=[['name', 'activate']], + required_one_of=[["name", "activate"]], supports_check_mode=True, ) global AWALL_PATH - AWALL_PATH = module.get_bin_path('awall', required=True) + AWALL_PATH = module.get_bin_path("awall", required=True) p = module.params - if p['name']: - if p['state'] == 'enabled': - enable_policy(module, p['name'], p['activate']) - elif p['state'] == 'disabled': - disable_policy(module, p['name'], p['activate']) + if p["name"]: + if p["state"] == "enabled": + enable_policy(module, p["name"], p["activate"]) + elif p["state"] == "disabled": + disable_policy(module, p["name"], p["activate"]) - if p['activate']: + if p["activate"]: if not module.check_mode: activate(module) module.exit_json(changed=True, msg="activated awall rules") @@ -157,5 +157,5 @@ def main(): module.fail_json(msg="no action defined") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/beadm.py b/plugins/modules/beadm.py index 91128ff686f..247c744c5e8 100644 --- a/plugins/modules/beadm.py +++ b/plugins/modules/beadm.py @@ -146,36 +146,36 @@ class BE: def __init__(self, module): self.module = module - self.name = module.params['name'] - self.snapshot = module.params['snapshot'] - self.description = module.params['description'] - self.options = module.params['options'] - self.mountpoint = module.params['mountpoint'] - self.state = module.params['state'] - self.force = module.params['force'] - self.is_freebsd = os.uname()[0] == 'FreeBSD' + self.name 
= module.params["name"] + self.snapshot = module.params["snapshot"] + self.description = module.params["description"] + self.options = module.params["options"] + self.mountpoint = module.params["mountpoint"] + self.state = module.params["state"] + self.force = module.params["force"] + self.is_freebsd = os.uname()[0] == "FreeBSD" def _beadm_list(self): - cmd = [self.module.get_bin_path('beadm'), 'list', '-H'] - if '@' in self.name: - cmd.append('-s') + cmd = [self.module.get_bin_path("beadm"), "list", "-H"] + if "@" in self.name: + cmd.append("-s") return self.module.run_command(cmd) def _find_be_by_name(self, out): - if '@' in self.name: + if "@" in self.name: for line in out.splitlines(): if self.is_freebsd: check = line.split() if check == []: continue - full_name = check[0].split('/') + full_name = check[0].split("/") if full_name == []: continue check[0] = full_name[len(full_name) - 1] if check[0] == self.name: return check else: - check = line.split(';') + check = line.split(";") if check[0] == self.name: return check else: @@ -185,7 +185,7 @@ def _find_be_by_name(self, out): if check[0] == self.name: return check else: - check = line.split(';') + check = line.split(";") if check[0] == self.name: return check return None @@ -209,35 +209,35 @@ def is_activated(self): if line is None: return False if self.is_freebsd: - if 'R' in line[1]: + if "R" in line[1]: return True else: - if 'R' in line[2]: + if "R" in line[2]: return True return False def activate_be(self): - cmd = [self.module.get_bin_path('beadm'), 'activate', self.name] + cmd = [self.module.get_bin_path("beadm"), "activate", self.name] return self.module.run_command(cmd) def create_be(self): - cmd = [self.module.get_bin_path('beadm'), 'create'] + cmd = [self.module.get_bin_path("beadm"), "create"] if self.snapshot: - cmd.extend(['-e', self.snapshot]) + cmd.extend(["-e", self.snapshot]) if not self.is_freebsd: if self.description: - cmd.extend(['-d', self.description]) + cmd.extend(["-d", self.description]) if self.options: - cmd.extend(['-o', self.options]) + cmd.extend(["-o", self.options]) cmd.append(self.name) return self.module.run_command(cmd) def destroy_be(self): - cmd = [self.module.get_bin_path('beadm'), 'destroy', '-F', self.name] + cmd = [self.module.get_bin_path("beadm"), "destroy", "-F", self.name] return self.module.run_command(cmd) def is_mounted(self): @@ -251,7 +251,7 @@ def is_mounted(self): # On FreeBSD, we exclude currently mounted BE on /, as it is # special and can be activated even if it is mounted. That is not # possible with non-root BEs. 
- if line[2] != '-' and line[2] != '/': + if line[2] != "-" and line[2] != "/": return True else: if line[3]: @@ -260,7 +260,7 @@ def is_mounted(self): return False def mount_be(self): - cmd = [self.module.get_bin_path('beadm'), 'mount', self.name] + cmd = [self.module.get_bin_path("beadm"), "mount", self.name] if self.mountpoint: cmd.append(self.mountpoint) @@ -268,9 +268,9 @@ def mount_be(self): return self.module.run_command(cmd) def unmount_be(self): - cmd = [self.module.get_bin_path('beadm'), 'unmount'] + cmd = [self.module.get_bin_path("beadm"), "unmount"] if self.force: - cmd.append('-f') + cmd.append("-f") cmd.append(self.name) return self.module.run_command(cmd) @@ -279,13 +279,15 @@ def unmount_be(self): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True, aliases=['be']), - snapshot=dict(type='str'), - description=dict(type='str'), - options=dict(type='str'), - mountpoint=dict(type='path'), - state=dict(type='str', default='present', choices=['absent', 'activated', 'mounted', 'present', 'unmounted']), - force=dict(type='bool', default=False), + name=dict(type="str", required=True, aliases=["be"]), + snapshot=dict(type="str"), + description=dict(type="str"), + options=dict(type="str"), + mountpoint=dict(type="path"), + state=dict( + type="str", default="present", choices=["absent", "activated", "mounted", "present", "unmounted"] + ), + force=dict(type="bool", default=False), ), supports_check_mode=True, ) @@ -293,25 +295,25 @@ def main(): be = BE(module) rc = None - out = '' - err = '' + out = "" + err = "" result = {} - result['name'] = be.name - result['state'] = be.state + result["name"] = be.name + result["state"] = be.state if be.snapshot: - result['snapshot'] = be.snapshot + result["snapshot"] = be.snapshot if be.description: - result['description'] = be.description + result["description"] = be.description if be.options: - result['options'] = be.options + result["options"] = be.options if be.mountpoint: - result['mountpoint'] = be.mountpoint + result["mountpoint"] = be.mountpoint - if be.state == 'absent': + if be.state == "absent": # beadm on FreeBSD and Solarish systems differs in delete behaviour in # that we are not allowed to delete activated BE on FreeBSD while on # Solarish systems we cannot delete BE if it is mounted. We add mount @@ -324,19 +326,16 @@ def main(): if be.is_freebsd: if be.is_activated(): - module.fail_json(msg='Unable to remove active BE!') + module.fail_json(msg="Unable to remove active BE!") (rc, out, err) = be.destroy_be() if rc != 0: - module.fail_json(msg=f'Error while destroying BE: "{err}"', - name=be.name, - stderr=err, - rc=rc) + module.fail_json(msg=f'Error while destroying BE: "{err}"', name=be.name, stderr=err, rc=rc) else: - module.fail_json(msg='Unable to remove BE as it is mounted!') + module.fail_json(msg="Unable to remove BE as it is mounted!") - elif be.state == 'present': + elif be.state == "present": if not be.exists(): if module.check_mode: module.exit_json(changed=True) @@ -344,12 +343,9 @@ def main(): (rc, out, err) = be.create_be() if rc != 0: - module.fail_json(msg=f'Error while creating BE: "{err}"', - name=be.name, - stderr=err, - rc=rc) + module.fail_json(msg=f'Error while creating BE: "{err}"', name=be.name, stderr=err, rc=rc) - elif be.state == 'activated': + elif be.state == "activated": if not be.is_activated(): if module.check_mode: module.exit_json(changed=True) @@ -358,16 +354,13 @@ def main(): # an explicit check for that case. 
if be.is_freebsd: if be.is_mounted(): - module.fail_json(msg='Unable to activate mounted BE!') + module.fail_json(msg="Unable to activate mounted BE!") (rc, out, err) = be.activate_be() if rc != 0: - module.fail_json(msg=f'Error while activating BE: "{err}"', - name=be.name, - stderr=err, - rc=rc) - elif be.state == 'mounted': + module.fail_json(msg=f'Error while activating BE: "{err}"', name=be.name, stderr=err, rc=rc) + elif be.state == "mounted": if not be.is_mounted(): if module.check_mode: module.exit_json(changed=True) @@ -375,12 +368,9 @@ def main(): (rc, out, err) = be.mount_be() if rc != 0: - module.fail_json(msg=f'Error while mounting BE: "{err}"', - name=be.name, - stderr=err, - rc=rc) + module.fail_json(msg=f'Error while mounting BE: "{err}"', name=be.name, stderr=err, rc=rc) - elif be.state == 'unmounted': + elif be.state == "unmounted": if be.is_mounted(): if module.check_mode: module.exit_json(changed=True) @@ -388,23 +378,20 @@ def main(): (rc, out, err) = be.unmount_be() if rc != 0: - module.fail_json(msg=f'Error while unmounting BE: "{err}"', - name=be.name, - stderr=err, - rc=rc) + module.fail_json(msg=f'Error while unmounting BE: "{err}"', name=be.name, stderr=err, rc=rc) if rc is None: - result['changed'] = False + result["changed"] = False else: - result['changed'] = True + result["changed"] = True if out: - result['stdout'] = out + result["stdout"] = out if err: - result['stderr'] = err + result["stderr"] = err module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/bigpanda.py b/plugins/modules/bigpanda.py index 0f8ffad456c..1762fa2d5e5 100644 --- a/plugins/modules/bigpanda.py +++ b/plugins/modules/bigpanda.py @@ -141,67 +141,66 @@ def main(): - module = AnsibleModule( argument_spec=dict( - component=dict(required=True, aliases=['name']), + component=dict(required=True, aliases=["name"]), version=dict(required=True), token=dict(required=True, no_log=True), - state=dict(required=True, choices=['started', 'finished', 'failed']), - hosts=dict(aliases=['host']), + state=dict(required=True, choices=["started", "finished", "failed"]), + hosts=dict(aliases=["host"]), env=dict(), owner=dict(), description=dict(), deployment_message=dict(), - source_system=dict(default='ansible'), - validate_certs=dict(default=True, type='bool'), - url=dict(default='https://api.bigpanda.io'), + source_system=dict(default="ansible"), + validate_certs=dict(default=True, type="bool"), + url=dict(default="https://api.bigpanda.io"), ), supports_check_mode=True, ) - token = module.params['token'] - state = module.params['state'] - url = module.params['url'] + token = module.params["token"] + state = module.params["state"] + url = module.params["url"] # Build the common request body body = dict() - for k in ('component', 'version', 'hosts'): + for k in ("component", "version", "hosts"): v = module.params[k] if v is not None: body[k] = v - if body.get('hosts') is None: - body['hosts'] = [socket.gethostname()] + if body.get("hosts") is None: + body["hosts"] = [socket.gethostname()] - if not isinstance(body['hosts'], list): - body['hosts'] = [body['hosts']] + if not isinstance(body["hosts"], list): + body["hosts"] = [body["hosts"]] # Insert state-specific attributes to body - if state == 'started': - for k in ('source_system', 'env', 'owner', 'description'): + if state == "started": + for k in ("source_system", "env", "owner", "description"): v = module.params[k] if v is not None: body[k] = v request_url = 
f"{url}/data/events/deployments/start" else: - message = module.params['deployment_message'] + message = module.params["deployment_message"] if message is not None: - body['errorMessage'] = message + body["errorMessage"] = message - if state == 'finished': - body['status'] = 'success' + if state == "finished": + body["status"] = "success" else: - body['status'] = 'failure' + body["status"] = "failure" request_url = f"{url}/data/events/deployments/end" # Build the deployment object we return deployment = dict(token=token, url=url) deployment.update(body) - if 'errorMessage' in deployment: - message = deployment.pop('errorMessage') - deployment['message'] = message + if "errorMessage" in deployment: + message = deployment.pop("errorMessage") + deployment["message"] = message # If we're in check mode, just exit pretending like we succeeded if module.check_mode: @@ -209,10 +208,10 @@ def main(): # Send the data to bigpanda data = json.dumps(body) - headers = {'Authorization': f'Bearer {token}', 'Content-Type': 'application/json'} + headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"} try: response, info = fetch_url(module, request_url, data=data, headers=headers) - if info['status'] == 200: + if info["status"] == 200: module.exit_json(changed=True, **deployment) else: module.fail_json(msg=json.dumps(info)) @@ -220,5 +219,5 @@ def main(): module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/bitbucket_access_key.py b/plugins/modules/bitbucket_access_key.py index 88a95068056..8971f1dc2fd 100644 --- a/plugins/modules/bitbucket_access_key.py +++ b/plugins/modules/bitbucket_access_key.py @@ -76,15 +76,16 @@ from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper error_messages = { - 'required_key': '`key` is required when the `state` is `present`', - 'required_permission': 'OAuth consumer `client_id` should have permissions to read and administrate the repository', - 'invalid_workspace_or_repo': 'Invalid `repository` or `workspace`', - 'invalid_key': 'Invalid SSH key or key is already in use', + "required_key": "`key` is required when the `state` is `present`", + "required_permission": "OAuth consumer `client_id` should have permissions to read and administrate the repository", + "invalid_workspace_or_repo": "Invalid `repository` or `workspace`", + "invalid_key": "Invalid SSH key or key is already in use", } BITBUCKET_API_ENDPOINTS = { - 'deploy-key-list': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/' % BitbucketHelper.BITBUCKET_API_URL, - 'deploy-key-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/{key_id}' % BitbucketHelper.BITBUCKET_API_URL, + "deploy-key-list": "%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/" % BitbucketHelper.BITBUCKET_API_URL, + "deploy-key-detail": "%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/{key_id}" + % BitbucketHelper.BITBUCKET_API_URL, } @@ -133,29 +134,29 @@ def get_existing_deploy_key(module, bitbucket): } """ content = { - 'next': BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], + "next": BITBUCKET_API_ENDPOINTS["deploy-key-list"].format( + workspace=module.params["workspace"], + repo_slug=module.params["repository"], ) } # Look through the all response pages in search of deploy key we need - while 'next' in content: + while "next" in content: 
info, content = bitbucket.request( - api_url=content['next'], - method='GET', + api_url=content["next"], + method="GET", ) - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_workspace_or_repo']) + if info["status"] == 404: + module.fail_json(msg=error_messages["invalid_workspace_or_repo"]) - if info['status'] == 403: - module.fail_json(msg=error_messages['required_permission']) + if info["status"] == 403: + module.fail_json(msg=error_messages["required_permission"]) - if info['status'] != 200: - module.fail_json(msg=f'Failed to retrieve the list of deploy keys: {info}') + if info["status"] != 200: + module.fail_json(msg=f"Failed to retrieve the list of deploy keys: {info}") - res = next((v for v in content['values'] if v['label'] == module.params['label']), None) + res = next((v for v in content["values"] if v["label"] == module.params["label"]), None) if res is not None: return res @@ -165,60 +166,61 @@ def get_existing_deploy_key(module, bitbucket): def create_deploy_key(module, bitbucket): info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], + api_url=BITBUCKET_API_ENDPOINTS["deploy-key-list"].format( + workspace=module.params["workspace"], + repo_slug=module.params["repository"], ), - method='POST', + method="POST", data={ - 'key': module.params['key'], - 'label': module.params['label'], + "key": module.params["key"], + "label": module.params["label"], }, ) - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_workspace_or_repo']) + if info["status"] == 404: + module.fail_json(msg=error_messages["invalid_workspace_or_repo"]) - if info['status'] == 403: - module.fail_json(msg=error_messages['required_permission']) + if info["status"] == 403: + module.fail_json(msg=error_messages["required_permission"]) - if info['status'] == 400: - module.fail_json(msg=error_messages['invalid_key']) + if info["status"] == 400: + module.fail_json(msg=error_messages["invalid_key"]) - if info['status'] != 200: + if info["status"] != 200: module.fail_json(msg=f"Failed to create deploy key `{module.params['label']}`: {info}") def delete_deploy_key(module, bitbucket, key_id): info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['deploy-key-detail'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], + api_url=BITBUCKET_API_ENDPOINTS["deploy-key-detail"].format( + workspace=module.params["workspace"], + repo_slug=module.params["repository"], key_id=key_id, ), - method='DELETE', + method="DELETE", ) - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_workspace_or_repo']) + if info["status"] == 404: + module.fail_json(msg=error_messages["invalid_workspace_or_repo"]) - if info['status'] == 403: - module.fail_json(msg=error_messages['required_permission']) + if info["status"] == 403: + module.fail_json(msg=error_messages["required_permission"]) - if info['status'] != 204: + if info["status"] != 204: module.fail_json(msg=f"Failed to delete deploy key `{module.params['label']}`: {info}") def main(): argument_spec = BitbucketHelper.bitbucket_argument_spec() argument_spec.update( - repository=dict(type='str', required=True), + repository=dict(type="str", required=True), workspace=dict( - type='str', required=True, + type="str", + required=True, ), - key=dict(type='str', no_log=False), - label=dict(type='str', required=True), - state=dict(type='str', choices=['present', 
'absent'], required=True), + key=dict(type="str", no_log=False), + label=dict(type="str", required=True), + state=dict(type="str", choices=["present", "absent"], required=True), ) module = AnsibleModule( argument_spec=argument_spec, @@ -229,12 +231,12 @@ def main(): bitbucket = BitbucketHelper(module) - key = module.params['key'] - state = module.params['state'] + key = module.params["key"] + state = module.params["state"] # Check parameters - if (key is None) and (state == 'present'): - module.fail_json(msg=error_messages['required_key']) + if (key is None) and (state == "present"): + module.fail_json(msg=error_messages["required_key"]) # Retrieve access token for authorized API requests bitbucket.fetch_access_token() @@ -244,29 +246,29 @@ def main(): changed = False # Create new deploy key in case it doesn't exists - if not existing_deploy_key and (state == 'present'): + if not existing_deploy_key and (state == "present"): if not module.check_mode: create_deploy_key(module, bitbucket) changed = True # Update deploy key if the old value does not match the new one - elif existing_deploy_key and (state == 'present'): - if not key.startswith(existing_deploy_key.get('key')): + elif existing_deploy_key and (state == "present"): + if not key.startswith(existing_deploy_key.get("key")): if not module.check_mode: # Bitbucket doesn't support update key for the same label, # so we need to delete the old one first - delete_deploy_key(module, bitbucket, existing_deploy_key['id']) + delete_deploy_key(module, bitbucket, existing_deploy_key["id"]) create_deploy_key(module, bitbucket) changed = True # Delete deploy key - elif existing_deploy_key and (state == 'absent'): + elif existing_deploy_key and (state == "absent"): if not module.check_mode: - delete_deploy_key(module, bitbucket, existing_deploy_key['id']) + delete_deploy_key(module, bitbucket, existing_deploy_key["id"]) changed = True module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/bitbucket_pipeline_key_pair.py b/plugins/modules/bitbucket_pipeline_key_pair.py index e92652a36ac..7ef3e270025 100644 --- a/plugins/modules/bitbucket_pipeline_key_pair.py +++ b/plugins/modules/bitbucket_pipeline_key_pair.py @@ -73,12 +73,13 @@ from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper error_messages = { - 'invalid_params': 'Account, repository or SSH key pair was not found', - 'required_keys': '`public_key` and `private_key` are required when the `state` is `present`', + "invalid_params": "Account, repository or SSH key pair was not found", + "required_keys": "`public_key` and `private_key` are required when the `state` is `present`", } BITBUCKET_API_ENDPOINTS = { - 'ssh-key-pair': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/key_pair' % BitbucketHelper.BITBUCKET_API_URL, + "ssh-key-pair": "%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/key_pair" + % BitbucketHelper.BITBUCKET_API_URL, } @@ -99,17 +100,17 @@ def get_existing_ssh_key_pair(module, bitbucket): "type": "pipeline_ssh_key_pair" } """ - api_url = BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], + api_url = BITBUCKET_API_ENDPOINTS["ssh-key-pair"].format( + workspace=module.params["workspace"], + repo_slug=module.params["repository"], ) info, content = bitbucket.request( api_url=api_url, - method='GET', + method="GET", ) - if info['status'] == 404: + if 
info["status"] == 404: # Account, repository or SSH key pair was not found. return None @@ -118,48 +119,48 @@ def get_existing_ssh_key_pair(module, bitbucket): def update_ssh_key_pair(module, bitbucket): info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], + api_url=BITBUCKET_API_ENDPOINTS["ssh-key-pair"].format( + workspace=module.params["workspace"], + repo_slug=module.params["repository"], ), - method='PUT', + method="PUT", data={ - 'private_key': module.params['private_key'], - 'public_key': module.params['public_key'], + "private_key": module.params["private_key"], + "public_key": module.params["public_key"], }, ) - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_params']) + if info["status"] == 404: + module.fail_json(msg=error_messages["invalid_params"]) - if info['status'] != 200: - module.fail_json(msg=f'Failed to create or update pipeline ssh key pair : {info}') + if info["status"] != 200: + module.fail_json(msg=f"Failed to create or update pipeline ssh key pair : {info}") def delete_ssh_key_pair(module, bitbucket): info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], + api_url=BITBUCKET_API_ENDPOINTS["ssh-key-pair"].format( + workspace=module.params["workspace"], + repo_slug=module.params["repository"], ), - method='DELETE', + method="DELETE", ) - if info['status'] == 404: - module.fail_json(msg=error_messages['invalid_params']) + if info["status"] == 404: + module.fail_json(msg=error_messages["invalid_params"]) - if info['status'] != 204: - module.fail_json(msg=f'Failed to delete pipeline ssh key pair: {info}') + if info["status"] != 204: + module.fail_json(msg=f"Failed to delete pipeline ssh key pair: {info}") def main(): argument_spec = BitbucketHelper.bitbucket_argument_spec() argument_spec.update( - repository=dict(type='str', required=True), - workspace=dict(type='str', required=True), - public_key=dict(type='str'), - private_key=dict(type='str', no_log=True), - state=dict(type='str', choices=['present', 'absent'], required=True), + repository=dict(type="str", required=True), + workspace=dict(type="str", required=True), + public_key=dict(type="str"), + private_key=dict(type="str", no_log=True), + state=dict(type="str", choices=["present", "absent"], required=True), ) module = AnsibleModule( argument_spec=argument_spec, @@ -170,13 +171,13 @@ def main(): bitbucket = BitbucketHelper(module) - state = module.params['state'] - public_key = module.params['public_key'] - private_key = module.params['private_key'] + state = module.params["state"] + public_key = module.params["public_key"] + private_key = module.params["private_key"] # Check parameters - if ((public_key is None) or (private_key is None)) and (state == 'present'): - module.fail_json(msg=error_messages['required_keys']) + if ((public_key is None) or (private_key is None)) and (state == "present"): + module.fail_json(msg=error_messages["required_keys"]) # Retrieve access token for authorized API requests bitbucket.fetch_access_token() @@ -186,13 +187,13 @@ def main(): changed = False # Create or update key pair - if (not key_pair or (key_pair.get('public_key') != public_key)) and (state == 'present'): + if (not key_pair or (key_pair.get("public_key") != public_key)) and (state == "present"): if not module.check_mode: update_ssh_key_pair(module, bitbucket) 
changed = True # Delete key pair - elif key_pair and (state == 'absent'): + elif key_pair and (state == "absent"): if not module.check_mode: delete_ssh_key_pair(module, bitbucket) changed = True @@ -200,5 +201,5 @@ def main(): module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/bitbucket_pipeline_known_host.py b/plugins/modules/bitbucket_pipeline_known_host.py index 259e9a6c412..95e2c531cd8 100644 --- a/plugins/modules/bitbucket_pipeline_known_host.py +++ b/plugins/modules/bitbucket_pipeline_known_host.py @@ -88,6 +88,7 @@ try: import paramiko + HAS_PARAMIKO = True except ImportError: HAS_PARAMIKO = False @@ -96,13 +97,15 @@ from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper error_messages = { - 'invalid_params': 'Account or repository was not found', - 'unknown_key_type': 'Public key type is unknown', + "invalid_params": "Account or repository was not found", + "unknown_key_type": "Public key type is unknown", } BITBUCKET_API_ENDPOINTS = { - 'known-host-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/' % BitbucketHelper.BITBUCKET_API_URL, - 'known-host-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/{known_host_uuid}' % BitbucketHelper.BITBUCKET_API_URL, + "known-host-list": "%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/" + % BitbucketHelper.BITBUCKET_API_URL, + "known-host-detail": "%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/{known_host_uuid}" + % BitbucketHelper.BITBUCKET_API_URL, } @@ -132,26 +135,26 @@ def get_existing_known_host(module, bitbucket): } """ content = { - 'next': BITBUCKET_API_ENDPOINTS['known-host-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], + "next": BITBUCKET_API_ENDPOINTS["known-host-list"].format( + workspace=module.params["workspace"], + repo_slug=module.params["repository"], ) } # Look through all response pages in search of hostname we need - while 'next' in content: + while "next" in content: info, content = bitbucket.request( - api_url=content['next'], - method='GET', + api_url=content["next"], + method="GET", ) - if info['status'] == 404: - module.fail_json(msg='Invalid `repository` or `workspace`.') + if info["status"] == 404: + module.fail_json(msg="Invalid `repository` or `workspace`.") - if info['status'] != 200: - module.fail_json(msg=f'Failed to retrieve list of known hosts: {info}') + if info["status"] != 200: + module.fail_json(msg=f"Failed to retrieve list of known hosts: {info}") - host = next((v for v in content['values'] if v['hostname'] == module.params['name']), None) + host = next((v for v in content["values"] if v["hostname"] == module.params["name"]), None) if host is not None: return host @@ -179,14 +182,14 @@ def get_host_key(module, hostname): sock = socket.socket() sock.connect((hostname, 22)) except socket.error: - module.fail_json(msg=f'Error opening socket to {hostname}') + module.fail_json(msg=f"Error opening socket to {hostname}") try: trans = paramiko.transport.Transport(sock) trans.start_client() host_key = trans.get_remote_server_key() except paramiko.SSHException: - module.fail_json(msg=f'SSH error on retrieving {hostname} server key') + module.fail_json(msg=f"SSH error on retrieving {hostname} server key") trans.close() sock.close() @@ -198,63 +201,63 @@ def get_host_key(module, hostname): def 
create_known_host(module, bitbucket):
-    hostname = module.params['name']
-    key_param = module.params['key']
+    hostname = module.params["name"]
+    key_param = module.params["key"]
 
     if key_param is None:
         key_type, key = get_host_key(module, hostname)
-    elif ' ' in key_param:
-        key_type, key = key_param.split(' ', 1)
+    elif " " in key_param:
+        key_type, key = key_param.split(" ", 1)
     else:
-        module.fail_json(msg=error_messages['unknown_key_type'])
+        module.fail_json(msg=error_messages["unknown_key_type"])
 
     info, content = bitbucket.request(
-        api_url=BITBUCKET_API_ENDPOINTS['known-host-list'].format(
-            workspace=module.params['workspace'],
-            repo_slug=module.params['repository'],
+        api_url=BITBUCKET_API_ENDPOINTS["known-host-list"].format(
+            workspace=module.params["workspace"],
+            repo_slug=module.params["repository"],
         ),
-        method='POST',
+        method="POST",
         data={
-            'hostname': hostname,
-            'public_key': {
-                'key_type': key_type,
-                'key': key,
-            }
+            "hostname": hostname,
+            "public_key": {
+                "key_type": key_type,
+                "key": key,
+            },
         },
     )
 
-    if info['status'] == 404:
-        module.fail_json(msg=error_messages['invalid_params'])
+    if info["status"] == 404:
+        module.fail_json(msg=error_messages["invalid_params"])
 
-    if info['status'] != 201:
+    if info["status"] != 201:
         module.fail_json(msg=f"Failed to create known host `{module.params['name']}`: {info}")
 
 
 def delete_known_host(module, bitbucket, known_host_uuid):
     info, content = bitbucket.request(
-        api_url=BITBUCKET_API_ENDPOINTS['known-host-detail'].format(
-            workspace=module.params['workspace'],
-            repo_slug=module.params['repository'],
+        api_url=BITBUCKET_API_ENDPOINTS["known-host-detail"].format(
+            workspace=module.params["workspace"],
+            repo_slug=module.params["repository"],
             known_host_uuid=known_host_uuid,
         ),
-        method='DELETE',
+        method="DELETE",
     )
 
-    if info['status'] == 404:
-        module.fail_json(msg=error_messages['invalid_params'])
+    if info["status"] == 404:
+        module.fail_json(msg=error_messages["invalid_params"])
 
-    if info['status'] != 204:
+    if info["status"] != 204:
         module.fail_json(msg=f"Failed to delete known host `{module.params['name']}`: {info}")
 
 
 def main():
     argument_spec = BitbucketHelper.bitbucket_argument_spec()
     argument_spec.update(
-        repository=dict(type='str', required=True),
-        workspace=dict(type='str', required=True),
-        name=dict(type='str', required=True),
-        key=dict(type='str', no_log=False),
-        state=dict(type='str', choices=['present', 'absent'], required=True),
+        repository=dict(type="str", required=True),
+        workspace=dict(type="str", required=True),
+        name=dict(type="str", required=True),
+        key=dict(type="str", no_log=False),
+        state=dict(type="str", choices=["present", "absent"], required=True),
     )
     module = AnsibleModule(
         argument_spec=argument_spec,
@@ -263,8 +266,8 @@ def main():
         required_together=BitbucketHelper.bitbucket_required_together(),
     )
 
-    if (module.params['key'] is None) and (not HAS_PARAMIKO):
-        module.fail_json(msg='`paramiko` package not found, please install it.')
+    if (module.params["key"] is None) and (not HAS_PARAMIKO):
+        module.fail_json(msg="`paramiko` package not found, please install it.")
 
     bitbucket = BitbucketHelper(module)
 
@@ -273,23 +276,23 @@ def main():
     # Retrieve existing known host
     existing_host = get_existing_known_host(module, bitbucket)
 
-    state = module.params['state']
+    state = module.params["state"]
     changed = False
 
     # Create new host in case it doesn't exists
-    if not existing_host and (state == 'present'):
+    if not existing_host and (state == "present"):
         if not module.check_mode:
create_known_host(module, bitbucket) changed = True # Delete host - elif existing_host and (state == 'absent'): + elif existing_host and (state == "absent"): if not module.check_mode: - delete_known_host(module, bitbucket, existing_host['uuid']) + delete_known_host(module, bitbucket, existing_host["uuid"]) changed = True module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/bitbucket_pipeline_variable.py b/plugins/modules/bitbucket_pipeline_variable.py index 2394ba6c1e0..a1a8bfca00c 100644 --- a/plugins/modules/bitbucket_pipeline_variable.py +++ b/plugins/modules/bitbucket_pipeline_variable.py @@ -85,12 +85,14 @@ from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper error_messages = { - 'required_value': '`value` is required when the `state` is `present`', + "required_value": "`value` is required when the `state` is `present`", } BITBUCKET_API_ENDPOINTS = { - 'pipeline-variable-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/' % BitbucketHelper.BITBUCKET_API_URL, - 'pipeline-variable-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/{variable_uuid}' % BitbucketHelper.BITBUCKET_API_URL, + "pipeline-variable-list": "%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/" + % BitbucketHelper.BITBUCKET_API_URL, + "pipeline-variable-detail": "%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/{variable_uuid}" + % BitbucketHelper.BITBUCKET_API_URL, } @@ -115,9 +117,9 @@ def get_existing_pipeline_variable(module, bitbucket): The `value` key in dict is absent in case of secured variable. """ - variables_base_url = BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], + variables_base_url = BITBUCKET_API_ENDPOINTS["pipeline-variable-list"].format( + workspace=module.params["workspace"], + repo_slug=module.params["repository"], ) # Look through the all response pages in search of variable we need page = 1 @@ -125,94 +127,94 @@ def get_existing_pipeline_variable(module, bitbucket): next_url = f"{variables_base_url}?page={page}" info, content = bitbucket.request( api_url=next_url, - method='GET', + method="GET", ) - if info['status'] == 404: - module.fail_json(msg='Invalid `repository` or `workspace`.') + if info["status"] == 404: + module.fail_json(msg="Invalid `repository` or `workspace`.") - if info['status'] != 200: - module.fail_json(msg=f'Failed to retrieve the list of pipeline variables: {info}') + if info["status"] != 200: + module.fail_json(msg=f"Failed to retrieve the list of pipeline variables: {info}") # We are at the end of list - if 'pagelen' in content and content['pagelen'] == 0: + if "pagelen" in content and content["pagelen"] == 0: return None page += 1 - var = next((v for v in content['values'] if v['key'] == module.params['name']), None) + var = next((v for v in content["values"] if v["key"] == module.params["name"]), None) if var is not None: - var['name'] = var.pop('key') + var["name"] = var.pop("key") return var def create_pipeline_variable(module, bitbucket): info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], + api_url=BITBUCKET_API_ENDPOINTS["pipeline-variable-list"].format( + workspace=module.params["workspace"], + 
repo_slug=module.params["repository"], ), - method='POST', + method="POST", data={ - 'key': module.params['name'], - 'value': module.params['value'], - 'secured': module.params['secured'], + "key": module.params["name"], + "value": module.params["value"], + "secured": module.params["secured"], }, ) - if info['status'] != 201: + if info["status"] != 201: module.fail_json(msg=f"Failed to create pipeline variable `{module.params['name']}`: {info}") def update_pipeline_variable(module, bitbucket, variable_uuid): info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], + api_url=BITBUCKET_API_ENDPOINTS["pipeline-variable-detail"].format( + workspace=module.params["workspace"], + repo_slug=module.params["repository"], variable_uuid=variable_uuid, ), - method='PUT', + method="PUT", data={ - 'value': module.params['value'], - 'secured': module.params['secured'], + "value": module.params["value"], + "secured": module.params["secured"], }, ) - if info['status'] != 200: + if info["status"] != 200: module.fail_json(msg=f"Failed to update pipeline variable `{module.params['name']}`: {info}") def delete_pipeline_variable(module, bitbucket, variable_uuid): info, content = bitbucket.request( - api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format( - workspace=module.params['workspace'], - repo_slug=module.params['repository'], + api_url=BITBUCKET_API_ENDPOINTS["pipeline-variable-detail"].format( + workspace=module.params["workspace"], + repo_slug=module.params["repository"], variable_uuid=variable_uuid, ), - method='DELETE', + method="DELETE", ) - if info['status'] != 204: + if info["status"] != 204: module.fail_json(msg=f"Failed to delete pipeline variable `{module.params['name']}`: {info}") class BitBucketPipelineVariable(AnsibleModule): def __init__(self, *args, **kwargs): params = _load_params() or {} - if params.get('secured'): - kwargs['argument_spec']['value'].update({'no_log': True}) + if params.get("secured"): + kwargs["argument_spec"]["value"].update({"no_log": True}) super().__init__(*args, **kwargs) def main(): argument_spec = BitbucketHelper.bitbucket_argument_spec() argument_spec.update( - repository=dict(type='str', required=True), - workspace=dict(type='str', required=True), - name=dict(type='str', required=True), - value=dict(type='str'), - secured=dict(type='bool', default=False), - state=dict(type='str', choices=['present', 'absent'], required=True), + repository=dict(type="str", required=True), + workspace=dict(type="str", required=True), + name=dict(type="str", required=True), + value=dict(type="str"), + secured=dict(type="bool", default=False), + state=dict(type="str", choices=["present", "absent"], required=True), ) module = BitBucketPipelineVariable( argument_spec=argument_spec, @@ -223,13 +225,13 @@ def main(): bitbucket = BitbucketHelper(module) - value = module.params['value'] - state = module.params['state'] - secured = module.params['secured'] + value = module.params["value"] + state = module.params["state"] + secured = module.params["secured"] # Check parameters - if (value is None) and (state == 'present'): - module.fail_json(msg=error_messages['required_value']) + if (value is None) and (state == "present"): + module.fail_json(msg=error_messages["required_value"]) # Retrieve access token for authorized API requests bitbucket.fetch_access_token() @@ -239,26 +241,26 @@ def main(): changed = False # Create new variable in case it doesn't 
exists - if not existing_variable and (state == 'present'): + if not existing_variable and (state == "present"): if not module.check_mode: create_pipeline_variable(module, bitbucket) changed = True # Update variable if it is secured or the old value does not match the new one - elif existing_variable and (state == 'present'): - if (existing_variable['secured'] != secured) or (existing_variable.get('value') != value): + elif existing_variable and (state == "present"): + if (existing_variable["secured"] != secured) or (existing_variable.get("value") != value): if not module.check_mode: - update_pipeline_variable(module, bitbucket, existing_variable['uuid']) + update_pipeline_variable(module, bitbucket, existing_variable["uuid"]) changed = True # Delete variable - elif existing_variable and (state == 'absent'): + elif existing_variable and (state == "absent"): if not module.check_mode: - delete_pipeline_variable(module, bitbucket, existing_variable['uuid']) + delete_pipeline_variable(module, bitbucket, existing_variable["uuid"]) changed = True module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/bootc_manage.py b/plugins/modules/bootc_manage.py index d854f866bfc..2ecca2ef9fc 100644 --- a/plugins/modules/bootc_manage.py +++ b/plugins/modules/bootc_manage.py @@ -55,38 +55,40 @@ def main(): argument_spec = dict( - state=dict(type='str', required=True, choices=['switch', 'latest']), - image=dict(type='str'), + state=dict(type="str", required=True, choices=["switch", "latest"]), + image=dict(type="str"), ) module = AnsibleModule( argument_spec=argument_spec, required_if=[ - ('state', 'switch', ['image']), + ("state", "switch", ["image"]), ], ) - state = module.params['state'] - image = module.params['image'] + state = module.params["state"] + image = module.params["image"] - if state == 'switch': - command = ['bootc', 'switch', image, '--retain'] - elif state == 'latest': - command = ['bootc', 'upgrade'] + if state == "switch": + command = ["bootc", "switch", image, "--retain"] + elif state == "latest": + command = ["bootc", "upgrade"] locale = get_best_parsable_locale(module) - module.run_command_environ_update = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale) + module.run_command_environ_update = dict( + LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale + ) rc, stdout, err = module.run_command(command, check_rc=True) - if 'Queued for next boot: ' in stdout: - result = {'changed': True, 'stdout': stdout} + if "Queued for next boot: " in stdout: + result = {"changed": True, "stdout": stdout} module.exit_json(**result) - elif 'No changes in ' in stdout or 'Image specification is unchanged.' in stdout: - result = {'changed': False, 'stdout': stdout} + elif "No changes in " in stdout or "Image specification is unchanged." 
in stdout: + result = {"changed": False, "stdout": stdout} module.exit_json(**result) else: - result = {'changed': False, 'stderr': err} - module.fail_json(msg='ERROR: Command execution failed.', **result) + result = {"changed": False, "stderr": err} + module.fail_json(msg="ERROR: Command execution failed.", **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/bower.py b/plugins/modules/bower.py index 7dc45e25303..bb6adcb9188 100644 --- a/plugins/modules/bower.py +++ b/plugins/modules/bower.py @@ -99,14 +99,14 @@ class Bower: def __init__(self, module, **kwargs): self.module = module - self.name = kwargs['name'] - self.offline = kwargs['offline'] - self.production = kwargs['production'] - self.path = kwargs['path'] - self.relative_execpath = kwargs['relative_execpath'] - self.version = kwargs['version'] - - if kwargs['version']: + self.name = kwargs["name"] + self.offline = kwargs["offline"] + self.production = kwargs["production"] + self.path = kwargs["path"] + self.relative_execpath = kwargs["relative_execpath"] + self.version = kwargs["version"] + + if kwargs["version"]: self.name_version = f"{self.name}#{self.version}" else: self.name_version = self.name @@ -123,16 +123,16 @@ def _exec(self, args, run_in_check_mode=False, check_rc=True): cmd.append("bower") cmd.extend(args) - cmd.extend(['--config.interactive=false', '--allow-root']) + cmd.extend(["--config.interactive=false", "--allow-root"]) if self.name: cmd.append(self.name_version) if self.offline: - cmd.append('--offline') + cmd.append("--offline") if self.production: - cmd.append('--production') + cmd.append("--production") # If path is specified, cd into that path and run the command. cwd = None @@ -145,25 +145,27 @@ def _exec(self, args, run_in_check_mode=False, check_rc=True): rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) return out - return '' + return "" def list(self): - cmd = ['list', '--json'] + cmd = ["list", "--json"] installed = list() missing = list() outdated = list() data = json.loads(self._exec(cmd, True, False)) - if 'dependencies' in data: - for dep in data['dependencies']: - dep_data = data['dependencies'][dep] - if dep_data.get('missing', False): + if "dependencies" in data: + for dep in data["dependencies"]: + dep_data = data["dependencies"][dep] + if dep_data.get("missing", False): missing.append(dep) - elif ('version' in dep_data['pkgMeta'] and - 'update' in dep_data and - dep_data['pkgMeta']['version'] != dep_data['update']['latest']): + elif ( + "version" in dep_data["pkgMeta"] + and "update" in dep_data + and dep_data["pkgMeta"]["version"] != dep_data["update"]["latest"] + ): outdated.append(dep) - elif dep_data.get('incompatible', False): + elif dep_data.get("incompatible", False): outdated.append(dep) else: installed.append(dep) @@ -174,49 +176,62 @@ def list(self): return installed, missing, outdated def install(self): - return self._exec(['install']) + return self._exec(["install"]) def update(self): - return self._exec(['update']) + return self._exec(["update"]) def uninstall(self): - return self._exec(['uninstall']) + return self._exec(["uninstall"]) def main(): arg_spec = dict( name=dict(), - offline=dict(default=False, type='bool'), - production=dict(default=False, type='bool'), - path=dict(required=True, type='path'), - relative_execpath=dict(type='path'), - state=dict(default='present', choices=['present', 'absent', 'latest', ]), + offline=dict(default=False, type="bool"), + production=dict(default=False, type="bool"), + 
path=dict(required=True, type="path"), + relative_execpath=dict(type="path"), + state=dict( + default="present", + choices=[ + "present", + "absent", + "latest", + ], + ), version=dict(), ) - module = AnsibleModule( - argument_spec=arg_spec + module = AnsibleModule(argument_spec=arg_spec) + + name = module.params["name"] + offline = module.params["offline"] + production = module.params["production"] + path = module.params["path"] + relative_execpath = module.params["relative_execpath"] + state = module.params["state"] + version = module.params["version"] + + if state == "absent" and not name: + module.fail_json(msg="uninstalling a package is only available for named packages") + + bower = Bower( + module, + name=name, + offline=offline, + production=production, + path=path, + relative_execpath=relative_execpath, + version=version, ) - name = module.params['name'] - offline = module.params['offline'] - production = module.params['production'] - path = module.params['path'] - relative_execpath = module.params['relative_execpath'] - state = module.params['state'] - version = module.params['version'] - - if state == 'absent' and not name: - module.fail_json(msg='uninstalling a package is only available for named packages') - - bower = Bower(module, name=name, offline=offline, production=production, path=path, relative_execpath=relative_execpath, version=version) - changed = False - if state == 'present': + if state == "present": installed, missing, outdated = bower.list() if missing: changed = True bower.install() - elif state == 'latest': + elif state == "latest": installed, missing, outdated = bower.list() if missing or outdated: changed = True @@ -230,5 +245,5 @@ def main(): module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/btrfs_info.py b/plugins/modules/btrfs_info.py index e05b6e6c6db..efe529b7864 100644 --- a/plugins/modules/btrfs_info.py +++ b/plugins/modules/btrfs_info.py @@ -82,10 +82,7 @@ def run_module(): module_args = dict() - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) provider = BtrfsFilesystemsProvider(module) filesystems = [x.get_summary() for x in provider.get_filesystems()] @@ -99,5 +96,5 @@ def main(): run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/btrfs_subvolume.py b/plugins/modules/btrfs_subvolume.py index 28b0f2a1299..5c4045747a6 100644 --- a/plugins/modules/btrfs_subvolume.py +++ b/plugins/modules/btrfs_subvolume.py @@ -204,7 +204,11 @@ returned: Success and subvolume exists after module execution """ -from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider, BtrfsCommands, BtrfsModuleException +from ansible_collections.community.general.plugins.module_utils.btrfs import ( + BtrfsFilesystemsProvider, + BtrfsCommands, + BtrfsModuleException, +) from ansible_collections.community.general.plugins.module_utils.btrfs import normalize_subvolume_path from ansible.module_utils.basic import AnsibleModule import os @@ -212,17 +216,16 @@ class BtrfsSubvolumeModule: - - __BTRFS_ROOT_SUBVOLUME = '/' + __BTRFS_ROOT_SUBVOLUME = "/" __BTRFS_ROOT_SUBVOLUME_ID = 5 __BTRFS_SUBVOLUME_INODE_NUMBER = 256 - __CREATE_SUBVOLUME_OPERATION = 'create' - __CREATE_SNAPSHOT_OPERATION = 'snapshot' - __DELETE_SUBVOLUME_OPERATION = 'delete' - __SET_DEFAULT_SUBVOLUME_OPERATION = 'set-default' + 
__CREATE_SUBVOLUME_OPERATION = "create"
+    __CREATE_SNAPSHOT_OPERATION = "snapshot"
+    __DELETE_SUBVOLUME_OPERATION = "delete"
+    __SET_DEFAULT_SUBVOLUME_OPERATION = "set-default"
 
-    __UNKNOWN_SUBVOLUME_ID = '?'
+    __UNKNOWN_SUBVOLUME_ID = "?"
 
     def __init__(self, module):
         self.module = module
@@ -230,18 +233,18 @@ def __init__(self, module):
         self.__provider = BtrfsFilesystemsProvider(module)
 
         # module parameters
-        name = self.module.params['name']
+        name = self.module.params["name"]
         self.__name = normalize_subvolume_path(name) if name is not None else None
-        self.__state = self.module.params['state']
-
-        self.__automount = self.module.params['automount']
-        self.__default = self.module.params['default']
-        self.__filesystem_device = self.module.params['filesystem_device']
-        self.__filesystem_label = self.module.params['filesystem_label']
-        self.__filesystem_uuid = self.module.params['filesystem_uuid']
-        self.__recursive = self.module.params['recursive']
-        self.__snapshot_conflict = self.module.params['snapshot_conflict']
-        snapshot_source = self.module.params['snapshot_source']
+        self.__state = self.module.params["state"]
+
+        self.__automount = self.module.params["automount"]
+        self.__default = self.module.params["default"]
+        self.__filesystem_device = self.module.params["filesystem_device"]
+        self.__filesystem_label = self.module.params["filesystem_label"]
+        self.__filesystem_uuid = self.module.params["filesystem_uuid"]
+        self.__recursive = self.module.params["recursive"]
+        self.__snapshot_conflict = self.module.params["snapshot_conflict"]
+        snapshot_source = self.module.params["snapshot_source"]
         self.__snapshot_source = normalize_subvolume_path(snapshot_source) if snapshot_source is not None else None
 
         # execution state
@@ -286,25 +289,31 @@ def __load_filesystem(self):
             if not self.__automount:
                 raise BtrfsModuleException(
                     f"Target filesystem uuid={filesystem.uuid} is not currently mounted and automount=False."
-                    "Mount explicitly before module execution or pass automount=True")
+                    " Mount explicitly before module execution or pass automount=True"
+                )
             elif self.module.check_mode:
                 # TODO is failing the module an appropriate outcome in this scenario?
                 raise BtrfsModuleException(
                     f"Target filesystem uuid={filesystem.uuid} is not currently mounted.
Unable to validate the current"
-                    "state while running with check_mode=True")
+                    " state while running with check_mode=True"
+                )
             else:
                 self.__mount_subvolume_id_to_tempdir(filesystem, self.__BTRFS_ROOT_SUBVOLUME_ID)
                 filesystem.refresh()
         self.__filesystem = filesystem
 
     def __has_filesystem_criteria(self):
-        return self.__filesystem_uuid is not None or self.__filesystem_label is not None or self.__filesystem_device is not None
+        return (
+            self.__filesystem_uuid is not None
+            or self.__filesystem_label is not None
+            or self.__filesystem_device is not None
+        )
 
     def __find_matching_filesytem(self):
         criteria = {
-            'uuid': self.__filesystem_uuid,
-            'label': self.__filesystem_label,
-            'device': self.__filesystem_device,
+            "uuid": self.__filesystem_uuid,
+            "label": self.__filesystem_label,
+            "device": self.__filesystem_device,
         }
         return self.__provider.get_matching_filesystem(criteria)
@@ -371,7 +380,9 @@ def __prepare_snapshot_present(self):
             # No change required
             return
         elif self.__snapshot_conflict == "error":
-            raise BtrfsModuleException(f"Target subvolume={self.__name} already exists and snapshot_conflict='error'")
+            raise BtrfsModuleException(
+                f"Target subvolume={self.__name} already exists and snapshot_conflict='error'"
+            )
 
         if source_subvolume is None:
             raise BtrfsModuleException(f"Source subvolume {self.__snapshot_source} does not exist")
@@ -396,9 +407,11 @@ def __prepare_delete_subvolume_tree(self, subvolume):
         if subvolume.is_filesystem_root():
             raise BtrfsModuleException("Can not delete the filesystem's root subvolume")
         if not self.__recursive and len(subvolume.get_child_subvolumes()) > 0:
-            raise BtrfsModuleException(f"Subvolume targeted for deletion {subvolume.path} has children and recursive=False."
-                                       "Either explicitly delete the child subvolumes first or pass "
-                                       "parameter recursive=True.")
+            raise BtrfsModuleException(
+                f"Subvolume targeted for deletion {subvolume.path} has children and recursive=False."
+                " Either explicitly delete the child subvolumes first or pass "
+                "parameter recursive=True."
+ ) self.__stage_required_mount(subvolume.get_parent_subvolume()) queue = self.__prepare_recursive_delete_order(subvolume) if self.__recursive else [subvolume] @@ -435,7 +448,9 @@ def __stage_required_mount(self, subvolume): if self.__automount: self.__required_mounts.append(subvolume) else: - raise BtrfsModuleException(f"The requested changes will require the subvolume '{subvolume.path}' to be mounted, but automount=False") + raise BtrfsModuleException( + f"The requested changes will require the subvolume '{subvolume.path}' to be mounted, but automount=False" + ) def __stage_create_subvolume(self, subvolume_path, intermediate=False): """ @@ -443,74 +458,82 @@ def __stage_create_subvolume(self, subvolume_path, intermediate=False): If intermediate is true, the action will be skipped if a directory like file is found at target after mounting a parent subvolume """ - self.__unit_of_work.append({ - 'action': self.__CREATE_SUBVOLUME_OPERATION, - 'target': subvolume_path, - 'intermediate': intermediate, - }) + self.__unit_of_work.append( + { + "action": self.__CREATE_SUBVOLUME_OPERATION, + "target": subvolume_path, + "intermediate": intermediate, + } + ) def __stage_create_snapshot(self, source_subvolume, target_subvolume_path): """Add creation of a snapshot from source to target to the unit of work""" - self.__unit_of_work.append({ - 'action': self.__CREATE_SNAPSHOT_OPERATION, - 'source': source_subvolume.path, - 'source_id': source_subvolume.id, - 'target': target_subvolume_path, - }) + self.__unit_of_work.append( + { + "action": self.__CREATE_SNAPSHOT_OPERATION, + "source": source_subvolume.path, + "source_id": source_subvolume.id, + "target": target_subvolume_path, + } + ) def __stage_delete_subvolume(self, subvolume): """Add deletion of the target subvolume to the unit of work""" - self.__unit_of_work.append({ - 'action': self.__DELETE_SUBVOLUME_OPERATION, - 'target': subvolume.path, - 'target_id': subvolume.id, - }) + self.__unit_of_work.append( + { + "action": self.__DELETE_SUBVOLUME_OPERATION, + "target": subvolume.path, + "target_id": subvolume.id, + } + ) def __stage_set_default_subvolume(self, subvolume_path, subvolume_id=None): """Add update of the filesystem's default subvolume to the unit of work""" - self.__unit_of_work.append({ - 'action': self.__SET_DEFAULT_SUBVOLUME_OPERATION, - 'target': subvolume_path, - 'target_id': subvolume_id, - }) + self.__unit_of_work.append( + { + "action": self.__SET_DEFAULT_SUBVOLUME_OPERATION, + "target": subvolume_path, + "target_id": subvolume_id, + } + ) # Execute the unit of work def __execute_unit_of_work(self): self.__check_required_mounts() for op in self.__unit_of_work: - if op['action'] == self.__CREATE_SUBVOLUME_OPERATION: + if op["action"] == self.__CREATE_SUBVOLUME_OPERATION: self.__execute_create_subvolume(op) - elif op['action'] == self.__CREATE_SNAPSHOT_OPERATION: + elif op["action"] == self.__CREATE_SNAPSHOT_OPERATION: self.__execute_create_snapshot(op) - elif op['action'] == self.__DELETE_SUBVOLUME_OPERATION: + elif op["action"] == self.__DELETE_SUBVOLUME_OPERATION: self.__execute_delete_subvolume(op) - elif op['action'] == self.__SET_DEFAULT_SUBVOLUME_OPERATION: + elif op["action"] == self.__SET_DEFAULT_SUBVOLUME_OPERATION: self.__execute_set_default_subvolume(op) else: raise ValueError(f"Unknown operation type '{op['action']}'") def __execute_create_subvolume(self, operation): - target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target']) + target_mounted_path = 
self.__filesystem.get_mountpath_as_child(operation["target"]) if not self.__is_existing_directory_like(target_mounted_path): self.__btrfs_api.subvolume_create(target_mounted_path) self.__completed_work.append(operation) def __execute_create_snapshot(self, operation): - source_subvolume = self.__filesystem.get_subvolume_by_name(operation['source']) + source_subvolume = self.__filesystem.get_subvolume_by_name(operation["source"]) source_mounted_path = source_subvolume.get_mounted_path() - target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target']) + target_mounted_path = self.__filesystem.get_mountpath_as_child(operation["target"]) self.__btrfs_api.subvolume_snapshot(source_mounted_path, target_mounted_path) self.__completed_work.append(operation) def __execute_delete_subvolume(self, operation): - target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target']) + target_mounted_path = self.__filesystem.get_mountpath_as_child(operation["target"]) self.__btrfs_api.subvolume_delete(target_mounted_path) self.__completed_work.append(operation) def __execute_set_default_subvolume(self, operation): - target = operation['target'] - target_id = operation['target_id'] + target = operation["target"] + target_id = operation["target_id"] if target_id is None: target_subvolume = self.__filesystem.get_subvolume_by_name(target) @@ -529,8 +552,7 @@ def __execute_set_default_subvolume(self, operation): def __is_existing_directory_like(self, path): return os.path.exists(path) and ( - os.path.isdir(path) or - os.stat(path).st_ino == self.__BTRFS_SUBVOLUME_INODE_NUMBER + os.path.isdir(path) or os.stat(path).st_ino == self.__BTRFS_SUBVOLUME_INODE_NUMBER ) def __check_required_mounts(self): @@ -546,7 +568,7 @@ def __filter_child_subvolumes(self, subvolumes): last = None ordered = sorted(subvolumes, key=lambda x: x.path) for next in ordered: - if last is None or not next.path[0:len(last)] == last: + if last is None or not next.path[0 : len(last)] == last: filtered.append(next) last = next.path return filtered @@ -555,8 +577,10 @@ def __filter_child_subvolumes(self, subvolumes): def __mount_subvolume_id_to_tempdir(self, filesystem, subvolid): # this check should be redundant if self.module.check_mode or not self.__automount: - raise BtrfsModuleException("Unable to temporarily mount required subvolumes" - f" with automount={self.__automount} and check_mode={self.module.check_mode}") + raise BtrfsModuleException( + "Unable to temporarily mount required subvolumes" + f" with automount={self.__automount} and check_mode={self.module.check_mode}" + ) cache_key = f"{filesystem.uuid}:{int(subvolid)}" # The subvolume was already mounted, so return the current path @@ -591,14 +615,14 @@ def get_results(self): changed=len(self.__completed_work) > 0, filesystem=self.__filesystem.get_summary(), modifications=self.__get_formatted_modifications(), - target_subvolume_id=(target.id if target is not None else None) + target_subvolume_id=(target.id if target is not None else None), ) def __get_formatted_modifications(self): return [self.__format_operation_result(op) for op in self.__completed_work] def __format_operation_result(self, operation): - action_type = operation['action'] + action_type = operation["action"] if action_type == self.__CREATE_SUBVOLUME_OPERATION: return self.__format_create_subvolume_result(operation) elif action_type == self.__CREATE_SNAPSHOT_OPERATION: @@ -611,29 +635,29 @@ def __format_operation_result(self, operation): raise ValueError(f"Unknown operation type 
'{operation['action']}'") def __format_create_subvolume_result(self, operation): - target = operation['target'] + target = operation["target"] target_subvolume = self.__filesystem.get_subvolume_by_name(target) target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID return f"Created subvolume '{target}' ({target_id})" def __format_create_snapshot_result(self, operation): - source = operation['source'] - source_id = operation['source_id'] + source = operation["source"] + source_id = operation["source_id"] - target = operation['target'] + target = operation["target"] target_subvolume = self.__filesystem.get_subvolume_by_name(target) target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID return f"Created snapshot '{target}' ({target_id}) from '{source}' ({source_id})" def __format_delete_subvolume_result(self, operation): - target = operation['target'] - target_id = operation['target_id'] + target = operation["target"] + target_id = operation["target_id"] return f"Deleted subvolume '{target}' ({target_id})" def __format_set_default_subvolume_result(self, operation): - target = operation['target'] - if 'target_id' in operation: - target_id = operation['target_id'] + target = operation["target"] + if "target_id" in operation: + target_id = operation["target_id"] else: target_subvolume = self.__filesystem.get_subvolume_by_name(target) target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID @@ -642,22 +666,19 @@ def __format_set_default_subvolume_result(self, operation): def run_module(): module_args = dict( - automount=dict(type='bool', default=False), - default=dict(type='bool', default=False), - filesystem_device=dict(type='path'), - filesystem_label=dict(type='str'), - filesystem_uuid=dict(type='str'), - name=dict(type='str', required=True), - recursive=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['present', 'absent']), - snapshot_source=dict(type='str'), - snapshot_conflict=dict(type='str', default='skip', choices=['skip', 'clobber', 'error']) + automount=dict(type="bool", default=False), + default=dict(type="bool", default=False), + filesystem_device=dict(type="path"), + filesystem_label=dict(type="str"), + filesystem_uuid=dict(type="str"), + name=dict(type="str", required=True), + recursive=dict(type="bool", default=False), + state=dict(type="str", default="present", choices=["present", "absent"]), + snapshot_source=dict(type="str"), + snapshot_conflict=dict(type="str", default="skip", choices=["skip", "clobber", "error"]), ) - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) subvolume = BtrfsSubvolumeModule(module) error, result = subvolume.run() @@ -671,5 +692,5 @@ def main(): run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/bundler.py b/plugins/modules/bundler.py index 2395cda332d..1a0ac552bcf 100644 --- a/plugins/modules/bundler.py +++ b/plugins/modules/bundler.py @@ -119,10 +119,10 @@ def get_bundler_executable(module): - if module.params.get('executable'): - result = module.params.get('executable').split(' ') + if module.params.get("executable"): + result = module.params.get("executable").split(" ") else: - result = [module.get_bin_path('bundle', True)] + result = [module.get_bin_path("bundle", True)] return result @@ -130,71 +130,71 @@ def main(): module 
= AnsibleModule( argument_spec=dict( executable=dict(), - state=dict(default='present', choices=['present', 'latest']), - chdir=dict(type='path'), - exclude_groups=dict(type='list', elements='str'), - clean=dict(default=False, type='bool'), - gemfile=dict(type='path'), - local=dict(default=False, type='bool'), - deployment_mode=dict(default=False, type='bool'), - user_install=dict(default=True, type='bool'), - gem_path=dict(type='path'), - binstub_directory=dict(type='path'), + state=dict(default="present", choices=["present", "latest"]), + chdir=dict(type="path"), + exclude_groups=dict(type="list", elements="str"), + clean=dict(default=False, type="bool"), + gemfile=dict(type="path"), + local=dict(default=False, type="bool"), + deployment_mode=dict(default=False, type="bool"), + user_install=dict(default=True, type="bool"), + gem_path=dict(type="path"), + binstub_directory=dict(type="path"), extra_args=dict(), ), - supports_check_mode=True + supports_check_mode=True, ) - state = module.params.get('state') - chdir = module.params.get('chdir') - exclude_groups = module.params.get('exclude_groups') - clean = module.params.get('clean') - gemfile = module.params.get('gemfile') - local = module.params.get('local') - deployment_mode = module.params.get('deployment_mode') - user_install = module.params.get('user_install') - gem_path = module.params.get('gem_path') - binstub_directory = module.params.get('binstub_directory') - extra_args = module.params.get('extra_args') + state = module.params.get("state") + chdir = module.params.get("chdir") + exclude_groups = module.params.get("exclude_groups") + clean = module.params.get("clean") + gemfile = module.params.get("gemfile") + local = module.params.get("local") + deployment_mode = module.params.get("deployment_mode") + user_install = module.params.get("user_install") + gem_path = module.params.get("gem_path") + binstub_directory = module.params.get("binstub_directory") + extra_args = module.params.get("extra_args") cmd = get_bundler_executable(module) if module.check_mode: - cmd.append('check') + cmd.append("check") rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False) module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err) - if state == 'present': - cmd.append('install') + if state == "present": + cmd.append("install") if exclude_groups: - cmd.extend(['--without', ':'.join(exclude_groups)]) + cmd.extend(["--without", ":".join(exclude_groups)]) if clean: - cmd.append('--clean') + cmd.append("--clean") if gemfile: - cmd.extend(['--gemfile', gemfile]) + cmd.extend(["--gemfile", gemfile]) if local: - cmd.append('--local') + cmd.append("--local") if deployment_mode: - cmd.append('--deployment') + cmd.append("--deployment") if not user_install: - cmd.append('--system') + cmd.append("--system") if gem_path: - cmd.extend(['--path', gem_path]) + cmd.extend(["--path", gem_path]) if binstub_directory: - cmd.extend(['--binstubs', binstub_directory]) + cmd.extend(["--binstubs", binstub_directory]) else: - cmd.append('update') + cmd.append("update") if local: - cmd.append('--local') + cmd.append("--local") if extra_args: - cmd.extend(extra_args.split(' ')) + cmd.extend(extra_args.split(" ")) rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True) - module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err) + module.exit_json(changed="Installing" in out, state=state, stdout=out, stderr=err) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/bzr.py 
b/plugins/modules/bzr.py index 32f3be5b653..45efcf9d338 100644 --- a/plugins/modules/bzr.py +++ b/plugins/modules/bzr.py @@ -76,7 +76,7 @@ def _command(self, args_list, cwd=None, **kwargs): return (rc, out, err) def get_version(self): - '''samples the version of the bzr branch''' + """samples the version of the bzr branch""" cmd = [self.bzr_path, "revno"] rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) @@ -84,41 +84,40 @@ def get_version(self): return revno def clone(self): - '''makes a new bzr branch if it does not already exist''' + """makes a new bzr branch if it does not already exist""" dest_dirname = os.path.dirname(self.dest) try: os.makedirs(dest_dirname) except Exception: pass - if self.version.lower() != 'head': + if self.version.lower() != "head": args_list = ["branch", "-r", self.version, self.parent, self.dest] else: args_list = ["branch", self.parent, self.dest] return self._command(args_list, check_rc=True, cwd=dest_dirname) def has_local_mods(self): - cmd = [self.bzr_path, "status", "-S"] rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) lines = stdout.splitlines() - mods_re = re.compile('^\\?\\?.*$') + mods_re = re.compile("^\\?\\?.*$") lines = [c for c in lines if not mods_re.search(c)] return len(lines) > 0 def reset(self, force): - ''' + """ Resets the index and working tree to head. Discards any changes to tracked files in the working tree since that commit. - ''' + """ if not force and self.has_local_mods(): self.module.fail_json(msg="Local modifications exist in branch (force=false).") return self._command(["revert"], check_rc=True, cwd=self.dest) def fetch(self): - '''updates branch from remote sources''' - if self.version.lower() != 'head': + """updates branch from remote sources""" + if self.version.lower() != "head": (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest) else: (rc, out, err) = self._command(["pull"], cwd=self.dest) @@ -127,8 +126,8 @@ def fetch(self): return (rc, out, err) def switch_version(self): - '''once pulled, switch to a particular revno or revid''' - if self.version.lower() != 'head': + """once pulled, switch to a particular revno or revid""" + if self.version.lower() != "head": args_list = ["revert", "-r", self.version] else: args_list = ["revert"] @@ -137,24 +136,25 @@ def switch_version(self): # =========================================== + def main(): module = AnsibleModule( argument_spec=dict( - dest=dict(type='path', required=True), - name=dict(type='str', required=True, aliases=['parent']), - version=dict(type='str', default='head'), - force=dict(type='bool', default=False), - executable=dict(type='str'), + dest=dict(type="path", required=True), + name=dict(type="str", required=True, aliases=["parent"]), + version=dict(type="str", default="head"), + force=dict(type="bool", default=False), + executable=dict(type="str"), ) ) - dest = module.params['dest'] - parent = module.params['name'] - version = module.params['version'] - force = module.params['force'] - bzr_path = module.params['executable'] or module.get_bin_path('bzr', True) + dest = module.params["dest"] + parent = module.params["name"] + version = module.params["version"] + force = module.params["force"] + bzr_path = module.params["executable"] or module.get_bin_path("bzr", True) - bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf') + bzrconfig = os.path.join(dest, ".bzr", "branch", "branch.conf") rc, out, err = (0, None, None) @@ -192,5 +192,5 @@ def main(): module.exit_json(changed=changed, before=before, 
after=after) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/campfire.py b/plugins/modules/campfire.py index f9924bcc9be..e0e25cb8119 100644 --- a/plugins/modules/campfire.py +++ b/plugins/modules/campfire.py @@ -121,29 +121,62 @@ def main(): - module = AnsibleModule( argument_spec=dict( subscription=dict(required=True), token=dict(required=True, no_log=True), room=dict(required=True), msg=dict(required=True), - notify=dict(choices=["56k", "bell", "bezos", "bueller", - "clowntown", "cottoneyejoe", - "crickets", "dadgummit", "dangerzone", - "danielsan", "deeper", "drama", - "greatjob", "greyjoy", "guarantee", - "heygirl", "horn", "horror", - "inconceivable", "live", "loggins", - "makeitso", "noooo", "nyan", "ohmy", - "ohyeah", "pushit", "rimshot", - "rollout", "rumble", "sax", "secret", - "sexyback", "story", "tada", "tmyk", - "trololo", "trombone", "unix", - "vuvuzela", "what", "whoomp", "yeah", - "yodel"]), + notify=dict( + choices=[ + "56k", + "bell", + "bezos", + "bueller", + "clowntown", + "cottoneyejoe", + "crickets", + "dadgummit", + "dangerzone", + "danielsan", + "deeper", + "drama", + "greatjob", + "greyjoy", + "guarantee", + "heygirl", + "horn", + "horror", + "inconceivable", + "live", + "loggins", + "makeitso", + "noooo", + "nyan", + "ohmy", + "ohyeah", + "pushit", + "rimshot", + "rollout", + "rumble", + "sax", + "secret", + "sexyback", + "story", + "tada", + "tmyk", + "trololo", + "trombone", + "unix", + "vuvuzela", + "what", + "whoomp", + "yeah", + "yodel", + ] + ), ), - supports_check_mode=False + supports_check_mode=False, ) subscription = module.params["subscription"] @@ -158,26 +191,27 @@ def main(): AGENT = "Ansible/1.2" # Hack to add basic auth username and password the way fetch_url expects - module.params['url_username'] = token - module.params['url_password'] = 'X' + module.params["url_username"] = token + module.params["url_password"] = "X" - target_url = f'{URI}/room/{room}/speak.xml' - headers = {'Content-Type': 'application/xml', - 'User-agent': AGENT} + target_url = f"{URI}/room/{room}/speak.xml" + headers = {"Content-Type": "application/xml", "User-agent": AGENT} # Send some audible notification if requested if notify: response, info = fetch_url(module, target_url, data=NSTR % html_escape(notify), headers=headers) - if info['status'] not in [200, 201]: - module.fail_json(msg=f"unable to send msg: '{notify}', campfire api returned error code: '{info['status']}'") + if info["status"] not in [200, 201]: + module.fail_json( + msg=f"unable to send msg: '{notify}', campfire api returned error code: '{info['status']}'" + ) # Send the message response, info = fetch_url(module, target_url, data=MSTR % html_escape(msg), headers=headers) - if info['status'] not in [200, 201]: + if info["status"] not in [200, 201]: module.fail_json(msg=f"unable to send msg: '{msg}', campfire api returned error code: '{info['status']}'") module.exit_json(changed=True, room=room, msg=msg, notify=notify) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/capabilities.py b/plugins/modules/capabilities.py index a923c15ef51..5b5a11f1e9b 100644 --- a/plugins/modules/capabilities.py +++ b/plugins/modules/capabilities.py @@ -62,47 +62,50 @@ from ansible.module_utils.basic import AnsibleModule -OPS = ('=', '-', '+') +OPS = ("=", "-", "+") class CapabilitiesModule: - platform = 'Linux' + platform = "Linux" distribution = None def __init__(self, module): self.module = module - self.path = module.params['path'].strip() - 
self.capability = module.params['capability'].strip().lower() - self.state = module.params['state'] - self.getcap_cmd = module.get_bin_path('getcap', required=True) - self.setcap_cmd = module.get_bin_path('setcap', required=True) - self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present') + self.path = module.params["path"].strip() + self.capability = module.params["capability"].strip().lower() + self.state = module.params["state"] + self.getcap_cmd = module.get_bin_path("getcap", required=True) + self.setcap_cmd = module.get_bin_path("setcap", required=True) + self.capability_tup = self._parse_cap(self.capability, op_required=self.state == "present") self.run() def run(self): - current = self.getcap(self.path) caps = [cap[0] for cap in current] - if self.state == 'present' and self.capability_tup not in current: + if self.state == "present" and self.capability_tup not in current: # need to add capability if self.module.check_mode: - self.module.exit_json(changed=True, msg='capabilities changed') + self.module.exit_json(changed=True, msg="capabilities changed") else: # remove from current cap list if it is already set (but op/flags differ) current = [x for x in current if x[0] != self.capability_tup[0]] # add new cap with correct op/flags current.append(self.capability_tup) - self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) - elif self.state == 'absent' and self.capability_tup[0] in caps: + self.module.exit_json( + changed=True, state=self.state, msg="capabilities changed", stdout=self.setcap(self.path, current) + ) + elif self.state == "absent" and self.capability_tup[0] in caps: # need to remove capability if self.module.check_mode: - self.module.exit_json(changed=True, msg='capabilities changed') + self.module.exit_json(changed=True, msg="capabilities changed") else: # remove from current cap list and then set current list current = [x for x in current if x[0] != self.capability_tup[0]] - self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) + self.module.exit_json( + changed=True, state=self.state, msg="capabilities changed", stdout=self.setcap(self.path, current) + ) self.module.exit_json(changed=False, state=self.state) def getcap(self, path): @@ -118,9 +121,9 @@ def getcap(self, path): if rc != 0 or stderr != "": self.module.fail_json(msg=f"Unable to get capabilities of {path}", stdout=stdout.strip(), stderr=stderr) if stdout.strip() != path: - if ' =' in stdout: + if " =" in stdout: # process output of an older version of libcap - caps = stdout.split(' =')[1].strip().split() + caps = stdout.split(" =")[1].strip().split() elif stdout.strip().endswith(")"): # '/foo (Error Message)' self.module.fail_json(msg=f"Unable to get capabilities of {path}", stdout=stdout.strip(), stderr=stderr) else: @@ -131,8 +134,8 @@ def getcap(self, path): cap = cap.lower() # getcap condenses capabilities with the same op/flags into a # comma-separated list, so we have to parse that - if ',' in cap: - cap_group = cap.split(',') + if "," in cap: + cap_group = cap.split(",") cap_group[-1], op, flags = self._parse_cap(cap_group[-1]) for subcap in cap_group: rval.append((subcap, op, flags)) @@ -141,7 +144,7 @@ def getcap(self, path): return rval def setcap(self, path, caps): - caps = ' '.join([''.join(cap) for cap in caps]) + caps = " ".join(["".join(cap) for cap in caps]) cmd = [self.setcap_cmd, caps, path] rc, stdout, stderr = 
self.module.run_command(cmd) if rc != 0: @@ -169,13 +172,14 @@ def _parse_cap(self, cap, op_required=True): # ============================================================== # main + def main(): # defining module module = AnsibleModule( argument_spec=dict( - path=dict(type='str', required=True, aliases=['key']), - capability=dict(type='str', required=True, aliases=['cap']), - state=dict(type='str', default='present', choices=['absent', 'present']), + path=dict(type="str", required=True, aliases=["key"]), + capability=dict(type="str", required=True, aliases=["cap"]), + state=dict(type="str", default="present", choices=["absent", "present"]), ), supports_check_mode=True, ) @@ -183,5 +187,5 @@ def main(): CapabilitiesModule(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/cargo.py b/plugins/modules/cargo.py index 1425a4bf3ca..474fcb26401 100644 --- a/plugins/modules/cargo.py +++ b/plugins/modules/cargo.py @@ -148,9 +148,7 @@ def path(self, path): self.module.fail_json(msg=f"Path {path} is not a directory") self._path = path - def _exec( - self, args, run_in_check_mode=False, check_rc=True, add_package_name=True - ): + def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True): if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): cmd = self.executable + args rc, out, err = self.module.run_command(cmd, check_rc=check_rc) @@ -195,9 +193,7 @@ def install(self, packages=None): def is_outdated(self, name): installed_version = self.get_installed().get(name) latest_version = ( - self.get_latest_published_version(name) - if not self.directory - else self.get_source_directory_version(name) + self.get_latest_published_version(name) if not self.directory else self.get_source_directory_version(name) ) return installed_version != latest_version @@ -207,9 +203,7 @@ def get_latest_published_version(self, name): match = re.search(r'"(.+)"', data) if not match: - self.module.fail_json( - msg=f"No published version for package {name} found" - ) + self.module.fail_json(msg=f"No published version for package {name} found") return match.group(1) def get_source_directory_version(self, name): @@ -265,27 +259,20 @@ def main(): module.fail_json(msg="Source directory does not exist") # Set LANG env since we parse stdout - module.run_command_environ_update = dict( - LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C" - ) + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C") cargo = Cargo(module, **module.params) changed, out, err = False, None, None installed_packages = cargo.get_installed() if state == "present": to_install = [ - n - for n in name - if (n not in installed_packages) - or (version and version != installed_packages[n]) + n for n in name if (n not in installed_packages) or (version and version != installed_packages[n]) ] if to_install: changed = True out, err = cargo.install(to_install) elif state == "latest": - to_update = [ - n for n in name if n not in installed_packages or cargo.is_outdated(n) - ] + to_update = [n for n in name if n not in installed_packages or cargo.is_outdated(n)] if to_update: changed = True out, err = cargo.install(to_update) diff --git a/plugins/modules/catapult.py b/plugins/modules/catapult.py index 76323cfb1fc..c7ec58b1f27 100644 --- a/plugins/modules/catapult.py +++ b/plugins/modules/catapult.py @@ -106,15 +106,15 @@ def send(module, src, dest, msg, media, user_id, api_token, api_secret): """ AGENT = "Ansible" URI = 
f"https://api.catapult.inetwork.com/v1/users/{user_id}/messages" - data = {'from': src, 'to': dest, 'text': msg} + data = {"from": src, "to": dest, "text": msg} if media: - data['media'] = media + data["media"] = media - headers = {'User-Agent': AGENT, 'Content-type': 'application/json'} + headers = {"User-Agent": AGENT, "Content-type": "application/json"} # Hack module params to have the Basic auth params that fetch_url expects - module.params['url_username'] = api_token.replace('\n', '') - module.params['url_password'] = api_secret.replace('\n', '') + module.params["url_username"] = api_token.replace("\n", "") + module.params["url_password"] = api_secret.replace("\n", "") return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post") @@ -123,7 +123,7 @@ def main(): module = AnsibleModule( argument_spec=dict( src=dict(required=True), - dest=dict(required=True, type='list', elements='str'), + dest=dict(required=True, type="list", elements="str"), msg=dict(required=True), user_id=dict(required=True), api_token=dict(required=True, no_log=True), @@ -132,13 +132,13 @@ def main(): ), ) - src = module.params['src'] - dest = module.params['dest'] - msg = module.params['msg'] - media = module.params['media'] - user_id = module.params['user_id'] - api_token = module.params['api_token'] - api_secret = module.params['api_secret'] + src = module.params["src"] + dest = module.params["dest"] + msg = module.params["msg"] + media = module.params["media"] + user_id = module.params["user_id"] + api_token = module.params["api_token"] + api_secret = module.params["api_secret"] for number in dest: rc, info = send(module, src, number, msg, media, user_id, api_token, api_secret) @@ -151,5 +151,5 @@ def main(): module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/circonus_annotation.py b/plugins/modules/circonus_annotation.py index cd157e2d7b6..5a9b8c7eb70 100644 --- a/plugins/modules/circonus_annotation.py +++ b/plugins/modules/circonus_annotation.py @@ -153,6 +153,7 @@ REQUESTS_IMP_ERR = None try: import requests + HAS_REQUESTS = True except ImportError: REQUESTS_IMP_ERR = traceback.format_exc() @@ -165,62 +166,66 @@ def check_requests_dep(module): """Check if an adequate requests version is available""" if not HAS_REQUESTS: - module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + module.fail_json(msg=missing_required_lib("requests"), exception=REQUESTS_IMP_ERR) else: - required_version = '2.0.0' + required_version = "2.0.0" if LooseVersion(requests.__version__) < LooseVersion(required_version): - module.fail_json(msg=f"'requests' library version should be >= {required_version}, found: {requests.__version__}.") + module.fail_json( + msg=f"'requests' library version should be >= {required_version}, found: {requests.__version__}." 
+ ) def post_annotation(annotation, api_key): - ''' Takes annotation dict and api_key string''' - base_url = 'https://api.circonus.com/v2' - anootate_post_endpoint = '/annotation' - resp = requests.post(base_url + anootate_post_endpoint, - headers=build_headers(api_key), data=json.dumps(annotation)) + """Takes annotation dict and api_key string""" + base_url = "https://api.circonus.com/v2" + anootate_post_endpoint = "/annotation" + resp = requests.post(base_url + anootate_post_endpoint, headers=build_headers(api_key), data=json.dumps(annotation)) resp.raise_for_status() return resp def create_annotation(module): - ''' Takes ansible module object ''' + """Takes ansible module object""" annotation = {} - duration = module.params['duration'] - if module.params['start'] is not None: - start = module.params['start'] + duration = module.params["duration"] + if module.params["start"] is not None: + start = module.params["start"] else: start = int(time.time()) - if module.params['stop'] is not None: - stop = module.params['stop'] + if module.params["stop"] is not None: + stop = module.params["stop"] else: stop = int(time.time()) + duration - annotation['start'] = start - annotation['stop'] = stop - annotation['category'] = module.params['category'] - annotation['description'] = module.params['description'] - annotation['title'] = module.params['title'] + annotation["start"] = start + annotation["stop"] = stop + annotation["category"] = module.params["category"] + annotation["description"] = module.params["description"] + annotation["title"] = module.params["title"] return annotation def build_headers(api_token): - '''Takes api token, returns headers with it included.''' - headers = {'X-Circonus-App-Name': 'ansible', - 'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token, - 'Accept': 'application/json'} + """Takes api token, returns headers with it included.""" + headers = { + "X-Circonus-App-Name": "ansible", + "Host": "api.circonus.com", + "X-Circonus-Auth-Token": api_token, + "Accept": "application/json", + } return headers def main(): - '''Main function, dispatches logic''' + """Main function, dispatches logic""" module = AnsibleModule( argument_spec=dict( - start=dict(type='int'), - stop=dict(type='int'), + start=dict(type="int"), + stop=dict(type="int"), category=dict(required=True), title=dict(required=True), description=dict(required=True), - duration=dict(default=0, type='int'), - api_key=dict(required=True, no_log=True) + duration=dict(default=0, type="int"), + api_key=dict(required=True, no_log=True), ) ) @@ -228,11 +233,11 @@ def main(): annotation = create_annotation(module) try: - resp = post_annotation(annotation, module.params['api_key']) + resp = post_annotation(annotation, module.params["api_key"]) except requests.exceptions.RequestException as e: - module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc()) + module.fail_json(msg="Request Failed", reason=to_native(e), exception=traceback.format_exc()) module.exit_json(changed=True, annotation=resp.json()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/cisco_webex.py b/plugins/modules/cisco_webex.py index 82ce0a798a3..3409ee71de3 100644 --- a/plugins/modules/cisco_webex.py +++ b/plugins/modules/cisco_webex.py @@ -128,10 +128,7 @@ def webex_msg(module): results = {} ansible = module.params - headers = { - 'Authorization': f"Bearer {ansible['personal_token']}", - 'content-type': 'application/json' - } + headers = {"Authorization": f"Bearer 
{ansible['personal_token']}", "content-type": "application/json"} if module.check_mode: url = "https://webexapis.com/v1/people/me" @@ -140,47 +137,43 @@ def webex_msg(module): else: url = "https://webexapis.com/v1/messages" - payload = { - ansible['recipient_type']: ansible['recipient_id'], - ansible['msg_type']: ansible['msg'] - } + payload = {ansible["recipient_type"]: ansible["recipient_id"], ansible["msg_type"]: ansible["msg"]} payload = module.jsonify(payload) response, info = fetch_url(module, url, data=payload, headers=headers) - status_code = info['status'] - msg = info['msg'] + status_code = info["status"] + msg = info["msg"] # Module will fail if the response is not 200 if status_code != 200: - results['failed'] = True - results['status_code'] = status_code - results['message'] = msg + results["failed"] = True + results["status_code"] = status_code + results["message"] = msg else: - results['failed'] = False - results['status_code'] = status_code + results["failed"] = False + results["status_code"] = status_code if module.check_mode: - results['message'] = 'Authentication Successful.' + results["message"] = "Authentication Successful." else: - results['message'] = msg + results["message"] = msg return results def main(): - '''Ansible main. ''' + """Ansible main.""" module = AnsibleModule( argument_spec=dict( - recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']), + recipient_type=dict(required=True, choices=["roomId", "toPersonEmail", "toPersonId"]), recipient_id=dict(required=True, no_log=True), - msg_type=dict(default='text', aliases=['message_type'], choices=['text', 'markdown']), - personal_token=dict(required=True, no_log=True, aliases=['token']), + msg_type=dict(default="text", aliases=["message_type"], choices=["text", "markdown"]), + personal_token=dict(required=True, no_log=True, aliases=["token"]), msg=dict(required=True), ), - - supports_check_mode=True + supports_check_mode=True, ) results = webex_msg(module) diff --git a/plugins/modules/cloud_init_data_facts.py b/plugins/modules/cloud_init_data_facts.py index 9c3888f25f1..c3786fbb77b 100644 --- a/plugins/modules/cloud_init_data_facts.py +++ b/plugins/modules/cloud_init_data_facts.py @@ -95,29 +95,27 @@ def gather_cloud_init_data_facts(module): - res = { - 'cloud_init_data_facts': dict() - } + res = {"cloud_init_data_facts": dict()} - for i in ['result', 'status']: - filter = module.params.get('filter') + for i in ["result", "status"]: + filter = module.params.get("filter") if filter is None or filter == i: - res['cloud_init_data_facts'][i] = dict() + res["cloud_init_data_facts"][i] = dict() json_file = os.path.join(CLOUD_INIT_PATH, f"{i}.json") if os.path.exists(json_file): - with open(json_file, 'rb') as f: - contents = to_text(f.read(), errors='surrogate_or_strict') + with open(json_file, "rb") as f: + contents = to_text(f.read(), errors="surrogate_or_strict") if contents: - res['cloud_init_data_facts'][i] = module.from_json(contents) + res["cloud_init_data_facts"][i] = module.from_json(contents) return res def main(): module = AnsibleModule( argument_spec=dict( - filter=dict(choices=['result', 'status']), + filter=dict(choices=["result", "status"]), ), supports_check_mode=True, ) @@ -127,5 +125,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/cloudflare_dns.py b/plugins/modules/cloudflare_dns.py index e292472fc7f..dded475bccd 100644 --- a/plugins/modules/cloudflare_dns.py +++ 
b/plugins/modules/cloudflare_dns.py @@ -454,78 +454,77 @@ def join_str(sep, *args): class CloudflareAPI: - - cf_api_endpoint = 'https://api.cloudflare.com/client/v4' + cf_api_endpoint = "https://api.cloudflare.com/client/v4" changed = False def __init__(self, module): self.module = module - self.api_token = module.params['api_token'] - self.account_api_key = module.params['account_api_key'] - self.account_email = module.params['account_email'] - self.algorithm = module.params['algorithm'] - self.cert_usage = module.params['cert_usage'] - self.comment = module.params['comment'] - self.hash_type = module.params['hash_type'] - self.flag = module.params['flag'] - self.tag = module.params['tag'] - self.tags = module.params['tags'] - self.key_tag = module.params['key_tag'] - self.port = module.params['port'] - self.priority = module.params['priority'] - self.proto = lowercase_string(module.params['proto']) - self.proxied = module.params['proxied'] - self.selector = module.params['selector'] - self.record = lowercase_string(module.params['record']) - self.service = lowercase_string(module.params['service']) - self.is_solo = module.params['solo'] - self.state = module.params['state'] - self.timeout = module.params['timeout'] - self.ttl = module.params['ttl'] - self.type = module.params['type'] - self.value = module.params['value'] - self.weight = module.params['weight'] - self.zone = lowercase_string(module.params['zone']) - - if self.record == '@': + self.api_token = module.params["api_token"] + self.account_api_key = module.params["account_api_key"] + self.account_email = module.params["account_email"] + self.algorithm = module.params["algorithm"] + self.cert_usage = module.params["cert_usage"] + self.comment = module.params["comment"] + self.hash_type = module.params["hash_type"] + self.flag = module.params["flag"] + self.tag = module.params["tag"] + self.tags = module.params["tags"] + self.key_tag = module.params["key_tag"] + self.port = module.params["port"] + self.priority = module.params["priority"] + self.proto = lowercase_string(module.params["proto"]) + self.proxied = module.params["proxied"] + self.selector = module.params["selector"] + self.record = lowercase_string(module.params["record"]) + self.service = lowercase_string(module.params["service"]) + self.is_solo = module.params["solo"] + self.state = module.params["state"] + self.timeout = module.params["timeout"] + self.ttl = module.params["ttl"] + self.type = module.params["type"] + self.value = module.params["value"] + self.weight = module.params["weight"] + self.zone = lowercase_string(module.params["zone"]) + + if self.record == "@": self.record = self.zone - if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None): - self.value = self.value.rstrip('.').lower() + if (self.type in ["CNAME", "NS", "MX", "SRV"]) and (self.value is not None): + self.value = self.value.rstrip(".").lower() - if (self.type == 'AAAA') and (self.value is not None): + if (self.type == "AAAA") and (self.value is not None): self.value = self.value.lower() - if self.type == 'SRV': - if (self.proto is not None) and (not self.proto.startswith('_')): - self.proto = f'_{self.proto}' - if (self.service is not None) and (not self.service.startswith('_')): - self.service = f'_{self.service}' + if self.type == "SRV": + if (self.proto is not None) and (not self.proto.startswith("_")): + self.proto = f"_{self.proto}" + if (self.service is not None) and (not self.service.startswith("_")): + self.service = f"_{self.service}" - if self.type == 'TLSA': 
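The SRV/TLSA handling in this constructor boils down to a small normalization: prefix the service and proto labels with an underscore, append the zone when the record does not already end with it, and lower-case the result. An illustrative paraphrase under those assumptions (the function name and the combined return value are mine, not the module's exact code):

def srv_owner_name(service, proto, record, zone):
    """Build an SRV owner name such as _sip._tcp.host.example.com."""
    if proto and not proto.startswith("_"):
        proto = f"_{proto}"
    if service and not service.startswith("_"):
        service = f"_{service}"
    if not record.endswith(zone):
        record = f"{record}.{zone}"
    # Join the non-empty labels, mirroring the module's join_str helper.
    return ".".join(p for p in (service, proto, record) if p).lower()

print(srv_owner_name("sip", "tcp", "host", "example.com"))  # _sip._tcp.host.example.com
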
- if (self.proto is not None) and (not self.proto.startswith('_')): - self.proto = f'_{self.proto}' - if (self.port is not None): - self.port = f'_{self.port}' + if self.type == "TLSA": + if (self.proto is not None) and (not self.proto.startswith("_")): + self.proto = f"_{self.proto}" + if self.port is not None: + self.port = f"_{self.port}" if not self.record.endswith(self.zone): - self.record = join_str('.', self.record, self.zone) + self.record = join_str(".", self.record, self.zone) - if self.type == 'DS': + if self.type == "DS": if self.record == self.zone: self.module.fail_json(msg="DS records only apply to subdomains.") - def _cf_simple_api_call(self, api_call, method='GET', payload=None): + def _cf_simple_api_call(self, api_call, method="GET", payload=None): if self.api_token: headers = { - 'Authorization': f'Bearer {self.api_token}', - 'Content-Type': 'application/json', + "Authorization": f"Bearer {self.api_token}", + "Content-Type": "application/json", } else: headers = { - 'X-Auth-Email': self.account_email, - 'X-Auth-Key': self.account_api_key, - 'Content-Type': 'application/json', + "X-Auth-Email": self.account_email, + "X-Auth-Key": self.account_api_key, + "Content-Type": "application/json", } data = None if payload: @@ -534,33 +533,41 @@ def _cf_simple_api_call(self, api_call, method='GET', payload=None): except Exception as e: self.module.fail_json(msg=f"Failed to encode payload as JSON: {e} ") - resp, info = fetch_url(self.module, - self.cf_api_endpoint + api_call, - headers=headers, - data=data, - method=method, - timeout=self.timeout) - - if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]: - self.module.fail_json(msg=f"Failed API call {api_call}; got unexpected HTTP code {info['status']}: {info.get('msg')}") - - error_msg = '' - if info['status'] == 401: + resp, info = fetch_url( + self.module, + self.cf_api_endpoint + api_call, + headers=headers, + data=data, + method=method, + timeout=self.timeout, + ) + + if info["status"] not in [200, 304, 400, 401, 403, 429, 405, 415]: + self.module.fail_json( + msg=f"Failed API call {api_call}; got unexpected HTTP code {info['status']}: {info.get('msg')}" + ) + + error_msg = "" + if info["status"] == 401: # Unauthorized - error_msg = f"API user does not have permission; Status: {info['status']}; Method: {method}: Call: {api_call}" - elif info['status'] == 403: + error_msg = ( + f"API user does not have permission; Status: {info['status']}; Method: {method}: Call: {api_call}" + ) + elif info["status"] == 403: # Forbidden error_msg = f"API request not authenticated; Status: {info['status']}; Method: {method}: Call: {api_call}" - elif info['status'] == 429: + elif info["status"] == 429: # Too many requests error_msg = f"API client is rate limited; Status: {info['status']}; Method: {method}: Call: {api_call}" - elif info['status'] == 405: + elif info["status"] == 405: # Method not allowed - error_msg = f"API incorrect HTTP method provided; Status: {info['status']}; Method: {method}: Call: {api_call}" - elif info['status'] == 415: + error_msg = ( + f"API incorrect HTTP method provided; Status: {info['status']}; Method: {method}: Call: {api_call}" + ) + elif info["status"] == 415: # Unsupported Media Type error_msg = f"API request is not valid JSON; Status: {info['status']}; Method: {method}: Call: {api_call}" - elif info['status'] == 400: + elif info["status"] == 400: # Bad Request error_msg = f"API bad request; Status: {info['status']}; Method: {method}: Call: {api_call}" @@ -571,56 +578,56 @@ def 
_cf_simple_api_call(self, api_call, method='GET', payload=None): content = None if not content: - if info['body']: - content = info['body'] + if info["body"]: + content = info["body"] else: error_msg += "; The API response was empty" if content: try: - result = json.loads(to_text(content, errors='surrogate_or_strict')) - except (getattr(json, 'JSONDecodeError', ValueError)) as e: + result = json.loads(to_text(content, errors="surrogate_or_strict")) + except getattr(json, "JSONDecodeError", ValueError) as e: error_msg += f"; Failed to parse API response with error {to_native(e)}: {content}" # Without a valid/parsed JSON response no more error processing can be done if result is None: self.module.fail_json(msg=error_msg) - if 'success' not in result: + if "success" not in result: error_msg += f"; Unexpected error details: {result.get('error')}" self.module.fail_json(msg=error_msg) - if not result['success']: + if not result["success"]: error_msg += "; Error details: " - for error in result['errors']: + for error in result["errors"]: error_msg += f"code: {error['code']}, error: {error['message']}; " - if 'error_chain' in error: - for chain_error in error['error_chain']: + if "error_chain" in error: + for chain_error in error["error_chain"]: error_msg += f"code: {chain_error['code']}, error: {chain_error['message']}; " self.module.fail_json(msg=error_msg) - return result, info['status'] + return result, info["status"] - def _cf_api_call(self, api_call, method='GET', payload=None): + def _cf_api_call(self, api_call, method="GET", payload=None): result, status = self._cf_simple_api_call(api_call, method, payload) - data = result['result'] + data = result["result"] - if 'result_info' in result: - pagination = result['result_info'] - if pagination['total_pages'] > 1: - next_page = int(pagination['page']) + 1 - parameters = [f'page={next_page}'] + if "result_info" in result: + pagination = result["result_info"] + if pagination["total_pages"] > 1: + next_page = int(pagination["page"]) + 1 + parameters = [f"page={next_page}"] # strip "page" parameter from call parameters (if there are any) - if '?' in api_call: - raw_api_call, query = api_call.split('?', 1) - parameters += [param for param in query.split('&') if not param.startswith('page')] + if "?" 
in api_call: + raw_api_call, query = api_call.split("?", 1) + parameters += [param for param in query.split("&") if not param.startswith("page")] else: raw_api_call = api_call - while next_page <= pagination['total_pages']: + while next_page <= pagination["total_pages"]: raw_api_call += f"?{'&'.join(parameters)}" result, status = self._cf_simple_api_call(raw_api_call, method, payload) - data += result['result'] + data += result["result"] next_page += 1 return data, status @@ -636,18 +643,18 @@ def _get_zone_id(self, zone=None): if len(zones) < 1: self.module.fail_json(msg=f"No zone found with name {zone}") - return zones[0]['id'] + return zones[0]["id"] def get_zones(self, name=None): if not name: name = self.zone - param = '' + param = "" if name: param = f"?{urlencode({'name': name})}" - zones, status = self._cf_api_call(f'/zones{param}') + zones, status = self._cf_api_call(f"/zones{param}") return zones - def get_dns_records(self, zone_name=None, type=None, record=None, value=''): + def get_dns_records(self, zone_name=None, type=None, record=None, value=""): if not zone_name: zone_name = self.zone if not type: @@ -660,16 +667,16 @@ def get_dns_records(self, zone_name=None, type=None, record=None, value=''): value = self.value zone_id = self._get_zone_id() - api_call = f'/zones/{zone_id}/dns_records' + api_call = f"/zones/{zone_id}/dns_records" query = {} if type: - query['type'] = type + query["type"] = type if record: - query['name'] = record + query["name"] = record if value: - query['content'] = value + query["content"] = value if query: - api_call += f'?{urlencode(query)}' + api_call += f"?{urlencode(query)}" records, status = self._cf_api_call(api_call) return records @@ -678,20 +685,20 @@ def delete_dns_records(self, solo): records = [] content = self.value search_record = self.record - if self.type == 'SRV': - if not (self.value is None or self.value == ''): - content = join_str('\t', self.weight, self.port, self.value) - search_record = join_str('.', self.service, self.proto, self.record) - elif self.type == 'DS': - if not (self.value is None or self.value == ''): - content = join_str('\t', self.key_tag, self.algorithm, self.hash_type, self.value) - elif self.type == 'SSHFP': - if not (self.value is None or self.value == ''): - content = join_str(' ', self.algorithm, self.hash_type, self.value.upper()) - elif self.type == 'TLSA': - if not (self.value is None or self.value == ''): - content = join_str('\t', self.cert_usage, self.selector, self.hash_type, self.value) - search_record = join_str('.', self.port, self.proto, self.record) + if self.type == "SRV": + if not (self.value is None or self.value == ""): + content = join_str("\t", self.weight, self.port, self.value) + search_record = join_str(".", self.service, self.proto, self.record) + elif self.type == "DS": + if not (self.value is None or self.value == ""): + content = join_str("\t", self.key_tag, self.algorithm, self.hash_type, self.value) + elif self.type == "SSHFP": + if not (self.value is None or self.value == ""): + content = join_str(" ", self.algorithm, self.hash_type, self.value.upper()) + elif self.type == "TLSA": + if not (self.value is None or self.value == ""): + content = join_str("\t", self.cert_usage, self.selector, self.hash_type, self.value) + search_record = join_str(".", self.port, self.proto, self.record) if solo: search_value = None else: @@ -702,14 +709,14 @@ def delete_dns_records(self, solo): for rr in records: if solo: - if not ((rr['type'] == self.type) and (rr['name'] == search_record) and 
(rr['content'] == content)): + if not ((rr["type"] == self.type) and (rr["name"] == search_record) and (rr["content"] == content)): self.changed = True if not self.module.check_mode: - result, info = self._cf_api_call(f"/zones/{zone_id}/dns_records/{rr['id']}", 'DELETE') + result, info = self._cf_api_call(f"/zones/{zone_id}/dns_records/{rr['id']}", "DELETE") else: self.changed = True if not self.module.check_mode: - result, info = self._cf_api_call(f"/zones/{zone_id}/dns_records/{rr['id']}", 'DELETE') + result, info = self._cf_api_call(f"/zones/{zone_id}/dns_records/{rr['id']}", "DELETE") return self.changed def ensure_dns_record(self): @@ -717,7 +724,7 @@ def ensure_dns_record(self): search_record = self.record new_record = None - if self.type in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'PTR']: + if self.type in ["A", "AAAA", "CNAME", "TXT", "MX", "NS", "PTR"]: if not self.value: self.module.fail_json(msg="You must provide a non-empty value to create this record type") @@ -725,35 +732,32 @@ def ensure_dns_record(self): # ignoring the value when searching for existing # CNAME records allows us to update the value if it # changes - if self.type == 'CNAME': + if self.type == "CNAME": search_value = None - new_record = { - "type": self.type, - "name": self.record, - "content": self.value, - "ttl": self.ttl - } + new_record = {"type": self.type, "name": self.record, "content": self.value, "ttl": self.ttl} - if self.type in ['A', 'AAAA', 'CNAME']: + if self.type in ["A", "AAAA", "CNAME"]: new_record["proxied"] = self.proxied - if self.type == 'MX': + if self.type == "MX": for attr in [self.priority, self.value]: - if (attr is None) or (attr == ''): + if (attr is None) or (attr == ""): self.module.fail_json(msg="You must provide priority and a value to create this record type") new_record = { "type": self.type, "name": self.record, "content": self.value, "priority": self.priority, - "ttl": self.ttl + "ttl": self.ttl, } - if self.type == 'SRV': + if self.type == "SRV": for attr in [self.port, self.priority, self.proto, self.service, self.weight, self.value]: - if (attr is None) or (attr == ''): - self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type") + if (attr is None) or (attr == ""): + self.module.fail_json( + msg="You must provide port, priority, proto, service, weight and a value to create this record type" + ) srv_data = { "target": self.value, "port": self.port, @@ -763,17 +767,19 @@ def ensure_dns_record(self): new_record = { "type": self.type, - "name": join_str('.', self.service, self.proto, self.record), + "name": join_str(".", self.service, self.proto, self.record), "ttl": self.ttl, - 'data': srv_data, + "data": srv_data, } - search_value = join_str('\t', self.weight, self.port, self.value) - search_record = join_str('.', self.service, self.proto, self.record) + search_value = join_str("\t", self.weight, self.port, self.value) + search_record = join_str(".", self.service, self.proto, self.record) - if self.type == 'DS': + if self.type == "DS": for attr in [self.key_tag, self.algorithm, self.hash_type, self.value]: - if (attr is None) or (attr == ''): - self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type") + if (attr is None) or (attr == ""): + self.module.fail_json( + msg="You must provide key_tag, algorithm, hash_type and a value to create this record type" + ) ds_data = { "key_tag": self.key_tag, "algorithm": self.algorithm, @@ -783,15 +789,17 @@ def 
ensure_dns_record(self): new_record = { "type": self.type, "name": self.record, - 'data': ds_data, + "data": ds_data, "ttl": self.ttl, } - search_value = join_str('\t', self.key_tag, self.algorithm, self.hash_type, self.value) + search_value = join_str("\t", self.key_tag, self.algorithm, self.hash_type, self.value) - if self.type == 'SSHFP': + if self.type == "SSHFP": for attr in [self.algorithm, self.hash_type, self.value]: - if (attr is None) or (attr == ''): - self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type") + if (attr is None) or (attr == ""): + self.module.fail_json( + msg="You must provide algorithm, hash_type and a value to create this record type" + ) sshfp_data = { "fingerprint": self.value.upper(), "type": self.hash_type, @@ -800,16 +808,18 @@ def ensure_dns_record(self): new_record = { "type": self.type, "name": self.record, - 'data': sshfp_data, + "data": sshfp_data, "ttl": self.ttl, } - search_value = join_str(' ', self.algorithm, self.hash_type, self.value) + search_value = join_str(" ", self.algorithm, self.hash_type, self.value) - if self.type == 'TLSA': + if self.type == "TLSA": for attr in [self.port, self.proto, self.cert_usage, self.selector, self.hash_type, self.value]: - if (attr is None) or (attr == ''): - self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type") - search_record = join_str('.', self.port, self.proto, self.record) + if (attr is None) or (attr == ""): + self.module.fail_json( + msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type" + ) + search_record = join_str(".", self.port, self.proto, self.record) tlsa_data = { "usage": self.cert_usage, "selector": self.selector, @@ -819,14 +829,14 @@ def ensure_dns_record(self): new_record = { "type": self.type, "name": search_record, - 'data': tlsa_data, + "data": tlsa_data, "ttl": self.ttl, } - search_value = join_str('\t', self.cert_usage, self.selector, self.hash_type, self.value) + search_value = join_str("\t", self.cert_usage, self.selector, self.hash_type, self.value) - if self.type == 'CAA': + if self.type == "CAA": for attr in [self.flag, self.tag, self.value]: - if attr == '': + if attr == "": self.module.fail_json(msg="You must provide flag, tag and a value to create this record type") caa_data = { "flags": self.flag, @@ -836,13 +846,13 @@ def ensure_dns_record(self): new_record = { "type": self.type, "name": self.record, - 'data': caa_data, + "data": caa_data, "ttl": self.ttl, } search_value = None - new_record['comment'] = self.comment or None - new_record['tags'] = self.tags or [] + new_record["comment"] = self.comment or None + new_record["tags"] = self.tags or [] zone_id = self._get_zone_id(self.zone) records = self.get_dns_records(self.zone, self.type, search_record, search_value) @@ -851,36 +861,44 @@ def ensure_dns_record(self): if len(records) > 1: # As Cloudflare API cannot filter record containing quotes # CAA records must be compared locally - if self.type == 'CAA': + if self.type == "CAA": for rr in records: - if rr['data']['flags'] == caa_data['flags'] and rr['data']['tag'] == caa_data['tag'] and rr['data']['value'] == caa_data['value']: + if ( + rr["data"]["flags"] == caa_data["flags"] + and rr["data"]["tag"] == caa_data["tag"] + and rr["data"]["value"] == caa_data["value"] + ): return rr, self.changed else: - self.module.fail_json(msg="More than one record already exists for the given attributes. 
That should be impossible, please open an issue!") + self.module.fail_json( + msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!" + ) # record already exists, check if it must be updated if len(records) == 1: cur_record = records[0] do_update = False - if (self.ttl is not None) and (cur_record['ttl'] != self.ttl): + if (self.ttl is not None) and (cur_record["ttl"] != self.ttl): do_update = True - if (self.priority is not None) and ('priority' in cur_record) and (cur_record['priority'] != self.priority): + if (self.priority is not None) and ("priority" in cur_record) and (cur_record["priority"] != self.priority): do_update = True - if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != self.proxied): + if ("proxied" in new_record) and ("proxied" in cur_record) and (cur_record["proxied"] != self.proxied): do_update = True - if ('data' in new_record) and ('data' in cur_record): - if cur_record['data'] != new_record['data']: + if ("data" in new_record) and ("data" in cur_record): + if cur_record["data"] != new_record["data"]: do_update = True - if (self.type == 'CNAME') and (cur_record['content'] != new_record['content']): + if (self.type == "CNAME") and (cur_record["content"] != new_record["content"]): do_update = True - if cur_record['comment'] != new_record['comment']: + if cur_record["comment"] != new_record["comment"]: do_update = True - if sorted(cur_record['tags']) != sorted(new_record['tags']): + if sorted(cur_record["tags"]) != sorted(new_record["tags"]): do_update = True if do_update: if self.module.check_mode: result = new_record else: - result, info = self._cf_api_call(f"/zones/{zone_id}/dns_records/{records[0]['id']}", 'PUT', new_record) + result, info = self._cf_api_call( + f"/zones/{zone_id}/dns_records/{records[0]['id']}", "PUT", new_record + ) self.changed = True return result, self.changed else: @@ -888,7 +906,7 @@ def ensure_dns_record(self): if self.module.check_mode: result = new_record else: - result, info = self._cf_api_call(f'/zones/{zone_id}/dns_records', 'POST', new_record) + result, info = self._cf_api_call(f"/zones/{zone_id}/dns_records", "POST", new_record) self.changed = True return result, self.changed @@ -897,106 +915,163 @@ def main(): module = AnsibleModule( argument_spec=dict( api_token=dict(type="str", no_log=True, fallback=(env_fallback, ["CLOUDFLARE_TOKEN"])), - account_api_key=dict(type='str', no_log=True, aliases=['account_api_token']), - account_email=dict(type='str'), - algorithm=dict(type='int'), - cert_usage=dict(type='int', choices=[0, 1, 2, 3]), - comment=dict(type='str'), - hash_type=dict(type='int', choices=[1, 2]), - key_tag=dict(type='int', no_log=False), - port=dict(type='int'), - flag=dict(type='int', choices=[0, 1]), - tag=dict(type='str', choices=['issue', 'issuewild', 'iodef']), - tags=dict(type='list', elements='str'), - priority=dict(type='int', default=1), - proto=dict(type='str'), - proxied=dict(type='bool', default=False), - record=dict(type='str', default='@', aliases=['name']), - selector=dict(type='int', choices=[0, 1]), - service=dict(type='str'), - solo=dict(type='bool'), - state=dict(type='str', default='present', choices=['absent', 'present']), - timeout=dict(type='int', default=30), - ttl=dict(type='int', default=1), - type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SRV', 'SSHFP', 'TLSA', 'CAA', 'TXT', 'PTR']), - value=dict(type='str', aliases=['content']), - weight=dict(type='int', default=1), - 
zone=dict(type='str', required=True, aliases=['domain']), + account_api_key=dict(type="str", no_log=True, aliases=["account_api_token"]), + account_email=dict(type="str"), + algorithm=dict(type="int"), + cert_usage=dict(type="int", choices=[0, 1, 2, 3]), + comment=dict(type="str"), + hash_type=dict(type="int", choices=[1, 2]), + key_tag=dict(type="int", no_log=False), + port=dict(type="int"), + flag=dict(type="int", choices=[0, 1]), + tag=dict(type="str", choices=["issue", "issuewild", "iodef"]), + tags=dict(type="list", elements="str"), + priority=dict(type="int", default=1), + proto=dict(type="str"), + proxied=dict(type="bool", default=False), + record=dict(type="str", default="@", aliases=["name"]), + selector=dict(type="int", choices=[0, 1]), + service=dict(type="str"), + solo=dict(type="bool"), + state=dict(type="str", default="present", choices=["absent", "present"]), + timeout=dict(type="int", default=30), + ttl=dict(type="int", default=1), + type=dict( + type="str", + choices=["A", "AAAA", "CNAME", "DS", "MX", "NS", "SRV", "SSHFP", "TLSA", "CAA", "TXT", "PTR"], + ), + value=dict(type="str", aliases=["content"]), + weight=dict(type="int", default=1), + zone=dict(type="str", required=True, aliases=["domain"]), ), supports_check_mode=True, required_if=[ - ('state', 'present', ['record', 'type', 'value']), - ('state', 'absent', ['record']), - ('type', 'SRV', ['proto', 'service']), - ('type', 'TLSA', ['proto', 'port']), - ('type', 'CAA', ['flag', 'tag']), + ("state", "present", ["record", "type", "value"]), + ("state", "absent", ["record"]), + ("type", "SRV", ["proto", "service"]), + ("type", "TLSA", ["proto", "port"]), + ("type", "CAA", ["flag", "tag"]), ], required_together=[ - ('account_api_key', 'account_email'), + ("account_api_key", "account_email"), ], required_one_of=[ - ['api_token', 'account_api_key'], + ["api_token", "account_api_key"], ], ) - if module.params['type'] == 'SRV': - if not ((module.params['weight'] is not None and module.params['port'] is not None - and not (module.params['value'] is None or module.params['value'] == '')) - or (module.params['weight'] is None and module.params['port'] is None - and (module.params['value'] is None or module.params['value'] == ''))): - module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.") - - if module.params['type'] == 'SSHFP': - if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None - and not (module.params['value'] is None or module.params['value'] == '')) - or (module.params['algorithm'] is None and module.params['hash_type'] is None - and (module.params['value'] is None or module.params['value'] == ''))): - module.fail_json(msg="For SSHFP records the params algorithm, hash_type and value all need to be defined, or not at all.") - - if module.params['type'] == 'TLSA': - if not ((module.params['cert_usage'] is not None and module.params['selector'] is not None and module.params['hash_type'] is not None - and not (module.params['value'] is None or module.params['value'] == '')) - or (module.params['cert_usage'] is None and module.params['selector'] is None and module.params['hash_type'] is None - and (module.params['value'] is None or module.params['value'] == ''))): - module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.") - - if module.params['type'] == 'CAA': - if not ((module.params['flag'] is not None and module.params['tag'] is not None - and not 
(module.params['value'] is None or module.params['value'] == '')) - or (module.params['flag'] is None and module.params['tag'] is None - and (module.params['value'] is None or module.params['value'] == ''))): - module.fail_json(msg="For CAA records the params flag, tag and value all need to be defined, or not at all.") - - if module.params['type'] == 'DS': - if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None - and not (module.params['value'] is None or module.params['value'] == '')) - or (module.params['key_tag'] is None and module.params['algorithm'] is None and module.params['hash_type'] is None - and (module.params['value'] is None or module.params['value'] == ''))): - module.fail_json(msg="For DS records the params key_tag, algorithm, hash_type and value all need to be defined, or not at all.") + if module.params["type"] == "SRV": + if not ( + ( + module.params["weight"] is not None + and module.params["port"] is not None + and not (module.params["value"] is None or module.params["value"] == "") + ) + or ( + module.params["weight"] is None + and module.params["port"] is None + and (module.params["value"] is None or module.params["value"] == "") + ) + ): + module.fail_json( + msg="For SRV records the params weight, port and value all need to be defined, or not at all." + ) + + if module.params["type"] == "SSHFP": + if not ( + ( + module.params["algorithm"] is not None + and module.params["hash_type"] is not None + and not (module.params["value"] is None or module.params["value"] == "") + ) + or ( + module.params["algorithm"] is None + and module.params["hash_type"] is None + and (module.params["value"] is None or module.params["value"] == "") + ) + ): + module.fail_json( + msg="For SSHFP records the params algorithm, hash_type and value all need to be defined, or not at all." + ) + + if module.params["type"] == "TLSA": + if not ( + ( + module.params["cert_usage"] is not None + and module.params["selector"] is not None + and module.params["hash_type"] is not None + and not (module.params["value"] is None or module.params["value"] == "") + ) + or ( + module.params["cert_usage"] is None + and module.params["selector"] is None + and module.params["hash_type"] is None + and (module.params["value"] is None or module.params["value"] == "") + ) + ): + module.fail_json( + msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all." + ) + + if module.params["type"] == "CAA": + if not ( + ( + module.params["flag"] is not None + and module.params["tag"] is not None + and not (module.params["value"] is None or module.params["value"] == "") + ) + or ( + module.params["flag"] is None + and module.params["tag"] is None + and (module.params["value"] is None or module.params["value"] == "") + ) + ): + module.fail_json( + msg="For CAA records the params flag, tag and value all need to be defined, or not at all." 
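Each record-type block in this validation section enforces the same all-or-none invariant over its parameter group plus value. A compact sketch of that rule as a hypothetical helper (not a refactor the diff performs):

def all_or_none(params, keys):
    """True when the keys in `keys` plus a non-empty 'value' are either all
    provided or all omitted, which is the invariant each block enforces."""
    group_set = [params.get(k) is not None for k in keys]
    value_set = params.get("value") not in (None, "")
    return (all(group_set) and value_set) or (not any(group_set) and not value_set)


# SRV example: weight, port and value given together, or none of them.
assert all_or_none({"weight": 5, "port": 8443, "value": "target.example.com"}, ["weight", "port"])
assert all_or_none({}, ["weight", "port"])
assert not all_or_none({"weight": 5, "value": ""}, ["weight", "port"])
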
+ ) + + if module.params["type"] == "DS": + if not ( + ( + module.params["key_tag"] is not None + and module.params["algorithm"] is not None + and module.params["hash_type"] is not None + and not (module.params["value"] is None or module.params["value"] == "") + ) + or ( + module.params["key_tag"] is None + and module.params["algorithm"] is None + and module.params["hash_type"] is None + and (module.params["value"] is None or module.params["value"] == "") + ) + ): + module.fail_json( + msg="For DS records the params key_tag, algorithm, hash_type and value all need to be defined, or not at all." + ) changed = False cf_api = CloudflareAPI(module) # sanity checks - if cf_api.is_solo and cf_api.state == 'absent': + if cf_api.is_solo and cf_api.state == "absent": module.fail_json(msg="solo=true can only be used with state=present") # perform add, delete or update (only the TTL can be updated) of one or # more records - if cf_api.state == 'present': + if cf_api.state == "present": # delete all records matching record name + type if cf_api.is_solo: changed = cf_api.delete_dns_records(solo=cf_api.is_solo) result, changed = cf_api.ensure_dns_record() if isinstance(result, list): - module.exit_json(changed=changed, result={'record': result[0]}) + module.exit_json(changed=changed, result={"record": result[0]}) - module.exit_json(changed=changed, result={'record': result}) + module.exit_json(changed=changed, result={"record": result}) else: # force solo to False, just to be sure changed = cf_api.delete_dns_records(solo=False) module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/cobbler_sync.py b/plugins/modules/cobbler_sync.py index e66fbe83acd..d30f6106360 100644 --- a/plugins/modules/cobbler_sync.py +++ b/plugins/modules/cobbler_sync.py @@ -84,25 +84,25 @@ def main(): module = AnsibleModule( argument_spec=dict( - host=dict(type='str', default='127.0.0.1'), - port=dict(type='int'), - username=dict(type='str', default='cobbler'), - password=dict(type='str', no_log=True), - use_ssl=dict(type='bool', default=True), - validate_certs=dict(type='bool', default=True), + host=dict(type="str", default="127.0.0.1"), + port=dict(type="int"), + username=dict(type="str", default="cobbler"), + password=dict(type="str", no_log=True), + use_ssl=dict(type="bool", default=True), + validate_certs=dict(type="bool", default=True), ), supports_check_mode=True, ) - username = module.params['username'] - password = module.params['password'] - port = module.params['port'] - use_ssl = module.params['use_ssl'] - validate_certs = module.params['validate_certs'] + username = module.params["username"] + password = module.params["password"] + port = module.params["port"] + use_ssl = module.params["use_ssl"] + validate_certs = module.params["validate_certs"] - module.params['proto'] = 'https' if use_ssl else 'http' + module.params["proto"] = "https" if use_ssl else "http" if not port: - module.params['port'] = '443' if use_ssl else '80' + module.params["port"] = "443" if use_ssl else "80" result = dict( changed=True, @@ -121,7 +121,7 @@ def main(): # Handle target environment that doesn't support HTTPS verification ssl._create_default_https_context = ssl._create_unverified_context - url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params) + url = "{proto}://{host}:{port}/cobbler_api".format(**module.params) if ssl_context: conn = xmlrpc_client.ServerProxy(url, context=ssl_context) else: @@ -130,7 +130,11 @@ def main(): try: token = 
conn.login(username, password) except xmlrpc_client.Fault as e: - module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params)) + module.fail_json( + msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format( + url=url, error=to_text(e), **module.params + ) + ) except Exception as e: module.fail_json(msg=f"Connection to '{url}' failed. {e}") @@ -144,5 +148,5 @@ def main(): module.exit_json(elapsed=elapsed.seconds, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/cobbler_system.py b/plugins/modules/cobbler_system.py index 4a41270066a..f6d84660890 100644 --- a/plugins/modules/cobbler_system.py +++ b/plugins/modules/cobbler_system.py @@ -159,29 +159,29 @@ from ansible_collections.community.general.plugins.module_utils.version import LooseVersion IFPROPS_MAPPING = dict( - bondingopts='bonding_opts', - bridgeopts='bridge_opts', - connected_mode='connected_mode', - cnames='cnames', - dhcptag='dhcp_tag', - dnsname='dns_name', - ifgateway='if_gateway', - interfacetype='interface_type', - interfacemaster='interface_master', - ipaddress='ip_address', - ipv6address='ipv6_address', - ipv6defaultgateway='ipv6_default_gateway', - ipv6mtu='ipv6_mtu', - ipv6prefix='ipv6_prefix', - ipv6secondaries='ipv6_secondariesu', - ipv6staticroutes='ipv6_static_routes', - macaddress='mac_address', - management='management', - mtu='mtu', - netmask='netmask', - static='static', - staticroutes='static_routes', - virtbridge='virt_bridge', + bondingopts="bonding_opts", + bridgeopts="bridge_opts", + connected_mode="connected_mode", + cnames="cnames", + dhcptag="dhcp_tag", + dnsname="dns_name", + ifgateway="if_gateway", + interfacetype="interface_type", + interfacemaster="interface_master", + ipaddress="ip_address", + ipv6address="ipv6_address", + ipv6defaultgateway="ipv6_default_gateway", + ipv6mtu="ipv6_mtu", + ipv6prefix="ipv6_prefix", + ipv6secondaries="ipv6_secondariesu", + ipv6staticroutes="ipv6_static_routes", + macaddress="mac_address", + management="management", + mtu="mtu", + netmask="netmask", + static="static", + staticroutes="static_routes", + virtbridge="virt_bridge", ) @@ -198,33 +198,33 @@ def getsystem(conn, name, token): def main(): module = AnsibleModule( argument_spec=dict( - host=dict(type='str', default='127.0.0.1'), - port=dict(type='int'), - username=dict(type='str', default='cobbler'), - password=dict(type='str', no_log=True), - use_ssl=dict(type='bool', default=True), - validate_certs=dict(type='bool', default=True), - name=dict(type='str'), - interfaces=dict(type='dict'), - properties=dict(type='dict'), - sync=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['absent', 'present', 'query']), + host=dict(type="str", default="127.0.0.1"), + port=dict(type="int"), + username=dict(type="str", default="cobbler"), + password=dict(type="str", no_log=True), + use_ssl=dict(type="bool", default=True), + validate_certs=dict(type="bool", default=True), + name=dict(type="str"), + interfaces=dict(type="dict"), + properties=dict(type="dict"), + sync=dict(type="bool", default=False), + state=dict(type="str", default="present", choices=["absent", "present", "query"]), ), supports_check_mode=True, ) - username = module.params['username'] - password = module.params['password'] - port = module.params['port'] - use_ssl = module.params['use_ssl'] - validate_certs = module.params['validate_certs'] + username = module.params["username"] + 
password = module.params["password"] + port = module.params["port"] + use_ssl = module.params["use_ssl"] + validate_certs = module.params["validate_certs"] - name = module.params['name'] - state = module.params['state'] + name = module.params["name"] + state = module.params["state"] - module.params['proto'] = 'https' if use_ssl else 'http' + module.params["proto"] = "https" if use_ssl else "http" if not port: - module.params['port'] = '443' if use_ssl else '80' + module.params["port"] = "443" if use_ssl else "80" result = dict( changed=False, @@ -243,7 +243,7 @@ def main(): # Handle target environment that doesn't support HTTPS verification ssl._create_default_https_context = ssl._create_unverified_context - url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params) + url = "{proto}://{host}:{port}/cobbler_api".format(**module.params) if ssl_context: conn = xmlrpc_client.ServerProxy(url, context=ssl_context) else: @@ -252,52 +252,55 @@ def main(): try: token = conn.login(username, password) except xmlrpc_client.Fault as e: - module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params)) + module.fail_json( + msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format( + url=url, error=to_text(e), **module.params + ) + ) except Exception as e: module.fail_json(msg=f"Connection to '{url}' failed. {e}") system = getsystem(conn, name, token) # result['system'] = system - if state == 'query': + if state == "query": if name: - result['system'] = system + result["system"] = system else: # Turn it into a dictionary of dictionaries # all_systems = conn.get_systems() # result['systems'] = { system['name']: system for system in all_systems } # Return a list of dictionaries - result['systems'] = conn.get_systems() - - elif state == 'present': + result["systems"] = conn.get_systems() + elif state == "present": if system: # Update existing entry - system_id = '' - if LooseVersion(str(conn.version())) >= LooseVersion('3.4'): + system_id = "" + if LooseVersion(str(conn.version())) >= LooseVersion("3.4"): system_id = conn.get_system_handle(name) else: system_id = conn.get_system_handle(name, token) - for key, value in module.params['properties'].items(): + for key, value in module.params["properties"].items(): if key not in system: module.warn(f"Property '{key}' is not a valid system property.") if system[key] != value: try: conn.modify_system(system_id, key, value, token) - result['changed'] = True + result["changed"] = True except Exception as e: module.fail_json(msg=f"Unable to change '{key}' to '{value}'. 
{e}") else: # Create a new entry system_id = conn.new_system(token) - conn.modify_system(system_id, 'name', name, token) - result['changed'] = True + conn.modify_system(system_id, "name", name, token) + result["changed"] = True - if module.params['properties']: - for key, value in module.params['properties'].items(): + if module.params["properties"]: + for key, value in module.params["properties"].items(): try: conn.modify_system(system_id, key, value, token) except Exception as e: @@ -305,46 +308,45 @@ def main(): # Add interface properties interface_properties = dict() - if module.params['interfaces']: - for device, values in module.params['interfaces'].items(): + if module.params["interfaces"]: + for device, values in module.params["interfaces"].items(): for key, value in values.items(): - if key == 'name': + if key == "name": continue if key not in IFPROPS_MAPPING: module.warn(f"Property '{key}' is not a valid system property.") - if not system or system['interfaces'][device][IFPROPS_MAPPING[key]] != value: - result['changed'] = True - interface_properties[f'{key}-{device}'] = value + if not system or system["interfaces"][device][IFPROPS_MAPPING[key]] != value: + result["changed"] = True + interface_properties[f"{key}-{device}"] = value - if result['changed'] is True: + if result["changed"] is True: conn.modify_system(system_id, "modify_interface", interface_properties, token) # Only save when the entry was changed - if not module.check_mode and result['changed']: + if not module.check_mode and result["changed"]: conn.save_system(system_id, token) - elif state == 'absent': - + elif state == "absent": if system: if not module.check_mode: conn.remove_system(name, token) - result['changed'] = True + result["changed"] = True - if not module.check_mode and module.params['sync'] and result['changed']: + if not module.check_mode and module.params["sync"] and result["changed"]: try: conn.sync(token) except Exception as e: module.fail_json(msg=f"Failed to sync Cobbler. 
{e}") - if state in ('absent', 'present'): - result['system'] = getsystem(conn, name, token) + if state in ("absent", "present"): + result["system"] = getsystem(conn, name, token) if module._diff: - result['diff'] = dict(before=system, after=result['system']) + result["diff"] = dict(before=system, after=result["system"]) elapsed = now() - start module.exit_json(elapsed=elapsed.seconds, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/composer.py b/plugins/modules/composer.py index b37962fec1b..bdb1bc983d9 100644 --- a/plugins/modules/composer.py +++ b/plugins/modules/composer.py @@ -157,7 +157,7 @@ def has_changed(string): return True -def get_available_options(module, command='install'): +def get_available_options(module, command="install"): # get all available options from a composer command using composer help to json rc, out, err = composer_command(module, ["help", command], arguments=["--no-interaction", "--format=json"]) if rc != 0: @@ -165,7 +165,7 @@ def get_available_options(module, command='install'): module.fail_json(msg=output) command_help_json = module.from_json(out) - return command_help_json['definition']['options'] + return command_help_json["definition"]["options"] def composer_command(module, command, arguments=None, options=None): @@ -174,23 +174,23 @@ def composer_command(module, command, arguments=None, options=None): if arguments is None: arguments = [] - global_command = module.params['global_command'] + global_command = module.params["global_command"] if global_command: global_arg = ["global"] else: global_arg = [] - options.extend(['--working-dir', module.params['working_dir']]) + options.extend(["--working-dir", module.params["working_dir"]]) - if module.params['executable'] is None: + if module.params["executable"] is None: php_path = module.get_bin_path("php", True, ["/usr/local/bin"]) else: - php_path = module.params['executable'] + php_path = module.params["executable"] - if module.params['composer_executable'] is None: + if module.params["composer_executable"] is None: composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"]) else: - composer_path = module.params['composer_executable'] + composer_path = module.params["composer_executable"] cmd = [php_path, composer_path] + global_arg + command + options + arguments return module.run_command(cmd) @@ -215,25 +215,25 @@ def main(): ignore_platform_reqs=dict(default=False, type="bool"), composer_executable=dict(type="path"), ), - required_if=[('global_command', False, ['working_dir'])], - supports_check_mode=True + required_if=[("global_command", False, ["working_dir"])], + supports_check_mode=True, ) # Get composer command with fallback to default - command = module.params['command'] + command = module.params["command"] if re.search(r"\s", command): module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'") - arguments = shlex.split(module.params['arguments']) + arguments = shlex.split(module.params["arguments"]) available_options = get_available_options(module=module, command=command) options = [] # Default options default_options = [ - 'no-ansi', - 'no-interaction', - 'no-progress', + "no-ansi", + "no-interaction", + "no-progress", ] for option in default_options: @@ -242,15 +242,15 @@ def main(): options.append(option) option_params = { - 'prefer_source': 'prefer-source', - 'prefer_dist': 'prefer-dist', - 'no_dev': 'no-dev', - 'no_scripts': 'no-scripts', - 'no_plugins': 'no-plugins', - 'apcu_autoloader': 
'acpu-autoloader', - 'optimize_autoloader': 'optimize-autoloader', - 'classmap_authoritative': 'classmap-authoritative', - 'ignore_platform_reqs': 'ignore-platform-reqs', + "prefer_source": "prefer-source", + "prefer_dist": "prefer-dist", + "no_dev": "no-dev", + "no_scripts": "no-scripts", + "no_plugins": "no-plugins", + "apcu_autoloader": "acpu-autoloader", + "optimize_autoloader": "optimize-autoloader", + "classmap_authoritative": "classmap-authoritative", + "ignore_platform_reqs": "ignore-platform-reqs", } for param, option in option_params.items(): @@ -259,8 +259,8 @@ def main(): options.append(option) if module.check_mode: - if 'dry-run' in available_options: - options.append('--dry-run') + if "dry-run" in available_options: + options.append("--dry-run") else: module.exit_json(skipped=True, msg=f"command '{command}' does not support check mode, skipping") @@ -275,5 +275,5 @@ def main(): module.exit_json(changed=has_changed(output), msg=output, stdout=out + err) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/consul.py b/plugins/modules/consul.py index a0f87d3764e..71744123d70 100644 --- a/plugins/modules/consul.py +++ b/plugins/modules/consul.py @@ -230,10 +230,10 @@ class PatchedConsulAgentService(consul.Consul.Agent.Service): def deregister(self, service_id, token=None): params = {} if token: - params['token'] = token - return self.agent.http.put(consul.base.CB.bool(), - f'/v1/agent/service/deregister/{service_id}', - params=params) + params["token"] = token + return self.agent.http.put( + consul.base.CB.bool(), f"/v1/agent/service/deregister/{service_id}", params=params + ) python_consul_installed = True except ImportError: @@ -244,21 +244,21 @@ def deregister(self, service_id, token=None): def register_with_consul(module): - state = module.params['state'] + state = module.params["state"] - if state == 'present': + if state == "present": add(module) else: remove(module) def add(module): - ''' adds a service or a check depending on supplied configuration''' + """adds a service or a check depending on supplied configuration""" check = parse_check(module) service = parse_service(module) if not service and not check: - module.fail_json(msg='a name and port are required to register a service') + module.fail_json(msg="a name and port are required to register a service") if service: if check: @@ -269,9 +269,9 @@ def add(module): def remove(module): - ''' removes a service or a check ''' - service_id = module.params['service_id'] or module.params['service_name'] - check_id = module.params['check_id'] or module.params['check_name'] + """removes a service or a check""" + service_id = module.params["service_id"] or module.params["service_name"] + check_id = module.params["check_id"] or module.params["check_name"] if service_id: remove_service(module, service_id) else: @@ -279,30 +279,32 @@ def remove(module): def add_check(module, check): - ''' registers a check with the given agent. currently there is no way + """registers a check with the given agent. currently there is no way retrieve the full metadata of an existing check through the consul api. Without this we can't compare to the supplied check and so we must assume - a change. 
''' + a change.""" if not check.name and not check.service_id: - module.fail_json(msg='a check name is required for a node level check, one not attached to a service') + module.fail_json(msg="a check name is required for a node level check, one not attached to a service") consul_api = get_consul_api(module) check.register(consul_api) - module.exit_json(changed=True, - check_id=check.check_id, - check_name=check.name, - script=check.script, - interval=check.interval, - ttl=check.ttl, - tcp=check.tcp, - http=check.http, - timeout=check.timeout, - service_id=check.service_id) + module.exit_json( + changed=True, + check_id=check.check_id, + check_name=check.name, + script=check.script, + interval=check.interval, + ttl=check.ttl, + tcp=check.tcp, + http=check.http, + timeout=check.timeout, + service_id=check.service_id, + ) def remove_check(module, check_id): - ''' removes a check using its id ''' + """removes a check using its id""" consul_api = get_consul_api(module) if check_id in consul_api.agent.checks(): @@ -313,7 +315,7 @@ def remove_check(module, check_id): def add_service(module, service): - ''' registers a service with the current agent ''' + """registers a service with the current agent""" result = service changed = False @@ -323,7 +325,6 @@ def add_service(module, service): # there is no way to retrieve the details of checks so if a check is present # in the service it must be re-registered if service.has_checks() or not existing or not existing == service: - service.register(consul_api) # check that it registered correctly registered = get_service_by_id_or_name(consul_api, service.id) @@ -331,74 +332,76 @@ def add_service(module, service): result = registered changed = True - module.exit_json(changed=changed, - service_id=result.id, - service_name=result.name, - service_port=result.port, - checks=[check.to_dict() for check in service.checks()], - tags=result.tags) + module.exit_json( + changed=changed, + service_id=result.id, + service_name=result.name, + service_port=result.port, + checks=[check.to_dict() for check in service.checks()], + tags=result.tags, + ) def remove_service(module, service_id): - ''' deregister a service from the given agent using its service id ''' + """deregister a service from the given agent using its service id""" consul_api = get_consul_api(module) service = get_service_by_id_or_name(consul_api, service_id) if service: - consul_api.agent.service.deregister(service_id, token=module.params['token']) + consul_api.agent.service.deregister(service_id, token=module.params["token"]) module.exit_json(changed=True, id=service_id) module.exit_json(changed=False, id=service_id) def get_consul_api(module): - consulClient = consul.Consul(host=module.params['host'], - port=module.params['port'], - scheme=module.params['scheme'], - verify=module.params['validate_certs'], - token=module.params['token']) + consulClient = consul.Consul( + host=module.params["host"], + port=module.params["port"], + scheme=module.params["scheme"], + verify=module.params["validate_certs"], + token=module.params["token"], + ) consulClient.agent.service = PatchedConsulAgentService(consulClient) return consulClient def get_service_by_id_or_name(consul_api, service_id_or_name): - ''' iterate the registered services and find one with the given id ''' + """iterate the registered services and find one with the given id""" for dummy, service in consul_api.agent.services().items(): - if service_id_or_name in (service['ID'], service['Service']): + if service_id_or_name in (service["ID"], 
service["Service"]): return ConsulService(loaded=service) def parse_check(module): - if module.params['check_id'] or any(module.params[p] is not None for p in ('script', 'ttl', 'tcp', 'http')): + if module.params["check_id"] or any(module.params[p] is not None for p in ("script", "ttl", "tcp", "http")): return ConsulCheck( - module.params['check_id'], - module.params['check_name'], - module.params['check_node'], - module.params['check_host'], - module.params['script'], - module.params['interval'], - module.params['ttl'], - module.params['notes'], - module.params['tcp'], - module.params['http'], - module.params['timeout'], - module.params['service_id'], + module.params["check_id"], + module.params["check_name"], + module.params["check_node"], + module.params["check_host"], + module.params["script"], + module.params["interval"], + module.params["ttl"], + module.params["notes"], + module.params["tcp"], + module.params["http"], + module.params["timeout"], + module.params["service_id"], ) def parse_service(module): return ConsulService( - module.params['service_id'], - module.params['service_name'], - module.params['service_address'], - module.params['service_port'], - module.params['tags'], + module.params["service_id"], + module.params["service_name"], + module.params["service_address"], + module.params["service_port"], + module.params["tags"], ) class ConsulService: - - def __init__(self, service_id=None, name=None, address=None, port=-1, - tags=None, loaded=None): + def __init__(self, service_id=None, name=None, address=None, port=-1, tags=None, loaded=None): self.id = self.name = name if service_id: self.id = service_id @@ -407,26 +410,23 @@ def __init__(self, service_id=None, name=None, address=None, port=-1, self.tags = tags self._checks = [] if loaded: - self.id = loaded['ID'] - self.name = loaded['Service'] - self.port = loaded['Port'] - self.tags = loaded['Tags'] + self.id = loaded["ID"] + self.name = loaded["Service"] + self.port = loaded["Port"] + self.tags = loaded["Tags"] def register(self, consul_api): optional = {} if self.port: - optional['port'] = self.port + optional["port"] = self.port if len(self._checks) > 0: - optional['check'] = self._checks[0].check + optional["check"] = self._checks[0].check consul_api.agent.service.register( - self.name, - service_id=self.id, - address=self.address, - tags=self.tags, - **optional) + self.name, service_id=self.id, address=self.address, tags=self.tags, **optional + ) def add_check(self, check): self._checks.append(check) @@ -438,30 +438,44 @@ def has_checks(self): return len(self._checks) > 0 def __eq__(self, other): - return (isinstance(other, self.__class__) and - self.id == other.id and - self.name == other.name and - self.port == other.port and - self.tags == other.tags) + return ( + isinstance(other, self.__class__) + and self.id == other.id + and self.name == other.name + and self.port == other.port + and self.tags == other.tags + ) def __ne__(self, other): return not self.__eq__(other) def to_dict(self): - data = {'id': self.id, "name": self.name} + data = {"id": self.id, "name": self.name} if self.port: - data['port'] = self.port + data["port"] = self.port if self.tags and len(self.tags) > 0: - data['tags'] = self.tags + data["tags"] = self.tags if len(self._checks) > 0: - data['check'] = self._checks[0].to_dict() + data["check"] = self._checks[0].to_dict() return data class ConsulCheck: - - def __init__(self, check_id, name, node=None, host='localhost', - script=None, interval=None, ttl=None, notes=None, tcp=None, http=None, 
timeout=None, service_id=None): + def __init__( + self, + check_id, + name, + node=None, + host="localhost", + script=None, + interval=None, + ttl=None, + notes=None, + tcp=None, + http=None, + timeout=None, + service_id=None, + ): self.check_id = self.name = name if check_id: self.check_id = check_id @@ -470,12 +484,12 @@ def __init__(self, check_id, name, node=None, host='localhost', self.node = node self.host = host - self.interval = self.validate_duration('interval', interval) - self.ttl = self.validate_duration('ttl', ttl) + self.interval = self.validate_duration("interval", interval) + self.ttl = self.validate_duration("ttl", ttl) self.script = script self.tcp = tcp self.http = http - self.timeout = self.validate_duration('timeout', timeout) + self.timeout = self.validate_duration("timeout", timeout) self.check = None @@ -493,47 +507,49 @@ def __init__(self, check_id, name, node=None, host='localhost', match = re.match(regex, tcp) if not match: - raise Exception('tcp check must be in host:port format') + raise Exception("tcp check must be in host:port format") - self.check = consul.Check.tcp(match.group('host').strip('[]'), int(match.group('port')), self.interval) + self.check = consul.Check.tcp(match.group("host").strip("[]"), int(match.group("port")), self.interval) def validate_duration(self, name, duration): if duration: - duration_units = ['ns', 'us', 'ms', 's', 'm', 'h'] + duration_units = ["ns", "us", "ms", "s", "m", "h"] if not any(duration.endswith(suffix) for suffix in duration_units): duration = f"{duration}s" return duration def register(self, consul_api): - consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id, - notes=self.notes, - check=self.check) + consul_api.agent.check.register( + self.name, check_id=self.check_id, service_id=self.service_id, notes=self.notes, check=self.check + ) def __eq__(self, other): - return (isinstance(other, self.__class__) and - self.check_id == other.check_id and - self.service_id == other.service_id and - self.name == other.name and - self.script == other.script and - self.interval == other.interval) + return ( + isinstance(other, self.__class__) + and self.check_id == other.check_id + and self.service_id == other.service_id + and self.name == other.name + and self.script == other.script + and self.interval == other.interval + ) def __ne__(self, other): return not self.__eq__(other) def to_dict(self): data = {} - self._add(data, 'id', attr='check_id') - self._add(data, 'name', attr='check_name') - self._add(data, 'script') - self._add(data, 'node') - self._add(data, 'notes') - self._add(data, 'host') - self._add(data, 'interval') - self._add(data, 'ttl') - self._add(data, 'tcp') - self._add(data, 'http') - self._add(data, 'timeout') - self._add(data, 'service_id') + self._add(data, "id", attr="check_id") + self._add(data, "name", attr="check_name") + self._add(data, "script") + self._add(data, "node") + self._add(data, "notes") + self._add(data, "host") + self._add(data, "interval") + self._add(data, "ttl") + self._add(data, "tcp") + self._add(data, "http") + self._add(data, "timeout") + self._add(data, "service_id") return data def _add(self, data, key, attr=None): @@ -547,16 +563,18 @@ def _add(self, data, key, attr=None): def test_dependencies(module): if not python_consul_installed: - module.fail_json(msg="py-consul required for this module. see https://github.com/criteo/py-consul?tab=readme-ov-file#installation") + module.fail_json( + msg="py-consul required for this module. 
see https://github.com/criteo/py-consul?tab=readme-ov-file#installation" + ) def main(): module = AnsibleModule( argument_spec=dict( - host=dict(default='localhost'), - port=dict(default=8500, type='int'), - scheme=dict(default='http'), - validate_certs=dict(default=True, type='bool'), + host=dict(default="localhost"), + port=dict(default=8500, type="int"), + scheme=dict(default="http"), + validate_certs=dict(default=True, type="bool"), check_id=dict(), check_name=dict(), check_node=dict(), @@ -565,35 +583,35 @@ def main(): script=dict(), service_id=dict(), service_name=dict(), - service_address=dict(type='str'), - service_port=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - interval=dict(type='str'), - ttl=dict(type='str'), - tcp=dict(type='str'), - http=dict(type='str'), - timeout=dict(type='str'), - tags=dict(type='list', elements='str'), + service_address=dict(type="str"), + service_port=dict(type="int"), + state=dict(default="present", choices=["present", "absent"]), + interval=dict(type="str"), + ttl=dict(type="str"), + tcp=dict(type="str"), + http=dict(type="str"), + timeout=dict(type="str"), + tags=dict(type="list", elements="str"), token=dict(no_log=True), ), mutually_exclusive=[ - ('script', 'ttl', 'tcp', 'http'), + ("script", "ttl", "tcp", "http"), ], required_if=[ - ('state', 'present', ['service_name']), - ('state', 'absent', ['service_id', 'service_name', 'check_id', 'check_name'], True), + ("state", "present", ["service_name"]), + ("state", "absent", ["service_id", "service_name", "check_id", "check_name"], True), ], required_by={ - 'script': 'interval', - 'http': 'interval', - 'tcp': 'interval', + "script": "interval", + "http": "interval", + "tcp": "interval", }, supports_check_mode=False, ) p = module.params test_dependencies(module) - if p['state'] == 'absent' and any(p[x] for x in ['script', 'ttl', 'tcp', 'http', 'interval']): + if p["state"] == "absent" and any(p[x] for x in ["script", "ttl", "tcp", "http", "interval"]): module.fail_json( msg="The use of parameters 'script', 'ttl', 'tcp', 'http', 'interval' along with 'state=absent' is no longer allowed." 
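# --- editor's note: illustrative sketch, not part of the upstream diff ---
# The argument spec above encodes the module's contract, e.g.:
#   mutually_exclusive=[("script", "ttl", "tcp", "http")]  -> at most one check type
#   required_by={"script": "interval", ...}                -> a check type needs an interval
# A minimal parameter set that passes validation (hypothetical values):
#   {"state": "present", "service_name": "web", "service_port": 8080,
#    "http": "http://localhost:8080/health", "interval": "10s"}
# --------------------------------------------------------------------------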
) @@ -608,5 +626,5 @@ def main(): module.fail_json(msg=str(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/consul_agent_check.py b/plugins/modules/consul_agent_check.py index f4a9f8ad8a2..f2cd9076402 100644 --- a/plugins/modules/consul_agent_check.py +++ b/plugins/modules/consul_agent_check.py @@ -161,31 +161,31 @@ _ARGUMENT_SPEC = { "state": dict(default="present", choices=["present", "absent"]), - "name": dict(type='str'), - "id": dict(type='str'), - "interval": dict(type='str'), - "notes": dict(type='str'), - "args": dict(type='list', elements='str'), - "http": dict(type='str'), - "tcp": dict(type='str'), - "ttl": dict(type='str'), - "timeout": dict(type='str'), - "service_id": dict(type='str'), + "name": dict(type="str"), + "id": dict(type="str"), + "interval": dict(type="str"), + "notes": dict(type="str"), + "args": dict(type="list", elements="str"), + "http": dict(type="str"), + "tcp": dict(type="str"), + "ttl": dict(type="str"), + "timeout": dict(type="str"), + "service_id": dict(type="str"), } _MUTUALLY_EXCLUSIVE = [ - ('args', 'ttl', 'tcp', 'http'), + ("args", "ttl", "tcp", "http"), ] _REQUIRED_IF = [ - ('state', 'present', ['name']), - ('state', 'absent', ('id', 'name'), True), + ("state", "present", ["name"]), + ("state", "absent", ("id", "name"), True), ] _REQUIRED_BY = { - 'args': 'interval', - 'http': 'interval', - 'tcp': 'interval', + "args": "interval", + "http": "interval", + "tcp": "interval", } _ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) @@ -195,8 +195,17 @@ class ConsulAgentCheckModule(_ConsulModule): api_endpoint = "agent/check" result_key = "check" unique_identifiers = ["id", "name"] - operational_attributes = {"Node", "CheckID", "Output", "ServiceName", "ServiceTags", - "Status", "Type", "ExposedPort", "Definition"} + operational_attributes = { + "Node", + "CheckID", + "Output", + "ServiceName", + "ServiceTags", + "Status", + "Type", + "ExposedPort", + "Definition", + } def endpoint_url(self, operation, identifier=None): if operation == OPERATION_READ: diff --git a/plugins/modules/consul_agent_service.py b/plugins/modules/consul_agent_service.py index ca09f74842b..48778b94288 100644 --- a/plugins/modules/consul_agent_service.py +++ b/plugins/modules/consul_agent_service.py @@ -189,34 +189,35 @@ OPERATION_CREATE, OPERATION_UPDATE, OPERATION_DELETE, - _ConsulModule + _ConsulModule, ) -_CHECK_MUTUALLY_EXCLUSIVE = [('args', 'ttl', 'tcp', 'http')] +_CHECK_MUTUALLY_EXCLUSIVE = [("args", "ttl", "tcp", "http")] _CHECK_REQUIRED_BY = { - 'args': 'interval', - 'http': 'interval', - 'tcp': 'interval', + "args": "interval", + "http": "interval", + "tcp": "interval", } _ARGUMENT_SPEC = { "state": dict(default="present", choices=["present", "absent"]), - "name": dict(type='str'), - "id": dict(type='str'), - "tags": dict(type='list', elements='str'), - "address": dict(type='str'), - "meta": dict(type='dict'), - "service_port": dict(type='int'), - "enable_tag_override": dict(type='bool', default=False), - "weights": dict(type='dict', options=dict( - passing=dict(type='int', default=1, no_log=False), - warning=dict(type='int', default=1) - ), default={"passing": 1, "warning": 1}) + "name": dict(type="str"), + "id": dict(type="str"), + "tags": dict(type="list", elements="str"), + "address": dict(type="str"), + "meta": dict(type="dict"), + "service_port": dict(type="int"), + "enable_tag_override": dict(type="bool", default=False), + "weights": dict( + type="dict", + options=dict(passing=dict(type="int", default=1, no_log=False), 
warning=dict(type="int", default=1)), + default={"passing": 1, "warning": 1}, + ), } _REQUIRED_IF = [ - ('state', 'present', ['name']), - ('state', 'absent', ('id', 'name'), True), + ("state", "present", ["name"]), + ("state", "absent", ("id", "name"), True), ] _ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) diff --git a/plugins/modules/consul_binding_rule.py b/plugins/modules/consul_binding_rule.py index 70cd56d8d78..3c6936a960f 100644 --- a/plugins/modules/consul_binding_rule.py +++ b/plugins/modules/consul_binding_rule.py @@ -129,9 +129,7 @@ def read_object(self): try: results = self.get(url) for result in results: - if result.get("Description").startswith( - f"{self.params['name']}: " - ): + if result.get("Description").startswith(f"{self.params['name']}: "): return result except RequestError as e: if e.status == 404: @@ -158,9 +156,7 @@ def prepare_object(self, existing, obj): "description": dict(type="str"), "auth_method": dict(type="str", required=True), "selector": dict(type="str"), - "bind_type": dict( - type="str", choices=["service", "node", "role", "templated-policy"] - ), + "bind_type": dict(type="str", choices=["service", "node", "role", "templated-policy"]), "bind_name": dict(type="str"), "bind_vars": dict(type="dict"), "state": dict(default="present", choices=["present", "absent"]), diff --git a/plugins/modules/consul_kv.py b/plugins/modules/consul_kv.py index bb1f9e76784..336e49199c7 100644 --- a/plugins/modules/consul_kv.py +++ b/plugins/modules/consul_kv.py @@ -142,6 +142,7 @@ try: import consul from requests.exceptions import ConnectionError + python_consul_installed = True except ImportError: python_consul_installed = False @@ -168,7 +169,7 @@ def _has_value_changed(consul_client, key, target_value): if not existing: return index, True try: - changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value + changed = to_text(existing["Value"], errors="surrogate_or_strict") != target_value return index, changed except UnicodeError: # Existing value was not decodable but all values we set are valid utf-8 @@ -176,57 +177,51 @@ def _has_value_changed(consul_client, key, target_value): def execute(module): - state = module.params.get('state') + state = module.params.get("state") - if state == 'acquire' or state == 'release': + if state == "acquire" or state == "release": lock(module, state) - elif state == 'present': - if module.params.get('value') is NOT_SET: + elif state == "present": + if module.params.get("value") is NOT_SET: get_value(module) else: set_value(module) - elif state == 'absent': + elif state == "absent": remove_value(module) else: module.exit_json(msg=f"Unsupported state: {state}") def lock(module, state): - consul_api = get_consul_api(module) - session = module.params.get('session') - key = module.params.get('key') - value = module.params.get('value') + session = module.params.get("session") + key = module.params.get("key") + value = module.params.get("value") if not session: - module.fail( - msg=f'{state} of lock for {key} requested but no session supplied') + module.fail(msg=f"{state} of lock for {key} requested but no session supplied") index, changed = _has_value_changed(consul_api, key, value) if changed and not module.check_mode: - if state == 'acquire': - changed = consul_api.kv.put(key, value, - cas=module.params.get('cas'), - acquire=session, - flags=module.params.get('flags')) + if state == "acquire": + changed = consul_api.kv.put( + key, value, cas=module.params.get("cas"), acquire=session, flags=module.params.get("flags") + ) 
else: - changed = consul_api.kv.put(key, value, - cas=module.params.get('cas'), - release=session, - flags=module.params.get('flags')) + changed = consul_api.kv.put( + key, value, cas=module.params.get("cas"), release=session, flags=module.params.get("flags") + ) - module.exit_json(changed=changed, - index=index, - key=key) + module.exit_json(changed=changed, index=index, key=key) def get_value(module): consul_api = get_consul_api(module) - key = module.params.get('key') + key = module.params.get("key") - index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse')) + index, existing_value = consul_api.kv.get(key, recurse=module.params.get("recurse")) module.exit_json(changed=False, index=index, data=existing_value) @@ -234,8 +229,8 @@ def get_value(module): def set_value(module): consul_api = get_consul_api(module) - key = module.params.get('key') - value = module.params.get('value') + key = module.params.get("key") + value = module.params.get("value") if value is NOT_SET: raise AssertionError(f'Cannot set value of "{key}" to `NOT_SET`') @@ -243,75 +238,69 @@ def set_value(module): index, changed = _has_value_changed(consul_api, key, value) if changed and not module.check_mode: - changed = consul_api.kv.put(key, value, - cas=module.params.get('cas'), - flags=module.params.get('flags')) + changed = consul_api.kv.put(key, value, cas=module.params.get("cas"), flags=module.params.get("flags")) stored = None - if module.params.get('retrieve'): + if module.params.get("retrieve"): index, stored = consul_api.kv.get(key) - module.exit_json(changed=changed, - index=index, - key=key, - data=stored) + module.exit_json(changed=changed, index=index, key=key, data=stored) def remove_value(module): - ''' remove the value associated with the given key. if the recurse parameter - is set then any key prefixed with the given key will be removed. ''' + """remove the value associated with the given key. if the recurse parameter + is set then any key prefixed with the given key will be removed.""" consul_api = get_consul_api(module) - key = module.params.get('key') + key = module.params.get("key") - index, existing = consul_api.kv.get( - key, recurse=module.params.get('recurse')) + index, existing = consul_api.kv.get(key, recurse=module.params.get("recurse")) changed = existing is not None if changed and not module.check_mode: - consul_api.kv.delete(key, module.params.get('recurse')) + consul_api.kv.delete(key, module.params.get("recurse")) - module.exit_json(changed=changed, - index=index, - key=key, - data=existing) + module.exit_json(changed=changed, index=index, key=key, data=existing) def get_consul_api(module): - return consul.Consul(host=module.params.get('host'), - port=module.params.get('port'), - scheme=module.params.get('scheme'), - verify=module.params.get('validate_certs'), - token=module.params.get('token'), - dc=module.params.get('datacenter')) + return consul.Consul( + host=module.params.get("host"), + port=module.params.get("port"), + scheme=module.params.get("scheme"), + verify=module.params.get("validate_certs"), + token=module.params.get("token"), + dc=module.params.get("datacenter"), + ) def test_dependencies(module): if not python_consul_installed: - module.fail_json(msg="python-consul required for this module. " - "see https://python-consul.readthedocs.io/en/latest/#installation") + module.fail_json( + msg="python-consul required for this module. 
" + "see https://python-consul.readthedocs.io/en/latest/#installation" + ) def main(): - module = AnsibleModule( argument_spec=dict( - cas=dict(type='str'), - datacenter=dict(type='str'), - flags=dict(type='str'), - key=dict(type='str', required=True, no_log=False), - host=dict(type='str', default='localhost'), - scheme=dict(type='str', default='http'), - validate_certs=dict(type='bool', default=True), - port=dict(type='int', default=8500), - recurse=dict(type='bool'), - retrieve=dict(type='bool', default=True), - state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']), - token=dict(type='str', no_log=True), - value=dict(type='str', default=NOT_SET), - session=dict(type='str'), + cas=dict(type="str"), + datacenter=dict(type="str"), + flags=dict(type="str"), + key=dict(type="str", required=True, no_log=False), + host=dict(type="str", default="localhost"), + scheme=dict(type="str", default="http"), + validate_certs=dict(type="bool", default=True), + port=dict(type="int", default=8500), + recurse=dict(type="bool"), + retrieve=dict(type="bool", default=True), + state=dict(type="str", default="present", choices=["absent", "acquire", "present", "release"]), + token=dict(type="str", no_log=True), + value=dict(type="str", default=NOT_SET), + session=dict(type="str"), ), - supports_check_mode=True + supports_check_mode=True, ) test_dependencies(module) @@ -319,10 +308,12 @@ def main(): try: execute(module) except ConnectionError as e: - module.fail_json(msg=f"Could not connect to consul agent at {module.params.get('host')}:{module.params.get('port')}, error was {e}") + module.fail_json( + msg=f"Could not connect to consul agent at {module.params.get('host')}:{module.params.get('port')}, error was {e}" + ) except Exception as e: module.fail_json(msg=str(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/consul_session.py b/plugins/modules/consul_session.py index e1b4c68d7e4..7615b04055d 100644 --- a/plugins/modules/consul_session.py +++ b/plugins/modules/consul_session.py @@ -115,74 +115,58 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.consul import ( - AUTH_ARGUMENTS_SPEC, _ConsulModule -) +from ansible_collections.community.general.plugins.module_utils.consul import AUTH_ARGUMENTS_SPEC, _ConsulModule def execute(module, consul_module): + state = module.params.get("state") - state = module.params.get('state') - - if state in ['info', 'list', 'node']: + if state in ["info", "list", "node"]: lookup_sessions(module, consul_module) - elif state == 'present': + elif state == "present": update_session(module, consul_module) else: remove_session(module, consul_module) def list_sessions(consul_module, datacenter): - return consul_module.get( - 'session/list', - params={'dc': datacenter}) + return consul_module.get("session/list", params={"dc": datacenter}) def list_sessions_for_node(consul_module, node, datacenter): - return consul_module.get( - ('session', 'node', node), - params={'dc': datacenter}) + return consul_module.get(("session", "node", node), params={"dc": datacenter}) def get_session_info(consul_module, session_id, datacenter): - return consul_module.get( - ('session', 'info', session_id), - params={'dc': datacenter}) + return consul_module.get(("session", "info", session_id), params={"dc": datacenter}) def lookup_sessions(module, consul_module): + datacenter = module.params.get("datacenter") - datacenter = 
module.params.get('datacenter') - - state = module.params.get('state') + state = module.params.get("state") try: - if state == 'list': + if state == "list": sessions_list = list_sessions(consul_module, datacenter) # Ditch the index, this can be grabbed from the results if sessions_list and len(sessions_list) >= 2: sessions_list = sessions_list[1] - module.exit_json(changed=True, - sessions=sessions_list) - elif state == 'node': - node = module.params.get('node') + module.exit_json(changed=True, sessions=sessions_list) + elif state == "node": + node = module.params.get("node") sessions = list_sessions_for_node(consul_module, node, datacenter) - module.exit_json(changed=True, - node=node, - sessions=sessions) - elif state == 'info': - session_id = module.params.get('id') + module.exit_json(changed=True, node=node, sessions=sessions) + elif state == "info": + session_id = module.params.get("id") session_by_id = get_session_info(consul_module, session_id, datacenter) - module.exit_json(changed=True, - session_id=session_id, - sessions=session_by_id) + module.exit_json(changed=True, session_id=session_id, sessions=session_by_id) except Exception as e: module.fail_json(msg=f"Could not retrieve session info {e}") -def create_session(consul_module, name, behavior, ttl, node, - lock_delay, datacenter, checks): +def create_session(consul_module, name, behavior, ttl, node, lock_delay, datacenter, checks): create_data = { "LockDelay": lock_delay, "Node": node, @@ -192,97 +176,81 @@ def create_session(consul_module, name, behavior, ttl, node, } if ttl is not None: create_data["TTL"] = f"{ttl}s" # TTL is in seconds - create_session_response_dict = consul_module.put( - 'session/create', - params={ - 'dc': datacenter}, - data=create_data) + create_session_response_dict = consul_module.put("session/create", params={"dc": datacenter}, data=create_data) return create_session_response_dict["ID"] def update_session(module, consul_module): - - name = module.params.get('name') - delay = module.params.get('delay') - checks = module.params.get('checks') - datacenter = module.params.get('datacenter') - node = module.params.get('node') - behavior = module.params.get('behavior') - ttl = module.params.get('ttl') + name = module.params.get("name") + delay = module.params.get("delay") + checks = module.params.get("checks") + datacenter = module.params.get("datacenter") + node = module.params.get("node") + behavior = module.params.get("behavior") + ttl = module.params.get("ttl") try: - session = create_session(consul_module, - name=name, - behavior=behavior, - ttl=ttl, - node=node, - lock_delay=delay, - datacenter=datacenter, - checks=checks - ) - module.exit_json(changed=True, - session_id=session, - name=name, - behavior=behavior, - ttl=ttl, - delay=delay, - checks=checks, - node=node) + session = create_session( + consul_module, + name=name, + behavior=behavior, + ttl=ttl, + node=node, + lock_delay=delay, + datacenter=datacenter, + checks=checks, + ) + module.exit_json( + changed=True, + session_id=session, + name=name, + behavior=behavior, + ttl=ttl, + delay=delay, + checks=checks, + node=node, + ) except Exception as e: module.fail_json(msg=f"Could not create/update session {e}") def destroy_session(consul_module, session_id): - return consul_module.put(('session', 'destroy', session_id)) + return consul_module.put(("session", "destroy", session_id)) def remove_session(module, consul_module): - session_id = module.params.get('id') + session_id = module.params.get("id") try: destroy_session(consul_module, session_id) 
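# --- editor's note: illustrative sketch, not part of the upstream diff ---
# Consul's PUT session/destroy/<id> endpoint does not distinguish known from
# unknown ids, which is why this branch reports changed=True without first
# checking that the session still exists.
# --------------------------------------------------------------------------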
- module.exit_json(changed=True, - session_id=session_id) + module.exit_json(changed=True, session_id=session_id) except Exception as e: module.fail_json(msg=f"Could not remove session with id '{session_id}' {e}") def main(): argument_spec = dict( - checks=dict(type='list', elements='str'), - delay=dict(type='int', default='15'), - behavior=dict( - type='str', - default='release', - choices=[ - 'release', - 'delete']), - ttl=dict(type='int'), - id=dict(type='str'), - name=dict(type='str'), - node=dict(type='str'), - state=dict( - type='str', - default='present', - choices=[ - 'absent', - 'info', - 'list', - 'node', - 'present']), - datacenter=dict(type='str'), - **AUTH_ARGUMENTS_SPEC + checks=dict(type="list", elements="str"), + delay=dict(type="int", default="15"), + behavior=dict(type="str", default="release", choices=["release", "delete"]), + ttl=dict(type="int"), + id=dict(type="str"), + name=dict(type="str"), + node=dict(type="str"), + state=dict(type="str", default="present", choices=["absent", "info", "list", "node", "present"]), + datacenter=dict(type="str"), + **AUTH_ARGUMENTS_SPEC, ) module = AnsibleModule( argument_spec=argument_spec, required_if=[ - ('state', 'node', ['name']), - ('state', 'info', ['id']), - ('state', 'remove', ['id']), + ("state", "node", ["name"]), + ("state", "info", ["id"]), + ("state", "remove", ["id"]), ], - supports_check_mode=False + supports_check_mode=False, ) consul_module = _ConsulModule(module) @@ -292,5 +260,5 @@ def main(): module.fail_json(msg=str(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/copr.py b/plugins/modules/copr.py index 36f6e7539ff..63dd7e25484 100644 --- a/plugins/modules/copr.py +++ b/plugins/modules/copr.py @@ -108,6 +108,7 @@ import dnf.cli import dnf.repodict from dnf.conf import Conf + HAS_DNF_PACKAGES = True DNF_IMP_ERR = None except ImportError: @@ -159,7 +160,7 @@ def __init__(self, host, name, state, protocol, chroot=None, check_mode=False): @property def short_chroot(self): """str: Chroot (distribution-version-architecture) shorten to distribution-version.""" - return self.chroot.rsplit('-', 1)[0] + return self.chroot.rsplit("-", 1)[0] @property def arch(self): @@ -229,7 +230,7 @@ def _download_repo_info(self): Returns: Information about the repository. """ - distribution, version = self.short_chroot.split('-', 1) + distribution, version = self.short_chroot.split("-", 1) chroot = self.short_chroot while True: repo_info, status_code = self._get(chroot) @@ -247,13 +248,9 @@ def _download_repo_info(self): distribution = "epel" else: if str(status_code) != "404": - self.raise_exception( - "This repository does not have any builds yet so you cannot enable it now." - ) + self.raise_exception("This repository does not have any builds yet so you cannot enable it now.") else: - self.raise_exception( - f"Chroot {self.chroot} does not exist in {self.name}" - ) + self.raise_exception(f"Chroot {self.chroot} does not exist in {self.name}") def _enable_repo(self, repo_filename_path, repo_content=None): """Write information to a repo file. 
@@ -269,12 +266,16 @@ def _enable_repo(self, repo_filename_path, repo_content=None): if not repo_content: repo_content = self._download_repo_info() if self.ansible_module.params["includepkgs"]: - includepkgs_value = ','.join(self.ansible_module.params['includepkgs']) - repo_content_strip = repo_content.rstrip('\n') # Python 3.11 does not allow backslash chars within f-string expressions + includepkgs_value = ",".join(self.ansible_module.params["includepkgs"]) + repo_content_strip = repo_content.rstrip( + "\n" + ) # Python 3.11 does not allow backslash chars within f-string expressions repo_content = f"{repo_content_strip}\nincludepkgs={includepkgs_value}\n" if self.ansible_module.params["excludepkgs"]: - excludepkgs_value = ','.join(self.ansible_module.params['excludepkgs']) - repo_content_strip = repo_content.rstrip('\n') # Python 3.11 does not allow backslash chars within f-string expressions + excludepkgs_value = ",".join(self.ansible_module.params["excludepkgs"]) + repo_content_strip = repo_content.rstrip( + "\n" + ) # Python 3.11 does not allow backslash chars within f-string expressions repo_content = f"{repo_content_strip}\nexcludepkgs={excludepkgs_value}\n" if self._compare_repo_content(repo_filename_path, repo_content): return False @@ -352,13 +353,14 @@ def _disable_repo(self, repo_filename_path): with open(repo_filename_path, "r") as file: repo_content_file = file.read() if repo_content_file != repo_content_api: - if not self.resolve_differences( - repo_content_file, repo_content_api, repo_filename_path - ): + if not self.resolve_differences(repo_content_file, repo_content_api, repo_filename_path): return False if not self.check_mode: self.base.conf.write_raw_configfile( - repo.repofile, repo_id, self.base.conf.substitutions, {"enabled": "0"}, + repo.repofile, + repo_id, + self.base.conf.substitutions, + {"enabled": "0"}, ) return True @@ -487,8 +489,8 @@ def run_module(): name=dict(type="str", required=True), state=dict(type="str", choices=["enabled", "disabled", "absent"], default="enabled"), chroot=dict(type="str"), - includepkgs=dict(type='list', elements="str"), - excludepkgs=dict(type='list', elements="str"), + includepkgs=dict(type="list", elements="str"), + excludepkgs=dict(type="list", elements="str"), ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) params = module.params diff --git a/plugins/modules/cpanm.py b/plugins/modules/cpanm.py index 056b60163ca..d176783dbea 100644 --- a/plugins/modules/cpanm.py +++ b/plugins/modules/cpanm.py @@ -171,31 +171,30 @@ class CPANMinus(ModuleHelper): - output_params = ['name', 'version'] + output_params = ["name", "version"] module = dict( argument_spec=dict( - name=dict(type='str', aliases=['pkg']), - version=dict(type='str'), - from_path=dict(type='path'), - notest=dict(type='bool', default=False), - locallib=dict(type='path'), - mirror=dict(type='str'), - mirror_only=dict(type='bool', default=False), - installdeps=dict(type='bool', default=False), - install_recommendations=dict(type='bool'), - install_suggestions=dict(type='bool'), - executable=dict(type='path'), - mode=dict(type='str', default='new', choices=['compatibility', 'new']), - name_check=dict(type='str') + name=dict(type="str", aliases=["pkg"]), + version=dict(type="str"), + from_path=dict(type="path"), + notest=dict(type="bool", default=False), + locallib=dict(type="path"), + mirror=dict(type="str"), + mirror_only=dict(type="bool", default=False), + installdeps=dict(type="bool", default=False), + install_recommendations=dict(type="bool"), + 
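# --- editor's note: illustrative sketch, not part of the upstream diff ---
# These two options are deliberately tri-state (default None, not False):
# in command_args_formats below,
#   cmd_runner_fmt.as_bool("--with-recommends", "--without-recommends", ignore_none=True)
# renders True as --with-recommends, False as --without-recommends, and
# emits no flag at all for None, leaving cpanm's own default untouched.
# --------------------------------------------------------------------------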
install_suggestions=dict(type="bool"), + executable=dict(type="path"), + mode=dict(type="str", default="new", choices=["compatibility", "new"]), + name_check=dict(type="str"), ), - required_one_of=[('name', 'from_path')], - + required_one_of=[("name", "from_path")], ) - command = 'cpanm' + command = "cpanm" command_args_formats = dict( notest=cmd_runner_fmt.as_bool("--notest"), - locallib=cmd_runner_fmt.as_opt_val('--local-lib'), - mirror=cmd_runner_fmt.as_opt_val('--mirror'), + locallib=cmd_runner_fmt.as_opt_val("--local-lib"), + mirror=cmd_runner_fmt.as_opt_val("--mirror"), mirror_only=cmd_runner_fmt.as_bool("--mirror-only"), installdeps=cmd_runner_fmt.as_bool("--installdeps"), install_recommendations=cmd_runner_fmt.as_bool("--with-recommends", "--without-recommends", ignore_none=True), @@ -209,7 +208,11 @@ def __init_module__(self): if v.mode == "compatibility": if v.name_check: self.do_raise("Parameter name_check can only be used with mode=new") - self.deprecate("'mode=compatibility' is deprecated, use 'mode=new' instead", version='13.0.0', collection_name="community.general") + self.deprecate( + "'mode=compatibility' is deprecated, use 'mode=new' instead", + version="13.0.0", + collection_name="community.general", + ) else: if v.name and v.from_path: self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'") @@ -220,7 +223,7 @@ def __init_module__(self): with self.runner("cpanm_version") as ctx: rc, out, err = ctx.run() - line = out.split('\n')[0] + line = out.split("\n")[0] match = re.search(r"version\s+([\d\.]+)\s+", line) if not match: self.do_raise(f"Failed to determine version number. First line of output: {line}") @@ -230,27 +233,29 @@ def _is_package_installed(self, name, locallib, version): def process(rc, out, err): return rc == 0 - if name is None or name.endswith('.tar.gz'): + if name is None or name.endswith(".tar.gz"): return False version = "" if version is None else f" {version}" env = {"PERL5LIB": f"{locallib}/lib/perl5"} if locallib else {} - runner = CmdRunner(self.module, ["perl", "-le"], {"mod": cmd_runner_fmt.as_list()}, check_rc=False, environ_update=env) + runner = CmdRunner( + self.module, ["perl", "-le"], {"mod": cmd_runner_fmt.as_list()}, check_rc=False, environ_update=env + ) with runner("mod", output_process=process) as ctx: - return ctx.run(mod=f'use {name}{version};') + return ctx.run(mod=f"use {name}{version};") def sanitize_pkg_spec_version(self, pkg_spec, version): if version is None: return pkg_spec - if pkg_spec.endswith('.tar.gz'): + if pkg_spec.endswith(".tar.gz"): self.do_raise(msg="parameter 'version' must not be used when installing from a file") if os.path.isdir(pkg_spec): self.do_raise(msg="parameter 'version' must not be used when installing from a directory") - if pkg_spec.endswith('.git'): - if version.startswith('~'): + if pkg_spec.endswith(".git"): + if version.startswith("~"): self.do_raise(msg="operator '~' not allowed in version parameter when installing from git repository") - version = version if version.startswith('@') else f"@{version}" - elif version[0] not in ('@', '~'): + version = version if version.startswith("@") else f"@{version}" + elif version[0] not in ("@", "~"): version = f"~{version}" return pkg_spec + version @@ -258,12 +263,12 @@ def __run__(self): def process(rc, out, err): if self.vars.mode == "compatibility" and rc != 0: self.do_raise(msg=err, cmd=self.vars.cmd_args) - return 'is up to date' not in err and 'is up to date' not in out + return "is up to date" not in err and "is up to 
date" not in out v = self.vars - pkg_param = 'from_path' if v.from_path else 'name' + pkg_param = "from_path" if v.from_path else "name" - if v.mode == 'compatibility': + if v.mode == "compatibility": if self._is_package_installed(v.name, v.locallib, v.version): return pkg_spec = v[pkg_param] @@ -273,16 +278,19 @@ def process(rc, out, err): return pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version) - with self.runner([ - 'notest', - 'locallib', - 'mirror', - 'mirror_only', - 'installdeps', - 'install_recommendations', - 'install_suggestions', - 'pkg_spec' - ], output_process=process) as ctx: + with self.runner( + [ + "notest", + "locallib", + "mirror", + "mirror_only", + "installdeps", + "install_recommendations", + "install_suggestions", + "pkg_spec", + ], + output_process=process, + ) as ctx: self.changed = ctx.run(pkg_spec=pkg_spec) @@ -290,5 +298,5 @@ def main(): CPANMinus.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/cronvar.py b/plugins/modules/cronvar.py index cd15ae25853..cfa87709ea1 100644 --- a/plugins/modules/cronvar.py +++ b/plugins/modules/cronvar.py @@ -114,25 +114,34 @@ class CronVarError(Exception): class CronVar: """ - CronVar object to write variables to crontabs. + CronVar object to write variables to crontabs. - user - the user of the crontab (defaults to root) - cron_file - a cron file under /etc/cron.d + user - the user of the crontab (defaults to root) + cron_file - a cron file under /etc/cron.d """ def __init__(self, module, user=None, cron_file=None): self.module = module self.user = user self.lines = None - self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',)) - self.cron_cmd = self.module.get_bin_path('crontab', required=True) + self.wordchars = "".join( + chr(x) + for x in range(128) + if chr(x) + not in ( + "=", + "'", + '"', + ) + ) + self.cron_cmd = self.module.get_bin_path("crontab", required=True) if cron_file: self.cron_file = "" if os.path.isabs(cron_file): self.cron_file = cron_file else: - self.cron_file = os.path.join('/etc/cron.d', cron_file) + self.cron_file = os.path.join("/etc/cron.d", cron_file) parent_dir = os.path.dirname(self.cron_file) if parent_dir and not os.path.isdir(parent_dir): module.fail_json(msg=f"Parent directory '{parent_dir}' does not exist for cron_file: '{cron_file}'") @@ -147,7 +156,7 @@ def read(self): if self.cron_file: # read the cronfile try: - with open(self.cron_file, 'r') as f: + with open(self.cron_file, "r") as f: self.lines = f.read().splitlines() except IOError: # cron file does not exist @@ -164,8 +173,11 @@ def read(self): lines = out.splitlines() count = 0 for l in lines: - if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l - ) and not re.match(r'# \(/tmp/.*installed on.*\)', l) and not re.match(r'# \(.*version.*\)', l)): + if count > 2 or ( + not re.match(r"# DO NOT EDIT THIS FILE - edit the master and reinstall.", l) + and not re.match(r"# \(/tmp/.*installed on.*\)", l) + and not re.match(r"# \(.*version.*\)", l) + ): self.lines.append(l) count += 1 @@ -177,13 +189,13 @@ def write(self, backup_file=None): Write the crontab to the system. Saves all information. 
""" if backup_file: - fileh = open(backup_file, 'w') + fileh = open(backup_file, "w") elif self.cron_file: - fileh = open(self.cron_file, 'w') + fileh = open(self.cron_file, "w") path = None else: - filed, path = tempfile.mkstemp(prefix='crontab') - fileh = os.fdopen(filed, 'w') + filed, path = tempfile.mkstemp(prefix="crontab") + fileh = os.fdopen(filed, "w") fileh.write(self.render()) fileh.close() @@ -215,8 +227,8 @@ def parse_for_var(self, line): lexer = shlex.shlex(line) lexer.wordchars = self.wordchars varname = lexer.get_token() - is_env_var = lexer.get_token() == '=' - value = ''.join(lexer) + is_env_var = lexer.get_token() == "=" + value = "".join(lexer) if is_env_var: return (varname, value) raise CronVarError("Not a variable.") @@ -284,43 +296,44 @@ def render(self): """ Render a proper crontab """ - result = '\n'.join(self.lines) - if result and result[-1] not in ['\n', '\r']: - result += '\n' + result = "\n".join(self.lines) + if result and result[-1] not in ["\n", "\r"]: + result += "\n" return result def _read_user_execute(self): """ Returns the command line for reading a crontab """ - user = '' + user = "" if self.user: - if platform.system() == 'SunOS': + if platform.system() == "SunOS": return f"su {shlex_quote(self.user)} -c '{shlex_quote(self.cron_cmd)} -l'" - elif platform.system() == 'AIX': + elif platform.system() == "AIX": return f"{shlex_quote(self.cron_cmd)} -l {shlex_quote(self.user)}" - elif platform.system() == 'HP-UX': + elif platform.system() == "HP-UX": return f"{self.cron_cmd} -l {shlex_quote(self.user)}" elif pwd.getpwuid(os.getuid())[0] != self.user: - user = f'-u {shlex_quote(self.user)}' + user = f"-u {shlex_quote(self.user)}" return f"{self.cron_cmd} {user} -l" def _write_execute(self, path): """ Return the command line for writing a crontab """ - user = '' + user = "" if self.user: - if platform.system() in ['SunOS', 'HP-UX', 'AIX']: + if platform.system() in ["SunOS", "HP-UX", "AIX"]: return f"chown {shlex_quote(self.user)} {shlex_quote(path)} ; su '{shlex_quote(self.user)}' -c '{self.cron_cmd} {shlex_quote(path)}'" elif pwd.getpwuid(os.getuid())[0] != self.user: - user = f'-u {shlex_quote(self.user)}' + user = f"-u {shlex_quote(self.user)}" return f"{self.cron_cmd} {user} {shlex_quote(path)}" # ================================================== + def main(): # The following example playbooks: # @@ -338,34 +351,34 @@ def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), - value=dict(type='str'), - user=dict(type='str'), - cron_file=dict(type='str'), - insertafter=dict(type='str'), - insertbefore=dict(type='str'), - state=dict(type='str', default='present', choices=['absent', 'present']), - backup=dict(type='bool', default=False), + name=dict(type="str", required=True), + value=dict(type="str"), + user=dict(type="str"), + cron_file=dict(type="str"), + insertafter=dict(type="str"), + insertbefore=dict(type="str"), + state=dict(type="str", default="present", choices=["absent", "present"]), + backup=dict(type="bool", default=False), ), - mutually_exclusive=[['insertbefore', 'insertafter']], + mutually_exclusive=[["insertbefore", "insertafter"]], supports_check_mode=False, ) - name = module.params['name'] - value = module.params['value'] - user = module.params['user'] - cron_file = module.params['cron_file'] - insertafter = module.params['insertafter'] - insertbefore = module.params['insertbefore'] - state = module.params['state'] - backup = module.params['backup'] - ensure_present = state == 'present' + name = 
module.params["name"] + value = module.params["value"] + user = module.params["user"] + cron_file = module.params["cron_file"] + insertafter = module.params["insertafter"] + insertbefore = module.params["insertbefore"] + state = module.params["state"] + backup = module.params["backup"] + ensure_present = state == "present" changed = False res_args = dict() # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option. - os.umask(int('022', 8)) + os.umask(int("022", 8)) cronvar = CronVar(module, user, cron_file) module.debug(f'cronvar instantiated - name: "{name}"') @@ -383,7 +396,7 @@ def main(): # if requested make a backup before making a change if backup: - dummy, backup_file = tempfile.mkstemp(prefix='cronvar') + dummy, backup_file = tempfile.mkstemp(prefix="cronvar") cronvar.write(backup_file) if cronvar.cron_file and not name and not ensure_present: @@ -406,10 +419,7 @@ def main(): cronvar.remove_variable(name) changed = True - res_args = { - "vars": cronvar.get_var_names(), - "changed": changed - } + res_args = {"vars": cronvar.get_var_names(), "changed": changed} if changed: cronvar.write() @@ -417,15 +427,15 @@ def main(): # retain the backup only if crontab or cron file have changed if backup: if changed: - res_args['backup_file'] = backup_file + res_args["backup_file"] = backup_file else: os.unlink(backup_file) if cron_file: - res_args['cron_file'] = cron_file + res_args["cron_file"] = cron_file module.exit_json(**res_args) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/crypttab.py b/plugins/modules/crypttab.py index 31937954d89..0039ffed848 100644 --- a/plugins/modules/crypttab.py +++ b/plugins/modules/crypttab.py @@ -91,76 +91,71 @@ def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', required=True, choices=['absent', 'opts_absent', 'opts_present', 'present']), - backing_device=dict(type='str'), - password=dict(type='path'), - opts=dict(type='str'), - path=dict(type='path', default='/etc/crypttab') + name=dict(type="str", required=True), + state=dict(type="str", required=True, choices=["absent", "opts_absent", "opts_present", "present"]), + backing_device=dict(type="str"), + password=dict(type="path"), + opts=dict(type="str"), + path=dict(type="path", default="/etc/crypttab"), ), supports_check_mode=True, ) - backing_device = module.params['backing_device'] - password = module.params['password'] - opts = module.params['opts'] - state = module.params['state'] - path = module.params['path'] - name = module.params['name'] - if name.startswith('/dev/mapper/'): - name = name[len('/dev/mapper/'):] - - if state != 'absent' and backing_device is None and password is None and opts is None: - module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'", - **module.params) - - if 'opts' in state and (backing_device is not None or password is not None): - module.fail_json(msg=f"cannot update 'backing_device' or 'password' when state={state}", - **module.params) - - for arg_name, arg in (('name', name), - ('backing_device', backing_device), - ('password', password), - ('opts', opts)): - if arg is not None and (' ' in arg or '\t' in arg or arg == ''): - module.fail_json(msg=f"invalid '{arg_name}': contains white space or is empty", - **module.params) + backing_device = module.params["backing_device"] + password = module.params["password"] + opts = module.params["opts"] + state = module.params["state"] + 
path = module.params["path"] + name = module.params["name"] + if name.startswith("/dev/mapper/"): + name = name[len("/dev/mapper/") :] + + if state != "absent" and backing_device is None and password is None and opts is None: + module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'", **module.params) + + if "opts" in state and (backing_device is not None or password is not None): + module.fail_json(msg=f"cannot update 'backing_device' or 'password' when state={state}", **module.params) + + for arg_name, arg in (("name", name), ("backing_device", backing_device), ("password", password), ("opts", opts)): + if arg is not None and (" " in arg or "\t" in arg or arg == ""): + module.fail_json(msg=f"invalid '{arg_name}': contains white space or is empty", **module.params) try: crypttab = Crypttab(path) existing_line = crypttab.match(name) except Exception as e: - module.fail_json(msg=f"failed to open and parse crypttab file: {e}", exception=traceback.format_exc(), **module.params) + module.fail_json( + msg=f"failed to open and parse crypttab file: {e}", exception=traceback.format_exc(), **module.params + ) - if 'present' in state and existing_line is None and backing_device is None: - module.fail_json(msg="'backing_device' required to add a new entry", - **module.params) + if "present" in state and existing_line is None and backing_device is None: + module.fail_json(msg="'backing_device' required to add a new entry", **module.params) - changed, reason = False, '?' + changed, reason = False, "?" - if state == 'absent': + if state == "absent": if existing_line is not None: changed, reason = existing_line.remove() - elif state == 'present': + elif state == "present": if existing_line is not None: changed, reason = existing_line.set(backing_device, password, opts) else: changed, reason = crypttab.add(Line(None, name, backing_device, password, opts)) - elif state == 'opts_present': + elif state == "opts_present": if existing_line is not None: changed, reason = existing_line.opts.add(opts) else: changed, reason = crypttab.add(Line(None, name, backing_device, password, opts)) - elif state == 'opts_absent': + elif state == "opts_absent": if existing_line is not None: changed, reason = existing_line.opts.remove(opts) if changed and not module.check_mode: - with open(path, 'wb') as f: - f.write(to_bytes(crypttab, errors='surrogate_or_strict')) + with open(path, "wb") as f: + f.write(to_bytes(crypttab, errors="surrogate_or_strict")) module.exit_json(changed=changed, msg=reason, **module.params) @@ -174,15 +169,15 @@ def __init__(self, path): if not os.path.exists(path): if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) - open(path, 'a').close() + open(path, "a").close() - with open(path, 'r') as f: + with open(path, "r") as f: for line in f.readlines(): self._lines.append(Line(line)) def add(self, line): self._lines.append(line) - return True, 'added line' + return True, "added line" def lines(self): for line in self._lines: @@ -199,11 +194,11 @@ def __str__(self): lines = [] for line in self._lines: lines.append(str(line)) - crypttab = '\n'.join(lines) + crypttab = "\n".join(lines) if len(crypttab) == 0: - crypttab += '\n' - if crypttab[-1] != '\n': - crypttab += '\n' + crypttab += "\n" + if crypttab[-1] != "\n": + crypttab += "\n" return crypttab @@ -216,7 +211,7 @@ def __init__(self, line=None, name=None, backing_device=None, password=None, opt self.opts = Options(opts) if line is not None: - self.line = self.line.rstrip('\n') + self.line = 
self.line.rstrip("\n") if self._line_valid(line): self.name, backing_device, password, opts = self._split_line(line) @@ -239,10 +234,10 @@ def set(self, backing_device, password, opts): self.opts = opts changed = True - return changed, 'updated line' + return changed, "updated line" def _line_valid(self, line): - if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4): + if not line.strip() or line.startswith("#") or len(line.split()) not in (2, 3, 4): return False return True @@ -257,14 +252,11 @@ def _split_line(self, line): except IndexError: field3 = None - return (fields[0], - fields[1], - field2, - field3) + return (fields[0], fields[1], field2, field3) def remove(self): - self.line, self.name, self.backing_device = '', None, None - return True, 'removed line' + self.line, self.name, self.backing_device = "", None, None + return True, "removed line" def valid(self): if self.name is not None and self.backing_device is not None: @@ -278,22 +270,22 @@ def __str__(self): if self.password is not None: fields.append(self.password) else: - fields.append('none') + fields.append("none") if self.opts: fields.append(str(self.opts)) - return ' '.join(fields) + return " ".join(fields) return self.line class Options(dict): - """opts_string looks like: 'discard,foo=bar,baz=greeble' """ + """opts_string looks like: 'discard,foo=bar,baz=greeble'""" def __init__(self, opts_string): super().__init__() self.itemlist = [] if opts_string is not None: - for opt in opts_string.split(','): - kv = opt.split('=') + for opt in opts_string.split(","): + kv = opt.split("=") if len(kv) > 1: k, v = (kv[0], kv[1]) else: @@ -309,7 +301,7 @@ def add(self, opts_string): else: changed = True self[k] = v - return changed, 'updated options' + return changed, "updated options" def remove(self, opts_string): changed = False @@ -317,7 +309,7 @@ def remove(self, opts_string): if k in self: del self[k] changed = True - return changed, 'removed options' + return changed, "removed options" def keys(self): return self.itemlist @@ -349,9 +341,9 @@ def __str__(self): if v is None: ret.append(k) else: - ret.append(f'{k}={v}') - return ','.join(ret) + ret.append(f"{k}={v}") + return ",".join(ret) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/datadog_downtime.py b/plugins/modules/datadog_downtime.py index d7264dce084..ea62fd2fa31 100644 --- a/plugins/modules/datadog_downtime.py +++ b/plugins/modules/datadog_downtime.py @@ -194,13 +194,12 @@ def main(): configuration = Configuration( host=module.params["api_host"], - api_key={ - "apiKeyAuth": module.params["api_key"], - "appKeyAuth": module.params["app_key"] - } + api_key={"apiKeyAuth": module.params["api_key"], "appKeyAuth": module.params["app_key"]}, ) with ApiClient(configuration) as api_client: - api_client.user_agent = f"ansible_collection/community_general (module_name datadog_downtime) {api_client.user_agent}" + api_client.user_agent = ( + f"ansible_collection/community_general (module_name datadog_downtime) {api_client.user_agent}" + ) api_instance = DowntimesApi(api_client) # Validate api and app keys @@ -275,11 +274,7 @@ def _update_downtime(module, current_downtime, api_client): resp = api.create_downtime(downtime) else: resp = api.update_downtime(module.params["id"], downtime) - if _equal_dicts( - resp.to_dict(), - current_downtime.to_dict(), - ["active", "creator_id", "updater_id"] - ): + if _equal_dicts(resp.to_dict(), current_downtime.to_dict(), ["active", "creator_id", "updater_id"]): 
module.exit_json(changed=False, downtime=resp.to_dict()) else: module.exit_json(changed=True, downtime=resp.to_dict()) diff --git a/plugins/modules/datadog_event.py b/plugins/modules/datadog_event.py index c34951992e6..5b71d32d316 100644 --- a/plugins/modules/datadog_event.py +++ b/plugins/modules/datadog_event.py @@ -130,6 +130,7 @@ DATADOG_IMP_ERR = None try: from datadog import initialize, api + HAS_DATADOG = True except Exception: DATADOG_IMP_ERR = traceback.format_exc() @@ -144,29 +145,29 @@ def main(): argument_spec=dict( api_key=dict(required=True, no_log=True), app_key=dict(required=True, no_log=True), - api_host=dict(type='str'), + api_host=dict(type="str"), title=dict(required=True), text=dict(required=True), - date_happened=dict(type='int'), - priority=dict(default='normal', choices=['normal', 'low']), + date_happened=dict(type="int"), + priority=dict(default="normal", choices=["normal", "low"]), host=dict(), - tags=dict(type='list', elements='str'), - alert_type=dict(default='info', choices=['error', 'warning', 'info', 'success']), + tags=dict(type="list", elements="str"), + alert_type=dict(default="info", choices=["error", "warning", "info", "success"]), aggregation_key=dict(no_log=False), - validate_certs=dict(default=True, type='bool'), + validate_certs=dict(default=True, type="bool"), ) ) # Prepare Datadog if not HAS_DATADOG: - module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR) + module.fail_json(msg=missing_required_lib("datadogpy"), exception=DATADOG_IMP_ERR) options = { - 'api_key': module.params['api_key'], - 'app_key': module.params['app_key'], + "api_key": module.params["api_key"], + "app_key": module.params["app_key"], } - if module.params['api_host'] is not None: - options['api_host'] = module.params['api_host'] + if module.params["api_host"] is not None: + options["api_host"] = module.params["api_host"] initialize(**options) @@ -175,17 +176,19 @@ def main(): def _post_event(module): try: - if module.params['host'] is None: - module.params['host'] = platform.node().split('.')[0] - msg = api.Event.create(title=module.params['title'], - text=module.params['text'], - host=module.params['host'], - tags=module.params['tags'], - priority=module.params['priority'], - alert_type=module.params['alert_type'], - aggregation_key=module.params['aggregation_key'], - source_type_name='ansible') - if msg['status'] != 'ok': + if module.params["host"] is None: + module.params["host"] = platform.node().split(".")[0] + msg = api.Event.create( + title=module.params["title"], + text=module.params["text"], + host=module.params["host"], + tags=module.params["tags"], + priority=module.params["priority"], + alert_type=module.params["alert_type"], + aggregation_key=module.params["aggregation_key"], + source_type_name="ansible", + ) + if msg["status"] != "ok": module.fail_json(msg=msg) module.exit_json(changed=True, msg=msg) @@ -193,5 +196,5 @@ def _post_event(module): module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/datadog_monitor.py b/plugins/modules/datadog_monitor.py index e9fe852d56c..db197fad3e2 100644 --- a/plugins/modules/datadog_monitor.py +++ b/plugins/modules/datadog_monitor.py @@ -244,6 +244,7 @@ DATADOG_IMP_ERR = None try: from datadog import initialize, api + HAS_DATADOG = True except Exception: DATADOG_IMP_ERR = traceback.format_exc() @@ -259,43 +260,54 @@ def main(): api_key=dict(required=True, no_log=True), api_host=dict(), 
app_key=dict(required=True, no_log=True), - state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']), - type=dict(choices=['metric alert', 'service check', 'event alert', 'event-v2 alert', 'process alert', - 'log alert', 'query alert', 'trace-analytics alert', - 'rum alert', 'composite']), + state=dict(required=True, choices=["present", "absent", "mute", "unmute"]), + type=dict( + choices=[ + "metric alert", + "service check", + "event alert", + "event-v2 alert", + "process alert", + "log alert", + "query alert", + "trace-analytics alert", + "rum alert", + "composite", + ] + ), name=dict(required=True), query=dict(), notification_message=dict(no_log=True), - silenced=dict(type='dict'), - notify_no_data=dict(default=False, type='bool'), + silenced=dict(type="dict"), + notify_no_data=dict(default=False, type="bool"), no_data_timeframe=dict(), timeout_h=dict(), renotify_interval=dict(), escalation_message=dict(), - notify_audit=dict(default=False, type='bool'), - thresholds=dict(type='dict'), - tags=dict(type='list', elements='str'), - locked=dict(default=False, type='bool'), - require_full_window=dict(type='bool'), + notify_audit=dict(default=False, type="bool"), + thresholds=dict(type="dict"), + tags=dict(type="list", elements="str"), + locked=dict(default=False, type="bool"), + require_full_window=dict(type="bool"), new_host_delay=dict(), evaluation_delay=dict(), id=dict(), - include_tags=dict(default=True, type='bool'), - priority=dict(type='int'), - notification_preset_name=dict(choices=['show_all', 'hide_query', 'hide_handles', 'hide_all']), - renotify_occurrences=dict(type='int'), - renotify_statuses=dict(type='list', elements='str', choices=['alert', 'warn', 'no data']), + include_tags=dict(default=True, type="bool"), + priority=dict(type="int"), + notification_preset_name=dict(choices=["show_all", "hide_query", "hide_handles", "hide_all"]), + renotify_occurrences=dict(type="int"), + renotify_statuses=dict(type="list", elements="str", choices=["alert", "warn", "no data"]), ) ) # Prepare Datadog if not HAS_DATADOG: - module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR) + module.fail_json(msg=missing_required_lib("datadogpy"), exception=DATADOG_IMP_ERR) options = { - 'api_key': module.params['api_key'], - 'api_host': module.params['api_host'], - 'app_key': module.params['app_key'] + "api_key": module.params["api_key"], + "api_host": module.params["api_host"], + "app_key": module.params["app_key"], } initialize(**options) @@ -304,53 +316,58 @@ def main(): # if not, then fail here. 
response = api.Monitor.get_all() if isinstance(response, dict): - msg = response.get('errors', None) + msg = response.get("errors", None) if msg: module.fail_json(msg=f"Failed to connect Datadog server using given app_key and api_key : {msg[0]}") - if module.params['state'] == 'present': + if module.params["state"] == "present": install_monitor(module) - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": delete_monitor(module) - elif module.params['state'] == 'mute': + elif module.params["state"] == "mute": mute_monitor(module) - elif module.params['state'] == 'unmute': + elif module.params["state"] == "unmute": unmute_monitor(module) def _fix_template_vars(message): if message: - return message.replace('[[', '{{').replace(']]', '}}') + return message.replace("[[", "{{").replace("]]", "}}") return message def _get_monitor(module): - if module.params['id'] is not None: - monitor = api.Monitor.get(module.params['id']) - if 'errors' in monitor: - module.fail_json(msg=f"Failed to retrieve monitor with id {module.params['id']}, errors are {monitor['errors']}") + if module.params["id"] is not None: + monitor = api.Monitor.get(module.params["id"]) + if "errors" in monitor: + module.fail_json( + msg=f"Failed to retrieve monitor with id {module.params['id']}, errors are {monitor['errors']}" + ) return monitor else: monitors = api.Monitor.get_all() for monitor in monitors: - if monitor['name'] == _fix_template_vars(module.params['name']): + if monitor["name"] == _fix_template_vars(module.params["name"]): return monitor return {} def _post_monitor(module, options): try: - kwargs = dict(type=module.params['type'], query=module.params['query'], - name=_fix_template_vars(module.params['name']), - message=_fix_template_vars(module.params['notification_message']), - escalation_message=_fix_template_vars(module.params['escalation_message']), - priority=module.params['priority'], - options=options) - if module.params['tags'] is not None: - kwargs['tags'] = module.params['tags'] + kwargs = dict( + type=module.params["type"], + query=module.params["query"], + name=_fix_template_vars(module.params["name"]), + message=_fix_template_vars(module.params["notification_message"]), + escalation_message=_fix_template_vars(module.params["escalation_message"]), + priority=module.params["priority"], + options=options, + ) + if module.params["tags"] is not None: + kwargs["tags"] = module.params["tags"] msg = api.Monitor.create(**kwargs) - if 'errors' in msg: - module.fail_json(msg=str(msg['errors'])) + if "errors" in msg: + module.fail_json(msg=str(msg["errors"])) else: module.exit_json(changed=True, msg=msg) except Exception as e: @@ -365,19 +382,24 @@ def _equal_dicts(a, b, ignore_keys): def _update_monitor(module, monitor, options): try: - kwargs = dict(id=monitor['id'], query=module.params['query'], - name=_fix_template_vars(module.params['name']), - message=_fix_template_vars(module.params['notification_message']), - escalation_message=_fix_template_vars(module.params['escalation_message']), - priority=module.params['priority'], - options=options) - if module.params['tags'] is not None: - kwargs['tags'] = module.params['tags'] + kwargs = dict( + id=monitor["id"], + query=module.params["query"], + name=_fix_template_vars(module.params["name"]), + message=_fix_template_vars(module.params["notification_message"]), + escalation_message=_fix_template_vars(module.params["escalation_message"]), + priority=module.params["priority"], + options=options, + ) + if module.params["tags"] is not 
None: + kwargs["tags"] = module.params["tags"] msg = api.Monitor.update(**kwargs) - if 'errors' in msg: - module.fail_json(msg=str(msg['errors'])) - elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified', 'matching_downtimes', 'overall_state_modified']): + if "errors" in msg: + module.fail_json(msg=str(msg["errors"])) + elif _equal_dicts( + msg, monitor, ["creator", "overall_state", "modified", "matching_downtimes", "overall_state_modified"] + ): module.exit_json(changed=False, msg=msg) else: module.exit_json(changed=True, msg=msg) @@ -387,27 +409,30 @@ def _update_monitor(module, monitor, options): def install_monitor(module): options = { - "silenced": module.params['silenced'], - "notify_no_data": module.boolean(module.params['notify_no_data']), - "no_data_timeframe": module.params['no_data_timeframe'], - "timeout_h": module.params['timeout_h'], - "renotify_interval": module.params['renotify_interval'], - "escalation_message": module.params['escalation_message'], - "notify_audit": module.boolean(module.params['notify_audit']), - "locked": module.boolean(module.params['locked']), - "require_full_window": module.params['require_full_window'], - "new_host_delay": module.params['new_host_delay'], - "evaluation_delay": module.params['evaluation_delay'], - "include_tags": module.params['include_tags'], - "notification_preset_name": module.params['notification_preset_name'], - "renotify_occurrences": module.params['renotify_occurrences'], - "renotify_statuses": module.params['renotify_statuses'], + "silenced": module.params["silenced"], + "notify_no_data": module.boolean(module.params["notify_no_data"]), + "no_data_timeframe": module.params["no_data_timeframe"], + "timeout_h": module.params["timeout_h"], + "renotify_interval": module.params["renotify_interval"], + "escalation_message": module.params["escalation_message"], + "notify_audit": module.boolean(module.params["notify_audit"]), + "locked": module.boolean(module.params["locked"]), + "require_full_window": module.params["require_full_window"], + "new_host_delay": module.params["new_host_delay"], + "evaluation_delay": module.params["evaluation_delay"], + "include_tags": module.params["include_tags"], + "notification_preset_name": module.params["notification_preset_name"], + "renotify_occurrences": module.params["renotify_occurrences"], + "renotify_statuses": module.params["renotify_statuses"], } - if module.params['type'] == "service check": - options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1} - if module.params['type'] in ["metric alert", "log alert", "query alert", "trace-analytics alert", "rum alert"] and module.params['thresholds'] is not None: - options["thresholds"] = module.params['thresholds'] + if module.params["type"] == "service check": + options["thresholds"] = module.params["thresholds"] or {"ok": 1, "critical": 1, "warning": 1} + if ( + module.params["type"] in ["metric alert", "log alert", "query alert", "trace-analytics alert", "rum alert"] + and module.params["thresholds"] is not None + ): + options["thresholds"] = module.params["thresholds"] monitor = _get_monitor(module) if not monitor: @@ -421,7 +446,7 @@ def delete_monitor(module): if not monitor: module.exit_json(changed=False) try: - msg = api.Monitor.delete(monitor['id']) + msg = api.Monitor.delete(monitor["id"]) module.exit_json(changed=True, msg=msg) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) @@ -431,15 +456,20 @@ def mute_monitor(module): monitor = 
_get_monitor(module) if not monitor: module.fail_json(msg=f"Monitor {module.params['name']} not found!") - elif monitor['options']['silenced']: - module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.") - elif module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0: + elif monitor["options"]["silenced"]: + module.fail_json( + msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first." + ) + elif ( + module.params["silenced"] is not None + and len(set(monitor["options"]["silenced"]) ^ set(module.params["silenced"])) == 0 + ): module.exit_json(changed=False) try: - if module.params['silenced'] is None or module.params['silenced'] == "": - msg = api.Monitor.mute(id=monitor['id']) + if module.params["silenced"] is None or module.params["silenced"] == "": + msg = api.Monitor.mute(id=monitor["id"]) else: - msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced']) + msg = api.Monitor.mute(id=monitor["id"], silenced=module.params["silenced"]) module.exit_json(changed=True, msg=msg) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) @@ -449,14 +479,14 @@ def unmute_monitor(module): monitor = _get_monitor(module) if not monitor: module.fail_json(msg=f"Monitor {module.params['name']} not found!") - elif not monitor['options']['silenced']: + elif not monitor["options"]["silenced"]: module.exit_json(changed=False) try: - msg = api.Monitor.unmute(monitor['id']) + msg = api.Monitor.unmute(monitor["id"]) module.exit_json(changed=True, msg=msg) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/dconf.py b/plugins/modules/dconf.py index 8eb487c383f..c7a30dde8e0 100644 --- a/plugins/modules/dconf.py +++ b/plugins/modules/dconf.py @@ -135,7 +135,7 @@ from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils import deps -glib_module_name = 'gi.repository.GLib' +glib_module_name = "gi.repository.GLib" try: from gi.repository.GLib import Variant, GError @@ -178,7 +178,7 @@ def __init__(self, module): # If no existing D-Bus session was detected, check if dbus-run-session # is available. 
if self.dbus_session_bus_address is None: - self.dbus_run_session_cmd = self.module.get_bin_path('dbus-run-session', required=True) + self.dbus_run_session_cmd = self.module.get_bin_path("dbus-run-session", required=True) def _get_existing_dbus_session(self): """ @@ -199,15 +199,25 @@ def _get_existing_dbus_session(self): try: process = psutil.Process(pid) process_real_uid, dummy, dummy = process.uids() - if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ(): - dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS'] - self.module.debug(f"Found D-Bus user session candidate at address: {dbus_session_bus_address_candidate}") - dbus_send_cmd = self.module.get_bin_path('dbus-send', required=True) - command = [dbus_send_cmd, f'--address={dbus_session_bus_address_candidate}', '--type=signal', '/', 'com.example.test'] + if process_real_uid == uid and "DBUS_SESSION_BUS_ADDRESS" in process.environ(): + dbus_session_bus_address_candidate = process.environ()["DBUS_SESSION_BUS_ADDRESS"] + self.module.debug( + f"Found D-Bus user session candidate at address: {dbus_session_bus_address_candidate}" + ) + dbus_send_cmd = self.module.get_bin_path("dbus-send", required=True) + command = [ + dbus_send_cmd, + f"--address={dbus_session_bus_address_candidate}", + "--type=signal", + "/", + "com.example.test", + ] rc, dummy, dummy = self.module.run_command(command) if rc == 0: - self.module.debug(f"Verified D-Bus user session candidate as usable at address: {dbus_session_bus_address_candidate}") + self.module.debug( + f"Verified D-Bus user session candidate as usable at address: {dbus_session_bus_address_candidate}" + ) return dbus_session_bus_address_candidate @@ -240,16 +250,17 @@ def run_command(self, command): rc, out, err = self.module.run_command(command) if self.dbus_session_bus_address is None and rc == 127: - self.module.fail_json(msg=f"Failed to run passed-in command, dbus-run-session faced an internal error: {err}") + self.module.fail_json( + msg=f"Failed to run passed-in command, dbus-run-session faced an internal error: {err}" + ) else: - extra_environment = {'DBUS_SESSION_BUS_ADDRESS': self.dbus_session_bus_address} + extra_environment = {"DBUS_SESSION_BUS_ADDRESS": self.dbus_session_bus_address} rc, out, err = self.module.run_command(command, environ_update=extra_environment) return rc, out, err class DconfPreference: - def __init__(self, module, check_mode=False): """ Initialises instance of the class. 
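[Editor's note, not part of the diff] The _get_existing_dbus_session() logic reformatted above uses a candidate-then-verify pattern: scan the current user's processes for a DBUS_SESSION_BUS_ADDRESS environment entry, then confirm the candidate bus actually works by emitting a throwaway signal with dbus-send. A minimal standalone sketch of the same pattern, assuming psutil is installed and dbus-send is on PATH; error handling is simplified compared to the module:

    import os
    import subprocess

    import psutil  # third-party; the same dependency the module relies on


    def find_user_session_bus_address():
        """Return a verified session bus address for the current user, or None."""
        uid = os.getuid()
        for pid in psutil.pids():
            try:
                process = psutil.Process(pid)
                if process.uids().real != uid:
                    continue
                candidate = process.environ().get("DBUS_SESSION_BUS_ADDRESS")
            except psutil.Error:
                continue  # process exited, or its environment is not readable
            if not candidate:
                continue
            # Verify the candidate by sending a throwaway signal, as the module does.
            probe = subprocess.run(
                ["dbus-send", f"--address={candidate}", "--type=signal", "/", "com.example.test"],
                capture_output=True,
            )
            if probe.returncode == 0:
                return candidate
        return None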
@@ -264,7 +275,7 @@ def __init__(self, module, check_mode=False): self.module = module self.check_mode = check_mode # Check if dconf binary exists - self.dconf_bin = self.module.get_bin_path('dconf', required=True) + self.dconf_bin = self.module.get_bin_path("dconf", required=True) @staticmethod def variants_are_equal(canonical_value, user_value): @@ -302,14 +313,12 @@ def read(self, key): rc, out, err = self.module.run_command(command) if rc != 0: - self.module.fail_json(msg=f'dconf failed while reading the value with error: {err}', - out=out, - err=err) + self.module.fail_json(msg=f"dconf failed while reading the value with error: {err}", out=out, err=err) - if out == '': + if out == "": value = None else: - value = out.rstrip('\n') + value = out.rstrip("\n") return value @@ -343,9 +352,9 @@ def write(self, key, value): rc, out, err = dbus_wrapper.run_command(command) if rc != 0: - self.module.fail_json(msg=f'dconf failed while writing key {key}, value {value} with error: {err}', - out=out, - err=err) + self.module.fail_json( + msg=f"dconf failed while writing key {key}, value {value} with error: {err}", out=out, err=err + ) # Value was changed. return True @@ -381,9 +390,7 @@ def reset(self, key): rc, out, err = dbus_wrapper.run_command(command) if rc != 0: - self.module.fail_json(msg=f'dconf failed while resetting the value with error: {err}', - out=out, - err=err) + self.module.fail_json(msg=f"dconf failed while resetting the value with error: {err}", out=out, err=err) # Value was changed. return True @@ -393,14 +400,14 @@ def main(): # Setup the Ansible module module = AnsibleModule( argument_spec=dict( - state=dict(default='present', choices=['present', 'absent', 'read']), - key=dict(required=True, type='str', no_log=False), + state=dict(default="present", choices=["present", "absent", "read"]), + key=dict(required=True, type="str", no_log=False), # Converted to str below after special handling of bool. - value=dict(type='raw'), + value=dict(type="raw"), ), supports_check_mode=True, required_if=[ - ('state', 'present', ['value']), + ("state", "present", ["value"]), ], ) @@ -415,13 +422,11 @@ def main(): if has_respawned(): # This shouldn't be possible; short-circuit early if it happens. - module.fail_json( - msg=f"{glib_module_name} must be installed and visible from {sys.executable}.") + module.fail_json(msg=f"{glib_module_name} must be installed and visible from {sys.executable}.") - interpreters = ['/usr/bin/python3', '/usr/bin/python'] + interpreters = ["/usr/bin/python3", "/usr/bin/python"] - interpreter = probe_interpreters_for_module( - interpreters, glib_module_name) + interpreter = probe_interpreters_for_module(interpreters, glib_module_name) if interpreter: # Found the Python bindings; respawn this module under the @@ -435,18 +440,18 @@ def main(): # about converting strings that look like booleans into booleans. Convert # the boolean into a string of the type dconf will understand. Any type for # the value other than boolean is just converted into a string directly. 
- if module.params['value'] is not None: - if isinstance(module.params['value'], bool): - module.params['value'] = 'true' if module.params['value'] else 'false' + if module.params["value"] is not None: + if isinstance(module.params["value"], bool): + module.params["value"] = "true" if module.params["value"] else "false" else: - module.params['value'] = to_native( - module.params['value'], errors='surrogate_or_strict') + module.params["value"] = to_native(module.params["value"], errors="surrogate_or_strict") if Variant is None: module.warn( - 'WARNING: The gi.repository Python library is not available; ' - 'using string comparison to check value equality. This fallback ' - 'will be deprecated in a future version of community.general.') + "WARNING: The gi.repository Python library is not available; " + "using string comparison to check value equality. This fallback " + "will be deprecated in a future version of community.general." + ) deps.validate(module) @@ -454,16 +459,16 @@ def main(): dconf = DconfPreference(module, module.check_mode) # Process based on different states. - if module.params['state'] == 'read': - value = dconf.read(module.params['key']) + if module.params["state"] == "read": + value = dconf.read(module.params["key"]) module.exit_json(changed=False, value=value) - elif module.params['state'] == 'present': - changed = dconf.write(module.params['key'], module.params['value']) + elif module.params["state"] == "present": + changed = dconf.write(module.params["key"], module.params["value"]) module.exit_json(changed=changed) - elif module.params['state'] == 'absent': - changed = dconf.reset(module.params['key']) + elif module.params["state"] == "absent": + changed = dconf.reset(module.params["key"]) module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/decompress.py b/plugins/modules/decompress.py index 2321497a7b9..e65a4f4448e 100644 --- a/plugins/modules/decompress.py +++ b/plugins/modules/decompress.py @@ -121,17 +121,17 @@ def decompress(b_src, b_dest, handler): class Decompress(ModuleHelper): destination_filename_template = "%s_decompressed" - output_params = 'dest' + output_params = "dest" module = dict( argument_spec=dict( - src=dict(type='path', required=True), - dest=dict(type='path'), - format=dict(type='str', default='gz', choices=['gz', 'bz2', 'xz']), - remove=dict(type='bool', default=False) + src=dict(type="path", required=True), + dest=dict(type="path"), + format=dict(type="str", default="gz", choices=["gz", "bz2", "xz"]), + remove=dict(type="bool", default=False), ), add_file_common_args=True, - supports_check_mode=True + supports_check_mode=True, ) def __init_module__(self): @@ -142,8 +142,8 @@ def __init_module__(self): self.configure() def configure(self): - b_dest = to_bytes(self.vars.dest, errors='surrogate_or_strict') - b_src = to_bytes(self.vars.src, errors='surrogate_or_strict') + b_dest = to_bytes(self.vars.dest, errors="surrogate_or_strict") + b_src = to_bytes(self.vars.src, errors="surrogate_or_strict") if not os.path.exists(b_src): if self.vars.remove and os.path.exists(b_dest): self.module.exit_json(changed=False) @@ -155,15 +155,15 @@ def configure(self): self.do_raise(msg=f"Destination is a directory, cannot decompress: '{b_dest}'") def __run__(self): - b_dest = to_bytes(self.vars.dest, errors='surrogate_or_strict') - b_src = to_bytes(self.vars.src, errors='surrogate_or_strict') + b_dest = to_bytes(self.vars.dest, errors="surrogate_or_strict") + b_src = to_bytes(self.vars.src, 
errors="surrogate_or_strict") file_args = self.module.load_file_common_arguments(self.module.params, path=self.vars.dest) handler = self.handlers[self.vars.format] try: tempfd, temppath = tempfile.mkstemp(dir=self.module.tmpdir) self.module.add_cleanup_file(temppath) - b_temppath = to_bytes(temppath, errors='surrogate_or_strict') + b_temppath = to_bytes(temppath, errors="surrogate_or_strict") decompress(b_src, b_temppath, handler) except OSError as e: self.do_raise(msg=f"Unable to create temporary file '{e}'") @@ -187,7 +187,7 @@ def get_destination_filename(self): src = self.vars.src fmt_extension = f".{self.vars.format}" if src.endswith(fmt_extension) and len(src) > len(fmt_extension): - filename = src[:-len(fmt_extension)] + filename = src[: -len(fmt_extension)] else: filename = Decompress.destination_filename_template % src return filename @@ -197,5 +197,5 @@ def main(): Decompress.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/deploy_helper.py b/plugins/modules/deploy_helper.py index 2629d59d30a..f4d84aad9d6 100644 --- a/plugins/modules/deploy_helper.py +++ b/plugins/modules/deploy_helper.py @@ -272,20 +272,19 @@ class DeployHelper: - def __init__(self, module): self.module = module self.file_args = module.load_file_common_arguments(module.params) - self.clean = module.params['clean'] - self.current_path = module.params['current_path'] - self.keep_releases = module.params['keep_releases'] - self.path = module.params['path'] - self.release = module.params['release'] - self.releases_path = module.params['releases_path'] - self.shared_path = module.params['shared_path'] - self.state = module.params['state'] - self.unfinished_filename = module.params['unfinished_filename'] + self.clean = module.params["clean"] + self.current_path = module.params["current_path"] + self.keep_releases = module.params["keep_releases"] + self.path = module.params["path"] + self.release = module.params["release"] + self.releases_path = module.params["releases_path"] + self.shared_path = module.params["shared_path"] + self.state = module.params["state"] + self.unfinished_filename = module.params["unfinished_filename"] def gather_facts(self): current_path = os.path.join(self.path, self.current_path) @@ -297,7 +296,7 @@ def gather_facts(self): previous_release, previous_release_path = self._get_last_release(current_path) - if not self.release and (self.state == 'query' or self.state == 'present'): + if not self.release and (self.state == "query" or self.state == "present"): self.release = time.strftime("%Y%m%d%H%M%S") if self.release: @@ -306,15 +305,15 @@ def gather_facts(self): new_release_path = None return { - 'project_path': self.path, - 'current_path': current_path, - 'releases_path': releases_path, - 'shared_path': shared_path, - 'previous_release': previous_release, - 'previous_release_path': previous_release_path, - 'new_release': self.release, - 'new_release_path': new_release_path, - 'unfinished_filename': self.unfinished_filename + "project_path": self.path, + "current_path": current_path, + "releases_path": releases_path, + "shared_path": shared_path, + "previous_release": previous_release, + "previous_release_path": previous_release_path, + "new_release": self.release, + "new_release_path": new_release_path, + "unfinished_filename": self.unfinished_filename, } def delete_path(self, path): @@ -422,16 +421,16 @@ def cleanup(self, releases_path, reserve_version): if not self.module.check_mode: releases.sort(key=lambda x: 
os.path.getctime(os.path.join(releases_path, x)), reverse=True) - for release in releases[self.keep_releases:]: + for release in releases[self.keep_releases :]: changes += self.delete_path(os.path.join(releases_path, release)) elif len(releases) > self.keep_releases: - changes += (len(releases) - self.keep_releases) + changes += len(releases) - self.keep_releases return changes def _get_file_args(self, path): file_args = self.file_args.copy() - file_args['path'] = path + file_args["path"] = path return file_args def _get_last_release(self, current_path): @@ -446,75 +445,72 @@ def _get_last_release(self, current_path): def main(): - module = AnsibleModule( argument_spec=dict( - path=dict(aliases=['dest'], required=True, type='path'), - release=dict(type='str'), - releases_path=dict(type='str', default='releases'), - shared_path=dict(type='path', default='shared'), - current_path=dict(type='path', default='current'), - keep_releases=dict(type='int', default=5), - clean=dict(type='bool', default=True), - unfinished_filename=dict(type='str', default='DEPLOY_UNFINISHED'), - state=dict(choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present') + path=dict(aliases=["dest"], required=True, type="path"), + release=dict(type="str"), + releases_path=dict(type="str", default="releases"), + shared_path=dict(type="path", default="shared"), + current_path=dict(type="path", default="current"), + keep_releases=dict(type="int", default=5), + clean=dict(type="bool", default=True), + unfinished_filename=dict(type="str", default="DEPLOY_UNFINISHED"), + state=dict(choices=["present", "absent", "clean", "finalize", "query"], default="present"), ), required_if=[ - ('state', 'finalize', ['release']), + ("state", "finalize", ["release"]), ], add_file_common_args=True, - supports_check_mode=True + supports_check_mode=True, ) deploy_helper = DeployHelper(module) facts = deploy_helper.gather_facts() - result = { - 'state': deploy_helper.state - } + result = {"state": deploy_helper.state} changes = 0 - if deploy_helper.state == 'query': - result['ansible_facts'] = {'deploy_helper': facts} + if deploy_helper.state == "query": + result["ansible_facts"] = {"deploy_helper": facts} - elif deploy_helper.state == 'present': - deploy_helper.check_link(facts['current_path']) - changes += deploy_helper.create_path(facts['project_path']) - changes += deploy_helper.create_path(facts['releases_path']) + elif deploy_helper.state == "present": + deploy_helper.check_link(facts["current_path"]) + changes += deploy_helper.create_path(facts["project_path"]) + changes += deploy_helper.create_path(facts["releases_path"]) if deploy_helper.shared_path: - changes += deploy_helper.create_path(facts['shared_path']) + changes += deploy_helper.create_path(facts["shared_path"]) - result['ansible_facts'] = {'deploy_helper': facts} + result["ansible_facts"] = {"deploy_helper": facts} - elif deploy_helper.state == 'finalize': + elif deploy_helper.state == "finalize": if deploy_helper.keep_releases <= 0: module.fail_json(msg="'keep_releases' should be at least 1") - changes += deploy_helper.remove_unfinished_file(facts['new_release_path']) - changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path']) + changes += deploy_helper.remove_unfinished_file(facts["new_release_path"]) + changes += deploy_helper.create_link(facts["new_release_path"], facts["current_path"]) if deploy_helper.clean: - changes += deploy_helper.remove_unfinished_link(facts['project_path']) - changes += 
deploy_helper.remove_unfinished_builds(facts['releases_path']) - changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release']) + changes += deploy_helper.remove_unfinished_link(facts["project_path"]) + changes += deploy_helper.remove_unfinished_builds(facts["releases_path"]) + changes += deploy_helper.cleanup(facts["releases_path"], facts["new_release"]) - elif deploy_helper.state == 'clean': - changes += deploy_helper.remove_unfinished_link(facts['project_path']) - changes += deploy_helper.remove_unfinished_builds(facts['releases_path']) - changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release']) + elif deploy_helper.state == "clean": + changes += deploy_helper.remove_unfinished_link(facts["project_path"]) + changes += deploy_helper.remove_unfinished_builds(facts["releases_path"]) + changes += deploy_helper.cleanup(facts["releases_path"], facts["new_release"]) - elif deploy_helper.state == 'absent': + elif deploy_helper.state == "absent": # destroy the facts - result['ansible_facts'] = {'deploy_helper': []} - changes += deploy_helper.delete_path(facts['project_path']) + result["ansible_facts"] = {"deploy_helper": []} + changes += deploy_helper.delete_path(facts["project_path"]) if changes > 0: - result['changed'] = True + result["changed"] = True else: - result['changed'] = False + result["changed"] = False module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/dimensiondata_network.py b/plugins/modules/dimensiondata_network.py index 966ea83f40a..73d23cf08a3 100644 --- a/plugins/modules/dimensiondata_network.py +++ b/plugins/modules/dimensiondata_network.py @@ -141,36 +141,32 @@ def __init__(self): super().__init__( module=AnsibleModule( argument_spec=DimensionDataModule.argument_spec_with_wait( - name=dict(type='str', required=True), - description=dict(type='str'), - service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']), - state=dict(default='present', choices=['present', 'absent']) + name=dict(type="str", required=True), + description=dict(type="str"), + service_plan=dict(default="ESSENTIALS", choices=["ADVANCED", "ESSENTIALS"]), + state=dict(default="present", choices=["present", "absent"]), ), - required_together=DimensionDataModule.required_together() + required_together=DimensionDataModule.required_together(), ) ) - self.name = self.module.params['name'] - self.description = self.module.params['description'] - self.service_plan = self.module.params['service_plan'] - self.state = self.module.params['state'] + self.name = self.module.params["name"] + self.description = self.module.params["description"] + self.service_plan = self.module.params["service_plan"] + self.state = self.module.params["state"] def state_present(self): network = self._get_network() if network: - self.module.exit_json( - changed=False, - msg='Network already exists', - network=self._network_to_dict(network) - ) + self.module.exit_json(changed=False, msg="Network already exists", network=self._network_to_dict(network)) network = self._create_network() self.module.exit_json( changed=True, msg='Created network "%s" in datacenter "%s".' 
% (self.name, self.location), - network=self._network_to_dict(network) + network=self._network_to_dict(network), ) def state_absent(self): @@ -178,15 +174,13 @@ def state_absent(self): if not network: self.module.exit_json( - changed=False, - msg='Network "%s" does not exist' % self.name, - network=self._network_to_dict(network) + changed=False, msg='Network "%s" does not exist' % self.name, network=self._network_to_dict(network) ) self._delete_network(network) def _get_network(self): - if self.mcp_version == '1.0': + if self.mcp_version == "1.0": networks = self.driver.list_networks(location=self.location) else: networks = self.driver.ex_list_network_domains(location=self.location) @@ -198,107 +192,85 @@ def _get_network(self): return None def _network_to_dict(self, network): - network_dict = dict( - id=network.id, - name=network.name, - description=network.description - ) + network_dict = dict(id=network.id, name=network.name, description=network.description) if isinstance(network.location, NodeLocation): - network_dict['location'] = network.location.id + network_dict["location"] = network.location.id else: - network_dict['location'] = network.location + network_dict["location"] = network.location - if self.mcp_version == '1.0': - network_dict['private_net'] = network.private_net - network_dict['multicast'] = network.multicast - network_dict['status'] = None + if self.mcp_version == "1.0": + network_dict["private_net"] = network.private_net + network_dict["multicast"] = network.multicast + network_dict["status"] = None else: - network_dict['private_net'] = None - network_dict['multicast'] = None - network_dict['status'] = network.status + network_dict["private_net"] = None + network_dict["multicast"] = None + network_dict["status"] = network.status return network_dict def _create_network(self): - # Make sure service_plan argument is defined - if self.mcp_version == '2.0' and 'service_plan' not in self.module.params: - self.module.fail_json( - msg='service_plan required when creating network and location is MCP 2.0' - ) + if self.mcp_version == "2.0" and "service_plan" not in self.module.params: + self.module.fail_json(msg="service_plan required when creating network and location is MCP 2.0") # Create network try: - if self.mcp_version == '1.0': - network = self.driver.ex_create_network( - self.location, - self.name, - description=self.description - ) + if self.mcp_version == "1.0": + network = self.driver.ex_create_network(self.location, self.name, description=self.description) else: network = self.driver.ex_create_network_domain( - self.location, - self.name, - self.module.params['service_plan'], - description=self.description + self.location, self.name, self.module.params["service_plan"], description=self.description ) except DimensionDataAPIException as e: - self.module.fail_json( msg="Failed to create new network: %s" % to_native(e), exception=traceback.format_exc() ) - if self.module.params['wait'] is True: - network = self._wait_for_network_state(network.id, 'NORMAL') + if self.module.params["wait"] is True: + network = self._wait_for_network_state(network.id, "NORMAL") return network def _delete_network(self, network): try: - if self.mcp_version == '1.0': + if self.mcp_version == "1.0": deleted = self.driver.ex_delete_network(network) else: deleted = self.driver.ex_delete_network_domain(network) if deleted: - self.module.exit_json( - changed=True, - msg="Deleted network with id %s" % network.id - ) + self.module.exit_json(changed=True, msg="Deleted network with id %s" % 
network.id) - self.module.fail_json( - "Unexpected failure deleting network with id %s" % network.id - ) + self.module.fail_json(msg="Unexpected failure deleting network with id %s" % network.id) except DimensionDataAPIException as e: - self.module.fail_json( - msg="Failed to delete network: %s" % to_native(e), exception=traceback.format_exc() - ) + self.module.fail_json(msg="Failed to delete network: %s" % to_native(e), exception=traceback.format_exc()) def _wait_for_network_state(self, net_id, state_to_wait_for): try: return self.driver.connection.wait_for_state( state_to_wait_for, self.driver.ex_get_network_domain, - self.module.params['wait_poll_interval'], - self.module.params['wait_time'], - net_id + self.module.params["wait_poll_interval"], + self.module.params["wait_time"], + net_id, ) except DimensionDataAPIException as e: self.module.fail_json( - msg='Network did not reach % state in time: %s' % (state_to_wait_for, to_native(e)), - exception=traceback.format_exc() + msg="Network did not reach %s state in time: %s" % (state_to_wait_for, to_native(e)), + exception=traceback.format_exc(), ) def main(): module = DimensionDataNetworkModule() - if module.state == 'present': + if module.state == "present": module.state_present() - elif module.state == 'absent': + elif module.state == "absent": module.state_absent() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/dimensiondata_vlan.py b/plugins/modules/dimensiondata_vlan.py index 4e09877f083..216dc00ef28 100644 --- a/plugins/modules/dimensiondata_vlan.py +++ b/plugins/modules/dimensiondata_vlan.py @@ -157,7 +157,10 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError +from ansible_collections.community.general.plugins.module_utils.dimensiondata import ( + DimensionDataModule, + UnknownNetworkError, +) try: from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException @@ -183,30 +186,28 @@ def __init__(self): super().__init__( module=AnsibleModule( argument_spec=DimensionDataModule.argument_spec_with_wait( - name=dict(required=True, type='str'), - description=dict(default='', type='str'), - network_domain=dict(required=True, type='str'), - private_ipv4_base_address=dict(default='', type='str'), - private_ipv4_prefix_size=dict(default=0, type='int'), - allow_expand=dict(default=False, type='bool'), - state=dict(default='present', choices=['present', 'absent', 'readonly']) + name=dict(required=True, type="str"), + description=dict(default="", type="str"), + network_domain=dict(required=True, type="str"), + private_ipv4_base_address=dict(default="", type="str"), + private_ipv4_prefix_size=dict(default=0, type="int"), + allow_expand=dict(default=False, type="bool"), + state=dict(default="present", choices=["present", "absent", "readonly"]), ), - required_together=DimensionDataModule.required_together() + required_together=DimensionDataModule.required_together(), ) ) - self.name = self.module.params['name'] - self.description = 
self.module.params["description"] + self.network_domain_selector = self.module.params["network_domain"] + self.private_ipv4_base_address = self.module.params["private_ipv4_base_address"] + self.private_ipv4_prefix_size = self.module.params["private_ipv4_prefix_size"] + self.state = self.module.params["state"] + self.allow_expand = self.module.params["allow_expand"] - if self.wait and self.state != 'present': - self.module.fail_json( - msg='The wait parameter is only supported when state is "present".' - ) + if self.wait and self.state != "present": + self.module.fail_json(msg='The wait parameter is only supported when state is "present".') def state_present(self): """ @@ -220,14 +221,14 @@ def state_present(self): if self.module.check_mode: self.module.exit_json( msg=f'VLAN "{self.name}" is absent from network domain "{self.network_domain_selector}" (should be present).', - changed=True + changed=True, ) vlan = self._create_vlan(network_domain) self.module.exit_json( msg=f'Created VLAN "{self.name}" in network domain "{self.network_domain_selector}".', vlan=vlan_to_dict(vlan), - changed=True + changed=True, ) else: diff = VlanDiff(vlan, self.module.params) @@ -235,7 +236,7 @@ def state_present(self): self.module.exit_json( msg=f'VLAN "{self.name}" is present in network domain "{self.network_domain_selector}" (no changes detected).', vlan=vlan_to_dict(vlan), - changed=False + changed=False, ) return @@ -249,16 +250,16 @@ def state_present(self): if diff.needs_expand() and not self.allow_expand: self.module.fail_json( - msg=f'The configured private IPv4 network size ({self.private_ipv4_prefix_size}-bit prefix) for ' - f'the VLAN differs from its current network size ({vlan.private_ipv4_range_size}-bit prefix) ' - 'and needs to be expanded. Use allow_expand=true if this is what you want.' + msg=f"The configured private IPv4 network size ({self.private_ipv4_prefix_size}-bit prefix) for " + f"the VLAN differs from its current network size ({vlan.private_ipv4_range_size}-bit prefix) " + "and needs to be expanded. Use allow_expand=true if this is what you want." ) if self.module.check_mode: self.module.exit_json( msg=f'VLAN "{self.name}" is present in network domain "{self.network_domain_selector}" (changes detected).', vlan=vlan_to_dict(vlan), - changed=True + changed=True, ) if diff.needs_edit(): @@ -274,7 +275,7 @@ def state_present(self): self.module.exit_json( msg=f'Updated VLAN "{self.name}" in network domain "{self.network_domain_selector}".', vlan=vlan_to_dict(vlan), - changed=True + changed=True, ) def state_readonly(self): @@ -286,10 +287,7 @@ def state_readonly(self): vlan = self._get_vlan(network_domain) if vlan: - self.module.exit_json( - vlan=vlan_to_dict(vlan), - changed=False - ) + self.module.exit_json(vlan=vlan_to_dict(vlan), changed=False) else: self.module.fail_json( msg=f'VLAN "{self.name}" does not exist in network domain "{self.network_domain_selector}".' 
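[Editor's note, not part of the diff] state_present() above illustrates the module's idempotency contract: look the VLAN up, diff the requested parameters against the live object, exit with changed=False when nothing differs, and in check mode report changed=True without calling the provider API. A generic sketch of that control flow, where fetch, create and update are hypothetical callables standing in for the module's real helpers:

    def ensure_present(module, desired, fetch, create, update):
        """Generic 'state: present' skeleton following the pattern shown above."""
        actual = fetch(desired["name"])
        if actual is None:
            if module.check_mode:
                module.exit_json(changed=True, msg="resource is absent (would create)")
            module.exit_json(changed=True, resource=create(desired))
        # Compare only user-supplied keys; server-managed fields are ignored.
        changes = {key: value for key, value in desired.items() if actual.get(key) != value}
        if not changes:
            module.exit_json(changed=False, resource=actual)
        if module.check_mode:
            module.exit_json(changed=True, msg=f"would update: {sorted(changes)}")
        module.exit_json(changed=True, resource=update(actual, changes))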
@@ -305,8 +303,7 @@ def state_absent(self): vlan = self._get_vlan(network_domain) if not vlan: self.module.exit_json( - msg=f'VLAN "{self.name}" is absent from network domain "{self.network_domain_selector}".', - changed=False + msg=f'VLAN "{self.name}" is absent from network domain "{self.network_domain_selector}".', changed=False ) return @@ -315,14 +312,13 @@ def state_absent(self): self.module.exit_json( msg=f'VLAN "{self.name}" is present in network domain "{self.network_domain_selector}" (should be absent).', vlan=vlan_to_dict(vlan), - changed=True + changed=True, ) self._delete_vlan(vlan) self.module.exit_json( - msg=f'Deleted VLAN "{self.name}" from network domain "{self.network_domain_selector}".', - changed=True + msg=f'Deleted VLAN "{self.name}" from network domain "{self.network_domain_selector}".', changed=True ) def _get_vlan(self, network_domain): @@ -334,10 +330,7 @@ def _get_vlan(self, network_domain): :rtype: DimensionDataVlan """ - vlans = self.driver.ex_list_vlans( - location=self.location, - network_domain=network_domain - ) + vlans = self.driver.ex_list_vlans(location=self.location, network_domain=network_domain) matching_vlans = [vlan for vlan in vlans if vlan.name == self.name] if matching_vlans: return matching_vlans[0] @@ -346,15 +339,11 @@ def _get_vlan(self, network_domain): def _create_vlan(self, network_domain): vlan = self.driver.ex_create_vlan( - network_domain, - self.name, - self.private_ipv4_base_address, - self.description, - self.private_ipv4_prefix_size + network_domain, self.name, self.private_ipv4_base_address, self.description, self.private_ipv4_prefix_size ) if self.wait: - vlan = self._wait_for_vlan_state(vlan.id, 'NORMAL') + vlan = self._wait_for_vlan_state(vlan.id, "NORMAL") return vlan @@ -364,7 +353,7 @@ def _delete_vlan(self, vlan): # Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present"). if self.wait: - self._wait_for_vlan_state(vlan, 'NOT_FOUND') + self._wait_for_vlan_state(vlan, "NOT_FOUND") except DimensionDataAPIException as api_exception: self.module.fail_json( @@ -374,37 +363,33 @@ def _delete_vlan(self, vlan): def _wait_for_vlan_state(self, vlan, state_to_wait_for): network_domain = self._get_network_domain() - wait_poll_interval = self.module.params['wait_poll_interval'] - wait_time = self.module.params['wait_time'] + wait_poll_interval = self.module.params["wait_poll_interval"] + wait_time = self.module.params["wait_time"] # Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try. 
try: return self.driver.connection.wait_for_state( - state_to_wait_for, - self.driver.ex_get_vlan, - wait_poll_interval, - wait_time, - vlan + state_to_wait_for, self.driver.ex_get_vlan, wait_poll_interval, wait_time, vlan ) except DimensionDataAPIException as api_exception: - if api_exception.code != 'RESOURCE_NOT_FOUND': + if api_exception.code != "RESOURCE_NOT_FOUND": raise return DimensionDataVlan( id=vlan.id, - status='NOT_FOUND', - name='', - description='', - private_ipv4_range_address='', + status="NOT_FOUND", + name="", + description="", + private_ipv4_range_address="", private_ipv4_range_size=0, - ipv4_gateway='', - ipv6_range_address='', + ipv4_gateway="", + ipv6_range_address="", ipv6_range_size=0, - ipv6_gateway='', + ipv6_gateway="", location=self.location, - network_domain=network_domain + network_domain=network_domain, ) def _get_network_domain(self): @@ -415,9 +400,7 @@ def _get_network_domain(self): """ try: - return self.get_network_domain( - self.network_domain_selector, self.location - ) + return self.get_network_domain(self.network_domain_selector, self.location) except UnknownNetworkError: self.module.fail_json( msg=f'Cannot find network domain "{self.network_domain_selector}" in datacenter "{self.location}".' @@ -451,13 +434,17 @@ def __init__(self, vlan, module_params): self.vlan = vlan self.module_params = module_params - self.name_changed = module_params['name'] != vlan.name - self.description_changed = module_params['description'] != vlan.description - self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address - self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size + self.name_changed = module_params["name"] != vlan.name + self.description_changed = module_params["description"] != vlan.description + self.private_ipv4_base_address_changed = ( + module_params["private_ipv4_base_address"] != vlan.private_ipv4_range_address + ) + self.private_ipv4_prefix_size_changed = ( + module_params["private_ipv4_prefix_size"] != vlan.private_ipv4_range_size + ) # Is configured prefix size greater than or less than the actual prefix size? - private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size + private_ipv4_prefix_size_difference = module_params["private_ipv4_prefix_size"] - vlan.private_ipv4_range_size self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0 self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0 @@ -482,11 +469,13 @@ def ensure_legal_change(self): # Cannot change base address for private IPv4 network. if self.private_ipv4_base_address_changed: - raise InvalidVlanChangeError('Cannot change the private IPV4 base address for an existing VLAN.') + raise InvalidVlanChangeError("Cannot change the private IPV4 base address for an existing VLAN.") # Cannot shrink private IPv4 network (by increasing prefix size). if self.private_ipv4_prefix_size_increased: - raise InvalidVlanChangeError('Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported).') + raise InvalidVlanChangeError( + "Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported)." 
+ ) def needs_edit(self): """ @@ -511,30 +500,30 @@ def needs_expand(self): def vlan_to_dict(vlan): return { - 'id': vlan.id, - 'name': vlan.name, - 'description': vlan.description, - 'location': vlan.location.id, - 'private_ipv4_base_address': vlan.private_ipv4_range_address, - 'private_ipv4_prefix_size': vlan.private_ipv4_range_size, - 'private_ipv4_gateway_address': vlan.ipv4_gateway, - 'ipv6_base_address': vlan.ipv6_range_address, - 'ipv6_prefix_size': vlan.ipv6_range_size, - 'ipv6_gateway_address': vlan.ipv6_gateway, - 'status': vlan.status + "id": vlan.id, + "name": vlan.name, + "description": vlan.description, + "location": vlan.location.id, + "private_ipv4_base_address": vlan.private_ipv4_range_address, + "private_ipv4_prefix_size": vlan.private_ipv4_range_size, + "private_ipv4_gateway_address": vlan.ipv4_gateway, + "ipv6_base_address": vlan.ipv6_range_address, + "ipv6_prefix_size": vlan.ipv6_range_size, + "ipv6_gateway_address": vlan.ipv6_gateway, + "status": vlan.status, } def main(): module = DimensionDataVlanModule() - if module.state == 'present': + if module.state == "present": module.state_present() - elif module.state == 'readonly': + elif module.state == "readonly": module.state_readonly() - elif module.state == 'absent': + elif module.state == "absent": module.state_absent() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/discord.py b/plugins/modules/discord.py index d694ee64bf2..661386e8153 100644 --- a/plugins/modules/discord.py +++ b/plugins/modules/discord.py @@ -130,88 +130,88 @@ def discord_check_mode(module): + webhook_id = module.params["webhook_id"] + webhook_token = module.params["webhook_token"] - webhook_id = module.params['webhook_id'] - webhook_token = module.params['webhook_token'] - - headers = { - 'content-type': 'application/json' - } + headers = {"content-type": "application/json"} url = f"https://discord.com/api/webhooks/{webhook_id}/{webhook_token}" - response, info = fetch_url(module, url, method='GET', headers=headers) + response, info = fetch_url(module, url, method="GET", headers=headers) return response, info def discord_text_msg(module): + webhook_id = module.params["webhook_id"] + webhook_token = module.params["webhook_token"] + content = module.params["content"] + user = module.params["username"] + avatar_url = module.params["avatar_url"] + tts = module.params["tts"] + embeds = module.params["embeds"] - webhook_id = module.params['webhook_id'] - webhook_token = module.params['webhook_token'] - content = module.params['content'] - user = module.params['username'] - avatar_url = module.params['avatar_url'] - tts = module.params['tts'] - embeds = module.params['embeds'] - - headers = { - 'content-type': 'application/json' - } + headers = {"content-type": "application/json"} url = f"https://discord.com/api/webhooks/{webhook_id}/{webhook_token}" payload = { - 'content': content, - 'username': user, - 'avatar_url': avatar_url, - 'tts': tts, - 'embeds': embeds, + "content": content, + "username": user, + "avatar_url": avatar_url, + "tts": tts, + "embeds": embeds, } payload = module.jsonify(payload) - response, info = fetch_url(module, url, data=payload, headers=headers, method='POST') + response, info = fetch_url(module, url, data=payload, headers=headers, method="POST") return response, info def main(): module = AnsibleModule( argument_spec=dict( - webhook_id=dict(type='str', required=True), - webhook_token=dict(type='str', required=True, no_log=True), - content=dict(type='str'), - 
username=dict(type='str'), - avatar_url=dict(type='str'), - tts=dict(type='bool', default=False), - embeds=dict(type='list', elements='dict'), + webhook_id=dict(type="str", required=True), + webhook_token=dict(type="str", required=True, no_log=True), + content=dict(type="str"), + username=dict(type="str"), + avatar_url=dict(type="str"), + tts=dict(type="bool", default=False), + embeds=dict(type="list", elements="dict"), ), - required_one_of=[['content', 'embeds']], - supports_check_mode=True + required_one_of=[["content", "embeds"]], + supports_check_mode=True, ) result = dict( changed=False, - http_code='', + http_code="", ) if module.check_mode: response, info = discord_check_mode(module) - if info['status'] != 200: + if info["status"] != 200: try: - module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info) + module.fail_json( + http_code=info["status"], msg=info["msg"], response=module.from_json(info["body"]), info=info + ) except Exception: - module.fail_json(http_code=info['status'], msg=info['msg'], info=info) + module.fail_json(http_code=info["status"], msg=info["msg"], info=info) else: - module.exit_json(msg=info['msg'], changed=False, http_code=info['status'], response=module.from_json(response.read())) + module.exit_json( + msg=info["msg"], changed=False, http_code=info["status"], response=module.from_json(response.read()) + ) else: response, info = discord_text_msg(module) - if info['status'] != 204: + if info["status"] != 204: try: - module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info) + module.fail_json( + http_code=info["status"], msg=info["msg"], response=module.from_json(info["body"]), info=info + ) except Exception: - module.fail_json(http_code=info['status'], msg=info['msg'], info=info) + module.fail_json(http_code=info["status"], msg=info["msg"], info=info) else: - module.exit_json(msg=info['msg'], changed=True, http_code=info['status']) + module.exit_json(msg=info["msg"], changed=True, http_code=info["status"]) if __name__ == "__main__": diff --git a/plugins/modules/django_check.py b/plugins/modules/django_check.py index f2ee3570725..55fe1090797 100644 --- a/plugins/modules/django_check.py +++ b/plugins/modules/django_check.py @@ -110,5 +110,5 @@ def main(): DjangoCheck.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/django_command.py b/plugins/modules/django_command.py index a6c3f409e53..0afd545ab77 100644 --- a/plugins/modules/django_command.py +++ b/plugins/modules/django_command.py @@ -88,5 +88,5 @@ def main(): DjangoCommand.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/django_createcachetable.py b/plugins/modules/django_createcachetable.py index 76a31ab0b15..b21c70862b8 100644 --- a/plugins/modules/django_createcachetable.py +++ b/plugins/modules/django_createcachetable.py @@ -70,5 +70,5 @@ def main(): DjangoCreateCacheTable.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/django_dumpdata.py b/plugins/modules/django_dumpdata.py index 5c819b2755c..fd5e01208cb 100644 --- a/plugins/modules/django_dumpdata.py +++ b/plugins/modules/django_dumpdata.py @@ -109,7 +109,9 @@ class DjangoDumpData(DjangoModuleHelper): supports_check_mode=False, ) django_admin_cmd = "dumpdata" - django_admin_arg_order = "all format indent excludes database_dash natural_foreign natural_primary primary_keys fixture 
apps_models" + django_admin_arg_order = ( + "all format indent excludes database_dash natural_foreign natural_primary primary_keys fixture apps_models" + ) _django_args = ["data", "database_dash"] def __init_module__(self): @@ -120,5 +122,5 @@ def main(): DjangoDumpData.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/django_loaddata.py b/plugins/modules/django_loaddata.py index 75b388de9af..bac94632ef6 100644 --- a/plugins/modules/django_loaddata.py +++ b/plugins/modules/django_loaddata.py @@ -86,5 +86,5 @@ def main(): DjangoLoadData.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/django_manage.py b/plugins/modules/django_manage.py index 257808df2cb..4d963fccde8 100644 --- a/plugins/modules/django_manage.py +++ b/plugins/modules/django_manage.py @@ -189,7 +189,7 @@ def _fail(module, cmd, out, err, **kwargs): - msg = '' + msg = "" if out: msg += f"stdout: {out}" if err: @@ -198,16 +198,15 @@ def _fail(module, cmd, out, err, **kwargs): def _ensure_virtualenv(module): - - venv_param = module.params['virtualenv'] + venv_param = module.params["virtualenv"] if venv_param is None: return - vbin = os.path.join(venv_param, 'bin') - activate = os.path.join(vbin, 'activate') + vbin = os.path.join(venv_param, "bin") + activate = os.path.join(vbin, "activate") if not os.path.exists(activate): - module.fail_json(msg=f'{venv_param} does not point to a valid virtual environment') + module.fail_json(msg=f"{venv_param} does not point to a valid virtual environment") os.environ["PATH"] = f"{vbin}:{os.environ['PATH']}" os.environ["VIRTUAL_ENV"] = venv_param @@ -226,9 +225,11 @@ def loaddata_filter_output(line): def migrate_filter_output(line): - return ("Migrating forwards " in line) \ - or ("Installed" in line and "Installed 0 object" not in line) \ + return ( + ("Migrating forwards " in line) + or ("Installed" in line and "Installed 0 object" not in line) or ("Applying" in line) + ) def collectstatic_filter_output(line): @@ -237,96 +238,117 @@ def collectstatic_filter_output(line): def main(): command_allowed_param_map = dict( - createcachetable=('cache_table', 'database', ), - flush=('database', ), - loaddata=('database', 'fixtures', ), - test=('failfast', 'testrunner', 'apps', ), - migrate=('apps', 'skip', 'merge', 'database',), - collectstatic=('clear', 'link', ), + createcachetable=( + "cache_table", + "database", + ), + flush=("database",), + loaddata=( + "database", + "fixtures", + ), + test=( + "failfast", + "testrunner", + "apps", + ), + migrate=( + "apps", + "skip", + "merge", + "database", + ), + collectstatic=( + "clear", + "link", + ), ) command_required_param_map = dict( - loaddata=('fixtures', ), + loaddata=("fixtures",), ) # forces --noinput on every command that needs it noinput_commands = ( - 'flush', - 'migrate', - 'test', - 'collectstatic', + "flush", + "migrate", + "test", + "collectstatic", ) # These params are allowed for certain commands only - specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'testrunner') + specific_params = ("apps", "clear", "database", "failfast", "fixtures", "testrunner") # These params are automatically added to the command if present - general_params = ('settings', 'pythonpath', 'database',) - specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link') - end_of_command_params = ('apps', 'cache_table', 'fixtures') + general_params = ( + "settings", + "pythonpath", + "database", + ) + specific_boolean_params = ("clear", 
"failfast", "skip", "merge", "link") + end_of_command_params = ("apps", "cache_table", "fixtures") module = AnsibleModule( argument_spec=dict( - command=dict(required=True, type='str'), - project_path=dict(required=True, type='path', aliases=['app_path', 'chdir']), - settings=dict(type='path'), - pythonpath=dict(type='path', aliases=['python_path']), - virtualenv=dict(type='path', aliases=['virtual_env']), - + command=dict(required=True, type="str"), + project_path=dict(required=True, type="path", aliases=["app_path", "chdir"]), + settings=dict(type="path"), + pythonpath=dict(type="path", aliases=["python_path"]), + virtualenv=dict(type="path", aliases=["virtual_env"]), apps=dict(), - cache_table=dict(type='str'), - clear=dict(default=False, type='bool'), - database=dict(type='str'), - failfast=dict(default=False, type='bool', aliases=['fail_fast']), - fixtures=dict(type='str'), - testrunner=dict(type='str', aliases=['test_runner']), - skip=dict(type='bool'), - merge=dict(type='bool'), - link=dict(type='bool'), + cache_table=dict(type="str"), + clear=dict(default=False, type="bool"), + database=dict(type="str"), + failfast=dict(default=False, type="bool", aliases=["fail_fast"]), + fixtures=dict(type="str"), + testrunner=dict(type="str", aliases=["test_runner"]), + skip=dict(type="bool"), + merge=dict(type="bool"), + link=dict(type="bool"), ), ) - command_split = shlex.split(module.params['command']) + command_split = shlex.split(module.params["command"]) command_bin = command_split[0] - project_path = module.params['project_path'] - virtualenv = module.params['virtualenv'] + project_path = module.params["project_path"] + virtualenv = module.params["virtualenv"] for param in specific_params: value = module.params[param] if value and param not in command_allowed_param_map[command_bin]: - module.fail_json(msg=f'{param} param is incompatible with command={command_bin}') + module.fail_json(msg=f"{param} param is incompatible with command={command_bin}") for param in command_required_param_map.get(command_bin, ()): if not module.params[param]: - module.fail_json(msg=f'{param} param is required for command={command_bin}') + module.fail_json(msg=f"{param} param is required for command={command_bin}") _ensure_virtualenv(module) run_cmd_args = ["./manage.py"] + command_split - if command_bin in noinput_commands and '--noinput' not in command_split: + if command_bin in noinput_commands and "--noinput" not in command_split: run_cmd_args.append("--noinput") for param in general_params: if module.params[param]: - run_cmd_args.append(f'--{param}={module.params[param]}') + run_cmd_args.append(f"--{param}={module.params[param]}") for param in specific_boolean_params: if module.params[param]: - run_cmd_args.append(f'--{param}') + run_cmd_args.append(f"--{param}") # these params always get tacked on the end of the command for param in end_of_command_params: if module.params[param]: - if param in ('fixtures', 'apps'): + if param in ("fixtures", "apps"): run_cmd_args.extend(shlex.split(module.params[param])) else: run_cmd_args.append(module.params[param]) rc, out, err = module.run_command(run_cmd_args, cwd=project_path) if rc != 0: - if command_bin == 'createcachetable' and 'table' in err and 'already exists' in err: - out = 'already exists.' + if command_bin == "createcachetable" and "table" in err and "already exists" in err: + out = "already exists." 
else: if "Unknown command:" in err: _fail(module, run_cmd_args, err, f"Unknown django command: {command_bin}") @@ -334,7 +356,7 @@ def main(): changed = False - lines = out.split('\n') + lines = out.split("\n") filt = globals().get(f"{command_bin}_filter_output", None) if filt: filtered_output = list(filter(filt, lines)) @@ -344,9 +366,17 @@ def main(): if check_changed: changed = check_changed(out) - module.exit_json(changed=changed, out=out, cmd=run_cmd_args, app_path=project_path, project_path=project_path, - virtualenv=virtualenv, settings=module.params['settings'], pythonpath=module.params['pythonpath']) + module.exit_json( + changed=changed, + out=out, + cmd=run_cmd_args, + app_path=project_path, + project_path=project_path, + virtualenv=virtualenv, + settings=module.params["settings"], + pythonpath=module.params["pythonpath"], + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/dnf_config_manager.py b/plugins/modules/dnf_config_manager.py index 69a985b8280..84c7c1da9c9 100644 --- a/plugins/modules/dnf_config_manager.py +++ b/plugins/modules/dnf_config_manager.py @@ -126,40 +126,40 @@ import re DNF_BIN = "/usr/bin/dnf" -REPO_ID_RE = re.compile(r'^Repo-id\s*:\s*(\S+)$') -REPO_STATUS_RE = re.compile(r'^Repo-status\s*:\s*(disabled|enabled)$') +REPO_ID_RE = re.compile(r"^Repo-id\s*:\s*(\S+)$") +REPO_STATUS_RE = re.compile(r"^Repo-status\s*:\s*(disabled|enabled)$") def get_repo_states(module): - rc, out, err = module.run_command([DNF_BIN, 'repolist', '--all', '--verbose'], check_rc=True) + rc, out, err = module.run_command([DNF_BIN, "repolist", "--all", "--verbose"], check_rc=True) repos = dict() - last_repo = '' - for i, line in enumerate(out.split('\n')): + last_repo = "" + for i, line in enumerate(out.split("\n")): m = REPO_ID_RE.match(line) if m: if len(last_repo) > 0: - module.fail_json(msg='dnf repolist parse failure: parsed another repo id before next status') + module.fail_json(msg="dnf repolist parse failure: parsed another repo id before next status") last_repo = m.group(1) continue m = REPO_STATUS_RE.match(line) if m: if len(last_repo) == 0: - module.fail_json(msg='dnf repolist parse failure: parsed status before repo id') + module.fail_json(msg="dnf repolist parse failure: parsed status before repo id") repos[last_repo] = m.group(1) - last_repo = '' + last_repo = "" return repos def set_repo_states(module, repo_ids, state): - module.run_command([DNF_BIN, 'config-manager', '--assumeyes', f'--set-{state}'] + repo_ids, check_rc=True) + module.run_command([DNF_BIN, "config-manager", "--assumeyes", f"--set-{state}"] + repo_ids, check_rc=True) def pack_repo_states_for_return(states): enabled = [] disabled = [] for repo_id in states: - if states[repo_id] == 'enabled': + if states[repo_id] == "enabled": enabled.append(repo_id) else: disabled.append(repo_id) @@ -168,33 +168,28 @@ def pack_repo_states_for_return(states): enabled.sort() disabled.sort() - return {'enabled': enabled, 'disabled': disabled} + return {"enabled": enabled, "disabled": disabled} def main(): module_args = dict( - name=dict(type='list', elements='str', default=[]), - state=dict(type='str', choices=['enabled', 'disabled'], default='enabled') + name=dict(type="list", elements="str", default=[]), + state=dict(type="str", choices=["enabled", "disabled"], default="enabled"), ) - result = dict( - changed=False - ) + result = dict(changed=False) - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) - module.run_command_environ_update = 
dict(LANGUAGE='C', LC_ALL='C') + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + module.run_command_environ_update = dict(LANGUAGE="C", LC_ALL="C") if not os.path.exists(DNF_BIN): module.fail_json(msg=f"{DNF_BIN} was not found") repo_states = get_repo_states(module) - result['repo_states_pre'] = pack_repo_states_for_return(repo_states) + result["repo_states_pre"] = pack_repo_states_for_return(repo_states) - desired_repo_state = module.params['state'] - names = module.params['name'] + desired_repo_state = module.params["state"] + names = module.params["name"] to_change = [] for repo_id in names: @@ -202,8 +197,8 @@ def main(): module.fail_json(msg=f"did not find repo with ID '{repo_id}' in dnf repolist --all --verbose") if repo_states[repo_id] != desired_repo_state: to_change.append(repo_id) - result['changed'] = len(to_change) > 0 - result['changed_repos'] = to_change + result["changed"] = len(to_change) > 0 + result["changed_repos"] = to_change if module.check_mode: module.exit_json(**result) @@ -212,7 +207,7 @@ def main(): set_repo_states(module, to_change, desired_repo_state) repo_states_post = get_repo_states(module) - result['repo_states_post'] = pack_repo_states_for_return(repo_states_post) + result["repo_states_post"] = pack_repo_states_for_return(repo_states_post) for repo_id in to_change: if repo_states_post[repo_id] != desired_repo_state: diff --git a/plugins/modules/dnf_versionlock.py b/plugins/modules/dnf_versionlock.py index ff3fe31eca2..963cba4ddde 100644 --- a/plugins/modules/dnf_versionlock.py +++ b/plugins/modules/dnf_versionlock.py @@ -158,19 +158,18 @@ def do_versionlock(module, command, patterns=None, raw=False): outs = [] for p in patterns: rc, out, err = module.run_command( - [DNF_BIN, "-q", "versionlock", command] + raw_parameter + [p], - check_rc=True) + [DNF_BIN, "-q", "versionlock", command] + raw_parameter + [p], check_rc=True + ) outs.append(out) out = "\n".join(outs) else: - rc, out, err = module.run_command( - [DNF_BIN, "-q", "versionlock", command], check_rc=True) + rc, out, err = module.run_command([DNF_BIN, "-q", "versionlock", command], check_rc=True) return out # This is equivalent to the _match function of the versionlock plugin. def match(entry, pattern): - entry = entry.lstrip('!') + entry = entry.lstrip("!") if entry == pattern: return True m = NEVRA_RE.match(entry) @@ -186,7 +185,7 @@ def match(entry, pattern): f"{m['name']}-{m['version']}-{m['release']}.{m['arch']}", f"{m['name']}-{m['epoch']}:{m['version']}-{m['release']}", f"{m['epoch']}:{m['name']}-{m['version']}-{m['release']}.{m['arch']}", - f"{m['name']}-{m['epoch']}:{m['version']}-{m['release']}.{m['arch']}" + f"{m['name']}-{m['epoch']}:{m['version']}-{m['release']}.{m['arch']}", ): if fnmatch.fnmatch(name, pattern): return True @@ -196,18 +195,14 @@ def match(entry, pattern): def get_packages(module, patterns, only_installed=False): packages_available_map_name_evrs = {} rc, out, err = module.run_command( - [DNF_BIN, "-q", "repoquery"] + - (["--installed"] if only_installed else []) + - patterns, - check_rc=True) + [DNF_BIN, "-q", "repoquery"] + (["--installed"] if only_installed else []) + patterns, check_rc=True + ) for p in out.split(): # Extract the NEVRA pattern. 
m = NEVRA_RE.match(p) if not m: - module.fail_json( - msg=f"failed to parse nevra for {p}", - rc=rc, out=out, err=err) + module.fail_json(msg=f"failed to parse nevra for {p}", rc=rc, out=out, err=err) evr = f"{m['epoch']}:{m['version']}-{m['release']}" @@ -258,8 +253,7 @@ def main(): argument_spec=dict( name=dict(type="list", elements="str", default=[]), raw=dict(type="bool", default=False), - state=dict(type="str", default="present", - choices=["present", "absent", "excluded", "clean"]), + state=dict(type="str", default="present", choices=["present", "absent", "excluded", "clean"]), ), supports_check_mode=True, ) @@ -272,7 +266,7 @@ def main(): # Check module pre-requisites. global DNF_BIN - DNF_BIN = module.get_bin_path('dnf', True) + DNF_BIN = module.get_bin_path("dnf", True) package_mgr = get_package_mgr() if package_mgr == "dnf" and not os.path.exists(VERSIONLOCK_CONF): module.fail_json(msg="plugin versionlock is required") @@ -289,7 +283,6 @@ def main(): specs_todelete = [] if state in ["present", "excluded"]: - if raw: # Add raw patterns as specs to add. for p in patterns: @@ -297,15 +290,10 @@ def main(): specs_toadd.append(p) else: # Get available packages that match the patterns. - packages_map_name_evrs = get_packages( - module, - patterns) + packages_map_name_evrs = get_packages(module, patterns) # Get installed packages that match the patterns. - packages_installed_map_name_evrs = get_packages( - module, - patterns, - only_installed=True) + packages_installed_map_name_evrs = get_packages(module, patterns, only_installed=True) # Obtain the list of package specs that require an entry in the # locklist. This list is composed by: @@ -325,7 +313,6 @@ def main(): msg = do_versionlock(module, cmd, patterns=specs_toadd, raw=raw) elif state == "absent": - if raw: # Add raw patterns as specs to delete. for p in patterns: @@ -340,8 +327,7 @@ def main(): specs_todelete.append(p) if specs_todelete and not module.check_mode: - msg = do_versionlock( - module, "delete", patterns=specs_todelete, raw=raw) + msg = do_versionlock(module, "delete", patterns=specs_todelete, raw=raw) elif state == "clean": specs_todelete = locklist_pre @@ -357,7 +343,7 @@ def main(): "msg": msg, "locklist_pre": locklist_pre, "specs_toadd": specs_toadd, - "specs_todelete": specs_todelete + "specs_todelete": specs_todelete, } if not module.check_mode: response["locklist_post"] = get_package_list(module, package_mgr=package_mgr) diff --git a/plugins/modules/dnsimple.py b/plugins/modules/dnsimple.py index 8d0305c18be..b557c39bf31 100644 --- a/plugins/modules/dnsimple.py +++ b/plugins/modules/dnsimple.py @@ -172,7 +172,7 @@ from ansible_collections.community.general.plugins.module_utils.version import LooseVersion -class DNSimpleV2(): +class DNSimpleV2: """class which uses dnsimple-python >= 2""" def __init__(self, account_email, account_api_token, sandbox, module): @@ -188,11 +188,18 @@ def __init__(self, account_email, account_api_token, sandbox, module): def dnsimple_client(self): """creates a dnsimple client object""" if self.account_email and self.account_api_token: - client = Client(sandbox=self.sandbox, email=self.account_email, access_token=self.account_api_token, user_agent="ansible/community.general") + client = Client( + sandbox=self.sandbox, + email=self.account_email, + access_token=self.account_api_token, + user_agent="ansible/community.general", + ) else: - msg = "Option account_email or account_api_token not provided. 
" \ - "Dnsimple authentication with a .dnsimple config file is not " \ - "supported with dnsimple-python>=2.0.0" + msg = ( + "Option account_email or account_api_token not provided. " + "Dnsimple authentication with a .dnsimple config file is not " + "supported with dnsimple-python>=2.0.0" + ) raise DNSimpleException(msg) client.identity.whoami() self.client = client @@ -205,9 +212,11 @@ def dnsimple_account(self): if not account: accounts = Accounts(self.client).list_accounts().data if len(accounts) != 1: - msg = "The provided dnsimple token is a user token with multiple accounts." \ - "Use an account token or a user token with access to a single account." \ + msg = ( + "The provided dnsimple token is a user token with multiple accounts." + "Use an account token or a user token with access to a single account." "See https://support.dnsimple.com/articles/api-access-token/" + ) raise DNSimpleException(msg) account = accounts[0] self.account = account @@ -239,9 +248,9 @@ def delete_domain(self, domain): def get_records(self, zone, dnsimple_filter=None): """return dns resource records which match a specified filter""" - records_list = self._get_paginated_result(self.client.zones.list_records, - account_id=self.account.id, - zone=zone, filter=dnsimple_filter) + records_list = self._get_paginated_result( + self.client.zones.list_records, account_id=self.account.id, zone=zone, filter=dnsimple_filter + ) return [d.__dict__ for d in records_list] def delete_record(self, domain, rid): @@ -277,6 +286,7 @@ def _get_paginated_result(self, operation, **options): from dnsimple.service import Accounts from dnsimple.version import version as dnsimple_version from dnsimple.struct.zone_record import ZoneRecordUpdateInput, ZoneRecordInput + HAS_DNSIMPLE = True except ImportError: DNSIMPLE_IMP_ERR.append(traceback.format_exc()) @@ -287,52 +297,66 @@ def _get_paginated_result(self, operation, **options): def main(): module = AnsibleModule( argument_spec=dict( - account_email=dict(type='str', fallback=(env_fallback, ['DNSIMPLE_EMAIL'])), - account_api_token=dict(type='str', - no_log=True, - fallback=(env_fallback, ['DNSIMPLE_API_TOKEN'])), - domain=dict(type='str'), - record=dict(type='str'), - record_ids=dict(type='list', elements='str'), - type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', - 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', - 'PTR', 'AAAA', 'SSHFP', 'HINFO', - 'POOL', 'CAA']), - ttl=dict(type='int', default=3600), - value=dict(type='str'), - priority=dict(type='int'), - state=dict(type='str', choices=['present', 'absent'], default='present'), - solo=dict(type='bool', default=False), - sandbox=dict(type='bool', default=False), + account_email=dict(type="str", fallback=(env_fallback, ["DNSIMPLE_EMAIL"])), + account_api_token=dict(type="str", no_log=True, fallback=(env_fallback, ["DNSIMPLE_API_TOKEN"])), + domain=dict(type="str"), + record=dict(type="str"), + record_ids=dict(type="list", elements="str"), + type=dict( + type="str", + choices=[ + "A", + "ALIAS", + "CNAME", + "MX", + "SPF", + "URL", + "TXT", + "NS", + "SRV", + "NAPTR", + "PTR", + "AAAA", + "SSHFP", + "HINFO", + "POOL", + "CAA", + ], + ), + ttl=dict(type="int", default=3600), + value=dict(type="str"), + priority=dict(type="int"), + state=dict(type="str", choices=["present", "absent"], default="present"), + solo=dict(type="bool", default=False), + sandbox=dict(type="bool", default=False), ), - required_together=[ - ['record', 'value'] - ], + required_together=[["record", "value"]], supports_check_mode=True, ) if not HAS_DNSIMPLE: - 
module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR[0]) - - account_email = module.params.get('account_email') - account_api_token = module.params.get('account_api_token') - domain = module.params.get('domain') - record = module.params.get('record') - record_ids = module.params.get('record_ids') - record_type = module.params.get('type') - ttl = module.params.get('ttl') - value = module.params.get('value') - priority = module.params.get('priority') - state = module.params.get('state') - is_solo = module.params.get('solo') - sandbox = module.params.get('sandbox') + module.fail_json(msg=missing_required_lib("dnsimple"), exception=DNSIMPLE_IMP_ERR[0]) + + account_email = module.params.get("account_email") + account_api_token = module.params.get("account_api_token") + domain = module.params.get("domain") + record = module.params.get("record") + record_ids = module.params.get("record_ids") + record_type = module.params.get("type") + ttl = module.params.get("ttl") + value = module.params.get("value") + priority = module.params.get("priority") + state = module.params.get("state") + is_solo = module.params.get("solo") + sandbox = module.params.get("sandbox") DNSIMPLE_MAJOR_VERSION = LooseVersion(dnsimple_version).version[0] try: if DNSIMPLE_MAJOR_VERSION < 2: module.fail_json( - msg='Support for python-dnsimple < 2 has been removed in community.general 5.0.0. Update python-dnsimple to version >= 2.0.0.') + msg="Support for python-dnsimple < 2 has been removed in community.general 5.0.0. Update python-dnsimple to version >= 2.0.0." + ) ds = DNSimpleV2(account_email, account_api_token, sandbox, module) # Let's figure out what operation we want to do # No domain, return a list @@ -348,7 +372,7 @@ def main(): typed_domain = str(domain) dr = ds.get_domain(typed_domain) # domain does not exist - if state == 'present': + if state == "present": if dr: module.exit_json(changed=False, result=dr) else: @@ -373,15 +397,18 @@ def main(): if not value: module.fail_json(msg="Missing the record value") - records_list = ds.get_records(domain, dnsimple_filter={'name': record}) - rr = next((r for r in records_list if r['name'] == record and r['type'] == record_type and r['content'] == value), None) - if state == 'present': + records_list = ds.get_records(domain, dnsimple_filter={"name": record}) + rr = next( + (r for r in records_list if r["name"] == record and r["type"] == record_type and r["content"] == value), + None, + ) + if state == "present": changed = False if is_solo: # delete any records that have the same name and record type - same_type = [r['id'] for r in records_list if r['name'] == record and r['type'] == record_type] + same_type = [r["id"] for r in records_list if r["name"] == record and r["type"] == record_type] if rr: - same_type = [rid for rid in same_type if rid != rr['id']] + same_type = [rid for rid in same_type if rid != rr["id"]] if same_type: if not module.check_mode: for rid in same_type: @@ -389,11 +416,11 @@ def main(): changed = True if rr: # check if we need to update - if rr['ttl'] != ttl or rr['priority'] != priority: + if rr["ttl"] != ttl or rr["priority"] != priority: if module.check_mode: module.exit_json(changed=True) else: - response = ds.update_record(domain, rr['id'], ttl, priority) + response = ds.update_record(domain, rr["id"], ttl, priority) module.exit_json(changed=True, result=response) else: module.exit_json(changed=changed, result=rr) @@ -408,7 +435,7 @@ def main(): else: if rr: if not module.check_mode: - ds.delete_record(domain, rr['id']) + 
ds.delete_record(domain, rr["id"]) module.exit_json(changed=True) else: module.exit_json(changed=False) @@ -416,9 +443,9 @@ def main(): # Make sure these record_ids either all exist or none if record_ids: current_records = ds.get_records(domain, dnsimple_filter=None) - current_record_ids = [str(d['id']) for d in current_records] + current_record_ids = [str(d["id"]) for d in current_records] wanted_record_ids = [str(r) for r in record_ids] - if state == 'present': + if state == "present": difference = list(set(wanted_record_ids) - set(current_record_ids)) if difference: module.fail_json(msg=f"Missing the following records: {difference}") @@ -443,5 +470,5 @@ def main(): module.fail_json(msg="Unknown what you wanted me to do") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/dnsimple_info.py b/plugins/modules/dnsimple_info.py index cdec8e4dda4..1b18a9d62bb 100644 --- a/plugins/modules/dnsimple_info.py +++ b/plugins/modules/dnsimple_info.py @@ -235,10 +235,9 @@ def build_url(account, key, is_sandbox): - headers = {'Accept': 'application/json', - 'Authorization': f'Bearer {key}'} - sandbox = '.sandbox' if is_sandbox else '' - url = f'https://api{sandbox}.dnsimple.com/v2/{account}' + headers = {"Accept": "application/json", "Authorization": f"Bearer {key}"} + sandbox = ".sandbox" if is_sandbox else "" + url = f"https://api{sandbox}.dnsimple.com/v2/{account}" req = Request(url=url, headers=headers) prepped_request = req.prepare() return prepped_request @@ -247,8 +246,8 @@ def build_url(account, key, is_sandbox): def iterate_data(module, request_object): base_url = request_object.url response = Session().send(request_object) - if 'pagination' not in response.json(): - module.fail_json('API Call failed, check ID, key and sandbox values') + if "pagination" not in response.json(): + module.fail_json("API Call failed, check ID, key and sandbox values") data = response.json()["data"] total_pages = response.json()["pagination"]["total_pages"] @@ -256,25 +255,28 @@ def iterate_data(module, request_object): while page < total_pages: page = page + 1 - request_object.url = f'{base_url}&page={page}' + request_object.url = f"{base_url}&page={page}" new_results = Session().send(request_object) - data = data + new_results.json()['data'] + data = data + new_results.json()["data"] return data def record_info(dnsimple_mod, req_obj): - req_obj.url, req_obj.method = f"{req_obj.url}/zones/{dnsimple_mod.params['name']}/records?name={dnsimple_mod.params['record']}", 'GET' + req_obj.url, req_obj.method = ( + f"{req_obj.url}/zones/{dnsimple_mod.params['name']}/records?name={dnsimple_mod.params['record']}", + "GET", + ) return iterate_data(dnsimple_mod, req_obj) def domain_info(dnsimple_mod, req_obj): - req_obj.url, req_obj.method = f"{req_obj.url}/zones/{dnsimple_mod.params['name']}/records?per_page=100", 'GET' + req_obj.url, req_obj.method = f"{req_obj.url}/zones/{dnsimple_mod.params['name']}/records?per_page=100", "GET" return iterate_data(dnsimple_mod, req_obj) def account_info(dnsimple_mod, req_obj): - req_obj.url, req_obj.method = f"{req_obj.url}/zones/?per_page=100", 'GET' + req_obj.url, req_obj.method = f"{req_obj.url}/zones/?per_page=100", "GET" return iterate_data(dnsimple_mod, req_obj) @@ -285,44 +287,37 @@ def main(): "api_key": {"required": True, "type": "str", "no_log": True}, "name": {"required": False, "type": "str"}, "record": {"required": False, "type": "str"}, - "sandbox": {"required": False, "type": "bool", "default": False} + "sandbox": {"required": False, 
"type": "bool", "default": False}, } - result = { - 'changed': False - } + result = {"changed": False} - module = AnsibleModule( - argument_spec=fields, - supports_check_mode=True - ) + module = AnsibleModule(argument_spec=fields, supports_check_mode=True) params = module.params - req = build_url(params['account_id'], - params['api_key'], - params['sandbox']) + req = build_url(params["account_id"], params["api_key"], params["sandbox"]) deps.validate(module) # At minimum we need account and key - if params['account_id'] and params['api_key']: + if params["account_id"] and params["api_key"]: # If we have a record return info on that record - if params['name'] and params['record']: - result['dnsimple_record_info'] = record_info(module, req) + if params["name"] and params["record"]: + result["dnsimple_record_info"] = record_info(module, req) module.exit_json(**result) # If we have the account only and domain, return records for the domain - elif params['name']: - result['dnsimple_records_info'] = domain_info(module, req) + elif params["name"]: + result["dnsimple_records_info"] = domain_info(module, req) module.exit_json(**result) # If we have the account only, return domains else: - result['dnsimple_domain_info'] = account_info(module, req) + result["dnsimple_domain_info"] = account_info(module, req) module.exit_json(**result) else: module.fail_json(msg="Need at least account_id and api_key") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/dnsmadeeasy.py b/plugins/modules/dnsmadeeasy.py index 0549325f47a..7016982ac54 100644 --- a/plugins/modules/dnsmadeeasy.py +++ b/plugins/modules/dnsmadeeasy.py @@ -368,7 +368,6 @@ class DME2: - def __init__(self, apikey, secret, domain, sandbox, module): self.module = module @@ -376,37 +375,39 @@ def __init__(self, apikey, secret, domain, sandbox, module): self.secret = secret if sandbox: - self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/' + self.baseurl = "https://api.sandbox.dnsmadeeasy.com/V2.0/" self.module.warn(warning=f"Sandbox is enabled. All actions are made against the URL {self.baseurl}") else: - self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/' + self.baseurl = "https://api.dnsmadeeasy.com/V2.0/" self.domain = str(domain) - self.domain_map = None # ["domain_name"] => ID - self.record_map = None # ["record_name"] => ID - self.records = None # ["record_ID"] => + self.domain_map = None # ["domain_name"] => ID + self.record_map = None # ["record_name"] => ID + self.records = None # ["record_ID"] => self.all_records = None self.contactList_map = None # ["contactList_name"] => ID # Lookup the domain ID if passed as a domain name vs. 
ID if not self.domain.isdigit(): - self.domain = self.getDomainByName(self.domain)['id'] + self.domain = self.getDomainByName(self.domain)["id"] self.record_url = f"dns/managed/{self.domain}/records" - self.monitor_url = 'monitor' - self.contactList_url = 'contactList' + self.monitor_url = "monitor" + self.contactList_url = "contactList" def _headers(self): currTime = self._get_date() hashstring = self._create_hash(currTime) - headers = {'x-dnsme-apiKey': self.api, - 'x-dnsme-hmac': hashstring, - 'x-dnsme-requestDate': currTime, - 'content-type': 'application/json'} + headers = { + "x-dnsme-apiKey": self.api, + "x-dnsme-hmac": hashstring, + "x-dnsme-requestDate": currTime, + "content-type": "application/json", + } return headers def _get_date(self): - locale.setlocale(locale.LC_TIME, 'C') + locale.setlocale(locale.LC_TIME, "C") return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()) def _create_hash(self, rightnow): @@ -418,7 +419,7 @@ def query(self, resource, method, data=None): data = urlencode(data) response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers()) - if info['status'] not in (200, 201, 204): + if info["status"] not in (200, 201, 204): self.module.fail_json(msg=f"{url} returned {info['status']}, with body: {info['msg']}") try: @@ -428,22 +429,22 @@ def query(self, resource, method, data=None): def getDomain(self, domain_id): if not self.domain_map: - self._instMap('domain') + self._instMap("domain") return self.domains.get(domain_id, False) def getDomainByName(self, domain_name): if not self.domain_map: - self._instMap('domain') + self._instMap("domain") return self.getDomain(self.domain_map.get(domain_name, 0)) def getDomains(self): - return self.query('dns/managed', 'GET')['data'] + return self.query("dns/managed", "GET")["data"] def getRecord(self, record_id): if not self.record_map: - self._instMap('record') + self._instMap("record") return self.records.get(record_id, False) @@ -459,7 +460,7 @@ def getMatchingRecord(self, record_name, record_type, record_value): if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]: for result in self.all_records: - if result['name'] == record_name and result['type'] == record_type: + if result["name"] == record_name and result["type"] == record_type: return result return False elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]: @@ -473,14 +474,14 @@ def getMatchingRecord(self, record_name, record_type, record_value): value = record_value.split(" ")[3] else: value = record_value - if result['name'] == record_name and result['type'] == record_type and result['value'] == value: + if result["name"] == record_name and result["type"] == record_type and result["value"] == value: return result return False else: - raise Exception('record_type not yet supported') + raise Exception("record_type not yet supported") def getRecords(self): - return self.query(self.record_url, 'GET')['data'] + return self.query(self.record_url, "GET")["data"] def _instMap(self, type): # @TODO cache this call so it is executed only once per ansible execution @@ -489,105 +490,106 @@ def _instMap(self, type): # iterate over e.g. self.getDomains() || self.getRecords() for result in getattr(self, f"get{type.title()}s")(): - - map[result['name']] = result['id'] - results[result['id']] = result + map[result["name"]] = result["id"] + results[result["id"]] = result # e.g. self.domain_map || self.record_map setattr(self, f"{type}_map", map) setattr(self, f"{type}s", results) # e.g. 
self.domains || self.records def prepareRecord(self, data): - return json.dumps(data, separators=(',', ':')) + return json.dumps(data, separators=(",", ":")) def createRecord(self, data): # @TODO update the cache w/ resultant record + id when implemented - return self.query(self.record_url, 'POST', data) + return self.query(self.record_url, "POST", data) def updateRecord(self, record_id, data): # @TODO update the cache w/ resultant record + id when implemented - return self.query(f"{self.record_url}/{record_id}", 'PUT', data) + return self.query(f"{self.record_url}/{record_id}", "PUT", data) def deleteRecord(self, record_id): # @TODO remove record from the cache when implemented - return self.query(f"{self.record_url}/{record_id}", 'DELETE') + return self.query(f"{self.record_url}/{record_id}", "DELETE") def getMonitor(self, record_id): - return self.query(f"{self.monitor_url}/{record_id}", 'GET') + return self.query(f"{self.monitor_url}/{record_id}", "GET") def updateMonitor(self, record_id, data): - return self.query(f"{self.monitor_url}/{record_id}", 'PUT', data) + return self.query(f"{self.monitor_url}/{record_id}", "PUT", data) def prepareMonitor(self, data): - return json.dumps(data, separators=(',', ':')) + return json.dumps(data, separators=(",", ":")) def getContactList(self, contact_list_id): if not self.contactList_map: - self._instMap('contactList') + self._instMap("contactList") return self.contactLists.get(contact_list_id, False) def getContactlists(self): - return self.query(self.contactList_url, 'GET')['data'] + return self.query(self.contactList_url, "GET")["data"] def getContactListByName(self, name): if not self.contactList_map: - self._instMap('contactList') + self._instMap("contactList") return self.getContactList(self.contactList_map.get(name, 0)) + # =========================================== # Module execution. 
# def main(): - module = AnsibleModule( argument_spec=dict( account_key=dict(required=True, no_log=True), account_secret=dict(required=True, no_log=True), domain=dict(required=True), - sandbox=dict(default=False, type='bool'), - state=dict(required=True, choices=['present', 'absent']), + sandbox=dict(default=False, type="bool"), + state=dict(required=True, choices=["present", "absent"]), record_name=dict(), - record_type=dict(choices=[ - 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']), + record_type=dict(choices=["A", "AAAA", "CNAME", "ANAME", "HTTPRED", "MX", "NS", "PTR", "SRV", "TXT"]), record_value=dict(), - record_ttl=dict(default=1800, type='int'), - monitor=dict(default=False, type='bool'), - systemDescription=dict(default=''), - maxEmails=dict(default=1, type='int'), - protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']), - port=dict(default=80, type='int'), - sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']), + record_ttl=dict(default=1800, type="int"), + monitor=dict(default=False, type="bool"), + systemDescription=dict(default=""), + maxEmails=dict(default=1, type="int"), + protocol=dict(default="HTTP", choices=["TCP", "UDP", "HTTP", "DNS", "SMTP", "HTTPS"]), + port=dict(default=80, type="int"), + sensitivity=dict(default="Medium", choices=["Low", "Medium", "High"]), contactList=dict(), httpFqdn=dict(), httpFile=dict(), httpQueryString=dict(), - failover=dict(default=False, type='bool'), - autoFailover=dict(default=False, type='bool'), + failover=dict(default=False, type="bool"), + autoFailover=dict(default=False, type="bool"), ip1=dict(), ip2=dict(), ip3=dict(), ip4=dict(), ip5=dict(), - validate_certs=dict(default=True, type='bool'), + validate_certs=dict(default=True, type="bool"), ), - required_together=[ - ['record_value', 'record_ttl', 'record_type'] - ], + required_together=[["record_value", "record_ttl", "record_type"]], required_if=[ - ['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']], - ['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']] - ] + ["failover", True, ["autoFailover", "port", "protocol", "ip1", "ip2"]], + ["monitor", True, ["port", "protocol", "maxEmails", "systemDescription", "ip1"]], + ], ) protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6) sensitivities = dict(Low=8, Medium=5, High=3) - DME = DME2(module.params["account_key"], module.params[ - "account_secret"], module.params["domain"], module.params["sandbox"], module) + DME = DME2( + module.params["account_key"], + module.params["account_secret"], + module.params["domain"], + module.params["sandbox"], + module, + ) state = module.params["state"] record_name = module.params["record_name"] record_type = module.params["record_type"] @@ -598,15 +600,16 @@ def main(): domain_records = DME.getRecords() if not domain_records: module.fail_json( - msg="The requested domain name is not accessible with this api_key; try using its ID if known.") + msg="The requested domain name is not accessible with this api_key; try using its ID if known." 
+ ) module.exit_json(changed=False, result=domain_records) # Fetch existing record + Build new one current_record = DME.getMatchingRecord(record_name, record_type, record_value) - new_record = {'name': record_name} + new_record = {"name": record_name} for i in ["record_value", "record_type", "record_ttl"]: if not module.params[i] is None: - new_record[i[len("record_"):]] = module.params[i] + new_record[i[len("record_") :]] = module.params[i] # Special handling for mx record if new_record["type"] == "MX": new_record["mxLevel"] = new_record["value"].split(" ")[0] @@ -622,29 +625,45 @@ def main(): # Fetch existing monitor if the A record indicates it should exist and build the new monitor current_monitor = dict() new_monitor = dict() - if current_record and current_record['type'] == 'A' and current_record.get('monitor'): - current_monitor = DME.getMonitor(current_record['id']) + if current_record and current_record["type"] == "A" and current_record.get("monitor"): + current_monitor = DME.getMonitor(current_record["id"]) # Build the new monitor - for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails', - 'contactList', 'httpFqdn', 'httpFile', 'httpQueryString', - 'failover', 'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']: + for i in [ + "monitor", + "systemDescription", + "protocol", + "port", + "sensitivity", + "maxEmails", + "contactList", + "httpFqdn", + "httpFile", + "httpQueryString", + "failover", + "autoFailover", + "ip1", + "ip2", + "ip3", + "ip4", + "ip5", + ]: if module.params[i] is not None: - if i == 'protocol': + if i == "protocol": # The API requires protocol to be a numeric in the range 1-6 - new_monitor['protocolId'] = protocols[module.params[i]] - elif i == 'sensitivity': + new_monitor["protocolId"] = protocols[module.params[i]] + elif i == "sensitivity": # The API requires sensitivity to be a numeric of 8, 5, or 3 new_monitor[i] = sensitivities[module.params[i]] - elif i == 'contactList': + elif i == "contactList": # The module accepts either the name or the id of the contact list contact_list_id = module.params[i] - if not contact_list_id.isdigit() and contact_list_id != '': + if not contact_list_id.isdigit() and contact_list_id != "": contact_list = DME.getContactListByName(contact_list_id) if not contact_list: module.fail_json(msg=f"Contact list {contact_list_id} does not exist") - contact_list_id = contact_list.get('id', '') - new_monitor['contactListId'] = contact_list_id + contact_list_id = contact_list.get("id", "") + new_monitor["contactListId"] = contact_list_id else: # The module option names match the API field names new_monitor[i] = module.params[i] @@ -657,7 +676,7 @@ def main(): # are surrounded by quotes. 
if str(current_record[i]).strip('"') != str(new_record[i]): record_changed = True - new_record['id'] = str(current_record['id']) + new_record["id"] = str(current_record["id"]) monitor_changed = False if current_monitor: @@ -666,19 +685,20 @@ def main(): monitor_changed = True # Follow Keyword Controlled Behavior - if state == 'present': + if state == "present": # return the record if no value is specified if "value" not in new_record: if not current_record: module.fail_json( - msg=f"A record with name '{record_name}' does not exist for domain '{module.params['domain']}.'") + msg=f"A record with name '{record_name}' does not exist for domain '{module.params['domain']}.'" + ) module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor)) # create record and monitor as the record does not exist if not current_record: record = DME.createRecord(DME.prepareRecord(new_record)) - if new_monitor.get('monitor') and record_type == "A": - monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor)) + if new_monitor.get("monitor") and record_type == "A": + monitor = DME.updateMonitor(record["id"], DME.prepareMonitor(new_monitor)) module.exit_json(changed=True, result=dict(record=record, monitor=monitor)) else: module.exit_json(changed=True, result=dict(record=record, monitor=current_monitor)) @@ -686,10 +706,10 @@ def main(): # update the record updated = False if record_changed: - DME.updateRecord(current_record['id'], DME.prepareRecord(new_record)) + DME.updateRecord(current_record["id"], DME.prepareRecord(new_record)) updated = True if monitor_changed: - DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor)) + DME.updateMonitor(current_monitor["recordId"], DME.prepareMonitor(new_monitor)) updated = True if updated: module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor)) @@ -697,20 +717,19 @@ def main(): # return the record (no changes) module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor)) - elif state == 'absent': + elif state == "absent": changed = False # delete the record (and the monitor/failover) if it exists if current_record: - DME.deleteRecord(current_record['id']) + DME.deleteRecord(current_record["id"]) module.exit_json(changed=True) # record does not exist, return w/o change. 
module.exit_json(changed=changed) else: - module.fail_json( - msg=f"'{state}' is an unknown value for the state argument") + module.fail_json(msg=f"'{state}' is an unknown value for the state argument") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/dpkg_divert.py b/plugins/modules/dpkg_divert.py index 12af06089cf..f86ac6a089c 100644 --- a/plugins/modules/dpkg_divert.py +++ b/plugins/modules/dpkg_divert.py @@ -150,50 +150,50 @@ def diversion_state(module, command, path): - diversion = dict(path=path, state='absent', divert=None, holder=None) - rc, out, err = module.run_command([command, '--listpackage', path], check_rc=True) + diversion = dict(path=path, state="absent", divert=None, holder=None) + rc, out, err = module.run_command([command, "--listpackage", path], check_rc=True) if out: - diversion['state'] = 'present' - diversion['holder'] = out.rstrip() - rc, out, err = module.run_command([command, '--truename', path], check_rc=True) - diversion['divert'] = out.rstrip() + diversion["state"] = "present" + diversion["holder"] = out.rstrip() + rc, out, err = module.run_command([command, "--truename", path], check_rc=True) + diversion["divert"] = out.rstrip() return diversion def main(): module = AnsibleModule( argument_spec=dict( - path=dict(required=True, type='path'), - state=dict(type='str', default='present', choices=['absent', 'present']), - holder=dict(type='str'), - divert=dict(type='path'), - rename=dict(type='bool', default=False), - force=dict(type='bool', default=False), + path=dict(required=True, type="path"), + state=dict(type="str", default="present", choices=["absent", "present"]), + holder=dict(type="str"), + divert=dict(type="path"), + rename=dict(type="bool", default=False), + force=dict(type="bool", default=False), ), supports_check_mode=True, ) - path = module.params['path'] - state = module.params['state'] - holder = module.params['holder'] - divert = module.params['divert'] - rename = module.params['rename'] - force = module.params['force'] + path = module.params["path"] + state = module.params["state"] + holder = module.params["holder"] + divert = module.params["divert"] + rename = module.params["rename"] + force = module.params["force"] diversion_wanted = dict(path=path, state=state) changed = False - DPKG_DIVERT = module.get_bin_path('dpkg-divert', required=True) + DPKG_DIVERT = module.get_bin_path("dpkg-divert", required=True) MAINCOMMAND = [DPKG_DIVERT] # Option --listpackage is needed and comes with 1.15.0 - rc, stdout, stderr = module.run_command([DPKG_DIVERT, '--version'], check_rc=True) - [current_version] = [x for x in stdout.splitlines()[0].split() if re.match('^[0-9]+[.][0-9]', x)] + rc, stdout, stderr = module.run_command([DPKG_DIVERT, "--version"], check_rc=True) + [current_version] = [x for x in stdout.splitlines()[0].split() if re.match("^[0-9]+[.][0-9]", x)] if LooseVersion(current_version) < LooseVersion("1.15.0"): module.fail_json(msg="Unsupported dpkg version (<1.15.0).") - no_rename_is_supported = (LooseVersion(current_version) >= LooseVersion("1.19.1")) + no_rename_is_supported = LooseVersion(current_version) >= LooseVersion("1.19.1") - b_path = to_bytes(path, errors='surrogate_or_strict') + b_path = to_bytes(path, errors="surrogate_or_strict") path_exists = os.path.exists(b_path) # Used for things not doable with a single dpkg-divert command (as forced # renaming of files, and diversion's 'holder' or 'divert' updates). 
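(A standalone sketch of the probe that diversion_state above performs, using only the standard library and assuming dpkg-divert is on PATH: --listpackage prints the holder of a diversion, or nothing when the path is not diverted, and --truename prints the diverted-to name, exactly as the module's two run_command calls expect.)

    import subprocess

    def probe_diversion(path):
        # Holder is "LOCAL" for local diversions, a package name otherwise,
        # and empty when no diversion exists for this path.
        holder = subprocess.run(
            ["dpkg-divert", "--listpackage", path],
            capture_output=True, text=True, check=True,
        ).stdout.rstrip()
        if not holder:
            return {"path": path, "state": "absent", "divert": None, "holder": None}
        divert = subprocess.run(
            ["dpkg-divert", "--truename", path],
            capture_output=True, text=True, check=True,
        ).stdout.rstrip()
        return {"path": path, "state": "present", "divert": divert, "holder": holder}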
@@ -201,48 +201,48 @@ def main(): truename_exists = False diversion_before = diversion_state(module, DPKG_DIVERT, path) - if diversion_before['state'] == 'present': - b_divert = to_bytes(diversion_before['divert'], errors='surrogate_or_strict') + if diversion_before["state"] == "present": + b_divert = to_bytes(diversion_before["divert"], errors="surrogate_or_strict") truename_exists = os.path.exists(b_divert) # Append options as requested in the task parameters, but ignore some of # them when removing the diversion. if rename: - MAINCOMMAND.append('--rename') + MAINCOMMAND.append("--rename") elif no_rename_is_supported: - MAINCOMMAND.append('--no-rename') + MAINCOMMAND.append("--no-rename") - if state == 'present': - if holder and holder != 'LOCAL': - MAINCOMMAND.extend(['--package', holder]) - diversion_wanted['holder'] = holder + if state == "present": + if holder and holder != "LOCAL": + MAINCOMMAND.extend(["--package", holder]) + diversion_wanted["holder"] = holder else: - MAINCOMMAND.append('--local') - diversion_wanted['holder'] = 'LOCAL' + MAINCOMMAND.append("--local") + diversion_wanted["holder"] = "LOCAL" if divert: - MAINCOMMAND.extend(['--divert', divert]) + MAINCOMMAND.extend(["--divert", divert]) target = divert else: - target = f'{path}.distrib' + target = f"{path}.distrib" - MAINCOMMAND.extend(['--add', path]) - diversion_wanted['divert'] = target - b_target = to_bytes(target, errors='surrogate_or_strict') + MAINCOMMAND.extend(["--add", path]) + diversion_wanted["divert"] = target + b_target = to_bytes(target, errors="surrogate_or_strict") target_exists = os.path.exists(b_target) else: - MAINCOMMAND.extend(['--remove', path]) - diversion_wanted['divert'] = None - diversion_wanted['holder'] = None + MAINCOMMAND.extend(["--remove", path]) + diversion_wanted["divert"] = None + diversion_wanted["holder"] = None # Start to populate the returned objects. diversion = diversion_before.copy() - maincommand = ' '.join(MAINCOMMAND) + maincommand = " ".join(MAINCOMMAND) commands = [maincommand] if module.check_mode or diversion_wanted == diversion_before: - MAINCOMMAND.insert(1, '--test') + MAINCOMMAND.insert(1, "--test") diversion_after = diversion_wanted # Just try and see @@ -257,34 +257,39 @@ def main(): # - The renaming is forbidden by dpkg-divert (i.e. both the file and the # diverted file exist) - elif state != diversion_before['state']: + elif state != diversion_before["state"]: # There should be no case with 'divert' and 'holder' when creating the # diversion from none, and they're ignored when removing the diversion. # So this is all about renaming... - if rename and path_exists and ( - (state == 'absent' and truename_exists) or - (state == 'present' and target_exists)): + if ( + rename + and path_exists + and ((state == "absent" and truename_exists) or (state == "present" and target_exists)) + ): if not force: msg = "Set 'force' param to True to force renaming of files." - module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, - stderr=stderr, stdout=stdout, diversion=diversion) + module.fail_json( + changed=changed, cmd=maincommand, rc=rc, msg=msg, stderr=stderr, stdout=stdout, diversion=diversion + ) else: msg = "Unexpected error while changing state of the diversion." 
- module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, - stderr=stderr, stdout=stdout, diversion=diversion) + module.fail_json( + changed=changed, cmd=maincommand, rc=rc, msg=msg, stderr=stderr, stdout=stdout, diversion=diversion + ) to_remove = path - if state == 'present': + if state == "present": to_remove = target if not module.check_mode: try: - b_remove = to_bytes(to_remove, errors='surrogate_or_strict') + b_remove = to_bytes(to_remove, errors="surrogate_or_strict") os.unlink(b_remove) except OSError as e: - msg = f'Failed to remove {to_remove}: {e}' - module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, - stderr=stderr, stdout=stdout, diversion=diversion) + msg = f"Failed to remove {to_remove}: {e}" + module.fail_json( + changed=changed, cmd=maincommand, rc=rc, msg=msg, stderr=stderr, stdout=stdout, diversion=diversion + ) rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True) messages = [stdout.rstrip()] @@ -293,25 +298,25 @@ def main(): # of an existing diversion. dpkg-divert does not handle this, and we have # to remove the existing diversion first, and then set a new one. else: - RMDIVERSION = [DPKG_DIVERT, '--remove', path] + RMDIVERSION = [DPKG_DIVERT, "--remove", path] if no_rename_is_supported: - RMDIVERSION.insert(1, '--no-rename') - rmdiversion = ' '.join(RMDIVERSION) + RMDIVERSION.insert(1, "--no-rename") + rmdiversion = " ".join(RMDIVERSION) if module.check_mode: - RMDIVERSION.insert(1, '--test') + RMDIVERSION.insert(1, "--test") if rename: - MAINCOMMAND.remove('--rename') + MAINCOMMAND.remove("--rename") if no_rename_is_supported: - MAINCOMMAND.insert(1, '--no-rename') - maincommand = ' '.join(MAINCOMMAND) + MAINCOMMAND.insert(1, "--no-rename") + maincommand = " ".join(MAINCOMMAND) commands = [rmdiversion, maincommand] rc, rmdout, rmderr = module.run_command(RMDIVERSION, check_rc=True) if module.check_mode: - messages = [rmdout.rstrip(), 'Running in check mode'] + messages = [rmdout.rstrip(), "Running in check mode"] else: rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True) messages = [rmdout.rstrip(), stdout.rstrip()] @@ -319,11 +324,11 @@ def main(): # Avoid if possible to orphan files (i.e. to dereference them in diversion # database but let them in place), but do not make renaming issues fatal. # BTW, this module is not about state of files involved in the diversion. - old = diversion_before['divert'] - new = diversion_wanted['divert'] + old = diversion_before["divert"] + new = diversion_wanted["divert"] if new != old: - b_old = to_bytes(old, errors='surrogate_or_strict') - b_new = to_bytes(new, errors='surrogate_or_strict') + b_old = to_bytes(old, errors="surrogate_or_strict") + b_new = to_bytes(new, errors="surrogate_or_strict") if os.path.exists(b_old) and not os.path.exists(b_new): try: os.rename(b_old, b_new) @@ -336,20 +341,20 @@ def main(): diversion = diversion_after.copy() diff = dict() if module._diff: - diff['before'] = diversion_before - diff['after'] = diversion_after + diff["before"] = diversion_before + diff["after"] = diversion_after if diversion_after != diversion_before: changed = True if diversion_after == diversion_wanted: - module.exit_json(changed=changed, diversion=diversion, - commands=commands, messages=messages, diff=diff) + module.exit_json(changed=changed, diversion=diversion, commands=commands, messages=messages, diff=diff) else: msg = "Unexpected error: see stdout and stderr for details." 
- module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, - stderr=stderr, stdout=stdout, diversion=diversion) + module.fail_json( + changed=changed, cmd=maincommand, rc=rc, msg=msg, stderr=stderr, stdout=stdout, diversion=diversion + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/easy_install.py b/plugins/modules/easy_install.py index cef29751ee6..9b1e291b944 100644 --- a/plugins/modules/easy_install.py +++ b/plugins/modules/easy_install.py @@ -95,15 +95,15 @@ def install_package(module, name, easy_install, executable_arguments): def _is_package_installed(module, name, easy_install, executable_arguments): # Copy and add to the arguments executable_arguments = executable_arguments[:] - executable_arguments.append('--dry-run') + executable_arguments.append("--dry-run") rc, out, err = install_package(module, name, easy_install, executable_arguments) if rc: module.fail_json(msg=err) - return 'Downloading' not in out + return "Downloading" not in out def _get_easy_install(module, env=None, executable=None): - candidate_easy_inst_basenames = ['easy_install'] + candidate_easy_inst_basenames = ["easy_install"] easy_install = None if executable is not None: if os.path.isabs(executable): @@ -115,7 +115,7 @@ def _get_easy_install(module, env=None, executable=None): opt_dirs = [] else: # Try easy_install with the virtualenv directory first. - opt_dirs = [f'{env}/bin'] + opt_dirs = [f"{env}/bin"] for basename in candidate_easy_inst_basenames: easy_install = module.get_bin_path(basename, False, opt_dirs) if easy_install is not None: @@ -131,39 +131,37 @@ def _get_easy_install(module, env=None, executable=None): def main(): arg_spec = dict( name=dict(required=True), - state=dict(default='present', - choices=['present', 'latest'], - type='str'), + state=dict(default="present", choices=["present", "latest"], type="str"), virtualenv=dict(), - virtualenv_site_packages=dict(default=False, type='bool'), - virtualenv_command=dict(default='virtualenv'), - executable=dict(default='easy_install'), + virtualenv_site_packages=dict(default=False, type="bool"), + virtualenv_command=dict(default="virtualenv"), + executable=dict(default="easy_install"), ) module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) - name = module.params['name'] - env = module.params['virtualenv'] - executable = module.params['executable'] - site_packages = module.params['virtualenv_site_packages'] - virtualenv_command = module.params['virtualenv_command'] + name = module.params["name"] + env = module.params["virtualenv"] + executable = module.params["executable"] + site_packages = module.params["virtualenv_site_packages"] + virtualenv_command = module.params["virtualenv_command"] executable_arguments = [] - if module.params['state'] == 'latest': - executable_arguments.append('--upgrade') + if module.params["state"] == "latest": + executable_arguments.append("--upgrade") rc = 0 - err = '' - out = '' + err = "" + out = "" if env: virtualenv = module.get_bin_path(virtualenv_command, True) - if not os.path.exists(os.path.join(env, 'bin', 'activate')): + if not os.path.exists(os.path.join(env, "bin", "activate")): if module.check_mode: module.exit_json(changed=True) - command = f'{virtualenv} {env}' + command = f"{virtualenv} {env}" if site_packages: - command += ' --system-site-packages' + command += " --system-site-packages" cwd = tempfile.gettempdir() rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd) @@ -191,9 +189,8 @@ def main(): if rc != 0: 
             module.fail_json(msg=err, cmd=cmd)
 
-    module.exit_json(changed=changed, binary=easy_install,
-                     name=name, virtualenv=env)
+    module.exit_json(changed=changed, binary=easy_install, name=name, virtualenv=env)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/ejabberd_user.py b/plugins/modules/ejabberd_user.py
index 240f123fd9f..b265eaf0f81 100644
--- a/plugins/modules/ejabberd_user.py
+++ b/plugins/modules/ejabberd_user.py
@@ -71,7 +71,7 @@ class EjabberdUser:
-    """ This object represents a user resource for an ejabberd server. The
+    """This object represents a user resource for an ejabberd server. The
     object manages user creation and deletion using ejabberdctl. The following
     commands are currently supported:
         * ejabberdctl register
@@ -80,10 +80,10 @@ class EjabberdUser:
 
     def __init__(self, module):
         self.module = module
-        self.state = module.params.get('state')
-        self.host = module.params.get('host')
-        self.user = module.params.get('username')
-        self.pwd = module.params.get('password')
+        self.state = module.params.get("state")
+        self.host = module.params.get("host")
+        self.user = module.params.get("username")
+        self.pwd = module.params.get("password")
         self.runner = CmdRunner(
             module,
             command="ejabberdctl",
@@ -98,28 +98,29 @@ def __init__(self, module):
 
     @property
     def changed(self):
-        """ This method will check the current user and see if the password has
+        """This method will check the current user and see if the password has
         changed.  It will return True if the user does not match the supplied
         credentials and False if it does """
-        return self.run_command('check_password', 'user host pwd', (lambda rc, out, err: bool(rc)))
+        return self.run_command("check_password", "user host pwd", (lambda rc, out, err: bool(rc)))
 
     @property
     def exists(self):
-        """ This method will check to see if the supplied username exists for
+        """This method will check to see if the supplied username exists for
         the host specified.
 
         If the user exists, True is returned, otherwise False is returned """
-        return self.run_command('check_account', 'user host', (lambda rc, out, err: not bool(rc)))
+        return self.run_command("check_account", "user host", (lambda rc, out, err: not bool(rc)))
 
     def log(self, entry):
-        """ This method does nothing """
+        """This method does nothing"""
         pass
 
     def run_command(self, cmd, options, process=None):
-        """ This method will run the any command specified and return the
+        """This method will run any command specified and return the
         result using the Ansible common module """
+
         def _proc(*a):
             return a
 
@@ -132,32 +133,30 @@ def _proc(*a):
         return res
 
     def update(self):
-        """ The update method will update the credentials for the user provided
-        """
-        return self.run_command('change_password', 'user host pwd')
+        """The update method will update the credentials for the user provided"""
+        return self.run_command("change_password", "user host pwd")
 
     def create(self):
-        """ The create method will create a new user on the host with the
+        """The create method will create a new user on the host with the
         password provided """
-        return self.run_command('register', 'user host pwd')
+        return self.run_command("register", "user host pwd")
 
     def delete(self):
-        """ The delete method will delete the user from the host
-        """
-        return self.run_command('unregister', 'user host')
+        """The delete method will delete the user from the host"""
+        return self.run_command("unregister", "user host")
 
 
 def main():
     module = AnsibleModule(
         argument_spec=dict(
-            host=dict(required=True, type='str'),
-            username=dict(required=True, type='str'),
-            password=dict(type='str', no_log=True),
-            state=dict(default='present', choices=['present', 'absent']),
+            host=dict(required=True, type="str"),
+            username=dict(required=True, type="str"),
+            password=dict(type="str", no_log=True),
+            state=dict(default="present", choices=["present", "absent"]),
         ),
         required_if=[
-            ('state', 'present', ['password']),
+            ("state", "present", ["password"]),
         ],
         supports_check_mode=True,
     )
@@ -167,7 +166,7 @@ def main():
     rc = None
     result = dict(changed=False)
 
-    if obj.state == 'absent':
+    if obj.state == "absent":
         if obj.exists:
             if module.check_mode:
                 module.exit_json(changed=True)
@@ -175,7 +174,7 @@ def main():
             if rc != 0:
                 module.fail_json(msg=err, rc=rc)
 
-    elif obj.state == 'present':
+    elif obj.state == "present":
         if not obj.exists:
             if module.check_mode:
                 module.exit_json(changed=True)
@@ -188,12 +187,12 @@ def main():
                 module.fail_json(msg=err, rc=rc)
 
     if rc is None:
-        result['changed'] = False
+        result["changed"] = False
     else:
-        result['changed'] = True
+        result["changed"] = True
 
     module.exit_json(**result)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/elasticsearch_plugin.py b/plugins/modules/elasticsearch_plugin.py
index d494988df33..f8800f85c64 100644
--- a/plugins/modules/elasticsearch_plugin.py
+++ b/plugins/modules/elasticsearch_plugin.py
@@ -118,15 +118,9 @@
 
 from ansible.module_utils.basic import AnsibleModule
 
-PACKAGE_STATE_MAP = dict(
-    present="install",
-    absent="remove"
-)
+PACKAGE_STATE_MAP = dict(present="install", absent="remove")
 
-PLUGIN_BIN_PATHS = tuple([
-    '/usr/share/elasticsearch/bin/elasticsearch-plugin',
-    '/usr/share/elasticsearch/bin/plugin'
-])
+PLUGIN_BIN_PATHS = tuple(["/usr/share/elasticsearch/bin/elasticsearch-plugin", "/usr/share/elasticsearch/bin/plugin"])
 
 
 def parse_plugin_repo(string):
@@ -143,7 +137,7 @@ def parse_plugin_repo(string):
 
     # remove es- prefix
     for string in ("elasticsearch-", "es-"):
         if repo.startswith(string):
-            return repo[len(string):]
+            return repo[len(string) :]
 
     return repo
 
@@ -155,14 +149,14 @@ def is_plugin_present(plugin_name, plugin_dir):
 def parse_error(string):
     reason = "ERROR: "
     try:
-        return string[string.index(reason) + len(reason):].strip()
+        return string[string.index(reason) + len(reason) :].strip()
     except ValueError:
         return string
 
 
 def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force):
     cmd = [plugin_bin, PACKAGE_STATE_MAP["present"]]
-    is_old_command = (os.path.basename(plugin_bin) == 'plugin')
+    is_old_command = os.path.basename(plugin_bin) == "plugin"
 
     # Timeout and version are only valid for plugin, not elasticsearch-plugin
     if is_old_command:
@@ -175,12 +169,16 @@ def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_hos
         cmd[2] = plugin_name
 
     if proxy_host and proxy_port:
-        java_opts = [f"-Dhttp.proxyHost={proxy_host}",
-                     f"-Dhttp.proxyPort={proxy_port}",
-                     f"-Dhttps.proxyHost={proxy_host}",
-                     f"-Dhttps.proxyPort={proxy_port}"]
-        module.run_command_environ_update = dict(CLI_JAVA_OPTS=" ".join(java_opts),  # Elasticsearch 8.x
-                                                 ES_JAVA_OPTS=" ".join(java_opts))  # Older Elasticsearch versions
+        java_opts = [
+            f"-Dhttp.proxyHost={proxy_host}",
+            f"-Dhttp.proxyPort={proxy_port}",
+            f"-Dhttps.proxyHost={proxy_host}",
+            f"-Dhttps.proxyPort={proxy_port}",
+        ]
+        module.run_command_environ_update = dict(
+            CLI_JAVA_OPTS=" ".join(java_opts),  # Elasticsearch 8.x
+            ES_JAVA_OPTS=" ".join(java_opts),  # Older Elasticsearch versions
+        )
 
     # Legacy ES 1.x
     if url:
@@ -247,7 +245,9 @@ def get_plugin_bin(module, plugin_bin=None):
             break
 
     if not valid_plugin_bin:
-        module.fail_json(msg=f'{plugin_bin} does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed.')
+        module.fail_json(
+            msg=f"{plugin_bin} does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed."
+ ) return valid_plugin_bin @@ -260,15 +260,15 @@ def main(): src=dict(), url=dict(), timeout=dict(default="1m"), - force=dict(type='bool', default=False), + force=dict(type="bool", default=False), plugin_bin=dict(type="path"), plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"), proxy_host=dict(), proxy_port=dict(), - version=dict() + version=dict(), ), mutually_exclusive=[("src", "url")], - supports_check_mode=True + supports_check_mode=True, ) name = module.params["name"] @@ -294,7 +294,9 @@ def main(): module.exit_json(changed=False, name=name, state=state) if state == "present": - changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force) + changed, cmd, out, err = install_plugin( + module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force + ) elif state == "absent": changed, cmd, out, err = remove_plugin(module, plugin_bin, name) @@ -302,5 +304,5 @@ def main(): module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/emc_vnx_sg_member.py b/plugins/modules/emc_vnx_sg_member.py index f2c47aef2db..a9637b69edb 100644 --- a/plugins/modules/emc_vnx_sg_member.py +++ b/plugins/modules/emc_vnx_sg_member.py @@ -86,8 +86,14 @@ LIB_IMP_ERR = None try: from storops import VNXSystem - from storops.exception import VNXCredentialError, VNXStorageGroupError, \ - VNXAluAlreadyAttachedError, VNXAttachAluError, VNXDetachAluNotFoundError + from storops.exception import ( + VNXCredentialError, + VNXStorageGroupError, + VNXAluAlreadyAttachedError, + VNXAttachAluError, + VNXDetachAluNotFoundError, + ) + HAS_LIB = True except Exception: LIB_IMP_ERR = traceback.format_exc() @@ -96,31 +102,24 @@ def run_module(): module_args = dict( - name=dict(type='str', required=True), - lunid=dict(type='int', required=True), - state=dict(default='present', choices=['present', 'absent']), + name=dict(type="str", required=True), + lunid=dict(type="int", required=True), + state=dict(default="present", choices=["present", "absent"]), ) module_args.update(emc_vnx_argument_spec) - result = dict( - changed=False, - hluid=None - ) + result = dict(changed=False, hluid=None) - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) if not HAS_LIB: - module.fail_json(msg=missing_required_lib('storops >= 0.5.10'), - exception=LIB_IMP_ERR) + module.fail_json(msg=missing_required_lib("storops >= 0.5.10"), exception=LIB_IMP_ERR) - sp_user = module.params['sp_user'] - sp_address = module.params['sp_address'] - sp_password = module.params['sp_password'] - alu = module.params['lunid'] + sp_user = module.params["sp_user"] + sp_address = module.params["sp_address"] + sp_password = module.params["sp_password"] + alu = module.params["lunid"] # if the user is working with this module in only check mode we do not # want to make any changes to the environment, just return the current @@ -130,35 +129,32 @@ def run_module(): try: vnx = VNXSystem(sp_address, sp_user, sp_password) - sg = vnx.get_sg(module.params['name']) + sg = vnx.get_sg(module.params["name"]) if sg.existed: - if module.params['state'] == 'present': + if module.params["state"] == "present": if not sg.has_alu(alu): try: - result['hluid'] = sg.attach_alu(alu) - result['changed'] = True + result["hluid"] = 
sg.attach_alu(alu) + result["changed"] = True except VNXAluAlreadyAttachedError: - result['hluid'] = sg.get_hlu(alu) + result["hluid"] = sg.get_hlu(alu) except (VNXAttachAluError, VNXStorageGroupError) as e: - module.fail_json(msg=f'Error attaching {alu}: {e} ', - **result) + module.fail_json(msg=f"Error attaching {alu}: {e} ", **result) else: - result['hluid'] = sg.get_hlu(alu) - if module.params['state'] == 'absent' and sg.has_alu(alu): + result["hluid"] = sg.get_hlu(alu) + if module.params["state"] == "absent" and sg.has_alu(alu): try: sg.detach_alu(alu) - result['changed'] = True + result["changed"] = True except VNXDetachAluNotFoundError: # being not attached when using absent is OK pass except VNXStorageGroupError as e: - module.fail_json(msg=f'Error detaching alu {alu}: {e} ', - **result) + module.fail_json(msg=f"Error detaching alu {alu}: {e} ", **result) else: - module.fail_json(msg=f"No such storage group named {module.params['name']}", - **result) + module.fail_json(msg=f"No such storage group named {module.params['name']}", **result) except VNXCredentialError as e: - module.fail_json(msg=f'{e}', **result) + module.fail_json(msg=f"{e}", **result) module.exit_json(**result) @@ -167,5 +163,5 @@ def main(): run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/etcd3.py b/plugins/modules/etcd3.py index 10e59be839f..121d7402d0a 100644 --- a/plugins/modules/etcd3.py +++ b/plugins/modules/etcd3.py @@ -130,6 +130,7 @@ try: import etcd3 + HAS_ETCD = True ETCD_IMP_ERR = None except ImportError: @@ -141,17 +142,17 @@ def run_module(): # define the available arguments/parameters that a user can pass to # the module module_args = dict( - key=dict(type='str', required=True, no_log=False), - value=dict(type='str', required=True), - host=dict(type='str', default='localhost'), - port=dict(type='int', default=2379), - state=dict(type='str', required=True, choices=['present', 'absent']), - user=dict(type='str'), - password=dict(type='str', no_log=True), - ca_cert=dict(type='path'), - client_cert=dict(type='path'), - client_key=dict(type='path'), - timeout=dict(type='int'), + key=dict(type="str", required=True, no_log=False), + value=dict(type="str", required=True), + host=dict(type="str", default="localhost"), + port=dict(type="int", default=2379), + state=dict(type="str", required=True, choices=["present", "absent"]), + user=dict(type="str"), + password=dict(type="str", no_log=True), + ca_cert=dict(type="path"), + client_cert=dict(type="path"), + client_key=dict(type="path"), + timeout=dict(type="int"), ) # seed the result dict in the object @@ -170,7 +171,7 @@ def run_module(): module = AnsibleModule( argument_spec=module_args, supports_check_mode=True, - required_together=[['client_cert', 'client_key'], ['user', 'password']], + required_together=[["client_cert", "client_key"], ["user", "password"]], ) # It is possible to set `ca_cert` to verify the server identity without @@ -178,54 +179,57 @@ def run_module(): # so required_together is enough # Due to `required_together=[['client_cert', 'client_key']]`, checking the presence # of either `client_cert` or `client_key` is enough - if module.params['ca_cert'] is None and module.params['client_cert'] is not None: + if module.params["ca_cert"] is None and module.params["client_cert"] is not None: module.fail_json(msg="The 'ca_cert' parameter must be defined when 'client_cert' and 'client_key' are present.") - result['key'] = module.params.get('key') - module.params['cert_cert'] = 
module.params.pop('client_cert') - module.params['cert_key'] = module.params.pop('client_key') + result["key"] = module.params.get("key") + module.params["cert_cert"] = module.params.pop("client_cert") + module.params["cert_key"] = module.params.pop("client_key") if not HAS_ETCD: - module.fail_json(msg=missing_required_lib('etcd3'), exception=ETCD_IMP_ERR) + module.fail_json(msg=missing_required_lib("etcd3"), exception=ETCD_IMP_ERR) - allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key', - 'timeout', 'user', 'password'] + allowed_keys = ["host", "port", "ca_cert", "cert_cert", "cert_key", "timeout", "user", "password"] client_params = {key: value for key, value in module.params.items() if key in allowed_keys} try: etcd = etcd3.client(**client_params) except Exception as exp: - module.fail_json(msg=f'Cannot connect to etcd cluster: {exp}', exception=traceback.format_exc()) + module.fail_json(msg=f"Cannot connect to etcd cluster: {exp}", exception=traceback.format_exc()) try: - cluster_value = etcd.get(module.params['key']) + cluster_value = etcd.get(module.params["key"]) except Exception as exp: - module.fail_json(msg=f'Cannot reach data: {exp}', exception=traceback.format_exc()) + module.fail_json(msg=f"Cannot reach data: {exp}", exception=traceback.format_exc()) # Make the cluster_value[0] a string for string comparisons - result['old_value'] = to_native(cluster_value[0]) + result["old_value"] = to_native(cluster_value[0]) - if module.params['state'] == 'absent': + if module.params["state"] == "absent": if cluster_value[0] is not None: if module.check_mode: - result['changed'] = True + result["changed"] = True else: try: - etcd.delete(module.params['key']) + etcd.delete(module.params["key"]) except Exception as exp: - module.fail_json(msg=f"Cannot delete {module.params['key']}: {exp}", exception=traceback.format_exc()) + module.fail_json( + msg=f"Cannot delete {module.params['key']}: {exp}", exception=traceback.format_exc() + ) else: - result['changed'] = True - elif module.params['state'] == 'present': - if result['old_value'] != module.params['value']: + result["changed"] = True + elif module.params["state"] == "present": + if result["old_value"] != module.params["value"]: if module.check_mode: - result['changed'] = True + result["changed"] = True else: try: - etcd.put(module.params['key'], module.params['value']) + etcd.put(module.params["key"], module.params["value"]) except Exception as exp: - module.fail_json(msg=f"Cannot add or edit key {module.params['key']}: {exp}", exception=traceback.format_exc()) + module.fail_json( + msg=f"Cannot add or edit key {module.params['key']}: {exp}", exception=traceback.format_exc() + ) else: - result['changed'] = True + result["changed"] = True else: module.fail_json(msg="State not recognized") @@ -245,5 +249,5 @@ def main(): run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/facter_facts.py b/plugins/modules/facter_facts.py index 8ef5d7776bd..d38805ca81d 100644 --- a/plugins/modules/facter_facts.py +++ b/plugins/modules/facter_facts.py @@ -65,22 +65,20 @@ def main(): module = AnsibleModule( argument_spec=dict( - arguments=dict(type='list', elements='str'), + arguments=dict(type="list", elements="str"), ), supports_check_mode=True, ) - facter_path = module.get_bin_path( - 'facter', - opt_dirs=['/opt/puppetlabs/bin']) + facter_path = module.get_bin_path("facter", opt_dirs=["/opt/puppetlabs/bin"]) cmd = [facter_path, "--json"] - if module.params['arguments']: - cmd += 
module.params['arguments'] + if module.params["arguments"]: + cmd += module.params["arguments"] rc, out, err = module.run_command(cmd, check_rc=True) module.exit_json(ansible_facts=dict(facter=json.loads(out))) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/filesize.py b/plugins/modules/filesize.py index 59275ccb2e2..d61f9c11dbf 100644 --- a/plugins/modules/filesize.py +++ b/plugins/modules/filesize.py @@ -226,28 +226,60 @@ # others (ls, df, lvresize, lsblk...). SIZE_UNITS = dict( B=1, - kB=1000**1, KB=1000**1, KiB=1024**1, K=1024**1, k=1024**1, - MB=1000**2, mB=1000**2, MiB=1024**2, M=1024**2, m=1024**2, - GB=1000**3, gB=1000**3, GiB=1024**3, G=1024**3, g=1024**3, - TB=1000**4, tB=1000**4, TiB=1024**4, T=1024**4, t=1024**4, - PB=1000**5, pB=1000**5, PiB=1024**5, P=1024**5, p=1024**5, - EB=1000**6, eB=1000**6, EiB=1024**6, E=1024**6, e=1024**6, - ZB=1000**7, zB=1000**7, ZiB=1024**7, Z=1024**7, z=1024**7, - YB=1000**8, yB=1000**8, YiB=1024**8, Y=1024**8, y=1024**8, + kB=1000**1, + KB=1000**1, + KiB=1024**1, + K=1024**1, + k=1024**1, + MB=1000**2, + mB=1000**2, + MiB=1024**2, + M=1024**2, + m=1024**2, + GB=1000**3, + gB=1000**3, + GiB=1024**3, + G=1024**3, + g=1024**3, + TB=1000**4, + tB=1000**4, + TiB=1024**4, + T=1024**4, + t=1024**4, + PB=1000**5, + pB=1000**5, + PiB=1024**5, + P=1024**5, + p=1024**5, + EB=1000**6, + eB=1000**6, + EiB=1024**6, + E=1024**6, + e=1024**6, + ZB=1000**7, + zB=1000**7, + ZiB=1024**7, + Z=1024**7, + z=1024**7, + YB=1000**8, + yB=1000**8, + YiB=1024**8, + Y=1024**8, + y=1024**8, ) def bytes_to_human(size, iec=False): """Return human-readable size (with SI or IEC suffix) from bytes. This is - only to populate the returned result of the module, not to handle the - file itself (we only rely on bytes for that). + only to populate the returned result of the module, not to handle the + file itself (we only rely on bytes for that). """ - unit = 'B' - for (u, v) in SIZE_UNITS.items(): + unit = "B" + for u, v in SIZE_UNITS.items(): if size < v: continue if iec: - if 'i' not in u or size / v >= 1024: + if "i" not in u or size / v >= 1024: continue else: if v % 5 or size / v >= 1000: @@ -255,19 +287,19 @@ def bytes_to_human(size, iec=False): unit = u hsize = round(size / SIZE_UNITS[unit], 2) - if unit == 'B': + if unit == "B": hsize = int(hsize) - unit = re.sub(r'^(.)', lambda m: m.expand(r'\1').upper(), unit) - if unit == 'KB': - unit = 'kB' + unit = re.sub(r"^(.)", lambda m: m.expand(r"\1").upper(), unit) + if unit == "KB": + unit = "kB" - return f'{hsize} {unit}' + return f"{hsize} {unit}" def smart_blocksize(size, unit, product, bsize): """Ensure the total size can be written as blocks*blocksize, with blocks - and blocksize being integers. + and blocksize being integers. """ if not product % bsize: return bsize @@ -279,13 +311,13 @@ def smart_blocksize(size, unit, product, bsize): unit_size = SIZE_UNITS[unit] if size == int(size): - if unit_size > SIZE_UNITS['MiB']: + if unit_size > SIZE_UNITS["MiB"]: if unit_size % 5: - return SIZE_UNITS['MiB'] - return SIZE_UNITS['MB'] + return SIZE_UNITS["MiB"] + return SIZE_UNITS["MB"] return unit_size - if unit == 'B': + if unit == "B": raise AssertionError("byte is the smallest unit and requires an integer value") if 0 < product < bsize: @@ -299,11 +331,11 @@ def smart_blocksize(size, unit, product, bsize): def split_size_unit(string, isint=False): """Split a string between the size value (int or float) and the unit. - Support optional space(s) between the numeric value and the unit. 
+ Support optional space(s) between the numeric value and the unit. """ - unit = re.sub(r'(\d|\.)', r'', string).strip() - value = float(re.sub(unit, r'', string).strip()) - if isint and unit in ('B', ''): + unit = re.sub(r"(\d|\.)", r"", string).strip() + value = float(re.sub(unit, r"", string).strip()) + if isint and unit in ("B", ""): if int(value) != value: raise AssertionError("invalid blocksize value: bytes require an integer value") @@ -312,14 +344,16 @@ def split_size_unit(string, isint=False): product = int(round(value)) else: if unit not in SIZE_UNITS.keys(): - raise AssertionError(f"invalid size unit ({unit}): unit must be one of {', '.join(sorted(SIZE_UNITS, key=SIZE_UNITS.get))}, or none.") + raise AssertionError( + f"invalid size unit ({unit}): unit must be one of {', '.join(sorted(SIZE_UNITS, key=SIZE_UNITS.get))}, or none." + ) product = int(round(value * SIZE_UNITS[unit])) return value, unit, product def size_string(value): """Convert a raw value to a string, but only if it is an integer, a float - or a string itself. + or a string itself. """ if not isinstance(value, (int, float, str)): raise AssertionError(f"invalid value type ({type(value)}): size must be integer, float or string") @@ -328,60 +362,64 @@ def size_string(value): def size_spec(args): """Return a dictionary with size specifications, especially the size in - bytes (after rounding it to an integer number of blocks). + bytes (after rounding it to an integer number of blocks). """ - blocksize_in_bytes = split_size_unit(args['blocksize'], True)[2] + blocksize_in_bytes = split_size_unit(args["blocksize"], True)[2] if blocksize_in_bytes == 0: raise AssertionError("block size cannot be equal to zero") - size_value, size_unit, size_result = split_size_unit(args['size']) + size_value, size_unit, size_result = split_size_unit(args["size"]) if not size_unit: blocks = int(math.ceil(size_value)) else: blocksize_in_bytes = smart_blocksize(size_value, size_unit, size_result, blocksize_in_bytes) blocks = int(math.ceil(size_result / blocksize_in_bytes)) - args['size_diff'] = round_bytes = int(blocks * blocksize_in_bytes) - args['size_spec'] = dict(blocks=blocks, blocksize=blocksize_in_bytes, bytes=round_bytes, - iec=bytes_to_human(round_bytes, True), - si=bytes_to_human(round_bytes)) - return args['size_spec'] + args["size_diff"] = round_bytes = int(blocks * blocksize_in_bytes) + args["size_spec"] = dict( + blocks=blocks, + blocksize=blocksize_in_bytes, + bytes=round_bytes, + iec=bytes_to_human(round_bytes, True), + si=bytes_to_human(round_bytes), + ) + return args["size_spec"] def current_size(args): """Return the size of the file at the given location if it exists, or None.""" - path = args['path'] + path = args["path"] if os.path.exists(path): if not os.path.isfile(path): raise AssertionError(f"{path} exists but is not a regular file") - args['file_size'] = os.stat(path).st_size + args["file_size"] = os.stat(path).st_size else: - args['file_size'] = None - return args['file_size'] + args["file_size"] = None + return args["file_size"] def complete_dd_cmdline(args, dd_cmd): """Compute dd options to grow or truncate a file.""" - if args['file_size'] == args['size_spec']['bytes'] and not args['force']: + if args["file_size"] == args["size_spec"]["bytes"] and not args["force"]: # Nothing to do. return list() - bs = args['size_spec']['blocksize'] + bs = args["size_spec"]["blocksize"] # For sparse files (create, truncate, grow): write count=0 block. 
-    if args['sparse']:
-        seek = args['size_spec']['blocks']
-    elif args['force'] or not os.path.exists(args['path']):  # Create file
+    if args["sparse"]:
+        seek = args["size_spec"]["blocks"]
+    elif args["force"] or not os.path.exists(args["path"]):  # Create file
         seek = 0
-    elif args['size_diff'] < 0:  # Truncate file
-        seek = args['size_spec']['blocks']
-    elif args['size_diff'] % bs:  # Grow file
-        seek = int(args['file_size'] / bs) + 1
+    elif args["size_diff"] < 0:  # Truncate file
+        seek = args["size_spec"]["blocks"]
+    elif args["size_diff"] % bs:  # Grow file
+        seek = int(args["file_size"] / bs) + 1
     else:
-        seek = int(args['file_size'] / bs)
+        seek = int(args["file_size"] / bs)
 
-    count = args['size_spec']['blocks'] - seek
-    dd_cmd += [f'bs={bs}', f'seek={seek}', f'count={count}']
+    count = args["size_spec"]["blocks"] - seek
+    dd_cmd += [f"bs={bs}", f"seek={seek}", f"count={count}"]
 
     return dd_cmd
 
@@ -389,12 +427,12 @@ def complete_dd_cmdline(args, dd_cmd):
 def main():
     module = AnsibleModule(
         argument_spec=dict(
-            path=dict(type='path', required=True),
-            size=dict(type='raw', required=True),
-            blocksize=dict(type='raw'),
-            source=dict(type='path', default='/dev/zero'),
-            sparse=dict(type='bool', default=False),
-            force=dict(type='bool', default=False),
+            path=dict(type="path", required=True),
+            size=dict(type="raw", required=True),
+            blocksize=dict(type="raw"),
+            source=dict(type="path", default="/dev/zero"),
+            sparse=dict(type="bool", default=False),
+            force=dict(type="bool", default=False),
         ),
         supports_check_mode=True,
         add_file_common_args=True,
@@ -402,71 +440,69 @@ def main():
     args = dict(**module.params)
     diff = dict(before=dict(), after=dict())
 
-    if args['sparse'] and args['force']:
-        module.fail_json(msg='parameters values are mutually exclusive: force=true|sparse=true')
-    if not os.path.exists(os.path.dirname(args['path'])):
-        module.fail_json(msg='parent directory of the file must exist prior to run this module')
-    if not args['blocksize']:
-        args['blocksize'] = str(os.statvfs(os.path.dirname(args['path'])).f_frsize)
+    if args["sparse"] and args["force"]:
+        module.fail_json(msg="parameter values are mutually exclusive: force=true|sparse=true")
+    if not os.path.exists(os.path.dirname(args["path"])):
+        module.fail_json(msg="parent directory of the file must exist prior to running this module")
+    if not args["blocksize"]:
+        args["blocksize"] = str(os.statvfs(os.path.dirname(args["path"])).f_frsize)
 
     try:
-        args['size'] = size_string(args['size'])
-        args['blocksize'] = size_string(args['blocksize'])
+        args["size"] = size_string(args["size"])
+        args["blocksize"] = size_string(args["blocksize"])
         initial_filesize = current_size(args)
         size_descriptors = size_spec(args)
     except AssertionError as err:
         module.fail_json(msg=to_native(err))
 
-    expected_filesize = size_descriptors['bytes']
+    expected_filesize = size_descriptors["bytes"]
     if initial_filesize:
-        args['size_diff'] = expected_filesize - initial_filesize
-    diff['after']['size'] = expected_filesize
-    diff['before']['size'] = initial_filesize
+        args["size_diff"] = expected_filesize - initial_filesize
+    diff["after"]["size"] = expected_filesize
+    diff["before"]["size"] = initial_filesize
 
-    result = dict(
-        changed=args['force'],
-        size_diff=args['size_diff'],
-        path=args['path'],
-        filesize=size_descriptors)
+    result = dict(changed=args["force"], size_diff=args["size_diff"], path=args["path"], filesize=size_descriptors)
 
-    dd_bin = module.get_bin_path('dd', True)
+    dd_bin = module.get_bin_path("dd", True)
     dd_cmd = [dd_bin, f"if={args['source']}",
f"of={args['path']}"] - if expected_filesize != initial_filesize or args['force']: - result['cmd'] = ' '.join(complete_dd_cmdline(args, dd_cmd)) + if expected_filesize != initial_filesize or args["force"]: + result["cmd"] = " ".join(complete_dd_cmdline(args, dd_cmd)) if module.check_mode: - result['changed'] = True + result["changed"] = True else: - result['rc'], dummy, result['stderr'] = module.run_command(dd_cmd) + result["rc"], dummy, result["stderr"] = module.run_command(dd_cmd) - diff['after']['size'] = result_filesize = result['size_diff'] = current_size(args) + diff["after"]["size"] = result_filesize = result["size_diff"] = current_size(args) if initial_filesize: - result['size_diff'] = result_filesize - initial_filesize - if not args['force']: - result['changed'] = result_filesize != initial_filesize + result["size_diff"] = result_filesize - initial_filesize + if not args["force"]: + result["changed"] = result_filesize != initial_filesize - if result['rc']: + if result["rc"]: msg = f"dd error while creating file {args['path']} with size {args['size']} from source {args['source']}: see stderr for details" module.fail_json(msg=msg, **result) if result_filesize != expected_filesize: - msg = (f"module error while creating file {args['path']} with size {args['size']} " - f"from source {args['source']}: file is {result_filesize} bytes long") + msg = ( + f"module error while creating file {args['path']} with size {args['size']} " + f"from source {args['source']}: file is {result_filesize} bytes long" + ) module.fail_json(msg=msg, **result) # dd follows symlinks, and so does this module, while file module doesn't. # If we call it, this is to manage file's mode, owner and so on, not the # symlink's ones. file_params = dict(**module.params) - if os.path.islink(args['path']): - file_params['path'] = result['path'] = os.path.realpath(args['path']) + if os.path.islink(args["path"]): + file_params["path"] = result["path"] = os.path.realpath(args["path"]) - if args['file_size'] is not None: + if args["file_size"] is not None: file_args = module.load_file_common_arguments(file_params) - result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff) - result['diff'] = diff + result["changed"] = module.set_fs_attributes_if_different(file_args, result["changed"], diff=diff) + result["diff"] = diff module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/filesystem.py b/plugins/modules/filesystem.py index f0b108dd786..2a4748b5dcc 100644 --- a/plugins/modules/filesystem.py +++ b/plugins/modules/filesystem.py @@ -164,13 +164,13 @@ def __init__(self, module, path): self.path = path def size(self): - """ Return size in bytes of device. Returns int """ + """Return size in bytes of device. 
        Returns int"""
         statinfo = os.stat(self.path)
         if stat.S_ISBLK(statinfo.st_mode):
             blockdev_cmd = self.module.get_bin_path("blockdev", required=True)
             dummy, out, dummy = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True)
             devsize_in_bytes = int(out)
-        elif stat.S_ISCHR(statinfo.st_mode) and platform.system() == 'FreeBSD':
+        elif stat.S_ISCHR(statinfo.st_mode) and platform.system() == "FreeBSD":
             diskinfo_cmd = self.module.get_bin_path("diskinfo", required=True)
             dummy, out, dummy = self.module.run_command([diskinfo_cmd, self.path], check_rc=True)
             devsize_in_bytes = int(out.split()[2])
@@ -186,12 +186,13 @@ def get_mountpoint(self):
         cmd_findmnt = self.module.get_bin_path("findmnt", required=True)
 
         # find mountpoint
-        rc, mountpoint, dummy = self.module.run_command([cmd_findmnt, "--mtab", "--noheadings", "--output",
-                                                         "TARGET", "--source", self.path], check_rc=False)
+        rc, mountpoint, dummy = self.module.run_command(
+            [cmd_findmnt, "--mtab", "--noheadings", "--output", "TARGET", "--source", self.path], check_rc=False
+        )
         if rc != 0:
             mountpoint = None
         else:
-            mountpoint = mountpoint.split('\n')[0]
+            mountpoint = mountpoint.split("\n")[0]
 
         return mountpoint
 
@@ -200,7 +201,6 @@ def __str__(self):
 
 
 class Filesystem:
-
     MKFS: str | None = None
     MKFS_FORCE_FLAGS: list[str] | None = []
     MKFS_SET_UUID_OPTIONS: list[str] | None = None
@@ -213,7 +213,7 @@ class Filesystem:
     CHANGE_UUID_OPTION: str | None = None
     CHANGE_UUID_OPTION_HAS_ARG = True
 
-    LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}
+    LANG_ENV = {"LANG": "C", "LC_ALL": "C", "LC_MESSAGES": "C"}
 
     def __init__(self, module):
         self.module = module
@@ -224,9 +224,9 @@ def fstype(self):
 
     def get_fs_size(self, dev):
         """Return size in bytes of filesystem on device (integer).
-        Should query the info with a per-fstype command that can access the
-        device whenever it is mounted or not, and parse the command output.
-        Parser must ensure to return an integer, or raise a ValueError.
+            Should query the info with a per-fstype command that can access the
+            device whether it is mounted or not, and parse the command output.
+            The parser must return an integer, or raise a ValueError.
         """
         raise NotImplementedError()
 
@@ -253,7 +253,7 @@ def wipefs(self, dev):
         # not doable here if it needs get_mountpoint() (to prevent corruption of
         # a mounted filesystem), since 'findmnt' is not available on FreeBSD,
         # even in util-linux port for this OS.
-        wipefs = self.module.get_bin_path('wipefs', required=True)
+        wipefs = self.module.get_bin_path("wipefs", required=True)
         cmd = [wipefs, "--all", str(dev)]
         self.module.run_command(cmd, check_rc=True)
 
@@ -303,31 +303,33 @@ def change_uuid_cmd(self, new_uuid, target):
 
     def change_uuid(self, new_uuid, dev):
         """Change filesystem UUID.
Returns stdout of used command""" if self.module.check_mode: - self.module.exit_json(change=True, msg=f'Changing {self.fstype} filesystem UUID on device {dev}') + self.module.exit_json(change=True, msg=f"Changing {self.fstype} filesystem UUID on device {dev}") - dummy, out, dummy = self.module.run_command(self.change_uuid_cmd(new_uuid=new_uuid, target=str(dev)), check_rc=True) + dummy, out, dummy = self.module.run_command( + self.change_uuid_cmd(new_uuid=new_uuid, target=str(dev)), check_rc=True + ) return out class Ext(Filesystem): - MKFS_FORCE_FLAGS = ['-F'] - MKFS_SET_UUID_OPTIONS = ['-U'] - INFO = 'tune2fs' - GROW = 'resize2fs' - CHANGE_UUID = 'tune2fs' + MKFS_FORCE_FLAGS = ["-F"] + MKFS_SET_UUID_OPTIONS = ["-U"] + INFO = "tune2fs" + GROW = "resize2fs" + CHANGE_UUID = "tune2fs" CHANGE_UUID_OPTION = "-U" def get_fs_size(self, dev): """Get Block count and Block size and return their product.""" cmd = self.module.get_bin_path(self.INFO, required=True) - dummy, out, dummy = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV) + dummy, out, dummy = self.module.run_command([cmd, "-l", str(dev)], check_rc=True, environ_update=self.LANG_ENV) block_count = block_size = None for line in out.splitlines(): - if 'Block count:' in line: - block_count = int(line.split(':')[1].strip()) - elif 'Block size:' in line: - block_size = int(line.split(':')[1].strip()) + if "Block count:" in line: + block_count = int(line.split(":")[1].strip()) + elif "Block size:" in line: + block_size = int(line.split(":")[1].strip()) if None not in (block_size, block_count): break else: @@ -337,22 +339,22 @@ def get_fs_size(self, dev): class Ext2(Ext): - MKFS = 'mkfs.ext2' + MKFS = "mkfs.ext2" class Ext3(Ext): - MKFS = 'mkfs.ext3' + MKFS = "mkfs.ext3" class Ext4(Ext): - MKFS = 'mkfs.ext4' + MKFS = "mkfs.ext4" class XFS(Filesystem): - MKFS = 'mkfs.xfs' - MKFS_FORCE_FLAGS = ['-f'] - INFO = 'xfs_info' - GROW = 'xfs_growfs' + MKFS = "mkfs.xfs" + MKFS_FORCE_FLAGS = ["-f"] + INFO = "xfs_info" + GROW = "xfs_growfs" GROW_MOUNTPOINT_ONLY = True CHANGE_UUID = "xfs_admin" CHANGE_UUID_OPTION = "-U" @@ -375,12 +377,12 @@ def get_fs_size(self, dev): block_size = block_count = None for line in out.splitlines(): - col = line.split('=') - if col[0].strip() == 'data': - if col[1].strip() == 'bsize': + col = line.split("=") + if col[0].strip() == "data": + if col[1].strip() == "bsize": block_size = int(col[2].split()[0]) - if col[2].split()[1] == 'blocks': - block_count = int(col[3].split(',')[0]) + if col[2].split()[1] == "blocks": + block_count = int(col[3].split(",")[0]) if None not in (block_size, block_count): break else: @@ -390,22 +392,23 @@ def get_fs_size(self, dev): class Reiserfs(Filesystem): - MKFS = 'mkfs.reiserfs' - MKFS_FORCE_FLAGS = ['-q'] + MKFS = "mkfs.reiserfs" + MKFS_FORCE_FLAGS = ["-q"] class Bcachefs(Filesystem): - MKFS = 'mkfs.bcachefs' - MKFS_FORCE_FLAGS = ['--force'] - MKFS_SET_UUID_OPTIONS = ['-U', '--uuid'] - INFO = 'bcachefs' - GROW = 'bcachefs' - GROW_MAX_SPACE_FLAGS = ['device', 'resize'] + MKFS = "mkfs.bcachefs" + MKFS_FORCE_FLAGS = ["--force"] + MKFS_SET_UUID_OPTIONS = ["-U", "--uuid"] + INFO = "bcachefs" + GROW = "bcachefs" + GROW_MAX_SPACE_FLAGS = ["device", "resize"] def get_fs_size(self, dev): """Return size in bytes of filesystem on device (integer).""" - dummy, stdout, dummy = self.module.run_command([self.module.get_bin_path(self.INFO), - 'show-super', str(dev)], check_rc=True) + dummy, stdout, dummy = self.module.run_command( + 
[self.module.get_bin_path(self.INFO), "show-super", str(dev)], check_rc=True + ) for line in stdout.splitlines(): if "Size: " in line: @@ -437,28 +440,28 @@ def get_fs_size(self, dev): class Btrfs(Filesystem): - MKFS = 'mkfs.btrfs' - INFO = 'btrfs' - GROW = 'btrfs' - GROW_MAX_SPACE_FLAGS = ['filesystem', 'resize', 'max'] + MKFS = "mkfs.btrfs" + INFO = "btrfs" + GROW = "btrfs" + GROW_MAX_SPACE_FLAGS = ["filesystem", "resize", "max"] GROW_MOUNTPOINT_ONLY = True def __init__(self, module): super().__init__(module) mkfs = self.module.get_bin_path(self.MKFS, required=True) - dummy, stdout, stderr = self.module.run_command([mkfs, '--version'], check_rc=True) + dummy, stdout, stderr = self.module.run_command([mkfs, "--version"], check_rc=True) match = re.search(r" v([0-9.]+)", stdout) if not match: # v0.20-rc1 use stderr match = re.search(r" v([0-9.]+)", stderr) if match: # v0.20-rc1 doesn't have --force parameter added in following version v3.12 - if LooseVersion(match.group(1)) >= LooseVersion('3.12'): - self.MKFS_FORCE_FLAGS = ['-f'] + if LooseVersion(match.group(1)) >= LooseVersion("3.12"): + self.MKFS_FORCE_FLAGS = ["-f"] else: # assume version is greater or equal to 3.12 - self.MKFS_FORCE_FLAGS = ['-f'] - self.module.warn(f'Unable to identify mkfs.btrfs version ({stdout!r}, {stderr!r})') + self.MKFS_FORCE_FLAGS = ["-f"] + self.module.warn(f"Unable to identify mkfs.btrfs version ({stdout!r}, {stderr!r})") def get_fs_size(self, dev): """Return size in bytes of filesystem on device (integer).""" @@ -466,8 +469,9 @@ def get_fs_size(self, dev): if not mountpoint: self.module.fail_json(msg=f"{dev} needs to be mounted for {self.fstype} operations") - dummy, stdout, dummy = self.module.run_command([self.module.get_bin_path(self.INFO), - 'filesystem', 'usage', '-b', mountpoint], check_rc=True) + dummy, stdout, dummy = self.module.run_command( + [self.module.get_bin_path(self.INFO), "filesystem", "usage", "-b", mountpoint], check_rc=True + ) for line in stdout.splitlines(): if "Device size" in line: return int(line.split()[-1]) @@ -475,14 +479,14 @@ def get_fs_size(self, dev): class Ocfs2(Filesystem): - MKFS = 'mkfs.ocfs2' - MKFS_FORCE_FLAGS = ['-Fx'] + MKFS = "mkfs.ocfs2" + MKFS_FORCE_FLAGS = ["-Fx"] class F2fs(Filesystem): - MKFS = 'mkfs.f2fs' - INFO = 'dump.f2fs' - GROW = 'resize.f2fs' + MKFS = "mkfs.f2fs" + INFO = "dump.f2fs" + GROW = "resize.f2fs" def __init__(self, module): super().__init__(module) @@ -494,8 +498,8 @@ def __init__(self, module): if match is not None: # Since 1.9.0, mkfs.f2fs check overwrite before make filesystem # before that version -f switch wasn't used - if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'): - self.MKFS_FORCE_FLAGS = ['-f'] + if LooseVersion(match.group(1)) >= LooseVersion("1.9.0"): + self.MKFS_FORCE_FLAGS = ["-f"] def get_fs_size(self, dev): """Get sector size and total FS sectors and return their product.""" @@ -503,10 +507,10 @@ def get_fs_size(self, dev): dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) sector_size = sector_count = None for line in out.splitlines(): - if 'Info: sector size = ' in line: + if "Info: sector size = " in line: # expected: 'Info: sector size = 512' sector_size = int(line.split()[4]) - elif 'Info: total FS sectors = ' in line: + elif "Info: total FS sectors = " in line: # expected: 'Info: total FS sectors = 102400 (50 MB)' sector_count = int(line.split()[5]) if None not in (sector_size, sector_count): @@ -518,28 +522,30 @@ def get_fs_size(self, dev): class 
VFAT(Filesystem): - INFO = 'fatresize' - GROW = 'fatresize' - GROW_MAX_SPACE_FLAGS = ['-s', 'max'] + INFO = "fatresize" + GROW = "fatresize" + GROW_MAX_SPACE_FLAGS = ["-s", "max"] def __init__(self, module): super().__init__(module) - if platform.system() == 'FreeBSD': - self.MKFS = 'newfs_msdos' + if platform.system() == "FreeBSD": + self.MKFS = "newfs_msdos" else: - self.MKFS = 'mkfs.vfat' + self.MKFS = "mkfs.vfat" def get_fs_size(self, dev): """Get and return size of filesystem, in bytes.""" cmd = self.module.get_bin_path(self.INFO, required=True) - dummy, out, dummy = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV) + dummy, out, dummy = self.module.run_command( + [cmd, "--info", str(dev)], check_rc=True, environ_update=self.LANG_ENV + ) fssize = None for line in out.splitlines()[1:]: - parts = line.split(':', 1) + parts = line.split(":", 1) if len(parts) < 2: continue param, value = parts - if param.strip() in ('Size', 'Cur size'): + if param.strip() in ("Size", "Cur size"): fssize = int(value.strip()) break else: @@ -549,34 +555,36 @@ def get_fs_size(self, dev): class LVM(Filesystem): - MKFS = 'pvcreate' - MKFS_FORCE_FLAGS = ['-f'] - MKFS_SET_UUID_OPTIONS = ['-u', '--uuid'] - MKFS_SET_UUID_EXTRA_OPTIONS = ['--norestorefile'] - INFO = 'pvs' - GROW = 'pvresize' - CHANGE_UUID = 'pvchange' - CHANGE_UUID_OPTION = '-u' + MKFS = "pvcreate" + MKFS_FORCE_FLAGS = ["-f"] + MKFS_SET_UUID_OPTIONS = ["-u", "--uuid"] + MKFS_SET_UUID_EXTRA_OPTIONS = ["--norestorefile"] + INFO = "pvs" + GROW = "pvresize" + CHANGE_UUID = "pvchange" + CHANGE_UUID_OPTION = "-u" CHANGE_UUID_OPTION_HAS_ARG = False def get_fs_size(self, dev): """Get and return PV size, in bytes.""" cmd = self.module.get_bin_path(self.INFO, required=True) - dummy, size, dummy = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True) + dummy, size, dummy = self.module.run_command( + [cmd, "--noheadings", "-o", "pv_size", "--units", "b", "--nosuffix", str(dev)], check_rc=True + ) pv_size = int(size) return pv_size class Swap(Filesystem): - MKFS = 'mkswap' - MKFS_FORCE_FLAGS = ['-f'] + MKFS = "mkswap" + MKFS_FORCE_FLAGS = ["-f"] class UFS(Filesystem): - MKFS = 'newfs' - INFO = 'dumpfs' - GROW = 'growfs' - GROW_MAX_SPACE_FLAGS = ['-y'] + MKFS = "newfs" + INFO = "dumpfs" + GROW = "growfs" + GROW_MAX_SPACE_FLAGS = ["-y"] def get_fs_size(self, dev): """Get providersize and fragment size and return their product.""" @@ -585,9 +593,9 @@ def get_fs_size(self, dev): fragmentsize = providersize = None for line in out.splitlines(): - if line.startswith('fsize'): + if line.startswith("fsize"): fragmentsize = int(line.split()[1]) - elif 'providersize' in line: + elif "providersize" in line: providersize = int(line.split()[-1]) if None not in (fragmentsize, providersize): break @@ -598,26 +606,26 @@ def get_fs_size(self, dev): FILESYSTEMS = { - 'bcachefs': Bcachefs, - 'ext2': Ext2, - 'ext3': Ext3, - 'ext4': Ext4, - 'ext4dev': Ext4, - 'f2fs': F2fs, - 'reiserfs': Reiserfs, - 'xfs': XFS, - 'btrfs': Btrfs, - 'vfat': VFAT, - 'ocfs2': Ocfs2, - 'LVM2_member': LVM, - 'swap': Swap, - 'ufs': UFS, + "bcachefs": Bcachefs, + "ext2": Ext2, + "ext3": Ext3, + "ext4": Ext4, + "ext4dev": Ext4, + "f2fs": F2fs, + "reiserfs": Reiserfs, + "xfs": XFS, + "btrfs": Btrfs, + "vfat": VFAT, + "ocfs2": Ocfs2, + "LVM2_member": LVM, + "swap": Swap, + "ufs": UFS, } def main(): friendly_names = { - 'lvm': 'LVM2_member', + "lvm": "LVM2_member", } fstypes = set(FILESYSTEMS.keys()) - 
set(friendly_names.values()) | set(friendly_names.keys()) @@ -625,30 +633,28 @@ def main(): # There is no "single command" to manipulate filesystems, so we map them all out and their options module = AnsibleModule( argument_spec=dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - fstype=dict(type='str', aliases=['type'], choices=list(fstypes)), - dev=dict(type='path', required=True, aliases=['device']), - opts=dict(type='str'), - force=dict(type='bool', default=False), - resizefs=dict(type='bool', default=False), - uuid=dict(type='str'), + state=dict(type="str", default="present", choices=["present", "absent"]), + fstype=dict(type="str", aliases=["type"], choices=list(fstypes)), + dev=dict(type="path", required=True, aliases=["device"]), + opts=dict(type="str"), + force=dict(type="bool", default=False), + resizefs=dict(type="bool", default=False), + uuid=dict(type="str"), ), - required_if=[ - ('state', 'present', ['fstype']) - ], + required_if=[("state", "present", ["fstype"])], mutually_exclusive=[ - ('resizefs', 'uuid'), + ("resizefs", "uuid"), ], supports_check_mode=True, ) - state = module.params['state'] - dev = module.params['dev'] - fstype = module.params['fstype'] - opts = module.params['opts'] - force = module.params['force'] - resizefs = module.params['resizefs'] - uuid = module.params['uuid'] + state = module.params["state"] + dev = module.params["dev"] + fstype = module.params["fstype"] + opts = module.params["opts"] + force = module.params["force"] + resizefs = module.params["resizefs"] + uuid = module.params["uuid"] mkfs_opts = [] if opts is not None: @@ -668,11 +674,11 @@ def main(): # In case blkid/fstyp isn't able to identify an existing filesystem, device # is considered as empty, then this existing filesystem would be overwritten # even if force isn't enabled. - cmd = module.get_bin_path('blkid', required=True) - rc, raw_fs, err = module.run_command([cmd, '-c', os.devnull, '-o', 'value', '-s', 'TYPE', str(dev)]) + cmd = module.get_bin_path("blkid", required=True) + rc, raw_fs, err = module.run_command([cmd, "-c", os.devnull, "-o", "value", "-s", "TYPE", str(dev)]) fs = raw_fs.strip() - if not fs and platform.system() == 'FreeBSD': - cmd = module.get_bin_path('fstyp', required=True) + if not fs and platform.system() == "FreeBSD": + cmd = module.get_bin_path("fstyp", required=True) rc, raw_fs, err = module.run_command([cmd, str(dev)]) fs = raw_fs.strip() @@ -688,7 +694,9 @@ def main(): filesystem = klass(module) if uuid and not (filesystem.CHANGE_UUID or filesystem.MKFS_SET_UUID_OPTIONS): - module.fail_json(changed=False, msg=f"module does not support UUID option for this filesystem ({fstype}) yet.") + module.fail_json( + changed=False, msg=f"module does not support UUID option for this filesystem ({fstype}) yet." 
+ ) same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype] if same_fs and not resizefs and not uuid and not force: @@ -702,7 +710,6 @@ def main(): module.exit_json(changed=True, msg=out) elif uuid: - out = filesystem.change_uuid(new_uuid=uuid, dev=dev) module.exit_json(changed=True, msg=out) @@ -722,5 +729,5 @@ def main(): module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/flatpak.py b/plugins/modules/flatpak.py index c4168c15b76..f1b5a71343a 100644 --- a/plugins/modules/flatpak.py +++ b/plugins/modules/flatpak.py @@ -186,13 +186,13 @@ def install_flat(module, binary, remote, names, method, no_dependencies): uri_names = [] id_names = [] for name in names: - if name.startswith('http://') or name.startswith('https://'): + if name.startswith("http://") or name.startswith("https://"): uri_names.append(name) else: id_names.append(name) base_command = [binary, "install", f"--{method}"] flatpak_version = _flatpak_version(module, binary) - if LooseVersion(flatpak_version) < LooseVersion('1.1.3'): + if LooseVersion(flatpak_version) < LooseVersion("1.1.3"): base_command += ["-y"] else: base_command += ["--noninteractive"] @@ -204,19 +204,16 @@ def install_flat(module, binary, remote, names, method, no_dependencies): if id_names: command = base_command + [remote] + id_names _flatpak_command(module, module.check_mode, command) - result['changed'] = True + result["changed"] = True def update_flat(module, binary, names, method, no_dependencies): """Update existing flatpaks.""" global result # pylint: disable=global-variable-not-assigned - installed_flat_names = [ - _match_installed_flat_name(module, binary, name, method) - for name in names - ] + installed_flat_names = [_match_installed_flat_name(module, binary, name, method) for name in names] command = [binary, "update", f"--{method}"] flatpak_version = _flatpak_version(module, binary) - if LooseVersion(flatpak_version) < LooseVersion('1.1.3'): + if LooseVersion(flatpak_version) < LooseVersion("1.1.3"): command += ["-y"] else: command += ["--noninteractive"] @@ -224,27 +221,22 @@ def update_flat(module, binary, names, method, no_dependencies): command += ["--no-deps"] command += installed_flat_names stdout = _flatpak_command(module, module.check_mode, command) - result["changed"] = ( - True if module.check_mode else stdout.find("Nothing to do.") == -1 - ) + result["changed"] = True if module.check_mode else stdout.find("Nothing to do.") == -1 def uninstall_flat(module, binary, names, method): """Remove existing flatpaks.""" global result # pylint: disable=global-variable-not-assigned - installed_flat_names = [ - _match_installed_flat_name(module, binary, name, method) - for name in names - ] + installed_flat_names = [_match_installed_flat_name(module, binary, name, method) for name in names] command = [binary, "uninstall"] flatpak_version = _flatpak_version(module, binary) - if LooseVersion(flatpak_version) < LooseVersion('1.1.3'): + if LooseVersion(flatpak_version) < LooseVersion("1.1.3"): command += ["-y"] else: command += ["--noninteractive"] command += [f"--{method}"] + installed_flat_names _flatpak_command(module, module.check_mode, command) - result['changed'] = True + result["changed"] = True def flatpak_exists(module, binary, names, method): @@ -271,19 +263,17 @@ def _match_installed_flat_name(module, binary, name, method): # Try running flatpak list with columns feature command = [binary, "list", f"--{method}", "--app", "--columns=application"] 
     _flatpak_command(module, False, command, ignore_failure=True)
-    if result['rc'] != 0 and OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE in result['stderr']:
+    if result["rc"] != 0 and OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE in result["stderr"]:
         # Probably flatpak before 1.2
-        matched_flatpak_name = \
-            _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method)
+        matched_flatpak_name = _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method)
     else:
         # Probably flatpak >= 1.2
-        matched_flatpak_name = \
-            _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method)
+        matched_flatpak_name = _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method)

     if matched_flatpak_name:
         return matched_flatpak_name
     else:
-        result['msg'] = (
+        result["msg"] = (
             "Flatpak removal failed: Could not match any installed flatpaks to "
             f"the name `{_parse_flatpak_name(name)}`. "
             "If you used a URL, try using the reverse DNS name of the flatpak"
@@ -295,7 +285,7 @@ def _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, metho
     global result  # pylint: disable=global-variable-not-assigned
     command = [binary, "list", f"--{method}", "--app", "--columns=application"]
     output = _flatpak_command(module, False, command)
-    for row in output.split('\n'):
+    for row in output.split("\n"):
         if parsed_name.lower() == row.lower():
             return row

@@ -304,7 +294,7 @@ def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method
     global result  # pylint: disable=global-variable-not-assigned
     command = [binary, "list", f"--{method}", "--app"]
     output = _flatpak_command(module, False, command)
-    for row in output.split('\n'):
+    for row in output.split("\n"):
         if parsed_name.lower() in row.lower():
             return row.split()[0]

@@ -315,9 +305,9 @@ def _is_flatpak_id(part):
     # https://docs.flatpak.org/en/latest/conventions.html#application-ids
     # Flathub:
     # https://docs.flathub.org/docs/for-app-authors/requirements#application-id
-    if '.' not in part:
+    if "." not in part:
         return False
-    sections = part.split('.')
+    sections = part.split(".")
     if len(sections) < 2:
         return False
     domain = sections[0]
@@ -330,12 +320,12 @@ def _is_flatpak_id(part):


 def _parse_flatpak_name(name):
-    if name.startswith('http://') or name.startswith('https://'):
-        file_name = urlparse(name).path.split('/')[-1]
-        file_name_without_extension = file_name.split('.')[0:-1]
+    if name.startswith("http://") or name.startswith("https://"):
+        file_name = urlparse(name).path.split("/")[-1]
+        file_name_without_extension = file_name.split(".")[0:-1]
         common_name = ".".join(file_name_without_extension)
     else:
-        parts = name.split('/')
+        parts = name.split("/")
         for part in parts:
             if _is_flatpak_id(part):
                 common_name = part
@@ -355,63 +345,57 @@ def _flatpak_version(module, binary):

 def _flatpak_command(module, noop, command, ignore_failure=False):
     global result  # pylint: disable=global-variable-not-assigned
-    result['command'] = ' '.join(command)
+    result["command"] = " ".join(command)
     if noop:
-        result['rc'] = 0
+        result["rc"] = 0
         return ""

-    result['rc'], result['stdout'], result['stderr'] = module.run_command(
-        command, check_rc=not ignore_failure
-    )
-    return result['stdout']
+    result["rc"], result["stdout"], result["stderr"] = module.run_command(command, check_rc=not ignore_failure)
+    return result["stdout"]


 def main():
     # This module supports check mode
     module = AnsibleModule(
         argument_spec=dict(
-            name=dict(type='list', elements='str', required=True),
-            remote=dict(type='str', default='flathub'),
-            method=dict(type='str', default='system',
-                        choices=['user', 'system']),
-            state=dict(type='str', default='present',
-                       choices=['absent', 'present', 'latest']),
-            no_dependencies=dict(type='bool', default=False),
-            executable=dict(type='path', default='flatpak')
+            name=dict(type="list", elements="str", required=True),
+            remote=dict(type="str", default="flathub"),
+            method=dict(type="str", default="system", choices=["user", "system"]),
+            state=dict(type="str", default="present", choices=["absent", "present", "latest"]),
+            no_dependencies=dict(type="bool", default=False),
+            executable=dict(type="path", default="flatpak"),
         ),
         supports_check_mode=True,
     )

-    name = module.params['name']
-    state = module.params['state']
-    remote = module.params['remote']
-    no_dependencies = module.params['no_dependencies']
-    method = module.params['method']
-    executable = module.params['executable']
+    name = module.params["name"]
+    state = module.params["state"]
+    remote = module.params["remote"]
+    no_dependencies = module.params["no_dependencies"]
+    method = module.params["method"]
+    executable = module.params["executable"]
     binary = module.get_bin_path(executable, None)

     global result
-    result = dict(
-        changed=False
-    )
+    result = dict(changed=False)

     # If the binary was not found, fail the operation
     if not binary:
         module.fail_json(msg=f"Executable '{executable}' was not found on the system.", **result)

-    module.run_command_environ_update = dict(LANGUAGE='C', LC_ALL='C')
+    module.run_command_environ_update = dict(LANGUAGE="C", LC_ALL="C")

     installed, not_installed = flatpak_exists(module, binary, name, method)
-    if state == 'absent' and installed:
+    if state == "absent" and installed:
         uninstall_flat(module, binary, installed, method)
     else:
-        if state == 'latest' and installed:
+        if state == "latest" and installed:
             update_flat(module, binary, installed, method, no_dependencies)
-        if state in ('present', 'latest') and not_installed:
+        if state in ("present", "latest") and not_installed:
             install_flat(module, binary, remote, not_installed, method, no_dependencies)

     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/flatpak_remote.py b/plugins/modules/flatpak_remote.py
index a035d6808f5..b122ff1f94d 100644
--- a/plugins/modules/flatpak_remote.py
+++ b/plugins/modules/flatpak_remote.py
@@ -121,7 +121,7 @@ def add_remote(module, binary, name, flatpakrepo_url, method):
     global result  # pylint: disable=global-variable-not-assigned
     command = [binary, "remote-add", f"--{method}", name, flatpakrepo_url]
     _flatpak_command(module, module.check_mode, command)
-    result['changed'] = True
+    result["changed"] = True


 def remove_remote(module, binary, name, method):
@@ -129,7 +129,7 @@ def remove_remote(module, binary, name, method):
     global result  # pylint: disable=global-variable-not-assigned
     command = [binary, "remote-delete", f"--{method}", "--force", name]
     _flatpak_command(module, module.check_mode, command)
-    result['changed'] = True
+    result["changed"] = True


 def remote_exists(module, binary, name, method):
@@ -151,7 +151,7 @@ def enable_remote(module, binary, name, method):
     global result  # pylint: disable=global-variable-not-assigned
     command = [binary, "remote-modify", "--enable", f"--{method}", name]
     _flatpak_command(module, module.check_mode, command)
-    result['changed'] = True
+    result["changed"] = True


 def disable_remote(module, binary, name, method):
@@ -159,7 +159,7 @@ def disable_remote(module, binary, name, method):
     global result  # pylint: disable=global-variable-not-assigned
     command = [binary, "remote-modify", "--disable", f"--{method}", name]
     _flatpak_command(module, module.check_mode, command)
-    result['changed'] = True
+    result["changed"] = True


 def remote_enabled(module, binary, name, method):
@@ -178,48 +178,42 @@ def remote_enabled(module, binary, name, method):

 def _flatpak_command(module, noop, command):
     global result  # pylint: disable=global-variable-not-assigned
-    result['command'] = ' '.join(command)
+    result["command"] = " ".join(command)
     if noop:
-        result['rc'] = 0
+        result["rc"] = 0
         return ""

-    result['rc'], result['stdout'], result['stderr'] = module.run_command(
-        command, check_rc=True
-    )
-    return result['stdout']
+    result["rc"], result["stdout"], result["stderr"] = module.run_command(command, check_rc=True)
+    return result["stdout"]


 def main():
     module = AnsibleModule(
         argument_spec=dict(
-            name=dict(type='str', required=True),
-            flatpakrepo_url=dict(type='str'),
-            method=dict(type='str', default='system',
-                        choices=['user', 'system']),
-            state=dict(type='str', default="present",
-                       choices=['absent', 'present']),
-            enabled=dict(type='bool', default=True),
-            executable=dict(type='str', default="flatpak")
+            name=dict(type="str", required=True),
+            flatpakrepo_url=dict(type="str"),
+            method=dict(type="str", default="system", choices=["user", "system"]),
+            state=dict(type="str", default="present", choices=["absent", "present"]),
+            enabled=dict(type="bool", default=True),
+            executable=dict(type="str", default="flatpak"),
         ),
         # This module supports check mode
         supports_check_mode=True,
     )

-    name = module.params['name']
-    flatpakrepo_url = module.params['flatpakrepo_url']
-    method = module.params['method']
-    state = module.params['state']
-    enabled = module.params['enabled']
-    executable = module.params['executable']
+    name = module.params["name"]
+    flatpakrepo_url = module.params["flatpakrepo_url"]
+    method = module.params["method"]
+    state = module.params["state"]
+    enabled = module.params["enabled"]
+    executable = module.params["executable"]
     binary = module.get_bin_path(executable, None)

     if flatpakrepo_url is None:
-        flatpakrepo_url = ''
+        flatpakrepo_url = ""

     global result
-    result = dict(
-        changed=False
-    )
+    result = dict(changed=False)

     # If the binary was not found, fail the operation
     if not binary:
@@ -227,12 +221,12 @@ def main():

     remote_already_exists = remote_exists(module, binary, to_bytes(name), method)

-    if state == 'present' and not remote_already_exists:
+    if state == "present" and not remote_already_exists:
         add_remote(module, binary, name, flatpakrepo_url, method)
-    elif state == 'absent' and remote_already_exists:
+    elif state == "absent" and remote_already_exists:
         remove_remote(module, binary, name, method)

-    if state == 'present':
+    if state == "present":
         remote_already_enabled = remote_enabled(module, binary, to_bytes(name), method)

         if enabled and not remote_already_enabled:
@@ -243,5 +237,5 @@ def main():
     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/gandi_livedns.py b/plugins/modules/gandi_livedns.py
index 0d6f93529d4..9194b8c3851 100644
--- a/plugins/modules/gandi_livedns.py
+++ b/plugins/modules/gandi_livedns.py
@@ -165,50 +165,50 @@ def main():
     module = AnsibleModule(
         argument_spec=dict(
-            api_key=dict(type='str', no_log=True),
-            personal_access_token=dict(type='str', no_log=True),
-            record=dict(type='str', required=True),
-            state=dict(type='str', default='present', choices=['absent', 'present']),
-            ttl=dict(type='int'),
-            type=dict(type='str', required=True),
-            values=dict(type='list', elements='str'),
-            domain=dict(type='str', required=True),
+            api_key=dict(type="str", no_log=True),
+            personal_access_token=dict(type="str", no_log=True),
+            record=dict(type="str", required=True),
+            state=dict(type="str", default="present", choices=["absent", "present"]),
+            ttl=dict(type="int"),
+            type=dict(type="str", required=True),
+            values=dict(type="list", elements="str"),
+            domain=dict(type="str", required=True),
         ),
         supports_check_mode=True,
         required_if=[
-            ('state', 'present', ['values', 'ttl']),
+            ("state", "present", ["values", "ttl"]),
         ],
         mutually_exclusive=[
-            ('api_key', 'personal_access_token'),
+            ("api_key", "personal_access_token"),
         ],
         required_one_of=[
-            ('api_key', 'personal_access_token'),
+            ("api_key", "personal_access_token"),
         ],
     )

     gandi_api = GandiLiveDNSAPI(module)

-    if module.params['state'] == 'present':
-        ret, changed = gandi_api.ensure_dns_record(module.params['record'],
-                                                   module.params['type'],
-                                                   module.params['ttl'],
-                                                   module.params['values'],
-                                                   module.params['domain'])
+    if module.params["state"] == "present":
+        ret, changed = gandi_api.ensure_dns_record(
+            module.params["record"],
+            module.params["type"],
+            module.params["ttl"],
+            module.params["values"],
+            module.params["domain"],
+        )
     else:
-        ret, changed = gandi_api.delete_dns_record(module.params['record'],
-                                                   module.params['type'],
-                                                   module.params['values'],
-                                                   module.params['domain'])
+        ret, changed = gandi_api.delete_dns_record(
+            module.params["record"], module.params["type"], module.params["values"], module.params["domain"]
+        )

     result = dict(
         changed=changed,
     )
     if ret:
-        result['record'] = gandi_api.build_result(ret,
-                                                  module.params['domain'])
+        result["record"] = gandi_api.build_result(ret, module.params["domain"])

     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/gconftool2.py b/plugins/modules/gconftool2.py
index 9b5cbca78f5..90600ea6cae 100644
--- a/plugins/modules/gconftool2.py
+++ b/plugins/modules/gconftool2.py
@@ -107,22 +107,22 @@


 class GConftool(StateModuleHelper):
-    diff_params = ('value', )
-    output_params = ('key', 'value_type')
-    facts_params = ('key', 'value_type')
-    facts_name = 'gconftool2'
+    diff_params = ("value",)
+    output_params = ("key", "value_type")
+    facts_params = ("key", "value_type")
+    facts_name = "gconftool2"
     module = dict(
         argument_spec=dict(
-            key=dict(type='str', required=True, no_log=False),
-            value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']),
-            value=dict(type='str'),
-            state=dict(type='str', required=True, choices=['absent', 'present']),
-            direct=dict(type='bool', default=False),
-            config_source=dict(type='str'),
+            key=dict(type="str", required=True, no_log=False),
+            value_type=dict(type="str", choices=["bool", "float", "int", "string"]),
+            value=dict(type="str"),
+            state=dict(type="str", required=True, choices=["absent", "present"]),
+            direct=dict(type="bool", default=False),
+            config_source=dict(type="str"),
         ),
         required_if=[
-            ('state', 'present', ['value', 'value_type']),
-            ('direct', True, ['config_source']),
+            ("state", "present", ["value", "value_type"]),
+            ("direct", True, ["config_source"]),
         ],
         supports_check_mode=True,
     )
@@ -136,19 +136,20 @@ def __init_module__(self):
             rc, out, err = ctx.run()
         self.vars.version = out.strip()

-        self.vars.set('previous_value', self._get(), fact=True)
-        self.vars.set('value_type', self.vars.value_type)
-        self.vars.set('_value', self.vars.previous_value, output=False, change=True)
-        self.vars.set_meta('value', initial_value=self.vars.previous_value)
-        self.vars.set('playbook_value', self.vars.value, fact=True)
+        self.vars.set("previous_value", self._get(), fact=True)
+        self.vars.set("value_type", self.vars.value_type)
+        self.vars.set("_value", self.vars.previous_value, output=False, change=True)
+        self.vars.set_meta("value", initial_value=self.vars.previous_value)
+        self.vars.set("playbook_value", self.vars.value, fact=True)

     def _make_process(self, fail_on_err):
         def process(rc, out, err):
             if err and fail_on_err:
-                self.do_raise(f'gconftool-2 failed with error:\n{err.strip()}')
+                self.do_raise(f"gconftool-2 failed with error:\n{err.strip()}")
             out = out.rstrip()
             self.vars.value = None if out == "" else out
             return self.vars.value
+
         return process

     def _get(self):
@@ -157,15 +158,17 @@ def _get(self):
     def state_absent(self):
         with self.runner("state key", output_process=self._make_process(False)) as ctx:
             ctx.run()
-            self.vars.set('run_info', ctx.run_info, verbosity=4)
-        self.vars.set('new_value', None, fact=True)
+            self.vars.set("run_info", ctx.run_info, verbosity=4)
+        self.vars.set("new_value", None, fact=True)
         self.vars._value = None

     def state_present(self):
-        with self.runner("direct config_source value_type state key value", output_process=self._make_process(True)) as ctx:
+        with self.runner(
+            "direct config_source value_type state key value", output_process=self._make_process(True)
+        ) as ctx:
             ctx.run()
-            self.vars.set('run_info', ctx.run_info, verbosity=4)
-        self.vars.set('new_value', self._get(), fact=True)
+            self.vars.set("run_info", ctx.run_info, verbosity=4)
+        self.vars.set("new_value", self._get(), fact=True)
         self.vars._value = self.vars.new_value

@@ -173,5 +176,5 @@ def main():
     GConftool.execute()


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/gconftool2_info.py b/plugins/modules/gconftool2_info.py
index f1047bccee6..1fab46b7920 100644
--- a/plugins/modules/gconftool2_info.py
+++ b/plugins/modules/gconftool2_info.py
@@ -58,10 +58,10 @@


 class GConftoolInfo(ModuleHelper):
-    output_params = ['key']
+    output_params = ["key"]
     module = dict(
         argument_spec=dict(
-            key=dict(type='str', required=True, no_log=False),
+            key=dict(type="str", required=True, no_log=False),
         ),
         supports_check_mode=True,
     )
@@ -82,5 +82,5 @@ def main():
     GConftoolInfo.execute()


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/gem.py b/plugins/modules/gem.py
index 30a18f97020..cd5d15f7258 100644
--- a/plugins/modules/gem.py
+++ b/plugins/modules/gem.py
@@ -142,10 +142,10 @@


 def get_rubygems_path(module):
-    if module.params['executable']:
-        result = module.params['executable'].split(' ')
+    if module.params["executable"]:
+        result = module.params["executable"].split(" ")
     else:
-        result = [module.get_bin_path('gem', True)]
+        result = [module.get_bin_path("gem", True)]
     return result

@@ -153,10 +153,10 @@ def get_rubygems_version(module):
     if hasattr(get_rubygems_version, "ver"):
         return get_rubygems_version.ver

-    cmd = get_rubygems_path(module) + ['--version']
+    cmd = get_rubygems_path(module) + ["--version"]
     (rc, out, err) = module.run_command(cmd, check_rc=True)

-    match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out)
+    match = re.match(r"^(\d+)\.(\d+)\.(\d+)", out)
     if not match:
         return None

@@ -167,21 +167,20 @@ def get_rubygems_version(module):


 def get_rubygems_environ(module):
-    if module.params['install_dir']:
-        return {'GEM_HOME': module.params['install_dir']}
+    if module.params["install_dir"]:
+        return {"GEM_HOME": module.params["install_dir"]}
     return None


 def get_installed_versions(module, remote=False):
-
     cmd = get_rubygems_path(module)
-    cmd.append('query')
+    cmd.append("query")
     cmd.extend(common_opts(module))
     if remote:
-        cmd.append('--remote')
-        if module.params['repository']:
-            cmd.extend(['--source', module.params['repository']])
-    cmd.append('-n')
+        cmd.append("--remote")
+        if module.params["repository"]:
+            cmd.extend(["--source", module.params["repository"]])
+    cmd.append("-n")
     cmd.append(f"^{module.params['name']}$")

     environ = get_rubygems_environ(module)
@@ -191,19 +190,19 @@ def get_installed_versions(module, remote=False):
         match = re.match(r"\S+\s+\((?:default: )?(.+)\)", line)
         if match:
             versions = match.group(1)
-            for version in versions.split(', '):
+            for version in versions.split(", "):
                 installed_versions.append(version.split()[0])
     return installed_versions


 def exists(module):
-    if module.params['state'] == 'latest':
+    if module.params["state"] == "latest":
         remoteversions = get_installed_versions(module, remote=True)
         if remoteversions:
-            module.params['version'] = remoteversions[0]
+            module.params["version"] = remoteversions[0]
     installed_versions = get_installed_versions(module)
-    if module.params['version']:
-        if module.params['version'] in installed_versions:
+    if module.params["version"]:
+        if module.params["version"] in installed_versions:
             return True
     else:
         if installed_versions:
@@ -214,123 +213,120 @@ def exists(module):

 def common_opts(module):
     opts = []
     ver = get_rubygems_version(module)
-    if module.params['norc'] and ver and ver >= (2, 5, 2):
-        opts.append('--norc')
+    if module.params["norc"] and ver and ver >= (2, 5, 2):
+        opts.append("--norc")
     return opts


 def uninstall(module):
-
     if module.check_mode:
         return
     cmd = get_rubygems_path(module)
     environ = get_rubygems_environ(module)
-    cmd.append('uninstall')
+    cmd.append("uninstall")
     cmd.extend(common_opts(module))
-    if module.params['install_dir']:
-        cmd.extend(['--install-dir', module.params['install_dir']])
+    if module.params["install_dir"]:
+        cmd.extend(["--install-dir", module.params["install_dir"]])

-    if module.params['bindir']:
-        cmd.extend(['--bindir', module.params['bindir']])
+    if module.params["bindir"]:
+        cmd.extend(["--bindir", module.params["bindir"]])

-    if module.params['version']:
-        cmd.extend(['--version', module.params['version']])
+    if module.params["version"]:
+        cmd.extend(["--version", module.params["version"]])
     else:
-        cmd.append('--all')
-        cmd.append('--executable')
-    if module.params['force']:
-        cmd.append('--force')
-    cmd.append(module.params['name'])
+        cmd.append("--all")
+        cmd.append("--executable")
+    if module.params["force"]:
+        cmd.append("--force")
+    cmd.append(module.params["name"])
     return module.run_command(cmd, environ_update=environ, check_rc=True)


 def install(module):
-
     if module.check_mode:
         return
     ver = get_rubygems_version(module)
     cmd = get_rubygems_path(module)
-    cmd.append('install')
+    cmd.append("install")
     cmd.extend(common_opts(module))
-    if module.params['version']:
-        cmd.extend(['--version', module.params['version']])
-    if module.params['repository']:
-        cmd.extend(['--source', module.params['repository']])
-    if not module.params['include_dependencies']:
-        cmd.append('--ignore-dependencies')
+    if module.params["version"]:
+        cmd.extend(["--version", module.params["version"]])
+    if module.params["repository"]:
+        cmd.extend(["--source", module.params["repository"]])
+    if not module.params["include_dependencies"]:
+        cmd.append("--ignore-dependencies")
     else:
         if ver and ver < (2, 0, 0):
-            cmd.append('--include-dependencies')
-    if module.params['user_install']:
-        cmd.append('--user-install')
+            cmd.append("--include-dependencies")
+    if module.params["user_install"]:
+        cmd.append("--user-install")
     else:
-        cmd.append('--no-user-install')
-    if module.params['install_dir']:
-        cmd.extend(['--install-dir', module.params['install_dir']])
-    if module.params['bindir']:
-        cmd.extend(['--bindir', module.params['bindir']])
-    if module.params['pre_release']:
-        cmd.append('--pre')
-    if not module.params['include_doc']:
+        cmd.append("--no-user-install")
+    if module.params["install_dir"]:
+        cmd.extend(["--install-dir", module.params["install_dir"]])
+    if module.params["bindir"]:
+        cmd.extend(["--bindir", module.params["bindir"]])
+    if module.params["pre_release"]:
+        cmd.append("--pre")
+    if not module.params["include_doc"]:
         if ver and ver < (2, 0, 0):
-            cmd.append('--no-rdoc')
-            cmd.append('--no-ri')
+            cmd.append("--no-rdoc")
+            cmd.append("--no-ri")
         else:
-            cmd.append('--no-document')
-    if module.params['env_shebang']:
-        cmd.append('--env-shebang')
-    cmd.append(module.params['gem_source'])
-    if module.params['build_flags']:
-        cmd.extend(['--', module.params['build_flags']])
-    if module.params['force']:
-        cmd.append('--force')
+            cmd.append("--no-document")
+    if module.params["env_shebang"]:
+        cmd.append("--env-shebang")
+    cmd.append(module.params["gem_source"])
+    if module.params["build_flags"]:
+        cmd.extend(["--", module.params["build_flags"]])
+    if module.params["force"]:
+        cmd.append("--force")
     module.run_command(cmd, check_rc=True)


 def main():
-
     module = AnsibleModule(
         argument_spec=dict(
-            executable=dict(type='path'),
-            gem_source=dict(type='path'),
-            include_dependencies=dict(default=True, type='bool'),
-            name=dict(required=True, type='str'),
-            repository=dict(aliases=['source'], type='str'),
-            state=dict(default='present', choices=['present', 'absent', 'latest'], type='str'),
-            user_install=dict(default=True, type='bool'),
-            install_dir=dict(type='path'),
-            bindir=dict(type='path'),
-            norc=dict(type='bool', default=True),
-            pre_release=dict(default=False, type='bool'),
-            include_doc=dict(default=False, type='bool'),
-            env_shebang=dict(default=False, type='bool'),
-            version=dict(type='str'),
-            build_flags=dict(type='str'),
-            force=dict(default=False, type='bool'),
+            executable=dict(type="path"),
+            gem_source=dict(type="path"),
+            include_dependencies=dict(default=True, type="bool"),
+            name=dict(required=True, type="str"),
+            repository=dict(aliases=["source"], type="str"),
+            state=dict(default="present", choices=["present", "absent", "latest"], type="str"),
+            user_install=dict(default=True, type="bool"),
+            install_dir=dict(type="path"),
+            bindir=dict(type="path"),
+            norc=dict(type="bool", default=True),
+            pre_release=dict(default=False, type="bool"),
+            include_doc=dict(default=False, type="bool"),
+            env_shebang=dict(default=False, type="bool"),
+            version=dict(type="str"),
+            build_flags=dict(type="str"),
+            force=dict(default=False, type="bool"),
         ),
         supports_check_mode=True,
-        mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']],
+        mutually_exclusive=[["gem_source", "repository"], ["gem_source", "version"]],
     )

-    if module.params['version'] and module.params['state'] == 'latest':
+    if module.params["version"] and module.params["state"] == "latest":
         module.fail_json(msg="Cannot specify version when state=latest")
-    if module.params['gem_source'] and module.params['state'] == 'latest':
+    if module.params["gem_source"] and module.params["state"] == "latest":
         module.fail_json(msg="Cannot maintain state=latest when installing from local source")
-    if module.params['user_install'] and module.params['install_dir']:
+    if module.params["user_install"] and module.params["install_dir"]:
         module.fail_json(msg="install_dir requires user_install=false")

-    if not module.params['gem_source']:
-        module.params['gem_source'] = module.params['name']
+    if not module.params["gem_source"]:
+        module.params["gem_source"] = module.params["name"]

     changed = False

-    if module.params['state'] in ['present', 'latest']:
+    if module.params["state"] in ["present", "latest"]:
         if not exists(module):
             install(module)
             changed = True
-    elif module.params['state'] == 'absent':
+    elif module.params["state"] == "absent":
         if exists(module):
             command_output = uninstall(module)
             if command_output is not None and exists(module):
@@ -343,19 +339,19 @@ def main():
                     ),
                     rc=rc,
                     stdout=out,
-                    stderr=err
+                    stderr=err,
                 )
             else:
                 changed = True

     result = {}
-    result['name'] = module.params['name']
-    result['state'] = module.params['state']
-    if module.params['version']:
-        result['version'] = module.params['version']
-    result['changed'] = changed
+    result["name"] = module.params["name"]
+    result["state"] = module.params["state"]
+    if module.params["version"]:
+        result["version"] = module.params["version"]
+    result["changed"] = changed

     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/gio_mime.py b/plugins/modules/gio_mime.py
index a7fb3c4fcff..e395cb883cb 100644
--- a/plugins/modules/gio_mime.py
+++ b/plugins/modules/gio_mime.py
@@ -72,11 +72,11 @@


 class GioMime(ModuleHelper):
-    output_params = ['handler']
+    output_params = ["handler"]
     module = dict(
         argument_spec=dict(
-            mime_type=dict(type='str', required=True),
-            handler=dict(type='str', required=True),
+            mime_type=dict(type="str", required=True),
+            handler=dict(type="str", required=True),
         ),
         supports_check_mode=True,
     )
@@ -86,12 +86,16 @@ def __init_module__(self):
         with self.runner("version") as ctx:
             rc, out, err = ctx.run()
             self.vars.version = out.strip()
-        self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True)
+        self.vars.set_meta(
+            "handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True
+        )

     def __run__(self):
-        check_mode_return = (0, 'Module executed in check mode', '')
+        check_mode_return = (0, "Module executed in check mode", "")
         if self.vars.has_changed:
-            with self.runner.context(args_order="mime mime_type handler", check_mode_skip=True, check_mode_return=check_mode_return) as ctx:
+            with self.runner.context(
+                args_order="mime mime_type handler", check_mode_skip=True, check_mode_return=check_mode_return
+            ) as ctx:
                 rc, out, err = ctx.run()
                 self.vars.stdout = out
                 self.vars.stderr = err
@@ -102,5 +106,5 @@ def main():
     GioMime.execute()


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/git_config.py b/plugins/modules/git_config.py
index 2be77ff0b74..2faedd61138 100644
--- a/plugins/modules/git_config.py
+++ b/plugins/modules/git_config.py
@@ -144,44 +144,46 @@ def main():
     module = AnsibleModule(
         argument_spec=dict(
-            name=dict(type='str', required=True),
-            repo=dict(type='path'),
-            file=dict(type='path'),
-            add_mode=dict(type='str', default='replace-all', choices=['add', 'replace-all']),
-            scope=dict(type='str', choices=['file', 'local', 'global', 'system']),
-            state=dict(type='str', default='present', choices=['present', 'absent']),
+            name=dict(type="str", required=True),
+            repo=dict(type="path"),
+            file=dict(type="path"),
+            add_mode=dict(type="str", default="replace-all", choices=["add", "replace-all"]),
+            scope=dict(type="str", choices=["file", "local", "global", "system"]),
+            state=dict(type="str", default="present", choices=["present", "absent"]),
             value=dict(),
         ),
         required_if=[
-            ('scope', 'local', ['repo']),
-            ('scope', 'file', ['file']),
-            ('state', 'present', ['value']),
+            ("scope", "local", ["repo"]),
+            ("scope", "file", ["file"]),
+            ("state", "present", ["value"]),
         ],
         supports_check_mode=True,
     )
-    git_path = module.get_bin_path('git', True)
+    git_path = module.get_bin_path("git", True)
     params = module.params
     # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
     # Set the locale to C to ensure consistent messages.
-    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+    module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C")

-    name = params['name'] or ''
-    unset = params['state'] == 'absent'
-    new_value = params['value'] or ''
-    add_mode = params['add_mode']
+    name = params["name"] or ""
+    unset = params["state"] == "absent"
+    new_value = params["value"] or ""
+    add_mode = params["add_mode"]

     if not unset and not new_value:
-        module.fail_json(msg="If state=present, a value must be specified. Use the community.general.git_config_info module to read a config value.")
+        module.fail_json(
+            msg="If state=present, a value must be specified. Use the community.general.git_config_info module to read a config value."
+        )

     scope = determine_scope(params)
     cwd = determine_cwd(scope, params)

     base_args = [git_path, "config", "--includes"]
-    if scope == 'file':
-        base_args.append('-f')
-        base_args.append(params['file'])
+    if scope == "file":
+        base_args.append("-f")
+        base_args.append(params["file"])
     elif scope:
         base_args.append(f"--{scope}")

@@ -194,12 +196,12 @@ def main():

     if rc >= 2:
         # If the return code is 1, it just means the option hasn't been set yet, which is fine.
-        module.fail_json(rc=rc, msg=err, cmd=' '.join(list_args))
+        module.fail_json(rc=rc, msg=err, cmd=" ".join(list_args))

     old_values = out.rstrip().splitlines()

     if unset and not out:
-        module.exit_json(changed=False, msg='no setting to unset')
+        module.exit_json(changed=False, msg="no setting to unset")
     elif new_value in old_values and (len(old_values) == 1 or add_mode == "add") and not unset:
         module.exit_json(changed=False, msg="")

@@ -227,21 +229,21 @@ def main():
         after_values = [new_value]

     module.exit_json(
-        msg='setting changed',
+        msg="setting changed",
         diff=dict(
-            before_header=' '.join(set_args),
+            before_header=" ".join(set_args),
             before=build_diff_value(old_values),
-            after_header=' '.join(set_args),
+            after_header=" ".join(set_args),
             after=build_diff_value(after_values),
         ),
-        changed=True
+        changed=True,
     )


 def determine_scope(params):
-    if params['scope']:
-        return params['scope']
-    return 'system'
+    if params["scope"]:
+        return params["scope"]
+    return "system"


 def build_diff_value(value):
@@ -253,11 +255,11 @@ def build_diff_value(value):


 def determine_cwd(scope, params):
-    if scope == 'local':
-        return params['repo']
+    if scope == "local":
+        return params["repo"]
     # Run from root directory to avoid accidentally picking up any local config settings
     return "/"


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/git_config_info.py b/plugins/modules/git_config_info.py
index dbfcf69cdbf..6e6ee25dbb0 100644
--- a/plugins/modules/git_config_info.py
+++ b/plugins/modules/git_config_info.py
@@ -123,7 +123,7 @@ def main():

     # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
     # Set the locale to C to ensure consistent messages.
-    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+    module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C")

     name = module.params["name"]
     path = module.params["path"]
diff --git a/plugins/modules/github_deploy_key.py b/plugins/modules/github_deploy_key.py
index 8f72e324a4e..be004273da4 100644
--- a/plugins/modules/github_deploy_key.py
+++ b/plugins/modules/github_deploy_key.py
@@ -188,29 +188,29 @@ class GithubDeployKey:
     def __init__(self, module):
         self.module = module
-        self.github_url = self.module.params['github_url']
-        self.name = module.params['name']
-        self.key = module.params['key']
-        self.state = module.params['state']
-        self.read_only = module.params.get('read_only', True)
-        self.force = module.params.get('force', False)
-        self.username = module.params.get('username', None)
-        self.password = module.params.get('password', None)
-        self.token = module.params.get('token', None)
-        self.otp = module.params.get('otp', None)
+        self.github_url = self.module.params["github_url"]
+        self.name = module.params["name"]
+        self.key = module.params["key"]
+        self.state = module.params["state"]
+        self.read_only = module.params.get("read_only", True)
+        self.force = module.params.get("force", False)
+        self.username = module.params.get("username", None)
+        self.password = module.params.get("password", None)
+        self.token = module.params.get("token", None)
+        self.otp = module.params.get("otp", None)

     @property
     def url(self):
-        owner = self.module.params['owner']
-        repo = self.module.params['repo']
+        owner = self.module.params["owner"]
+        repo = self.module.params["repo"]
         return f"{self.github_url}/repos/{owner}/{repo}/keys"

     @property
     def headers(self):
         if self.username is not None and self.password is not None:
-            self.module.params['url_username'] = self.username
-            self.module.params['url_password'] = self.password
-            self.module.params['force_basic_auth'] = True
+            self.module.params["url_username"] = self.username
+            self.module.params["url_password"] = self.password
+            self.module.params["force_basic_auth"] = True
             if self.otp is not None:
                 return {"X-GitHub-OTP": self.otp}
         elif self.token is not None:
@@ -226,10 +226,10 @@ def paginate(self, url):
                 yield self.module.from_json(resp.read())

                 links = {}
-                for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info.get("link", '')):
+                for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info.get("link", "")):
                     links[y] = x

-                url = links.get('next')
+                url = links.get("next")
             else:
                 self.handle_error(method="GET", info=info)

@@ -240,7 +240,7 @@ def get_existing_key(self):
                     existing_key_id = str(i["id"])
                     if i["key"].split() == self.key.split()[:2]:
                         return existing_key_id
-                    elif i['title'] == self.name and self.force:
+                    elif i["title"] == self.name and self.force:
                         return existing_key_id
             else:
                 return None
@@ -248,7 +248,14 @@ def get_existing_key(self):
     def add_new_key(self):
         request_body = {"title": self.name, "key": self.key, "read_only": self.read_only}

-        resp, info = fetch_url(self.module, self.url, data=self.module.jsonify(request_body), headers=self.headers, method="POST", timeout=30)
+        resp, info = fetch_url(
+            self.module,
+            self.url,
+            data=self.module.jsonify(request_body),
+            headers=self.headers,
+            method="POST",
+            timeout=30,
+        )

         status_code = info["status"]

@@ -272,58 +279,59 @@ def remove_existing_key(self, key_id):
         status_code = info["status"]

         if status_code == 204:
-            if self.state == 'absent':
+            if self.state == "absent":
                 self.module.exit_json(changed=True, msg="Deploy key successfully deleted", id=key_id)
         else:
             self.handle_error(method="DELETE", info=info, key_id=key_id)

     def handle_error(self, method, info, key_id=None):
-        status_code = info['status']
-        body = info.get('body')
+        status_code = info["status"]
+        body = info.get("body")
         if body:
-            err = self.module.from_json(body)['message']
+            err = self.module.from_json(body)["message"]
         else:
             err = None

         if status_code == 401:
-            self.module.fail_json(msg=f"Failed to connect to {self.github_url} due to invalid credentials", http_status_code=status_code, error=err)
+            self.module.fail_json(
+                msg=f"Failed to connect to {self.github_url} due to invalid credentials",
+                http_status_code=status_code,
+                error=err,
+            )
         elif status_code == 404:
             self.module.fail_json(msg="GitHub repository does not exist", http_status_code=status_code, error=err)
         else:
             if method == "GET":
-                self.module.fail_json(msg="Failed to retrieve existing deploy keys", http_status_code=status_code, error=err)
+                self.module.fail_json(
+                    msg="Failed to retrieve existing deploy keys", http_status_code=status_code, error=err
+                )
             elif method == "POST":
                 self.module.fail_json(msg="Failed to add deploy key", http_status_code=status_code, error=err)
             elif method == "DELETE":
-                self.module.fail_json(msg="Failed to delete existing deploy key", id=key_id, http_status_code=status_code, error=err)
+                self.module.fail_json(
+                    msg="Failed to delete existing deploy key", id=key_id, http_status_code=status_code, error=err
+                )


 def main():
     module = AnsibleModule(
         argument_spec=dict(
-            github_url=dict(type='str', default="https://api.github.com"),
-            owner=dict(required=True, type='str', aliases=['account', 'organization']),
-            repo=dict(required=True, type='str', aliases=['repository']),
-            name=dict(required=True, type='str', aliases=['title', 'label']),
-            key=dict(required=True, type='str', no_log=False),
-            read_only=dict(type='bool', default=True),
-            state=dict(default='present', choices=['present', 'absent']),
-            force=dict(type='bool', default=False),
-            username=dict(type='str'),
-            password=dict(type='str', no_log=True),
-            otp=dict(type='int', no_log=True),
-            token=dict(type='str', no_log=True)
+            github_url=dict(type="str", default="https://api.github.com"),
+            owner=dict(required=True, type="str", aliases=["account", "organization"]),
+            repo=dict(required=True, type="str", aliases=["repository"]),
+            name=dict(required=True, type="str", aliases=["title", "label"]),
+            key=dict(required=True, type="str", no_log=False),
+            read_only=dict(type="bool", default=True),
+            state=dict(default="present", choices=["present", "absent"]),
+            force=dict(type="bool", default=False),
+            username=dict(type="str"),
+            password=dict(type="str", no_log=True),
+            otp=dict(type="int", no_log=True),
+            token=dict(type="str", no_log=True),
         ),
-        mutually_exclusive=[
-            ['password', 'token']
-        ],
-        required_together=[
-            ['username', 'password'],
-            ['otp', 'username', 'password']
-        ],
-        required_one_of=[
-            ['username', 'token']
-        ],
+        mutually_exclusive=[["password", "token"]],
+        required_together=[["username", "password"], ["otp", "username", "password"]],
+        required_one_of=[["username", "token"]],
         supports_check_mode=True,
     )

@@ -337,17 +345,17 @@ def main():
         module.exit_json(changed=False)

     # to forcefully modify an existing key, the existing key must be deleted first
-    if deploy_key.state == 'absent' or deploy_key.force:
+    if deploy_key.state == "absent" or deploy_key.force:
         key_id = deploy_key.get_existing_key()
         if key_id is not None:
             deploy_key.remove_existing_key(key_id)
-        elif deploy_key.state == 'absent':
+        elif deploy_key.state == "absent":
             module.exit_json(changed=False, msg="Deploy key does not exist")

     if deploy_key.state == "present":
         deploy_key.add_new_key()


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/github_issue.py b/plugins/modules/github_issue.py
index cf23baf36f1..8ad859d0579 100644
--- a/plugins/modules/github_issue.py
+++ b/plugins/modules/github_issue.py
@@ -81,42 +81,42 @@ def main():
         argument_spec=dict(
             organization=dict(required=True),
             repo=dict(required=True),
-            issue=dict(type='int', required=True),
-            action=dict(choices=['get_status'], default='get_status'),
+            issue=dict(type="int", required=True),
+            action=dict(choices=["get_status"], default="get_status"),
         ),
         supports_check_mode=True,
     )

-    organization = module.params['organization']
-    repo = module.params['repo']
-    issue = module.params['issue']
-    action = module.params['action']
+    organization = module.params["organization"]
+    repo = module.params["repo"]
+    issue = module.params["issue"]
+    action = module.params["action"]

     result = dict()

     headers = {
-        'Content-Type': 'application/json',
-        'Accept': 'application/vnd.github.v3+json',
+        "Content-Type": "application/json",
+        "Accept": "application/vnd.github.v3+json",
     }

     url = f"https://api.github.com/repos/{organization}/{repo}/issues/{issue}"

     response, info = fetch_url(module, url, headers=headers)
-    if not (200 <= info['status'] < 400):
-        if info['status'] == 404:
+    if not (200 <= info["status"] < 400):
+        if info["status"] == 404:
             module.fail_json(msg=f"Failed to find issue {issue}")
         module.fail_json(msg=f"Failed to send request to {url}: {info['msg']}")

     gh_obj = json.loads(response.read())

-    if action == 'get_status' or action is None:
+    if action == "get_status" or action is None:
         if module.check_mode:
             result.update(changed=True)
         else:
-            result.update(changed=True, issue_status=gh_obj['state'])
+            result.update(changed=True, issue_status=gh_obj["state"])

     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/github_key.py b/plugins/modules/github_key.py
index e76f9c47f94..2284e677029 100644
--- a/plugins/modules/github_key.py
+++ b/plugins/modules/github_key.py
@@ -150,8 +150,8 @@ def json(self):
     def links(self):
         links = {}
-        if 'link' in self.info:
-            link_header = self.info['link']
+        if "link" in self.info:
+            link_header = self.info["link"]
             matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header)
             for url, rel in matches:
                 links[rel] = url
@@ -162,19 +162,17 @@ class GitHubSession:
     def __init__(self, module, token, api_url):
         self.module = module
         self.token = token
-        self.api_url = api_url.rstrip('/')
+        self.api_url = api_url.rstrip("/")

     def request(self, method, url, data=None):
         headers = {
-            'Authorization': f'token {self.token}',
-            'Content-Type': 'application/json',
-            'Accept': 'application/vnd.github.v3+json',
+            "Authorization": f"token {self.token}",
+            "Content-Type": "application/json",
+            "Accept": "application/vnd.github.v3+json",
         }

-        response, info = fetch_url(
-            self.module, url, method=method, data=data, headers=headers)
-        if not (200 <= info['status'] < 400):
-            self.module.fail_json(
-                msg=f" failed to send request {method} to {url}: {info['msg']}")
+        response, info = fetch_url(self.module, url, method=method, data=data, headers=headers)
+        if not (200 <= info["status"] < 400):
+            self.module.fail_json(msg=f" failed to send request {method} to {url}: {info['msg']}")
         return GitHubResponse(response, info)

@@ -182,9 +180,9 @@ def get_all_keys(session):
url = f"{session.api_url}/user/keys" result = [] while url: - r = session.request('GET', url) + r = session.request("GET", url) result.extend(r.json()) - url = r.links().get('next') + url = r.links().get("next") return result @@ -192,19 +190,18 @@ def create_key(session, name, pubkey, check_mode): if check_mode: now_t = now() return { - 'id': 0, - 'key': pubkey, - 'title': name, - 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY', - 'created_at': datetime.datetime.strftime(now_t, '%Y-%m-%dT%H:%M:%SZ'), - 'read_only': False, - 'verified': False + "id": 0, + "key": pubkey, + "title": name, + "url": "http://example.com/CHECK_MODE_GITHUB_KEY", + "created_at": datetime.datetime.strftime(now_t, "%Y-%m-%dT%H:%M:%SZ"), + "read_only": False, + "verified": False, } else: return session.request( - 'POST', - f"{session.api_url}/user/keys", - data=json.dumps({'title': name, 'key': pubkey})).json() + "POST", f"{session.api_url}/user/keys", data=json.dumps({"title": name, "key": pubkey}) + ).json() def delete_keys(session, to_delete, check_mode): @@ -212,29 +209,30 @@ def delete_keys(session, to_delete, check_mode): return for key in to_delete: - session.request('DELETE', f"{session.api_url}/user/keys/{key['id']}") + session.request("DELETE", f"{session.api_url}/user/keys/{key['id']}") def ensure_key_absent(session, name, check_mode): - to_delete = [key for key in get_all_keys(session) if key['title'] == name] + to_delete = [key for key in get_all_keys(session) if key["title"] == name] delete_keys(session, to_delete, check_mode=check_mode) - return {'changed': bool(to_delete), - 'deleted_keys': to_delete} + return {"changed": bool(to_delete), "deleted_keys": to_delete} def ensure_key_present(module, session, name, pubkey, force, check_mode): all_keys = get_all_keys(session) - matching_keys = [k for k in all_keys if k['title'] == name] + matching_keys = [k for k in all_keys if k["title"] == name] deleted_keys = [] - new_signature = pubkey.split(' ')[1] + new_signature = pubkey.split(" ")[1] for key in all_keys: - existing_signature = key['key'].split(' ')[1] - if new_signature == existing_signature and key['title'] != name: - module.fail_json(msg=f"another key with the same content is already registered under the name |{key['title']}|") + existing_signature = key["key"].split(" ")[1] + if new_signature == existing_signature and key["title"] != name: + module.fail_json( + msg=f"another key with the same content is already registered under the name |{key['title']}|" + ) - if matching_keys and force and matching_keys[0]['key'].split(' ')[1] != new_signature: + if matching_keys and force and matching_keys[0]["key"].split(" ")[1] != new_signature: delete_keys(session, matching_keys, check_mode=check_mode) (deleted_keys, matching_keys) = (matching_keys, []) @@ -244,51 +242,50 @@ def ensure_key_present(module, session, name, pubkey, force, check_mode): key = matching_keys[0] return { - 'changed': bool(deleted_keys or not matching_keys), - 'deleted_keys': deleted_keys, - 'matching_keys': matching_keys, - 'key': key + "changed": bool(deleted_keys or not matching_keys), + "deleted_keys": deleted_keys, + "matching_keys": matching_keys, + "key": key, } def main(): argument_spec = { - 'token': {'required': True, 'no_log': True}, - 'name': {'required': True}, - 'pubkey': {}, - 'state': {'choices': ['present', 'absent'], 'default': 'present'}, - 'force': {'default': True, 'type': 'bool'}, - 'api_url': {'default': 'https://api.github.com', 'type': 'str'}, + "token": {"required": True, "no_log": True}, + "name": 
{"required": True}, + "pubkey": {}, + "state": {"choices": ["present", "absent"], "default": "present"}, + "force": {"default": True, "type": "bool"}, + "api_url": {"default": "https://api.github.com", "type": "str"}, } module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, ) - token = module.params['token'] - name = module.params['name'] - state = module.params['state'] - force = module.params['force'] - pubkey = module.params.get('pubkey') - api_url = module.params.get('api_url') + token = module.params["token"] + name = module.params["name"] + state = module.params["state"] + force = module.params["force"] + pubkey = module.params.get("pubkey") + api_url = module.params.get("api_url") if pubkey: - pubkey_parts = pubkey.split(' ') + pubkey_parts = pubkey.split(" ") # Keys consist of a protocol, the key data, and an optional comment. if len(pubkey_parts) < 2: module.fail_json(msg='"pubkey" parameter has an invalid format') - elif state == 'present': + elif state == "present": module.fail_json(msg='"pubkey" is required when state=present') session = GitHubSession(module, token, api_url) - if state == 'present': - result = ensure_key_present(module, session, name, pubkey, force=force, - check_mode=module.check_mode) - elif state == 'absent': + if state == "present": + result = ensure_key_present(module, session, name, pubkey, force=force, check_mode=module.check_mode) + elif state == "absent": result = ensure_key_absent(session, name, check_mode=module.check_mode) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/github_release.py b/plugins/modules/github_release.py index db1ec4b5e78..e7c1fd132ef 100644 --- a/plugins/modules/github_release.py +++ b/plugins/modules/github_release.py @@ -139,36 +139,33 @@ def main(): user=dict(required=True), password=dict(no_log=True), token=dict(no_log=True), - action=dict( - required=True, choices=['latest_release', 'create_release']), - tag=dict(type='str'), - target=dict(type='str'), - name=dict(type='str'), - body=dict(type='str'), - draft=dict(type='bool', default=False), - prerelease=dict(type='bool', default=False), + action=dict(required=True, choices=["latest_release", "create_release"]), + tag=dict(type="str"), + target=dict(type="str"), + name=dict(type="str"), + body=dict(type="str"), + draft=dict(type="bool", default=False), + prerelease=dict(type="bool", default=False), ), supports_check_mode=True, - mutually_exclusive=(('password', 'token'),), - required_if=[('action', 'create_release', ['tag']), - ('action', 'create_release', ['password', 'token'], True)], + mutually_exclusive=(("password", "token"),), + required_if=[("action", "create_release", ["tag"]), ("action", "create_release", ["password", "token"], True)], ) if not HAS_GITHUB_API: - module.fail_json(msg=missing_required_lib('github3.py >= 1.0.0a3'), - exception=GITHUB_IMP_ERR) - - repo = module.params['repo'] - user = module.params['user'] - password = module.params['password'] - login_token = module.params['token'] - action = module.params['action'] - tag = module.params.get('tag') - target = module.params.get('target') - name = module.params.get('name') - body = module.params.get('body') - draft = module.params.get('draft') - prerelease = module.params.get('prerelease') + module.fail_json(msg=missing_required_lib("github3.py >= 1.0.0a3"), exception=GITHUB_IMP_ERR) + + repo = module.params["repo"] + user = module.params["user"] + password = module.params["password"] + login_token = 
module.params["token"] + action = module.params["action"] + tag = module.params.get("tag") + target = module.params.get("target") + name = module.params.get("name") + body = module.params.get("body") + draft = module.params.get("draft") + prerelease = module.params.get("prerelease") # login to github try: @@ -191,40 +188,42 @@ def main(): # https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/about-authentication-to-github#githubs-token-formats # # Test if we're actually logged in, but skip this check for some token prefixes - SKIPPED_TOKEN_PREFIXES = ['ghs_'] + SKIPPED_TOKEN_PREFIXES = ["ghs_"] if password or (login_token and not any(login_token.startswith(prefix) for prefix in SKIPPED_TOKEN_PREFIXES)): gh_obj.me() except github3.exceptions.AuthenticationFailed as e: - module.fail_json(msg=f'Failed to connect to GitHub: {e}', - details=f"Please check username and password or token for repository {repo}") + module.fail_json( + msg=f"Failed to connect to GitHub: {e}", + details=f"Please check username and password or token for repository {repo}", + ) except github3.exceptions.GitHubError as e: - module.fail_json(msg=f'GitHub API error: {e}', - details=f"Please check username and password or token for repository {repo}") + module.fail_json( + msg=f"GitHub API error: {e}", details=f"Please check username and password or token for repository {repo}" + ) repository = gh_obj.repository(user, repo) if not repository: module.fail_json(msg=f"Repository {user}/{repo} doesn't exist") - if action == 'latest_release': + if action == "latest_release": release = repository.latest_release() if release: module.exit_json(tag=release.tag_name) else: module.exit_json(tag=None) - if action == 'create_release': + if action == "create_release": release_exists = repository.release_from_tag(tag) if release_exists: module.exit_json(changed=False, msg=f"Release for tag {tag} already exists.") - release = repository.create_release( - tag, target, name, body, draft, prerelease) + release = repository.create_release(tag, target, name, body, draft, prerelease) if release: module.exit_json(changed=True, tag=release.tag_name) else: module.exit_json(changed=False, tag=None) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/github_repo.py b/plugins/modules/github_repo.py index 96c1d10859f..cca1d77da1b 100644 --- a/plugins/modules/github_repo.py +++ b/plugins/modules/github_repo.py @@ -130,6 +130,7 @@ try: from github import Github, GithubException, GithubObject from github.GithubException import UnknownObjectException + HAS_GITHUB_PACKAGE = True except Exception: GITHUB_IMP_ERR = traceback.format_exc() @@ -147,9 +148,7 @@ def authenticate(username=None, password=None, access_token=None, api_url=None): def create_repo(gh, name, organization=None, private=None, description=None, check_mode=False): - result = dict( - changed=False, - repo=dict()) + result = dict(changed=False, repo=dict()) if organization: target = gh.get_organization(organization) else: @@ -158,7 +157,7 @@ def create_repo(gh, name, organization=None, private=None, description=None, che repo = None try: repo = target.get_repo(name=name) - result['repo'] = repo.raw_data + result["repo"] = repo.raw_data except UnknownObjectException: if not check_mode: repo = target.create_repo( @@ -166,27 +165,29 @@ def create_repo(gh, name, organization=None, private=None, description=None, che private=GithubObject.NotSet if private is None else private, description=GithubObject.NotSet if description is None 
else description, ) - result['repo'] = repo.raw_data + result["repo"] = repo.raw_data - result['changed'] = True + result["changed"] = True changes = {} if private is not None: - if repo is None or repo.raw_data['private'] != private: - changes['private'] = private + if repo is None or repo.raw_data["private"] != private: + changes["private"] = private if description is not None: - if repo is None or repo.raw_data['description'] not in (description, description or None): - changes['description'] = description + if repo is None or repo.raw_data["description"] not in (description, description or None): + changes["description"] = description if changes: if not check_mode: repo.edit(**changes) - result['repo'].update({ - 'private': repo._private.value if not check_mode else private, - 'description': repo._description.value if not check_mode else description, - }) - result['changed'] = True + result["repo"].update( + { + "private": repo._private.value if not check_mode else private, + "description": repo._description.value if not check_mode else description, + } + ) + result["changed"] = True return result @@ -201,7 +202,7 @@ def delete_repo(gh, name, organization=None, check_mode=False): repo = target.get_repo(name=name) if not check_mode: repo.delete() - result['changed'] = True + result["changed"] = True except UnknownObjectException: pass @@ -209,61 +210,62 @@ def delete_repo(gh, name, organization=None, check_mode=False): def run_module(params, check_mode=False): - if params['force_defaults']: - params['description'] = params['description'] or '' - params['private'] = params['private'] or False + if params["force_defaults"]: + params["description"] = params["description"] or "" + params["private"] = params["private"] or False gh = authenticate( - username=params['username'], password=params['password'], access_token=params['access_token'], - api_url=params['api_url']) - if params['state'] == "absent": - return delete_repo( - gh=gh, - name=params['name'], - organization=params['organization'], - check_mode=check_mode - ) + username=params["username"], + password=params["password"], + access_token=params["access_token"], + api_url=params["api_url"], + ) + if params["state"] == "absent": + return delete_repo(gh=gh, name=params["name"], organization=params["organization"], check_mode=check_mode) else: return create_repo( gh=gh, - name=params['name'], - organization=params['organization'], - private=params['private'], - description=params['description'], - check_mode=check_mode + name=params["name"], + organization=params["organization"], + private=params["private"], + description=params["description"], + check_mode=check_mode, ) def main(): module_args = dict( - username=dict(type='str'), - password=dict(type='str', no_log=True), - access_token=dict(type='str', no_log=True), - name=dict(type='str', required=True), - state=dict(type='str', default="present", - choices=["present", "absent"]), - organization=dict(type='str', ), - private=dict(type='bool'), - description=dict(type='str'), - api_url=dict(type='str', default='https://api.github.com'), - force_defaults=dict(type='bool'), + username=dict(type="str"), + password=dict(type="str", no_log=True), + access_token=dict(type="str", no_log=True), + name=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["present", "absent"]), + organization=dict( + type="str", + ), + private=dict(type="bool"), + description=dict(type="str"), + api_url=dict(type="str", default="https://api.github.com"), + 
force_defaults=dict(type="bool"), ) module = AnsibleModule( argument_spec=module_args, supports_check_mode=True, - required_together=[('username', 'password')], - required_one_of=[('username', 'access_token')], - mutually_exclusive=[('username', 'access_token')] + required_together=[("username", "password")], + required_one_of=[("username", "access_token")], + mutually_exclusive=[("username", "access_token")], ) - if module.params['force_defaults'] is None: - module.deprecate("'force_defaults=true' is deprecated and will not be allowed in community.general 13.0.0, use 'force_defaults=false' instead", - version="13.0.0", collection_name="community.general") - module.params['force_defaults'] = True + if module.params["force_defaults"] is None: + module.deprecate( + "'force_defaults=true' is deprecated and will not be allowed in community.general 13.0.0, use 'force_defaults=false' instead", + version="13.0.0", + collection_name="community.general", + ) + module.params["force_defaults"] = True if not HAS_GITHUB_PACKAGE: - module.fail_json(msg=missing_required_lib( - "PyGithub"), exception=GITHUB_IMP_ERR) + module.fail_json(msg=missing_required_lib("PyGithub"), exception=GITHUB_IMP_ERR) try: result = run_module(module.params, module.check_mode) @@ -274,5 +276,5 @@ def main(): module.fail_json(msg=f"Unexpected error. {e}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/github_webhook.py b/plugins/modules/github_webhook.py index eef360ff623..dcb3d2cc959 100644 --- a/plugins/modules/github_webhook.py +++ b/plugins/modules/github_webhook.py @@ -143,6 +143,7 @@ GITHUB_IMP_ERR = None try: import github + HAS_GITHUB = True except ImportError: GITHUB_IMP_ERR = traceback.format_exc() @@ -155,7 +156,7 @@ def _create_hook_config(module): hook_config = { "url": module.params["url"], "content_type": module.params["content_type"], - "insecure_ssl": "1" if module.params["insecure_ssl"] else "0" + "insecure_ssl": "1" if module.params["insecure_ssl"] else "0", } secret = module.params.get("secret") @@ -169,10 +170,8 @@ def create_hook(repo, module): config = _create_hook_config(module) try: hook = repo.create_hook( - name="web", - config=config, - events=module.params["events"], - active=module.params["active"]) + name="web", config=config, events=module.params["events"], active=module.params["active"] + ) except github.GithubException as err: module.fail_json(msg=f"Unable to create hook for repository {repo.full_name}: {err}") @@ -184,11 +183,7 @@ def update_hook(repo, hook, module): config = _create_hook_config(module) try: hook.update() - hook.edit( - name="web", - config=config, - events=module.params["events"], - active=module.params["active"]) + hook.edit(name="web", config=config, events=module.params["events"], active=module.params["active"]) changed = hook.update() except github.GithubException as err: @@ -201,32 +196,36 @@ def update_hook(repo, hook, module): def main(): module = AnsibleModule( argument_spec=dict( - repository=dict(type='str', required=True, aliases=['repo']), - url=dict(type='str', required=True), - content_type=dict(type='str', choices=('json', 'form'), default='form'), - secret=dict(type='str', no_log=True), - insecure_ssl=dict(type='bool', default=False), - events=dict(type='list', elements='str', ), - active=dict(type='bool', default=True), - state=dict(type='str', choices=('absent', 'present'), default='present'), - user=dict(type='str', required=True), - password=dict(type='str', no_log=True), - token=dict(type='str', no_log=True), - 
github_url=dict(type='str', default="https://api.github.com")), - mutually_exclusive=(('password', 'token'),), + repository=dict(type="str", required=True, aliases=["repo"]), + url=dict(type="str", required=True), + content_type=dict(type="str", choices=("json", "form"), default="form"), + secret=dict(type="str", no_log=True), + insecure_ssl=dict(type="bool", default=False), + events=dict( + type="list", + elements="str", + ), + active=dict(type="bool", default=True), + state=dict(type="str", choices=("absent", "present"), default="present"), + user=dict(type="str", required=True), + password=dict(type="str", no_log=True), + token=dict(type="str", no_log=True), + github_url=dict(type="str", default="https://api.github.com"), + ), + mutually_exclusive=(("password", "token"),), required_one_of=(("password", "token"),), required_if=(("state", "present", ("events",)),), ) if not HAS_GITHUB: - module.fail_json(msg=missing_required_lib('PyGithub'), - exception=GITHUB_IMP_ERR) + module.fail_json(msg=missing_required_lib("PyGithub"), exception=GITHUB_IMP_ERR) try: github_conn = github.Github( module.params["user"], module.params.get("password") or module.params.get("token"), - base_url=module.params["github_url"]) + base_url=module.params["github_url"], + ) except github.GithubException as err: module.fail_json(msg=f"Could not connect to GitHub at {module.params['github_url']}: {err}") @@ -236,11 +235,13 @@ def main(): module.fail_json(msg=f"Could not authenticate to GitHub at {module.params['github_url']}: {err}") except github.UnknownObjectException as err: module.fail_json( - msg=f"Could not find repository {module.params['repository']} in GitHub at {module.params['github_url']}: {err}") + msg=f"Could not find repository {module.params['repository']} in GitHub at {module.params['github_url']}: {err}" + ) except Exception as err: module.fail_json( msg=f"Could not fetch repository {module.params['repository']} from GitHub at {module.params['github_url']}: {err}", - exception=traceback.format_exc()) + exception=traceback.format_exc(), + ) hook = None try: @@ -260,8 +261,7 @@ def main(): try: hook.delete() except github.GithubException as err: - module.fail_json( - msg=f"Unable to delete hook from repository {repo.full_name}: {err}") + module.fail_json(msg=f"Unable to delete hook from repository {repo.full_name}: {err}") else: changed = True elif hook is not None and module.params["state"] == "present": @@ -271,5 +271,5 @@ def main(): module.exit_json(changed=changed, **data) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/github_webhook_info.py b/plugins/modules/github_webhook_info.py index 0eff211fe47..e7b6a01c839 100644 --- a/plugins/modules/github_webhook_info.py +++ b/plugins/modules/github_webhook_info.py @@ -92,6 +92,7 @@ GITHUB_IMP_ERR = None try: import github + HAS_GITHUB = True except ImportError: GITHUB_IMP_ERR = traceback.format_exc() @@ -119,25 +120,26 @@ def _munge_hook(hook_obj): def main(): module = AnsibleModule( argument_spec=dict( - repository=dict(type='str', required=True, aliases=["repo"]), - user=dict(type='str', required=True), - password=dict(type='str', no_log=True), - token=dict(type='str', no_log=True), - github_url=dict( - type='str', default="https://api.github.com")), - mutually_exclusive=(('password', 'token'), ), - required_one_of=(("password", "token"), ), - supports_check_mode=True) + repository=dict(type="str", required=True, aliases=["repo"]), + user=dict(type="str", required=True), + password=dict(type="str", 
no_log=True), + token=dict(type="str", no_log=True), + github_url=dict(type="str", default="https://api.github.com"), + ), + mutually_exclusive=(("password", "token"),), + required_one_of=(("password", "token"),), + supports_check_mode=True, + ) if not HAS_GITHUB: - module.fail_json(msg=missing_required_lib('PyGithub'), - exception=GITHUB_IMP_ERR) + module.fail_json(msg=missing_required_lib("PyGithub"), exception=GITHUB_IMP_ERR) try: github_conn = github.Github( module.params["user"], module.params.get("password") or module.params.get("token"), - base_url=module.params["github_url"]) + base_url=module.params["github_url"], + ) except github.GithubException as err: module.fail_json(msg=f"Could not connect to GitHub at {module.params['github_url']}: {err}") @@ -147,21 +149,24 @@ def main(): module.fail_json(msg=f"Could not authenticate to GitHub at {module.params['github_url']}: {err}") except github.UnknownObjectException as err: module.fail_json( - msg=f"Could not find repository {module.params['repository']} in GitHub at {module.params['github_url']}: {err}") + msg=f"Could not find repository {module.params['repository']} in GitHub at {module.params['github_url']}: {err}" + ) except Exception as err: module.fail_json( msg=f"Could not fetch repository {module.params['repository']} from GitHub at {module.params['github_url']}: {err}", - exception=traceback.format_exc()) + exception=traceback.format_exc(), + ) try: hooks = [_munge_hook(h) for h in repo.get_hooks()] except github.GithubException as err: module.fail_json( msg=f"Unable to get hooks from repository {module.params['repository']}: {err}", - exception=traceback.format_exc()) + exception=traceback.format_exc(), + ) module.exit_json(changed=False, hooks=hooks) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_branch.py b/plugins/modules/gitlab_branch.py index a44093ba18a..2beb2322b0d 100644 --- a/plugins/modules/gitlab_branch.py +++ b/plugins/modules/gitlab_branch.py @@ -80,12 +80,13 @@ from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, gitlab + auth_argument_spec, + gitlab_authentication, + gitlab, ) class GitlabBranch: - def __init__(self, module, project, gitlab_instance): self.repo = gitlab_instance self._module = module @@ -104,7 +105,7 @@ def get_branch(self, branch): return False def create_branch(self, branch, ref_branch): - return self.project.branches.create({'branch': branch, 'ref': ref_branch}) + return self.project.branches.create({"branch": branch, "ref": ref_branch}) def delete_branch(self, branch): return branch.delete() @@ -114,46 +115,44 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update( - project=dict(type='str', required=True), - branch=dict(type='str', required=True), - ref_branch=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), + project=dict(type="str", required=True), + branch=dict(type="str", required=True), + ref_branch=dict(type="str"), + state=dict(type="str", default="present", choices=["absent", "present"]), ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], + ["api_username", 
"api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], ], required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ["api_username", "api_password"], ], + required_one_of=[["api_username", "api_token", "api_oauth_token", "api_job_token"]], required_if=[ - ['state', 'present', ['ref_branch'], True], + ["state", "present", ["ref_branch"], True], ], - supports_check_mode=False + supports_check_mode=False, ) # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) - project = module.params['project'] - branch = module.params['branch'] - ref_branch = module.params['ref_branch'] - state = module.params['state'] + project = module.params["project"] + branch = module.params["branch"] + ref_branch = module.params["ref_branch"] + state = module.params["state"] gitlab_version = gitlab.__version__ - if LooseVersion(gitlab_version) < LooseVersion('2.3.0'): + if LooseVersion(gitlab_version) < LooseVersion("2.3.0"): module.fail_json( msg=f"community.general.gitlab_branch requires python-gitlab Python module >= 2.3.0 (installed version: [{gitlab_version}])." - " Please upgrade python-gitlab to version 2.3.0 or above." + " Please upgrade python-gitlab to version 2.3.0 or above." ) this_gitlab = GitlabBranch(module=module, project=project, gitlab_instance=gitlab_instance) @@ -178,5 +177,5 @@ def main(): module.exit_json(changed=False, msg="No changes are needed.") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_deploy_key.py b/plugins/modules/gitlab_deploy_key.py index b5ea900f79e..74dadf518ce 100644 --- a/plugins/modules/gitlab_deploy_key.py +++ b/plugins/modules/gitlab_deploy_key.py @@ -116,7 +116,11 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, find_project, gitlab_authentication, gitlab, list_all_kwargs + auth_argument_spec, + find_project, + gitlab_authentication, + gitlab, + list_all_kwargs, ) @@ -126,13 +130,14 @@ def __init__(self, module, gitlab_instance): self._gitlab = gitlab_instance self.deploy_key_object = None - ''' + """ @param project Project object @param key_title Title of the key @param key_key String of the key @param key_can_push Option of the deploy_key @param options Deploy key options - ''' + """ + def create_or_update_deploy_key(self, project, key_title, key_key, options): changed = False @@ -146,15 +151,14 @@ def create_or_update_deploy_key(self, project, key_title, key_key, options): # Because we have already call exists_deploy_key in main() if self.deploy_key_object is None: - deploy_key = self.create_deploy_key(project, { - 'title': key_title, - 'key': key_key, - 'can_push': options['can_push']}) + deploy_key = self.create_deploy_key( + project, {"title": key_title, "key": key_key, "can_push": options["can_push"]} + ) changed = True else: - changed, deploy_key = self.update_deploy_key(self.deploy_key_object, { - 'title': key_title, - 'can_push': options['can_push']}) + changed, deploy_key = self.update_deploy_key( + self.deploy_key_object, {"title": key_title, "can_push": options["can_push"]} + ) self.deploy_key_object = deploy_key if changed: @@ -169,25 +173,27 @@ def create_or_update_deploy_key(self, project, key_title, key_key, options): else: return False - ''' + """ @param 
project Project Object @param arguments Attributes of the deploy_key - ''' + """ + def create_deploy_key(self, project, arguments): if self._module.check_mode: return True try: deploy_key = project.keys.create(arguments) - except (gitlab.exceptions.GitlabCreateError) as e: + except gitlab.exceptions.GitlabCreateError as e: self._module.fail_json(msg=f"Failed to create deploy key: {e} ") return deploy_key - ''' + """ @param deploy_key Deploy Key Object @param arguments Attributes of the deploy_key - ''' + """ + def update_deploy_key(self, deploy_key, arguments): changed = False @@ -199,19 +205,21 @@ def update_deploy_key(self, deploy_key, arguments): return (changed, deploy_key) - ''' + """ @param project Project object @param key_title Title of the key - ''' + """ + def find_deploy_key(self, project, key_title): for deploy_key in project.keys.list(**list_all_kwargs): if deploy_key.title == key_title: return deploy_key - ''' + """ @param project Project object @param key_title Title of the key - ''' + """ + def exists_deploy_key(self, project, key_title): # When project exists, object will be stored in self.project_object. deploy_key = self.find_deploy_key(project, key_title) @@ -230,40 +238,38 @@ def delete_deploy_key(self): def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - state=dict(type='str', default="present", choices=["absent", "present"]), - project=dict(type='str', required=True), - key=dict(type='str', required=True, no_log=False), - can_push=dict(type='bool', default=False), - title=dict(type='str', required=True) - )) + argument_spec.update( + dict( + state=dict(type="str", default="present", choices=["absent", "present"]), + project=dict(type="str", required=True), + key=dict(type="str", required=True, no_log=False), + can_push=dict(type="bool", default=False), + title=dict(type="str", required=True), + ) + ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ], - required_together=[ - ['api_username', 'api_password'] - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], ], + required_together=[["api_username", "api_password"]], + required_one_of=[["api_username", "api_token", "api_oauth_token", "api_job_token"]], supports_check_mode=True, ) # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) - state = module.params['state'] - project_identifier = module.params['project'] - key_title = module.params['title'] - key_keyfile = module.params['key'] - key_can_push = module.params['can_push'] + state = module.params["state"] + project_identifier = module.params["project"] + key_title = module.params["title"] + key_keyfile = module.params["key"] + key_can_push = module.params["can_push"] gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance) @@ -274,22 +280,27 @@ def main(): deploy_key_exists = gitlab_deploy_key.exists_deploy_key(project, key_title) - if state == 'absent': + if state == "absent": if deploy_key_exists: gitlab_deploy_key.delete_deploy_key() module.exit_json(changed=True, msg=f"Successfully deleted deploy key {key_title}") 
else: module.exit_json(changed=False, msg="Deploy key deleted or does not exists") - if state == 'present': - if gitlab_deploy_key.create_or_update_deploy_key(project, key_title, key_keyfile, {'can_push': key_can_push}): - - module.exit_json(changed=True, msg=f"Successfully created or updated the deploy key {key_title}", - deploy_key=gitlab_deploy_key.deploy_key_object._attrs) + if state == "present": + if gitlab_deploy_key.create_or_update_deploy_key(project, key_title, key_keyfile, {"can_push": key_can_push}): + module.exit_json( + changed=True, + msg=f"Successfully created or updated the deploy key {key_title}", + deploy_key=gitlab_deploy_key.deploy_key_object._attrs, + ) else: - module.exit_json(changed=False, msg=f"No need to update the deploy key {key_title}", - deploy_key=gitlab_deploy_key.deploy_key_object._attrs) + module.exit_json( + changed=False, + msg=f"No need to update the deploy key {key_title}", + deploy_key=gitlab_deploy_key.deploy_key_object._attrs, + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_group.py b/plugins/modules/gitlab_group.py index 02c01511afd..c55a85eebbb 100644 --- a/plugins/modules/gitlab_group.py +++ b/plugins/modules/gitlab_group.py @@ -248,7 +248,10 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, find_group, gitlab_authentication, gitlab + auth_argument_spec, + find_group, + gitlab_authentication, + gitlab, ) @@ -258,60 +261,62 @@ def __init__(self, module, gitlab_instance): self._gitlab = gitlab_instance self.group_object = None - ''' + """ @param group Group object - ''' + """ + def get_group_id(self, group): if group is not None: return group.id return None - ''' + """ @param name Name of the group @param parent Parent group full path @param options Group options - ''' + """ + def create_or_update_group(self, name, parent, options): changed = False payload = { - 'auto_devops_enabled': options['auto_devops_enabled'], - 'default_branch': options['default_branch'], - 'description': options['description'], - 'lfs_enabled': options['lfs_enabled'], - 'membership_lock': options['membership_lock'], - 'mentions_disabled': options['mentions_disabled'], - 'name': name, - 'path': options['path'], - 'prevent_forking_outside_group': options['prevent_forking_outside_group'], - 'project_creation_level': options['project_creation_level'], - 'request_access_enabled': options['request_access_enabled'], - 'require_two_factor_authentication': options['require_two_factor_authentication'], - 'share_with_group_lock': options['share_with_group_lock'], - 'subgroup_creation_level': options['subgroup_creation_level'], - 'visibility': options['visibility'], - 'wiki_access_level': options['wiki_access_level'], + "auto_devops_enabled": options["auto_devops_enabled"], + "default_branch": options["default_branch"], + "description": options["description"], + "lfs_enabled": options["lfs_enabled"], + "membership_lock": options["membership_lock"], + "mentions_disabled": options["mentions_disabled"], + "name": name, + "path": options["path"], + "prevent_forking_outside_group": options["prevent_forking_outside_group"], + "project_creation_level": options["project_creation_level"], + "request_access_enabled": options["request_access_enabled"], + "require_two_factor_authentication": options["require_two_factor_authentication"], + "share_with_group_lock": options["share_with_group_lock"], + "subgroup_creation_level": 
options["subgroup_creation_level"], + "visibility": options["visibility"], + "wiki_access_level": options["wiki_access_level"], } - if options.get('enabled_git_access_protocol') and parent is None: - payload['enabled_git_access_protocol'] = options['enabled_git_access_protocol'] - if options.get('lock_duo_features_enabled') and parent is None: - payload['lock_duo_features_enabled'] = options['lock_duo_features_enabled'] - if options.get('prevent_sharing_groups_outside_hierarchy') and parent is None: - payload['prevent_sharing_groups_outside_hierarchy'] = options['prevent_sharing_groups_outside_hierarchy'] - if options.get('service_access_tokens_expiration_enforced') and parent is None: - payload['service_access_tokens_expiration_enforced'] = options['service_access_tokens_expiration_enforced'] - if options.get('two_factor_grace_period'): - payload['two_factor_grace_period'] = int(options['two_factor_grace_period']) + if options.get("enabled_git_access_protocol") and parent is None: + payload["enabled_git_access_protocol"] = options["enabled_git_access_protocol"] + if options.get("lock_duo_features_enabled") and parent is None: + payload["lock_duo_features_enabled"] = options["lock_duo_features_enabled"] + if options.get("prevent_sharing_groups_outside_hierarchy") and parent is None: + payload["prevent_sharing_groups_outside_hierarchy"] = options["prevent_sharing_groups_outside_hierarchy"] + if options.get("service_access_tokens_expiration_enforced") and parent is None: + payload["service_access_tokens_expiration_enforced"] = options["service_access_tokens_expiration_enforced"] + if options.get("two_factor_grace_period"): + payload["two_factor_grace_period"] = int(options["two_factor_grace_period"]) # Because we have already call userExists in main() if self.group_object is None: - payload['parent_id'] = self.get_group_id(parent) + payload["parent_id"] = self.get_group_id(parent) group = self.create_group(payload) # add avatar to group - if options['avatar_path']: + if options["avatar_path"]: try: - group.avatar = open(options['avatar_path'], 'rb') + group.avatar = open(options["avatar_path"], "rb") except IOError as e: self._module.fail_json(msg=f"Cannot open {options['avatar_path']}: {e}") changed = True @@ -331,9 +336,10 @@ def create_or_update_group(self, name, parent, options): else: return False - ''' + """ @param arguments Attributes of the group - ''' + """ + def create_group(self, arguments): if self._module.check_mode: return True @@ -343,15 +349,16 @@ def create_group(self, arguments): filtered = {arg_key: arg_value for arg_key, arg_value in arguments.items() if arg_value is not None} group = self._gitlab.groups.create(filtered) - except (gitlab.exceptions.GitlabCreateError) as e: + except gitlab.exceptions.GitlabCreateError as e: self._module.fail_json(msg=f"Failed to create group: {e} ") return group - ''' + """ @param group Group Object @param arguments Attributes of the group - ''' + """ + def update_group(self, group, arguments): changed = False @@ -363,17 +370,20 @@ def update_group(self, group, arguments): return (changed, group) - ''' + """ @param force To delete even if projects inside - ''' + """ + def delete_group(self, force=False): group = self.group_object if not force and len(group.projects.list(all=False)) >= 1: self._module.fail_json( - msg=("There are still projects in this group. " - "These needs to be moved or deleted before this group can be removed. 
" - "Use 'force_delete' to 'true' to force deletion of existing projects.") + msg=( + "There are still projects in this group. " + "These needs to be moved or deleted before this group can be removed. " + "Use 'force_delete' to 'true' to force deletion of existing projects." + ) ) else: if self._module.check_mode: @@ -384,10 +394,11 @@ def delete_group(self, force=False): except Exception as e: self._module.fail_json(msg=f"Failed to delete group: {e} ") - ''' + """ @param name Name of the group @param full_path Complete path of the Group including parent group path. / - ''' + """ + def exists_group(self, project_identifier): # When group/user exists, object will be stored in self.group_object. group = find_group(self._gitlab, project_identifier) @@ -400,80 +411,80 @@ def exists_group(self, project_identifier): def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - auto_devops_enabled=dict(type='bool'), - avatar_path=dict(type='path'), - default_branch=dict(type='str'), - description=dict(type='str'), - enabled_git_access_protocol=dict(type='str', choices=['all', 'ssh', 'http']), - force_delete=dict(type='bool', default=False), - lfs_enabled=dict(type='bool'), - lock_duo_features_enabled=dict(type='bool'), - membership_lock=dict(type='bool'), - mentions_disabled=dict(type='bool'), - name=dict(type='str', required=True), - parent=dict(type='str'), - path=dict(type='str'), - prevent_forking_outside_group=dict(type='bool'), - prevent_sharing_groups_outside_hierarchy=dict(type='bool'), - project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']), - request_access_enabled=dict(type='bool'), - require_two_factor_authentication=dict(type='bool'), - service_access_tokens_expiration_enforced=dict(type='bool'), - share_with_group_lock=dict(type='bool'), - state=dict(type='str', default="present", choices=["absent", "present"]), - subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']), - two_factor_grace_period=dict(type='str'), - visibility=dict(type='str', default="private", choices=["internal", "private", "public"]), - wiki_access_level=dict(type='str', choices=['enabled', 'private', 'disabled']), - )) + argument_spec.update( + dict( + auto_devops_enabled=dict(type="bool"), + avatar_path=dict(type="path"), + default_branch=dict(type="str"), + description=dict(type="str"), + enabled_git_access_protocol=dict(type="str", choices=["all", "ssh", "http"]), + force_delete=dict(type="bool", default=False), + lfs_enabled=dict(type="bool"), + lock_duo_features_enabled=dict(type="bool"), + membership_lock=dict(type="bool"), + mentions_disabled=dict(type="bool"), + name=dict(type="str", required=True), + parent=dict(type="str"), + path=dict(type="str"), + prevent_forking_outside_group=dict(type="bool"), + prevent_sharing_groups_outside_hierarchy=dict(type="bool"), + project_creation_level=dict(type="str", choices=["developer", "maintainer", "noone"]), + request_access_enabled=dict(type="bool"), + require_two_factor_authentication=dict(type="bool"), + service_access_tokens_expiration_enforced=dict(type="bool"), + share_with_group_lock=dict(type="bool"), + state=dict(type="str", default="present", choices=["absent", "present"]), + subgroup_creation_level=dict(type="str", choices=["maintainer", "owner"]), + two_factor_grace_period=dict(type="str"), + visibility=dict(type="str", default="private", choices=["internal", "private", "public"]), + wiki_access_level=dict(type="str", choices=["enabled", 
"private", "disabled"]), + ) + ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_token', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_token'], + ["api_token", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_token"], ], required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ["api_username", "api_password"], ], + required_one_of=[["api_username", "api_token", "api_oauth_token", "api_job_token"]], supports_check_mode=True, ) # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) - auto_devops_enabled = module.params['auto_devops_enabled'] - avatar_path = module.params['avatar_path'] - default_branch = module.params['default_branch'] - description = module.params['description'] - enabled_git_access_protocol = module.params['enabled_git_access_protocol'] - force_delete = module.params['force_delete'] - group_name = module.params['name'] - group_path = module.params['path'] - group_visibility = module.params['visibility'] - lfs_enabled = module.params['lfs_enabled'] - lock_duo_features_enabled = module.params['lock_duo_features_enabled'] - membership_lock = module.params['membership_lock'] - mentions_disabled = module.params['mentions_disabled'] - parent_identifier = module.params['parent'] - prevent_forking_outside_group = module.params['prevent_forking_outside_group'] - prevent_sharing_groups_outside_hierarchy = module.params['prevent_sharing_groups_outside_hierarchy'] - project_creation_level = module.params['project_creation_level'] - request_access_enabled = module.params['request_access_enabled'] - require_two_factor_authentication = module.params['require_two_factor_authentication'] - service_access_tokens_expiration_enforced = module.params['service_access_tokens_expiration_enforced'] - share_with_group_lock = module.params['share_with_group_lock'] - state = module.params['state'] - subgroup_creation_level = module.params['subgroup_creation_level'] - two_factor_grace_period = module.params['two_factor_grace_period'] - wiki_access_level = module.params['wiki_access_level'] + auto_devops_enabled = module.params["auto_devops_enabled"] + avatar_path = module.params["avatar_path"] + default_branch = module.params["default_branch"] + description = module.params["description"] + enabled_git_access_protocol = module.params["enabled_git_access_protocol"] + force_delete = module.params["force_delete"] + group_name = module.params["name"] + group_path = module.params["path"] + group_visibility = module.params["visibility"] + lfs_enabled = module.params["lfs_enabled"] + lock_duo_features_enabled = module.params["lock_duo_features_enabled"] + membership_lock = module.params["membership_lock"] + mentions_disabled = module.params["mentions_disabled"] + parent_identifier = module.params["parent"] + prevent_forking_outside_group = module.params["prevent_forking_outside_group"] + prevent_sharing_groups_outside_hierarchy = module.params["prevent_sharing_groups_outside_hierarchy"] + project_creation_level = module.params["project_creation_level"] + request_access_enabled = module.params["request_access_enabled"] + require_two_factor_authentication = module.params["require_two_factor_authentication"] + 
service_access_tokens_expiration_enforced = module.params["service_access_tokens_expiration_enforced"] + share_with_group_lock = module.params["share_with_group_lock"] + state = module.params["state"] + subgroup_creation_level = module.params["subgroup_creation_level"] + two_factor_grace_period = module.params["two_factor_grace_period"] + wiki_access_level = module.params["wiki_access_level"] # Define default group_path based on group_name if group_path is None: @@ -491,41 +502,51 @@ def main(): else: group_exists = gitlab_group.exists_group(group_path) - if state == 'absent': + if state == "absent": if group_exists: gitlab_group.delete_group(force=force_delete) module.exit_json(changed=True, msg=f"Successfully deleted group {group_name}") else: module.exit_json(changed=False, msg="Group deleted or does not exist") - if state == 'present': - if gitlab_group.create_or_update_group(group_name, parent_group, { - "auto_devops_enabled": auto_devops_enabled, - "avatar_path": avatar_path, - "default_branch": default_branch, - "description": description, - "enabled_git_access_protocol": enabled_git_access_protocol, - "lfs_enabled": lfs_enabled, - "lock_duo_features_enabled": lock_duo_features_enabled, - "membership_lock": membership_lock, - "mentions_disabled": mentions_disabled, - "path": group_path, - "prevent_forking_outside_group": prevent_forking_outside_group, - "prevent_sharing_groups_outside_hierarchy": prevent_sharing_groups_outside_hierarchy, - "project_creation_level": project_creation_level, - "request_access_enabled": request_access_enabled, - "require_two_factor_authentication": require_two_factor_authentication, - "service_access_tokens_expiration_enforced": service_access_tokens_expiration_enforced, - "share_with_group_lock": share_with_group_lock, - "subgroup_creation_level": subgroup_creation_level, - "two_factor_grace_period": two_factor_grace_period, - "visibility": group_visibility, - "wiki_access_level": wiki_access_level, - }): - module.exit_json(changed=True, msg=f"Successfully created or updated the group {group_name}", group=gitlab_group.group_object._attrs) + if state == "present": + if gitlab_group.create_or_update_group( + group_name, + parent_group, + { + "auto_devops_enabled": auto_devops_enabled, + "avatar_path": avatar_path, + "default_branch": default_branch, + "description": description, + "enabled_git_access_protocol": enabled_git_access_protocol, + "lfs_enabled": lfs_enabled, + "lock_duo_features_enabled": lock_duo_features_enabled, + "membership_lock": membership_lock, + "mentions_disabled": mentions_disabled, + "path": group_path, + "prevent_forking_outside_group": prevent_forking_outside_group, + "prevent_sharing_groups_outside_hierarchy": prevent_sharing_groups_outside_hierarchy, + "project_creation_level": project_creation_level, + "request_access_enabled": request_access_enabled, + "require_two_factor_authentication": require_two_factor_authentication, + "service_access_tokens_expiration_enforced": service_access_tokens_expiration_enforced, + "share_with_group_lock": share_with_group_lock, + "subgroup_creation_level": subgroup_creation_level, + "two_factor_grace_period": two_factor_grace_period, + "visibility": group_visibility, + "wiki_access_level": wiki_access_level, + }, + ): + module.exit_json( + changed=True, + msg=f"Successfully created or updated the group {group_name}", + group=gitlab_group.group_object._attrs, + ) else: - module.exit_json(changed=False, msg=f"No need to update the group {group_name}", group=gitlab_group.group_object._attrs) + 
module.exit_json( + changed=False, msg=f"No need to update the group {group_name}", group=gitlab_group.group_object._attrs + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_group_access_token.py b/plugins/modules/gitlab_group_access_token.py index 2c430c1603d..5a2a6695709 100644 --- a/plugins/modules/gitlab_group_access_token.py +++ b/plugins/modules/gitlab_group_access_token.py @@ -160,7 +160,10 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, find_group, gitlab_authentication, gitlab + auth_argument_spec, + find_group, + gitlab_authentication, + gitlab, ) ACCESS_LEVELS = dict(guest=10, planner=15, reporter=20, developer=30, maintainer=40, owner=50) @@ -172,11 +175,12 @@ def __init__(self, module, gitlab_instance): self._gitlab = gitlab_instance self.access_token_object = None - ''' + """ @param project Project Object @param group Group Object @param arguments Attributes of the access_token - ''' + """ + def create_access_token(self, group, arguments): changed = False if self._module.check_mode: @@ -185,18 +189,19 @@ def create_access_token(self, group, arguments): try: self.access_token_object = group.access_tokens.create(arguments) changed = True - except (gitlab.exceptions.GitlabCreateError) as e: + except gitlab.exceptions.GitlabCreateError as e: self._module.fail_json(msg=f"Failed to create access token: {e} ") return changed - ''' + """ @param project Project object @param group Group Object @param name of the access token - ''' + """ + def find_access_token(self, group, name): - access_tokens = [x for x in group.access_tokens.list(all=True) if not getattr(x, 'revoked', False)] + access_tokens = [x for x in group.access_tokens.list(all=True) if not getattr(x, "revoked", False)] for access_token in access_tokens: if access_token.name == name: self.access_token_object = access_token @@ -211,19 +216,19 @@ def revoke_access_token(self): try: self.access_token_object.delete() changed = True - except (gitlab.exceptions.GitlabCreateError) as e: + except gitlab.exceptions.GitlabCreateError as e: self._module.fail_json(msg=f"Failed to revoke access token: {e} ") return changed def access_tokens_equal(self): - if self.access_token_object.name != self._module.params['name']: + if self.access_token_object.name != self._module.params["name"]: return False - if self.access_token_object.scopes != self._module.params['scopes']: + if self.access_token_object.scopes != self._module.params["scopes"]: return False - if self.access_token_object.access_level != ACCESS_LEVELS[self._module.params['access_level']]: + if self.access_token_object.access_level != ACCESS_LEVELS[self._module.params["access_level"]]: return False - if self.access_token_object.expires_at != self._module.params['expires_at']: + if self.access_token_object.expires_at != self._module.params["expires_at"]: return False return True @@ -231,62 +236,68 @@ def access_tokens_equal(self): def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - state=dict(type='str', default="present", choices=["absent", "present"]), - group=dict(type='str', required=True), - name=dict(type='str', required=True), - scopes=dict(type='list', - required=True, - aliases=['scope'], - elements='str', - choices=['api', - 'read_api', - 'read_registry', - 'write_registry', - 'read_virtual_registry', - 'write_virtual_registry', - 
'read_repository', - 'write_repository', - 'create_runner', - 'manage_runner', - 'ai_features', - 'k8s_proxy', - 'self_rotate']), - access_level=dict(type='str', default='maintainer', choices=['guest', 'planner', 'reporter', 'developer', 'maintainer', 'owner']), - expires_at=dict(type='str', required=True), - recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change']) - )) + argument_spec.update( + dict( + state=dict(type="str", default="present", choices=["absent", "present"]), + group=dict(type="str", required=True), + name=dict(type="str", required=True), + scopes=dict( + type="list", + required=True, + aliases=["scope"], + elements="str", + choices=[ + "api", + "read_api", + "read_registry", + "write_registry", + "read_virtual_registry", + "write_virtual_registry", + "read_repository", + "write_repository", + "create_runner", + "manage_runner", + "ai_features", + "k8s_proxy", + "self_rotate", + ], + ), + access_level=dict( + type="str", + default="maintainer", + choices=["guest", "planner", "reporter", "developer", "maintainer", "owner"], + ), + expires_at=dict(type="str", required=True), + recreate=dict(type="str", default="never", choices=["never", "always", "state_change"]), + ) + ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'] - ], - required_together=[ - ['api_username', 'api_password'] - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], ], - supports_check_mode=True + required_together=[["api_username", "api_password"]], + required_one_of=[["api_username", "api_token", "api_oauth_token", "api_job_token"]], + supports_check_mode=True, ) - state = module.params['state'] - group_identifier = module.params['group'] - name = module.params['name'] - scopes = module.params['scopes'] - access_level_str = module.params['access_level'] - expires_at = module.params['expires_at'] - recreate = module.params['recreate'] + state = module.params["state"] + group_identifier = module.params["group"] + name = module.params["name"] + scopes = module.params["scopes"] + access_level_str = module.params["access_level"] + expires_at = module.params["expires_at"] + recreate = module.params["recreate"] access_level = ACCESS_LEVELS[access_level_str] try: - datetime.strptime(expires_at, '%Y-%m-%d') + datetime.strptime(expires_at, "%Y-%m-%d") except ValueError: module.fail_json(msg="Argument expires_at is not in required format YYYY-MM-DD") @@ -303,36 +314,60 @@ def main(): if gitlab_access_token.access_token_object is not None: gitlab_access_token_exists = True - if state == 'absent': + if state == "absent": if gitlab_access_token_exists: gitlab_access_token.revoke_access_token() module.exit_json(changed=True, msg=f"Successfully deleted access token {name}") else: module.exit_json(changed=False, msg="Access token does not exist") - if state == 'present': + if state == "present": if gitlab_access_token_exists: if gitlab_access_token.access_tokens_equal(): - if recreate == 'always': + if recreate == "always": gitlab_access_token.revoke_access_token() - gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level,
'expires_at': expires_at}) - module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) + gitlab_access_token.create_access_token( + group, {"name": name, "scopes": scopes, "access_level": access_level, "expires_at": expires_at} + ) + module.exit_json( + changed=True, + msg="Successfully recreated access token", + access_token=gitlab_access_token.access_token_object._attrs, + ) else: - module.exit_json(changed=False, msg="Access token already exists", access_token=gitlab_access_token.access_token_object._attrs) + module.exit_json( + changed=False, + msg="Access token already exists", + access_token=gitlab_access_token.access_token_object._attrs, + ) else: - if recreate == 'never': - module.fail_json(msg="Access token already exists and its state is different. It can not be updated without recreating.") + if recreate == "never": + module.fail_json( + msg="Access token already exists and its state is different. It can not be updated without recreating." + ) else: gitlab_access_token.revoke_access_token() - gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) - module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) + gitlab_access_token.create_access_token( + group, {"name": name, "scopes": scopes, "access_level": access_level, "expires_at": expires_at} + ) + module.exit_json( + changed=True, + msg="Successfully recreated access token", + access_token=gitlab_access_token.access_token_object._attrs, + ) else: - gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + gitlab_access_token.create_access_token( + group, {"name": name, "scopes": scopes, "access_level": access_level, "expires_at": expires_at} + ) if module.check_mode: module.exit_json(changed=True, msg="Successfully created access token", access_token={}) else: - module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) + module.exit_json( + changed=True, + msg="Successfully created access token", + access_token=gitlab_access_token.access_token_object._attrs, + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_group_members.py b/plugins/modules/gitlab_group_members.py index 7c9e8ac4640..d44f998a6a0 100644 --- a/plugins/modules/gitlab_group_members.py +++ b/plugins/modules/gitlab_group_members.py @@ -157,7 +157,10 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, gitlab, list_all_kwargs + auth_argument_spec, + gitlab_authentication, + gitlab, + list_all_kwargs, ) @@ -168,19 +171,17 @@ def __init__(self, module, gl): # get user id if the user exists def get_user_id(self, gitlab_user): - return next( - (u.id for u in self._gitlab.users.list(username=gitlab_user, **list_all_kwargs)), - None - ) + return next((u.id for u in self._gitlab.users.list(username=gitlab_user, **list_all_kwargs)), None) # get group id if group exists def get_group_id(self, gitlab_group): return next( ( - g.id for g in self._gitlab.groups.list(search=gitlab_group, **list_all_kwargs) + g.id + for g in self._gitlab.groups.list(search=gitlab_group, **list_all_kwargs) if g.full_path == gitlab_group 
), - None + None, ) # get all members in a group @@ -209,8 +210,7 @@ def is_user_a_member(self, members, gitlab_user_id): # add user to a group def add_member_to_group(self, gitlab_user_id, gitlab_group_id, access_level): group = self._gitlab.groups.get(gitlab_group_id) - add_member = group.members.create( - {'user_id': gitlab_user_id, 'access_level': access_level}) + add_member = group.members.create({"user_id": gitlab_user_id, "access_level": access_level}) # remove user from a group def remove_user_from_group(self, gitlab_user_id, gitlab_group_id): @@ -234,43 +234,49 @@ def update_user_access_level(self, members, gitlab_user_id, access_level): def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - gitlab_group=dict(type='str', required=True), - gitlab_user=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent']), - access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), - purge_users=dict(type='list', elements='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), - gitlab_users_access=dict( - type='list', - elements='dict', - options=dict( - name=dict(type='str', required=True), - access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner'], required=True), - ) - ), - )) + argument_spec.update( + dict( + gitlab_group=dict(type="str", required=True), + gitlab_user=dict(type="list", elements="str"), + state=dict(type="str", default="present", choices=["present", "absent"]), + access_level=dict(type="str", choices=["guest", "reporter", "developer", "maintainer", "owner"]), + purge_users=dict( + type="list", elements="str", choices=["guest", "reporter", "developer", "maintainer", "owner"] + ), + gitlab_users_access=dict( + type="list", + elements="dict", + options=dict( + name=dict(type="str", required=True), + access_level=dict( + type="str", choices=["guest", "reporter", "developer", "maintainer", "owner"], required=True + ), + ), + ), + ) + ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['gitlab_user', 'gitlab_users_access'], - ['access_level', 'gitlab_users_access'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], + ["api_username", "api_token"], + ["gitlab_user", "gitlab_users_access"], + ["access_level", "gitlab_users_access"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], ], required_together=[ - ['api_username', 'api_password'], - ['gitlab_user', 'access_level'], + ["api_username", "api_password"], + ["gitlab_user", "access_level"], ], required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], - ['gitlab_user', 'gitlab_users_access'], + ["api_username", "api_token", "api_oauth_token", "api_job_token"], + ["gitlab_user", "gitlab_users_access"], ], required_if=[ - ['state', 'present', ['access_level', 'gitlab_users_access'], True], + ["state", "present", ["access_level", "gitlab_users_access"], True], ], supports_check_mode=True, ) @@ -279,17 +285,17 @@ def main(): gl = gitlab_authentication(module) access_level_int = { - 'guest': gitlab.const.GUEST_ACCESS, - 'reporter': gitlab.const.REPORTER_ACCESS, - 'developer': gitlab.const.DEVELOPER_ACCESS, - 'maintainer': 
gitlab.const.MAINTAINER_ACCESS, - 'owner': gitlab.const.OWNER_ACCESS, + "guest": gitlab.const.GUEST_ACCESS, + "reporter": gitlab.const.REPORTER_ACCESS, + "developer": gitlab.const.DEVELOPER_ACCESS, + "maintainer": gitlab.const.MAINTAINER_ACCESS, + "owner": gitlab.const.OWNER_ACCESS, } - gitlab_group = module.params['gitlab_group'] - state = module.params['state'] - access_level = module.params['access_level'] - purge_users = module.params['purge_users'] + gitlab_group = module.params["gitlab_group"] + state = module.params["state"] + access_level = module.params["access_level"] + purge_users = module.params["purge_users"] if purge_users: purge_users = [access_level_int[level] for level in purge_users] @@ -303,27 +309,30 @@ def main(): module.fail_json(msg=f"group '{gitlab_group}' not found.") members = [] - if module.params['gitlab_user'] is not None: + if module.params["gitlab_user"] is not None: gitlab_users_access = [] - gitlab_users = module.params['gitlab_user'] + gitlab_users = module.params["gitlab_user"] for gl_user in gitlab_users: - gitlab_users_access.append({'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None}) - elif module.params['gitlab_users_access'] is not None: - gitlab_users_access = module.params['gitlab_users_access'] + gitlab_users_access.append( + {"name": gl_user, "access_level": access_level_int[access_level] if access_level else None} + ) + elif module.params["gitlab_users_access"] is not None: + gitlab_users_access = module.params["gitlab_users_access"] for user_level in gitlab_users_access: - user_level['access_level'] = access_level_int[user_level['access_level']] + user_level["access_level"] = access_level_int[user_level["access_level"]] if len(gitlab_users_access) == 1 and not purge_users: # only single user given - members = [group.get_member_in_a_group(gitlab_group_id, group.get_user_id(gitlab_users_access[0]['name']))] + members = [group.get_member_in_a_group(gitlab_group_id, group.get_user_id(gitlab_users_access[0]["name"]))] if members[0] is None: members = [] elif len(gitlab_users_access) > 1 or purge_users: # list of users given members = group.get_members_in_a_group(gitlab_group_id) else: - module.exit_json(changed='OK', result="Nothing to do, please give at least one user or set purge_users true.", - result_data=[]) + module.exit_json( + changed="OK", result="Nothing to do, please give at least one user or set purge_users true.", result_data=[] + ) changed = False error = False @@ -331,67 +340,115 @@ def main(): changed_data = [] for gitlab_user in gitlab_users_access: - gitlab_user_id = group.get_user_id(gitlab_user['name']) + gitlab_user_id = group.get_user_id(gitlab_user["name"]) # user doesn't exist if not gitlab_user_id: - if state == 'absent': + if state == "absent": changed_users.append(f"user '{gitlab_user['name']}' not found, and thus also not part of the group") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', - 'msg': f"user '{gitlab_user['name']}' not found, and thus also not part of the group"}) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "OK", + "msg": f"user '{gitlab_user['name']}' not found, and thus also not part of the group", + } + ) else: error = True changed_users.append(f"user '{gitlab_user['name']}' not found.") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': f"user '{gitlab_user['name']}' not found."}) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "FAILED", + 
"msg": f"user '{gitlab_user['name']}' not found.", + } + ) continue is_user_a_member = group.is_user_a_member(members, gitlab_user_id) # check if the user is a member in the group if not is_user_a_member: - if state == 'present': + if state == "present": # add user to the group try: if not module.check_mode: - group.add_member_to_group(gitlab_user_id, gitlab_group_id, gitlab_user['access_level']) + group.add_member_to_group(gitlab_user_id, gitlab_group_id, gitlab_user["access_level"]) changed = True changed_users.append(f"Successfully added user '{gitlab_user['name']}' to group") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', - 'msg': f"Successfully added user '{gitlab_user['name']}' to group"}) - except (gitlab.exceptions.GitlabCreateError) as e: + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "CHANGED", + "msg": f"Successfully added user '{gitlab_user['name']}' to group", + } + ) + except gitlab.exceptions.GitlabCreateError as e: error = True changed_users.append(f"Failed to updated the access level for the user, '{gitlab_user['name']}'") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': f"Not allowed to add the access level for the member, {gitlab_user['name']}: {e}"}) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "FAILED", + "msg": f"Not allowed to add the access level for the member, {gitlab_user['name']}: {e}", + } + ) # state as absent else: - changed_users.append(f"User, '{gitlab_user['name']}', is not a member in the group. No change to report") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', - 'msg': f"User, '{gitlab_user['name']}', is not a member in the group. No change to report"}) + changed_users.append( + f"User, '{gitlab_user['name']}', is not a member in the group. No change to report" + ) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "OK", + "msg": f"User, '{gitlab_user['name']}', is not a member in the group. No change to report", + } + ) # in case that a user is a member else: - if state == 'present': + if state == "present": # compare the access level user_access_level = group.get_user_access_level(members, gitlab_user_id) - if user_access_level == gitlab_user['access_level']: - changed_users.append(f"User, '{gitlab_user['name']}', is already a member in the group. No change to report") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', - 'msg': f"User, '{gitlab_user['name']}', is already a member in the group. No change to report"}) + if user_access_level == gitlab_user["access_level"]: + changed_users.append( + f"User, '{gitlab_user['name']}', is already a member in the group. No change to report" + ) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "OK", + "msg": f"User, '{gitlab_user['name']}', is already a member in the group. 
No change to report", + } + ) else: # update the access level for the user try: if not module.check_mode: - group.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level']) + group.update_user_access_level(members, gitlab_user_id, gitlab_user["access_level"]) changed = True - changed_users.append(f"Successfully updated the access level for the user, '{gitlab_user['name']}'") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', - 'msg': f"Successfully updated the access level for the user, '{gitlab_user['name']}'"}) - except (gitlab.exceptions.GitlabUpdateError) as e: + changed_users.append( + f"Successfully updated the access level for the user, '{gitlab_user['name']}'" + ) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "CHANGED", + "msg": f"Successfully updated the access level for the user, '{gitlab_user['name']}'", + } + ) + except gitlab.exceptions.GitlabUpdateError as e: error = True - changed_users.append(f"Failed to updated the access level for the user, '{gitlab_user['name']}'") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': f"Not allowed to update the access level for the member, {gitlab_user['name']}: {e}"}) + changed_users.append( + f"Failed to updated the access level for the user, '{gitlab_user['name']}'" + ) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "FAILED", + "msg": f"Not allowed to update the access level for the member, {gitlab_user['name']}: {e}", + } + ) else: # remove the user from the group try: @@ -399,43 +456,70 @@ def main(): group.remove_user_from_group(gitlab_user_id, gitlab_group_id) changed = True changed_users.append(f"Successfully removed user, '{gitlab_user['name']}', from the group") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', - 'msg': f"Successfully removed user, '{gitlab_user['name']}', from the group"}) - except (gitlab.exceptions.GitlabDeleteError) as e: + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "CHANGED", + "msg": f"Successfully removed user, '{gitlab_user['name']}', from the group", + } + ) + except gitlab.exceptions.GitlabDeleteError as e: error = True changed_users.append(f"Failed to removed user, '{gitlab_user['name']}', from the group") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': f"Failed to remove user, '{gitlab_user['name']}' from the group: {e}"}) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "FAILED", + "msg": f"Failed to remove user, '{gitlab_user['name']}' from the group: {e}", + } + ) # if state = present and purge_users set delete users which are in members having give access level but not in gitlab_users - if state == 'present' and purge_users: + if state == "present" and purge_users: uppercase_names_in_gitlab_users_access = [] for name in gitlab_users_access: - uppercase_names_in_gitlab_users_access.append(name['name'].upper()) + uppercase_names_in_gitlab_users_access.append(name["name"].upper()) for member in members: - if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access: + if ( + member.access_level in purge_users + and member.username.upper() not in uppercase_names_in_gitlab_users_access + ): try: if not module.check_mode: group.remove_user_from_group(member.id, gitlab_group_id) changed = True - changed_users.append(f"Successfully removed user '{member.username}', from group. 
Was not in given list") - changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED', - 'msg': f"Successfully removed user '{member.username}', from group. Was not in given list"}) - except (gitlab.exceptions.GitlabDeleteError) as e: + changed_users.append( + f"Successfully removed user '{member.username}', from group. Was not in given list" + ) + changed_data.append( + { + "gitlab_user": member.username, + "result": "CHANGED", + "msg": f"Successfully removed user '{member.username}', from group. Was not in given list", + } + ) + except gitlab.exceptions.GitlabDeleteError as e: error = True changed_users.append(f"Failed to removed user, '{gitlab_user['name']}', from the group") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': f"Failed to remove user, '{gitlab_user['name']}' from the group: {e}"}) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "FAILED", + "msg": f"Failed to remove user, '{gitlab_user['name']}' from the group: {e}", + } + ) if len(gitlab_users_access) == 1 and error: # if single user given and an error occurred return error for list errors will be per user module.fail_json(msg=f"FAILED: '{changed_users[0]} '", result_data=changed_data) elif error: - module.fail_json(msg='FAILED: At least one given user/permission could not be set', result_data=changed_data) + module.fail_json(msg="FAILED: At least one given user/permission could not be set", result_data=changed_data) - module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data) + module.exit_json( + changed=changed, msg="Successfully set memberships", result="\n".join(changed_users), result_data=changed_data + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_group_variable.py b/plugins/modules/gitlab_group_variable.py index 3fa1343c210..0e3203faa66 100644 --- a/plugins/modules/gitlab_group_variable.py +++ b/plugins/modules/gitlab_group_variable.py @@ -220,16 +220,18 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, filter_returned_variables, vars_to_variables, - list_all_kwargs + auth_argument_spec, + gitlab_authentication, + filter_returned_variables, + vars_to_variables, + list_all_kwargs, ) class GitlabGroupVariables: - def __init__(self, module, gitlab_instance): self.repo = gitlab_instance - self.group = self.get_group(module.params['group']) + self.group = self.get_group(module.params["group"]) self._module = module def get_group(self, group_name): @@ -242,17 +244,17 @@ def create_variable(self, var_obj): if self._module.check_mode: return True var = { - "key": var_obj.get('key'), - "value": var_obj.get('value'), - "description": var_obj.get('description'), - "masked": var_obj.get('masked'), - "masked_and_hidden": var_obj.get('hidden'), - "protected": var_obj.get('protected'), - "raw": var_obj.get('raw'), - "variable_type": var_obj.get('variable_type'), + "key": var_obj.get("key"), + "value": var_obj.get("value"), + "description": var_obj.get("description"), + "masked": var_obj.get("masked"), + "masked_and_hidden": var_obj.get("hidden"), + "protected": var_obj.get("protected"), + "raw": var_obj.get("raw"), + "variable_type": var_obj.get("variable_type"), } - if var_obj.get('environment_scope') is not None: - 
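# --- editor's note -----------------------------------------------------------
# A runnable sketch of the purge selection used by gitlab_group_membership
# above: members whose access level appears in purge_users but whose username
# is not in the requested list get removed. The SimpleNamespace stand-ins and
# the sample data are illustrative only, not part of the module.
from types import SimpleNamespace

def members_to_purge(members, gitlab_users_access, purge_levels):
    # usernames are compared upper-cased, mirroring the module's comparison
    requested = {user["name"].upper() for user in gitlab_users_access}
    return [
        member for member in members
        if member.access_level in purge_levels and member.username.upper() not in requested
    ]

_members = [
    SimpleNamespace(id=1, username="alice", access_level=30),
    SimpleNamespace(id=2, username="bob", access_level=30),
]
print([m.username for m in members_to_purge(_members, [{"name": "Alice"}], [30])])
# -> ['bob']
# ------------------------------------------------------------------------------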
var["environment_scope"] = var_obj.get('environment_scope') + if var_obj.get("environment_scope") is not None: + var["environment_scope"] = var_obj.get("environment_scope") self.group.variables.create(var) return True @@ -267,7 +269,7 @@ def update_variable(self, var_obj): def delete_variable(self, var_obj): if self._module.check_mode: return True - self.group.variables.delete(var_obj.get('key'), filter={'environment_scope': var_obj.get('environment_scope')}) + self.group.variables.delete(var_obj.get("key"), filter={"environment_scope": var_obj.get("environment_scope")}) return True @@ -283,16 +285,16 @@ def compare(requested_variables, existing_variables, state): updated = list() added = list() - if state == 'present': + if state == "present": existing_key_scope_vars = list() for item in existing_variables: - existing_key_scope_vars.append({'key': item.get('key'), 'environment_scope': item.get('environment_scope')}) + existing_key_scope_vars.append({"key": item.get("key"), "environment_scope": item.get("environment_scope")}) for var in requested_variables: if var in existing_variables: untouched.append(var) else: - compare_item = {'key': var.get('name'), 'environment_scope': var.get('environment_scope')} + compare_item = {"key": var.get("name"), "environment_scope": var.get("environment_scope")} if compare_item in existing_key_scope_vars: updated.append(var) else: @@ -302,7 +304,6 @@ def compare(requested_variables, existing_variables, state): def native_python_main(this_gitlab, purge, requested_variables, state, module): - change = False return_value = dict(added=list(), updated=list(), removed=list(), untouched=list()) @@ -313,34 +314,34 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): existing_variables = filter_returned_variables(gitlab_keys) for item in requested_variables: - item['key'] = item.pop('name') - item['value'] = str(item.get('value')) - if item.get('protected') is None: - item['protected'] = False - if item.get('raw') is None: - item['raw'] = False - if item.get('masked') is None: - item['masked'] = False - if item.get('hidden') is None: - item['hidden'] = False - if item.get('environment_scope') is None: - item['environment_scope'] = '*' - if item.get('variable_type') is None: - item['variable_type'] = 'env_var' + item["key"] = item.pop("name") + item["value"] = str(item.get("value")) + if item.get("protected") is None: + item["protected"] = False + if item.get("raw") is None: + item["raw"] = False + if item.get("masked") is None: + item["masked"] = False + if item.get("hidden") is None: + item["hidden"] = False + if item.get("environment_scope") is None: + item["environment_scope"] = "*" + if item.get("variable_type") is None: + item["variable_type"] = "env_var" if module.check_mode: untouched, updated, added = compare(requested_variables, existing_variables, state) - if state == 'present': + if state == "present": add_or_update = [x for x in requested_variables if x not in existing_variables] for item in add_or_update: try: if this_gitlab.create_variable(item): - return_value['added'].append(item) + return_value["added"].append(item) except Exception: if this_gitlab.update_variable(item): - return_value['updated'].append(item) + return_value["updated"].append(item) if purge: # refetch and filter @@ -350,11 +351,11 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): remove = [x for x in existing_variables if x not in requested_variables] for item in remove: if this_gitlab.delete_variable(item): - 
return_value['removed'].append(item) + return_value["removed"].append(item) - elif state == 'absent': + elif state == "absent": # value, type, and description do not matter on removing variables. - keys_ignored_on_deletion = ['value', 'variable_type', 'description'] + keys_ignored_on_deletion = ["value", "variable_type", "description"] for key in keys_ignored_on_deletion: for item in existing_variables: item.pop(key) @@ -365,17 +366,17 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): remove_requested = [x for x in requested_variables if x in existing_variables] for item in remove_requested: if this_gitlab.delete_variable(item): - return_value['removed'].append(item) + return_value["removed"].append(item) else: for item in existing_variables: if this_gitlab.delete_variable(item): - return_value['removed'].append(item) + return_value["removed"].append(item) if module.check_mode: - return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched) + return_value = dict(added=added, updated=updated, removed=return_value["removed"], untouched=untouched) - if len(return_value['added'] + return_value['removed'] + return_value['updated']) > 0: + if len(return_value["added"] + return_value["removed"] + return_value["updated"]) > 0: change = True gitlab_keys = this_gitlab.list_all_group_variables() @@ -388,59 +389,62 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update( - group=dict(type='str', required=True), - purge=dict(type='bool', default=False), - vars=dict(type='dict', default=dict(), no_log=True), + group=dict(type="str", required=True), + purge=dict(type="bool", default=False), + vars=dict(type="dict", default=dict(), no_log=True), # please mind whenever changing the variables dict to also change module_utils/gitlab.py's # KNOWN dict in filter_returned_variables or bad evil will happen - variables=dict(type='list', elements='dict', default=list(), options=dict( - name=dict(type='str', required=True), - value=dict(type='str', no_log=True), - description=dict(type='str'), - masked=dict(type='bool', default=False), - hidden=dict(type='bool', default=False), - protected=dict(type='bool', default=False), - raw=dict(type='bool', default=False), - environment_scope=dict(type='str', default='*'), - variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]) - )), - state=dict(type='str', default="present", choices=["absent", "present"]), + variables=dict( + type="list", + elements="dict", + default=list(), + options=dict( + name=dict(type="str", required=True), + value=dict(type="str", no_log=True), + description=dict(type="str"), + masked=dict(type="bool", default=False), + hidden=dict(type="bool", default=False), + protected=dict(type="bool", default=False), + raw=dict(type="bool", default=False), + environment_scope=dict(type="str", default="*"), + variable_type=dict(type="str", default="env_var", choices=["env_var", "file"]), + ), + ), + state=dict(type="str", default="present", choices=["absent", "present"]), ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ['vars', 'variables'], + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + 
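# --- editor's note -----------------------------------------------------------
# The three-way split compare() above performs in check mode, reduced to plain
# dicts so it runs standalone: identical entries are untouched, a matching
# key/scope pair with different fields counts as updated, everything else as
# added. The sample variables are made up; the real function receives the
# module's normalized variable dicts.
def classify(requested, existing):
    existing_key_scopes = [
        {"key": e["key"], "environment_scope": e["environment_scope"]} for e in existing
    ]
    untouched, updated, added = [], [], []
    for var in requested:
        if var in existing:
            untouched.append(var)
        elif {"key": var["key"], "environment_scope": var["environment_scope"]} in existing_key_scopes:
            updated.append(var)
        else:
            added.append(var)
    return untouched, updated, added

_existing = [{"key": "TOKEN", "value": "1", "environment_scope": "*"}]
print(classify([{"key": "TOKEN", "value": "2", "environment_scope": "*"}], _existing))
# -> ([], [{'key': 'TOKEN', 'value': '2', 'environment_scope': '*'}], [])
# ------------------------------------------------------------------------------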
["api_token", "api_job_token"], + ["vars", "variables"], ], required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ["api_username", "api_password"], ], - supports_check_mode=True + required_one_of=[["api_username", "api_token", "api_oauth_token", "api_job_token"]], + supports_check_mode=True, ) # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) - purge = module.params['purge'] - var_list = module.params['vars'] - state = module.params['state'] + purge = module.params["purge"] + var_list = module.params["vars"] + state = module.params["state"] if var_list: variables = vars_to_variables(var_list, module) else: - variables = module.params['variables'] + variables = module.params["variables"] - if state == 'present': - if any(x['value'] is None for x in variables): - module.fail_json(msg='value parameter is required in state present') + if state == "present": + if any(x["value"] is None for x in variables): + module.fail_json(msg="value parameter is required in state present") this_gitlab = GitlabGroupVariables(module=module, gitlab_instance=gitlab_instance) @@ -448,25 +452,25 @@ def main(): # postprocessing for item in after: - item.pop('group_id') - item['name'] = item.pop('key') + item.pop("group_id") + item["name"] = item.pop("key") for item in before: - item.pop('group_id') - item['name'] = item.pop('key') + item.pop("group_id") + item["name"] = item.pop("key") - untouched_key_name = 'key' + untouched_key_name = "key" if not module.check_mode: - untouched_key_name = 'name' - raw_return_value['untouched'] = [x for x in before if x in after] + untouched_key_name = "name" + raw_return_value["untouched"] = [x for x in before if x in after] - added = [x.get('key') for x in raw_return_value['added']] - updated = [x.get('key') for x in raw_return_value['updated']] - removed = [x.get('key') for x in raw_return_value['removed']] - untouched = [x.get(untouched_key_name) for x in raw_return_value['untouched']] + added = [x.get("key") for x in raw_return_value["added"]] + updated = [x.get("key") for x in raw_return_value["updated"]] + removed = [x.get("key") for x in raw_return_value["removed"]] + untouched = [x.get(untouched_key_name) for x in raw_return_value["untouched"]] return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched) module.exit_json(changed=changed, group_variable=return_value) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_hook.py b/plugins/modules/gitlab_hook.py index 43101c1c2e8..9b8ab8efef9 100644 --- a/plugins/modules/gitlab_hook.py +++ b/plugins/modules/gitlab_hook.py @@ -171,7 +171,10 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, find_project, gitlab_authentication, list_all_kwargs + auth_argument_spec, + find_project, + gitlab_authentication, + list_all_kwargs, ) @@ -181,48 +184,55 @@ def __init__(self, module, gitlab_instance): self._gitlab = gitlab_instance self.hook_object = None - ''' + """ @param project Project Object @param hook_url Url to call on event @param description Description of the group @param parent Parent group full path - ''' + """ + def create_or_update_hook(self, project, hook_url, options): changed = False # Because we have already call userExists in main() if self.hook_object is None: - hook = self.create_hook(project, { 
- 'url': hook_url, - 'push_events': options['push_events'], - 'push_events_branch_filter': options['push_events_branch_filter'], - 'issues_events': options['issues_events'], - 'merge_requests_events': options['merge_requests_events'], - 'tag_push_events': options['tag_push_events'], - 'note_events': options['note_events'], - 'job_events': options['job_events'], - 'pipeline_events': options['pipeline_events'], - 'wiki_page_events': options['wiki_page_events'], - 'releases_events': options['releases_events'], - 'enable_ssl_verification': options['enable_ssl_verification'], - 'token': options['token'], - }) + hook = self.create_hook( + project, + { + "url": hook_url, + "push_events": options["push_events"], + "push_events_branch_filter": options["push_events_branch_filter"], + "issues_events": options["issues_events"], + "merge_requests_events": options["merge_requests_events"], + "tag_push_events": options["tag_push_events"], + "note_events": options["note_events"], + "job_events": options["job_events"], + "pipeline_events": options["pipeline_events"], + "wiki_page_events": options["wiki_page_events"], + "releases_events": options["releases_events"], + "enable_ssl_verification": options["enable_ssl_verification"], + "token": options["token"], + }, + ) changed = True else: - changed, hook = self.update_hook(self.hook_object, { - 'push_events': options['push_events'], - 'push_events_branch_filter': options['push_events_branch_filter'], - 'issues_events': options['issues_events'], - 'merge_requests_events': options['merge_requests_events'], - 'tag_push_events': options['tag_push_events'], - 'note_events': options['note_events'], - 'job_events': options['job_events'], - 'pipeline_events': options['pipeline_events'], - 'wiki_page_events': options['wiki_page_events'], - 'releases_events': options['releases_events'], - 'enable_ssl_verification': options['enable_ssl_verification'], - 'token': options['token'], - }) + changed, hook = self.update_hook( + self.hook_object, + { + "push_events": options["push_events"], + "push_events_branch_filter": options["push_events_branch_filter"], + "issues_events": options["issues_events"], + "merge_requests_events": options["merge_requests_events"], + "tag_push_events": options["tag_push_events"], + "note_events": options["note_events"], + "job_events": options["job_events"], + "pipeline_events": options["pipeline_events"], + "wiki_page_events": options["wiki_page_events"], + "releases_events": options["releases_events"], + "enable_ssl_verification": options["enable_ssl_verification"], + "token": options["token"], + }, + ) self.hook_object = hook if changed: @@ -236,10 +246,11 @@ def create_or_update_hook(self, project, hook_url, options): return changed - ''' + """ @param project Project Object @param arguments Attributes of the hook - ''' + """ + def create_hook(self, project, arguments): if self._module.check_mode: return True @@ -248,10 +259,11 @@ def create_hook(self, project, arguments): return hook - ''' + """ @param hook Hook Object @param arguments Attributes of the hook - ''' + """ + def update_hook(self, hook, arguments): changed = False @@ -263,19 +275,21 @@ def update_hook(self, hook, arguments): return (changed, hook) - ''' + """ @param project Project object @param hook_url Url to call on event - ''' + """ + def find_hook(self, project, hook_url): for hook in project.hooks.list(**list_all_kwargs): if hook.url == hook_url: return hook - ''' + """ @param project Project object @param hook_url Url to call on event - ''' + """ + def exists_hook(self, 
project, hook_url): # When project exists, object will be stored in self.project_object. hook = self.find_hook(project, hook_url) @@ -292,60 +306,58 @@ def delete_hook(self): def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - state=dict(type='str', default="present", choices=["absent", "present"]), - project=dict(type='str', required=True), - hook_url=dict(type='str', required=True), - push_events=dict(type='bool', default=True), - push_events_branch_filter=dict(type='str', default=''), - issues_events=dict(type='bool', default=False), - merge_requests_events=dict(type='bool', default=False), - tag_push_events=dict(type='bool', default=False), - note_events=dict(type='bool', default=False), - job_events=dict(type='bool', default=False), - pipeline_events=dict(type='bool', default=False), - wiki_page_events=dict(type='bool', default=False), - releases_events=dict(type='bool'), - hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']), - token=dict(type='str', no_log=True), - )) + argument_spec.update( + dict( + state=dict(type="str", default="present", choices=["absent", "present"]), + project=dict(type="str", required=True), + hook_url=dict(type="str", required=True), + push_events=dict(type="bool", default=True), + push_events_branch_filter=dict(type="str", default=""), + issues_events=dict(type="bool", default=False), + merge_requests_events=dict(type="bool", default=False), + tag_push_events=dict(type="bool", default=False), + note_events=dict(type="bool", default=False), + job_events=dict(type="bool", default=False), + pipeline_events=dict(type="bool", default=False), + wiki_page_events=dict(type="bool", default=False), + releases_events=dict(type="bool"), + hook_validate_certs=dict(type="bool", default=False, aliases=["enable_ssl_verification"]), + token=dict(type="str", no_log=True), + ) + ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ], - required_together=[ - ['api_username', 'api_password'] - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], ], + required_together=[["api_username", "api_password"]], + required_one_of=[["api_username", "api_token", "api_oauth_token", "api_job_token"]], supports_check_mode=True, ) # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) - state = module.params['state'] - project_identifier = module.params['project'] - hook_url = module.params['hook_url'] - push_events = module.params['push_events'] - push_events_branch_filter = module.params['push_events_branch_filter'] - issues_events = module.params['issues_events'] - merge_requests_events = module.params['merge_requests_events'] - tag_push_events = module.params['tag_push_events'] - note_events = module.params['note_events'] - job_events = module.params['job_events'] - pipeline_events = module.params['pipeline_events'] - wiki_page_events = module.params['wiki_page_events'] - releases_events = module.params['releases_events'] - enable_ssl_verification = module.params['hook_validate_certs'] - hook_token = 
module.params['token'] + state = module.params["state"] + project_identifier = module.params["project"] + hook_url = module.params["hook_url"] + push_events = module.params["push_events"] + push_events_branch_filter = module.params["push_events_branch_filter"] + issues_events = module.params["issues_events"] + merge_requests_events = module.params["merge_requests_events"] + tag_push_events = module.params["tag_push_events"] + note_events = module.params["note_events"] + job_events = module.params["job_events"] + pipeline_events = module.params["pipeline_events"] + wiki_page_events = module.params["wiki_page_events"] + releases_events = module.params["releases_events"] + enable_ssl_verification = module.params["hook_validate_certs"] + hook_token = module.params["token"] gitlab_hook = GitLabHook(module, gitlab_instance) @@ -356,33 +368,42 @@ def main(): hook_exists = gitlab_hook.exists_hook(project, hook_url) - if state == 'absent': + if state == "absent": if hook_exists: gitlab_hook.delete_hook() module.exit_json(changed=True, msg=f"Successfully deleted hook {hook_url}") else: module.exit_json(changed=False, msg="Hook deleted or does not exists") - if state == 'present': - if gitlab_hook.create_or_update_hook(project, hook_url, { - "push_events": push_events, - "push_events_branch_filter": push_events_branch_filter, - "issues_events": issues_events, - "merge_requests_events": merge_requests_events, - "tag_push_events": tag_push_events, - "note_events": note_events, - "job_events": job_events, - "pipeline_events": pipeline_events, - "wiki_page_events": wiki_page_events, - "releases_events": releases_events, - "enable_ssl_verification": enable_ssl_verification, - "token": hook_token, - }): - - module.exit_json(changed=True, msg=f"Successfully created or updated the hook {hook_url}", hook=gitlab_hook.hook_object._attrs) + if state == "present": + if gitlab_hook.create_or_update_hook( + project, + hook_url, + { + "push_events": push_events, + "push_events_branch_filter": push_events_branch_filter, + "issues_events": issues_events, + "merge_requests_events": merge_requests_events, + "tag_push_events": tag_push_events, + "note_events": note_events, + "job_events": job_events, + "pipeline_events": pipeline_events, + "wiki_page_events": wiki_page_events, + "releases_events": releases_events, + "enable_ssl_verification": enable_ssl_verification, + "token": hook_token, + }, + ): + module.exit_json( + changed=True, + msg=f"Successfully created or updated the hook {hook_url}", + hook=gitlab_hook.hook_object._attrs, + ) else: - module.exit_json(changed=False, msg=f"No need to update the hook {hook_url}", hook=gitlab_hook.hook_object._attrs) + module.exit_json( + changed=False, msg=f"No need to update the hook {hook_url}", hook=gitlab_hook.hook_object._attrs + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_instance_variable.py b/plugins/modules/gitlab_instance_variable.py index 25eacf3db8d..79ff2077dc0 100644 --- a/plugins/modules/gitlab_instance_variable.py +++ b/plugins/modules/gitlab_instance_variable.py @@ -149,13 +149,14 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, filter_returned_variables, - list_all_kwargs + auth_argument_spec, + gitlab_authentication, + filter_returned_variables, + list_all_kwargs, ) class GitlabInstanceVariables: - def 
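# --- editor's note -----------------------------------------------------------
# update_hook() in gitlab_hook above returns (changed, hook); the loop body is
# elided by the hunk context, but the usual shape of such an idempotent update
# is sketched below. apply_options and FakeHook are illustrative names, not
# the module's API.
def apply_options(obj, arguments):
    changed = False
    for key, value in arguments.items():
        # only write (and report a change) when the requested value differs
        if value is not None and getattr(obj, key, None) != value:
            setattr(obj, key, value)
            changed = True
    return changed, obj

class FakeHook:
    push_events = True
    token = None

print(apply_options(FakeHook(), {"push_events": False, "token": "s3cr3t"})[0])  # True
# ------------------------------------------------------------------------------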
__init__(self, module, gitlab_instance): self.instance = gitlab_instance self._module = module @@ -167,13 +168,13 @@ def create_variable(self, var_obj): if self._module.check_mode: return True var = { - "key": var_obj.get('key'), - "value": var_obj.get('value'), - "description": var_obj.get('description'), - "masked": var_obj.get('masked'), - "protected": var_obj.get('protected'), - "raw": var_obj.get('raw'), - "variable_type": var_obj.get('variable_type'), + "key": var_obj.get("key"), + "value": var_obj.get("value"), + "description": var_obj.get("description"), + "masked": var_obj.get("masked"), + "protected": var_obj.get("protected"), + "raw": var_obj.get("raw"), + "variable_type": var_obj.get("variable_type"), } self.instance.variables.create(var) @@ -189,7 +190,7 @@ def update_variable(self, var_obj): def delete_variable(self, var_obj): if self._module.check_mode: return True - self.instance.variables.delete(var_obj.get('key')) + self.instance.variables.delete(var_obj.get("key")) return True @@ -205,16 +206,16 @@ def compare(requested_variables, existing_variables, state): updated = list() added = list() - if state == 'present': + if state == "present": existing_key_scope_vars = list() for item in existing_variables: - existing_key_scope_vars.append({'key': item.get('key')}) + existing_key_scope_vars.append({"key": item.get("key")}) for var in requested_variables: if var in existing_variables: untouched.append(var) else: - compare_item = {'key': var.get('name')} + compare_item = {"key": var.get("name")} if compare_item in existing_key_scope_vars: updated.append(var) else: @@ -224,7 +225,6 @@ def compare(requested_variables, existing_variables, state): def native_python_main(this_gitlab, purge, requested_variables, state, module): - change = False return_value = dict(added=list(), updated=list(), removed=list(), untouched=list()) @@ -234,30 +234,30 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): existing_variables = filter_returned_variables(gitlab_keys) for item in requested_variables: - item['key'] = item.pop('name') - item['value'] = str(item.get('value')) - if item.get('protected') is None: - item['protected'] = False - if item.get('masked') is None: - item['masked'] = False - if item.get('raw') is None: - item['raw'] = False - if item.get('variable_type') is None: - item['variable_type'] = 'env_var' + item["key"] = item.pop("name") + item["value"] = str(item.get("value")) + if item.get("protected") is None: + item["protected"] = False + if item.get("masked") is None: + item["masked"] = False + if item.get("raw") is None: + item["raw"] = False + if item.get("variable_type") is None: + item["variable_type"] = "env_var" if module.check_mode: untouched, updated, added = compare(requested_variables, existing_variables, state) - if state == 'present': + if state == "present": add_or_update = [x for x in requested_variables if x not in existing_variables] for item in add_or_update: try: if this_gitlab.create_variable(item): - return_value['added'].append(item) + return_value["added"].append(item) except Exception: if this_gitlab.update_variable(item): - return_value['updated'].append(item) + return_value["updated"].append(item) if purge: # refetch and filter @@ -267,11 +267,11 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): remove = [x for x in existing_variables if x not in requested_variables] for item in remove: if this_gitlab.delete_variable(item): - return_value['removed'].append(item) + 
return_value["removed"].append(item) - elif state == 'absent': + elif state == "absent": # value, type, and description do not matter on removing variables. - keys_ignored_on_deletion = ['value', 'variable_type', 'description'] + keys_ignored_on_deletion = ["value", "variable_type", "description"] for key in keys_ignored_on_deletion: for item in existing_variables: item.pop(key) @@ -282,17 +282,17 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): remove_requested = [x for x in requested_variables if x in existing_variables] for item in remove_requested: if this_gitlab.delete_variable(item): - return_value['removed'].append(item) + return_value["removed"].append(item) else: for item in existing_variables: if this_gitlab.delete_variable(item): - return_value['removed'].append(item) + return_value["removed"].append(item) if module.check_mode: - return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched) + return_value = dict(added=added, updated=updated, removed=return_value["removed"], untouched=untouched) - if len(return_value['added'] + return_value['removed'] + return_value['updated']) > 0: + if len(return_value["added"] + return_value["removed"] + return_value["updated"]) > 0: change = True gitlab_keys = this_gitlab.list_all_instance_variables() @@ -305,48 +305,51 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update( - purge=dict(type='bool', default=False), - variables=dict(type='list', elements='dict', default=list(), options=dict( - name=dict(type='str', required=True), - value=dict(type='str', no_log=True), - description=dict(type='str'), - masked=dict(type='bool', default=False), - protected=dict(type='bool', default=False), - raw=dict(type='bool', default=False), - variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]) - )), - state=dict(type='str', default="present", choices=["absent", "present"]), + purge=dict(type="bool", default=False), + variables=dict( + type="list", + elements="dict", + default=list(), + options=dict( + name=dict(type="str", required=True), + value=dict(type="str", no_log=True), + description=dict(type="str"), + masked=dict(type="bool", default=False), + protected=dict(type="bool", default=False), + raw=dict(type="bool", default=False), + variable_type=dict(type="str", default="env_var", choices=["env_var", "file"]), + ), + ), + state=dict(type="str", default="present", choices=["absent", "present"]), ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], ], required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ["api_username", "api_password"], ], - supports_check_mode=True + required_one_of=[["api_username", "api_token", "api_oauth_token", "api_job_token"]], + supports_check_mode=True, ) # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) - purge = module.params['purge'] - state = module.params['state'] + purge = module.params["purge"] + state = module.params["state"] - 
variables = module.params['variables'] + variables = module.params["variables"] - if state == 'present': - if any(x['value'] is None for x in variables): - module.fail_json(msg='value parameter is required in state present') + if state == "present": + if any(x["value"] is None for x in variables): + module.fail_json(msg="value parameter is required in state present") this_gitlab = GitlabInstanceVariables(module=module, gitlab_instance=gitlab_instance) @@ -354,23 +357,23 @@ def main(): # postprocessing for item in after: - item['name'] = item.pop('key') + item["name"] = item.pop("key") for item in before: - item['name'] = item.pop('key') + item["name"] = item.pop("key") - untouched_key_name = 'key' + untouched_key_name = "key" if not module.check_mode: - untouched_key_name = 'name' - raw_return_value['untouched'] = [x for x in before if x in after] + untouched_key_name = "name" + raw_return_value["untouched"] = [x for x in before if x in after] - added = [x.get('key') for x in raw_return_value['added']] - updated = [x.get('key') for x in raw_return_value['updated']] - removed = [x.get('key') for x in raw_return_value['removed']] - untouched = [x.get(untouched_key_name) for x in raw_return_value['untouched']] + added = [x.get("key") for x in raw_return_value["added"]] + updated = [x.get("key") for x in raw_return_value["updated"]] + removed = [x.get("key") for x in raw_return_value["removed"]] + untouched = [x.get(untouched_key_name) for x in raw_return_value["untouched"]] return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched) module.exit_json(changed=changed, instance_variable=return_value) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_issue.py b/plugins/modules/gitlab_issue.py index dc522aa2cf6..f4c8934574d 100644 --- a/plugins/modules/gitlab_issue.py +++ b/plugins/modules/gitlab_issue.py @@ -142,20 +142,24 @@ from ansible.module_utils.common.text.converters import to_text from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, gitlab, find_project, find_group + auth_argument_spec, + gitlab_authentication, + gitlab, + find_project, + find_group, ) class GitlabIssue: - def __init__(self, module, project, gitlab_instance): self._gitlab = gitlab_instance self._module = module self.project = project - ''' + """ @param milestone_id Title of the milestone - ''' + """ + def get_milestone(self, milestone_id, group): milestones = [] try: @@ -173,10 +177,11 @@ def get_milestone(self, milestone_id, group): except gitlab.exceptions.GitlabGetError as e: self._module.fail_json(msg=f"Failed to get the Milestones: {e}") - ''' + """ @param title Title of the Issue @param state_filter Issue's state to filter on - ''' + """ + def get_issue(self, title, state_filter): issues = [] try: @@ -192,9 +197,10 @@ def get_issue(self, title, state_filter): except gitlab.exceptions.GitlabGetError as e: self._module.fail_json(msg=f"Failed to get the Issue: {e}") - ''' + """ @param username Name of the user - ''' + """ + def get_user(self, username): users = [] try: @@ -209,15 +215,17 @@ def get_user(self, username): else: return users[0] - ''' + """ @param users List of usernames - ''' + """ + def get_user_ids(self, users): return [self.get_user(user).id for user in users] - ''' + """ @param options Options of the Issue - ''' + """ + def create_issue(self, options): if self._module.check_mode: self._module.exit_json(changed=True, msg=f"Successfully created Issue 
'{options['title']}'.") @@ -227,9 +235,10 @@ def create_issue(self, options): except gitlab.exceptions.GitlabCreateError as e: self._module.fail_json(msg=f"Failed to create Issue: {e}") - ''' + """ @param issue Issue object to delete - ''' + """ + def delete_issue(self, issue): if self._module.check_mode: self._module.exit_json(changed=True, msg=f"Successfully deleted Issue '{issue['title']}'.") @@ -239,10 +248,11 @@ def delete_issue(self, issue): except gitlab.exceptions.GitlabDeleteError as e: self._module.fail_json(msg=f"Failed to delete Issue: '{e}'.") - ''' + """ @param issue Issue object to update @param options Options of the Issue - ''' + """ + def update_issue(self, issue, options): if self._module.check_mode: self._module.exit_json(changed=True, msg=f"Successfully updated Issue '{issue['title']}'.") @@ -252,23 +262,23 @@ def update_issue(self, issue, options): except gitlab.exceptions.GitlabUpdateError as e: self._module.fail_json(msg=f"Failed to update Issue {e}.") - ''' + """ @param issue Issue object to evaluate @param options New options to update Issue with - ''' + """ + def issue_has_changed(self, issue, options): for key, value in options.items(): if value is not None: - - if key == 'milestone_id': - old_milestone = getattr(issue, 'milestone')['id'] if getattr(issue, 'milestone') else "" + if key == "milestone_id": + old_milestone = getattr(issue, "milestone")["id"] if getattr(issue, "milestone") else "" if value != old_milestone: return True - elif key == 'assignee_ids': - if value != sorted([user["id"] for user in getattr(issue, 'assignees')]): + elif key == "assignee_ids": + if value != sorted([user["id"] for user in getattr(issue, "assignees")]): return True - elif key == 'labels': + elif key == "labels": if value != sorted(getattr(issue, key)): return True @@ -282,53 +292,51 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update( - assignee_ids=dict(type='list', elements='str'), - description=dict(type='str'), - description_path=dict(type='path'), - issue_type=dict(type='str', default='issue', choices=["issue", "incident", "test_case"]), - labels=dict(type='list', elements='str'), - milestone_search=dict(type='str'), - milestone_group_id=dict(type='str'), - project=dict(type='str', required=True), - state=dict(type='str', default="present", choices=["absent", "present"]), - state_filter=dict(type='str', default="opened", choices=["opened", "closed"]), - title=dict(type='str', required=True), + assignee_ids=dict(type="list", elements="str"), + description=dict(type="str"), + description_path=dict(type="path"), + issue_type=dict(type="str", default="issue", choices=["issue", "incident", "test_case"]), + labels=dict(type="list", elements="str"), + milestone_search=dict(type="str"), + milestone_group_id=dict(type="str"), + project=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["absent", "present"]), + state_filter=dict(type="str", default="opened", choices=["opened", "closed"]), + title=dict(type="str", required=True), ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ['description', 'description_path'], + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", 
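# --- editor's note -----------------------------------------------------------
# issue_has_changed() above compares assignee ids sorted, so a mere ordering
# difference between the requested list and what the API returns is not
# reported as a change. A standalone sketch of that comparison:
def assignees_changed(current_assignees, requested_ids):
    return sorted(requested_ids) != sorted(user["id"] for user in current_assignees)

print(assignees_changed([{"id": 2}, {"id": 1}], [1, 2]))  # False: same set, different order
# ------------------------------------------------------------------------------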
"api_job_token"], + ["description", "description_path"], ], required_together=[ - ['api_username', 'api_password'], - ['milestone_search', 'milestone_group_id'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ["api_username", "api_password"], + ["milestone_search", "milestone_group_id"], ], - supports_check_mode=True + required_one_of=[["api_username", "api_token", "api_oauth_token", "api_job_token"]], + supports_check_mode=True, ) - assignee_ids = module.params['assignee_ids'] - description = module.params['description'] - description_path = module.params['description_path'] - issue_type = module.params['issue_type'] - labels = module.params['labels'] - milestone_id = module.params['milestone_search'] - milestone_group_id = module.params['milestone_group_id'] - project = module.params['project'] - state = module.params['state'] - state_filter = module.params['state_filter'] - title = module.params['title'] + assignee_ids = module.params["assignee_ids"] + description = module.params["description"] + description_path = module.params["description_path"] + issue_type = module.params["issue_type"] + labels = module.params["labels"] + milestone_id = module.params["milestone_search"] + milestone_group_id = module.params["milestone_group_id"] + project = module.params["project"] + state = module.params["state"] + state_filter = module.params["state_filter"] + title = module.params["title"] # check prerequisites and connect to gitlab server - gitlab_instance = gitlab_authentication(module, min_version='2.3.0') + gitlab_instance = gitlab_authentication(module, min_version="2.3.0") this_project = find_project(gitlab_instance, project) if this_project is None: @@ -348,10 +356,10 @@ def main(): if state == "present": if description_path: try: - with open(description_path, 'rb') as f: - description = to_text(f.read(), errors='surrogate_or_strict') + with open(description_path, "rb") as f: + description = to_text(f.read(), errors="surrogate_or_strict") except IOError as e: - module.fail_json(msg=f'Cannot open {description_path}: {e}') + module.fail_json(msg=f"Cannot open {description_path}: {e}") # sorting necessary in order to properly detect changes, as we don't want to get false positive # results due to differences in ids ordering; @@ -369,32 +377,20 @@ def main(): if not this_issue: issue = this_gitlab.create_issue(options) - module.exit_json( - changed=True, msg=f"Created Issue '{title}'.", - issue=issue.asdict() - ) + module.exit_json(changed=True, msg=f"Created Issue '{title}'.", issue=issue.asdict()) else: if this_gitlab.issue_has_changed(this_issue, options): issue = this_gitlab.update_issue(this_issue, options) - module.exit_json( - changed=True, msg=f"Updated Issue '{title}'.", - issue=issue - ) + module.exit_json(changed=True, msg=f"Updated Issue '{title}'.", issue=issue) else: - module.exit_json( - changed=False, msg=f"Issue '{title}' already exists", - issue=this_issue.asdict() - ) + module.exit_json(changed=False, msg=f"Issue '{title}' already exists", issue=this_issue.asdict()) elif state == "absent": if not this_issue: module.exit_json(changed=False, msg=f"Issue '{title}' does not exist or has already been deleted.") else: issue = this_gitlab.delete_issue(this_issue) - module.exit_json( - changed=True, msg=f"Issue '{title}' deleted.", - issue=issue - ) + module.exit_json(changed=True, msg=f"Issue '{title}' deleted.", issue=issue) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_label.py 
b/plugins/modules/gitlab_label.py index 30640cf28fc..ffd017ab910 100644 --- a/plugins/modules/gitlab_label.py +++ b/plugins/modules/gitlab_label.py @@ -221,12 +221,15 @@ from ansible.module_utils.api import basic_auth_argument_spec from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, ensure_gitlab_package, find_group, find_project + auth_argument_spec, + gitlab_authentication, + ensure_gitlab_package, + find_group, + find_project, ) class GitlabLabels: - def __init__(self, module, gitlab_instance, group_id, project_id): self._gitlab = gitlab_instance self.gitlab_object = group_id if group_id else project_id @@ -248,15 +251,15 @@ def create_label(self, var_obj): return True, True var = { - "name": var_obj.get('name'), - "color": var_obj.get('color'), + "name": var_obj.get("name"), + "color": var_obj.get("color"), } - if var_obj.get('description') is not None: - var["description"] = var_obj.get('description') + if var_obj.get("description") is not None: + var["description"] = var_obj.get("description") - if var_obj.get('priority') is not None: - var["priority"] = var_obj.get('priority') + if var_obj.get("priority") is not None: + var["priority"] = var_obj.get("priority") _obj = self.gitlab_object.labels.create(var) return True, _obj.asdict() @@ -264,17 +267,17 @@ def create_label(self, var_obj): def update_label(self, var_obj): if self._module.check_mode: return True, True - _label = self.gitlab_object.labels.get(var_obj.get('name')) + _label = self.gitlab_object.labels.get(var_obj.get("name")) - if var_obj.get('new_name') is not None: - _label.new_name = var_obj.get('new_name') + if var_obj.get("new_name") is not None: + _label.new_name = var_obj.get("new_name") - if var_obj.get('description') is not None: - _label.description = var_obj.get('description') - if var_obj.get('priority') is not None: - _label.priority = var_obj.get('priority') - if var_obj.get('color') is not None: - _label.color = var_obj.get('color') + if var_obj.get("description") is not None: + _label.description = var_obj.get("description") + if var_obj.get("priority") is not None: + _label.priority = var_obj.get("priority") + if var_obj.get("color") is not None: + _label.color = var_obj.get("color") # save returns None _label.save() @@ -283,7 +286,7 @@ def update_label(self, var_obj): def delete_label(self, var_obj): if self._module.check_mode: return True, True - _label = self.gitlab_object.labels.get(var_obj.get('name')) + _label = self.gitlab_object.labels.get(var_obj.get("name")) # delete returns None _label.delete() return True, _label.asdict() @@ -301,16 +304,16 @@ def compare(requested_labels, existing_labels, state): updated = list() added = list() - if state == 'present': + if state == "present": _existing_labels = list() for item in existing_labels: - _existing_labels.append({'name': item.get('name')}) + _existing_labels.append({"name": item.get("name")}) for var in requested_labels: if var in existing_labels: untouched.append(var) else: - compare_item = {'name': var.get('name')} + compare_item = {"name": var.get("name")} if compare_item in _existing_labels: updated.append(var) else: @@ -329,42 +332,42 @@ def native_python_main(this_gitlab, purge, requested_labels, state, module): # filter out and enrich before compare for item in requested_labels: # add defaults when not present - if item.get('description') is None: - item['description'] = "" - if item.get('new_name') is None: - item['new_name'] = None - if item.get('priority') is None: - 
item['priority'] = None + if item.get("description") is None: + item["description"] = "" + if item.get("new_name") is None: + item["new_name"] = None + if item.get("priority") is None: + item["priority"] = None # group label does not have priority, removing for comparison if this_gitlab.is_group_label: - item.pop('priority') + item.pop("priority") for item in labels_before: # remove field only from server - item.pop('id') - item.pop('description_html') - item.pop('text_color') - item.pop('subscribed') + item.pop("id") + item.pop("description_html") + item.pop("text_color") + item.pop("subscribed") # field present only when it is a project's label - if 'is_project_label' in item: - item.pop('is_project_label') - item['new_name'] = None + if "is_project_label" in item: + item.pop("is_project_label") + item["new_name"] = None - if state == 'present': + if state == "present": add_or_update = [x for x in requested_labels if x not in labels_before] for item in add_or_update: try: _rv, _obj = this_gitlab.create_label(item) if _rv: - return_value['added'].append(item) - return_obj['added'].append(_obj) + return_value["added"].append(item) + return_obj["added"].append(_obj) except Exception: # create raises exception with following error message when label already exists _rv, _obj = this_gitlab.update_label(item) if _rv: - return_value['updated'].append(item) - return_obj['updated'].append(_obj) + return_value["updated"].append(item) + return_obj["updated"].append(_obj) if purge: # re-fetch @@ -373,30 +376,30 @@ def native_python_main(this_gitlab, purge, requested_labels, state, module): for item in labels_before: _rv, _obj = this_gitlab.delete_label(item) if _rv: - return_value['removed'].append(item) - return_obj['removed'].append(_obj) + return_value["removed"].append(item) + return_obj["removed"].append(_obj) - elif state == 'absent': + elif state == "absent": if not purge: - _label_names_requested = [x['name'] for x in requested_labels] - remove_requested = [x for x in labels_before if x['name'] in _label_names_requested] + _label_names_requested = [x["name"] for x in requested_labels] + remove_requested = [x for x in labels_before if x["name"] in _label_names_requested] for item in remove_requested: _rv, _obj = this_gitlab.delete_label(item) if _rv: - return_value['removed'].append(item) - return_obj['removed'].append(_obj) + return_value["removed"].append(item) + return_obj["removed"].append(_obj) else: for item in labels_before: _rv, _obj = this_gitlab.delete_label(item) if _rv: - return_value['removed'].append(item) - return_obj['removed'].append(_obj) + return_value["removed"].append(item) + return_obj["removed"].append(_obj) if module.check_mode: _untouched, _updated, _added = compare(requested_labels, labels_before, state) - return_value = dict(added=_added, updated=_updated, removed=return_value['removed'], untouched=_untouched) + return_value = dict(added=_added, updated=_updated, removed=return_value["removed"], untouched=_untouched) - if any(return_value[x] for x in ['added', 'removed', 'updated']): + if any(return_value[x] for x in ["added", "removed", "updated"]): change = True labels_after = [x.asdict() for x in this_gitlab.list_all_labels()] @@ -408,48 +411,49 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update( - project=dict(type='str'), - group=dict(type='str'), - purge=dict(type='bool', default=False), - labels=dict(type='list', elements='dict', default=list(), - options=dict( - name=dict(type='str', 
required=True), - color=dict(type='str'), - description=dict(type='str'), - priority=dict(type='int'), - new_name=dict(type='str')) - ), - state=dict(type='str', default="present", choices=["absent", "present"]), + project=dict(type="str"), + group=dict(type="str"), + purge=dict(type="bool", default=False), + labels=dict( + type="list", + elements="dict", + default=list(), + options=dict( + name=dict(type="str", required=True), + color=dict(type="str"), + description=dict(type="str"), + priority=dict(type="int"), + new_name=dict(type="str"), + ), + ), + state=dict(type="str", default="present", choices=["absent", "present"]), ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ['project', 'group'], + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], + ["project", "group"], ], required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], - ['project', 'group'] + ["api_username", "api_password"], ], - supports_check_mode=True + required_one_of=[["api_username", "api_token", "api_oauth_token", "api_job_token"], ["project", "group"]], + supports_check_mode=True, ) ensure_gitlab_package(module) - gitlab_project = module.params['project'] - gitlab_group = module.params['group'] - purge = module.params['purge'] - label_list = module.params['labels'] - state = module.params['state'] + gitlab_project = module.params["project"] + gitlab_group = module.params["group"] + purge = module.params["purge"] + label_list = module.params["labels"] + state = module.params["state"] - gitlab_instance = gitlab_authentication(module, min_version='3.2.0') + gitlab_instance = gitlab_authentication(module, min_version="3.2.0") # find_project can return None, but the other must exist gitlab_project_id = find_project(gitlab_instance, gitlab_project) @@ -464,29 +468,30 @@ def main(): if gitlab_group and not gitlab_group_id: module.fail_json(msg=f"group '{gitlab_group}' not found.") - this_gitlab = GitlabLabels(module=module, gitlab_instance=gitlab_instance, group_id=gitlab_group_id, - project_id=gitlab_project_id) + this_gitlab = GitlabLabels( + module=module, gitlab_instance=gitlab_instance, group_id=gitlab_group_id, project_id=gitlab_project_id + ) - if state == 'present': - _existing_labels = [x.asdict()['name'] for x in this_gitlab.list_all_labels()] + if state == "present": + _existing_labels = [x.asdict()["name"] for x in this_gitlab.list_all_labels()] # color is mandatory when creating label, but it is optional when changing name or updating other fields - if any(x['color'] is None and x['new_name'] is None and x['name'] not in _existing_labels for x in label_list): - module.fail_json(msg='color parameter is required for new labels') + if any(x["color"] is None and x["new_name"] is None and x["name"] not in _existing_labels for x in label_list): + module.fail_json(msg="color parameter is required for new labels") change, raw_return_value, before, after, _obj = native_python_main(this_gitlab, purge, label_list, state, module) if not module.check_mode: - raw_return_value['untouched'] = [x for x in before if x in after] + raw_return_value["untouched"] = [x for x in before if x in after] - added = 
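# --- editor's note -----------------------------------------------------------
# The validation in gitlab_label's main() above: color may be omitted when
# renaming or updating an existing label, but is mandatory for a brand-new
# one. A standalone sketch with made-up sample data:
def labels_missing_color(labels, existing_names):
    return [
        label["name"] for label in labels
        if label.get("color") is None
        and label.get("new_name") is None
        and label["name"] not in existing_names
    ]

print(labels_missing_color([{"name": "bug"}, {"name": "ops", "color": "#112233"}], {"ops"}))
# -> ['bug']  (the module would fail_json here)
# ------------------------------------------------------------------------------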
[x.get('name') for x in raw_return_value['added']] - updated = [x.get('name') for x in raw_return_value['updated']] - removed = [x.get('name') for x in raw_return_value['removed']] - untouched = [x.get('name') for x in raw_return_value['untouched']] + added = [x.get("name") for x in raw_return_value["added"]] + updated = [x.get("name") for x in raw_return_value["updated"]] + removed = [x.get("name") for x in raw_return_value["removed"]] + untouched = [x.get("name") for x in raw_return_value["untouched"]] return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched) module.exit_json(changed=change, labels=return_value, labels_obj=_obj) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_merge_request.py b/plugins/modules/gitlab_merge_request.py index 7b48d285390..d3ba81ecce5 100644 --- a/plugins/modules/gitlab_merge_request.py +++ b/plugins/modules/gitlab_merge_request.py @@ -148,36 +148,42 @@ from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, gitlab, find_project + auth_argument_spec, + gitlab_authentication, + gitlab, + find_project, ) class GitlabMergeRequest: - def __init__(self, module, project, gitlab_instance): self._gitlab = gitlab_instance self._module = module self.project = project - ''' + """ @param branch Name of the branch - ''' + """ + def get_branch(self, branch): try: return self.project.branches.get(branch) except gitlab.exceptions.GitlabGetError as e: self._module.fail_json(msg=f"Failed to get the branch: {e}") - ''' + """ @param title Title of the Merge Request @param source_branch Merge Request's source branch @param target_branch Merge Request's target branch @param state_filter Merge Request's state to filter on - ''' + """ + def get_mr(self, title, source_branch, target_branch, state_filter): mrs = [] try: - mrs = self.project.mergerequests.list(search=title, source_branch=source_branch, target_branch=target_branch, state=state_filter) + mrs = self.project.mergerequests.list( + search=title, source_branch=source_branch, target_branch=target_branch, state=state_filter + ) except gitlab.exceptions.GitlabGetError as e: self._module.fail_json(msg=f"Failed to list the Merge Request: {e}") @@ -189,9 +195,10 @@ def get_mr(self, title, source_branch, target_branch, state_filter): except gitlab.exceptions.GitlabGetError as e: self._module.fail_json(msg=f"Failed to get the Merge Request: {e}") - ''' + """ @param username Name of the user - ''' + """ + def get_user(self, username): users = [] try: @@ -206,15 +213,17 @@ def get_user(self, username): else: return users[0] - ''' + """ @param users List of usernames - ''' + """ + def get_user_ids(self, users): return [self.get_user(user).id for user in users] - ''' + """ @param options Options of the Merge Request - ''' + """ + def create_mr(self, options): if self._module.check_mode: self._module.exit_json(changed=True, msg=f"Successfully created the Merge Request {options['title']}") @@ -224,9 +233,10 @@ def create_mr(self, options): except gitlab.exceptions.GitlabCreateError as e: self._module.fail_json(msg=f"Failed to create Merge Request: {e}") - ''' + """ @param mr Merge Request object to delete - ''' + """ + def delete_mr(self, mr): if self._module.check_mode: self._module.exit_json(changed=True, msg=f"Successfully deleted the Merge Request {mr['title']}") @@ -236,9 +246,10 @@ def 
delete_mr(self, mr): except gitlab.exceptions.GitlabDeleteError as e: self._module.fail_json(msg=f"Failed to delete Merge Request: {e}") - ''' + """ @param mr Merge Request object to update - ''' + """ + def update_mr(self, mr, options): if self._module.check_mode: self._module.exit_json(changed=True, msg=f"Successfully updated the Merge Request {mr['title']}") @@ -248,26 +259,27 @@ def update_mr(self, mr, options): except gitlab.exceptions.GitlabUpdateError as e: self._module.fail_json(msg=f"Failed to update Merge Request: {e}") - ''' + """ @param mr Merge Request object to evaluate @param options New options to update MR with - ''' + """ + def mr_has_changed(self, mr, options): for key, value in options.items(): if value is not None: # see https://gitlab.com/gitlab-org/gitlab-foss/-/issues/27355 - if key == 'remove_source_branch': - key = 'force_remove_source_branch' + if key == "remove_source_branch": + key = "force_remove_source_branch" - if key == 'assignee_ids': - if value != sorted([user["id"] for user in getattr(mr, 'assignees')]): + if key == "assignee_ids": + if value != sorted([user["id"] for user in getattr(mr, "assignees")]): return True - elif key == 'reviewer_ids': - if value != sorted([user["id"] for user in getattr(mr, 'reviewers')]): + elif key == "reviewer_ids": + if value != sorted([user["id"] for user in getattr(mr, "reviewers")]): return True - elif key == 'labels': + elif key == "labels": if value != sorted(getattr(mr, key)): return True @@ -281,64 +293,62 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update( - project=dict(type='str', required=True), - source_branch=dict(type='str', required=True), - target_branch=dict(type='str', required=True), - title=dict(type='str', required=True), - description=dict(type='str'), - labels=dict(type='str', default=""), - description_path=dict(type='path'), - remove_source_branch=dict(type='bool', default=False), - state_filter=dict(type='str', default="opened", choices=["opened", "closed", "locked", "merged"]), - assignee_ids=dict(type='str'), - reviewer_ids=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), + project=dict(type="str", required=True), + source_branch=dict(type="str", required=True), + target_branch=dict(type="str", required=True), + title=dict(type="str", required=True), + description=dict(type="str"), + labels=dict(type="str", default=""), + description_path=dict(type="path"), + remove_source_branch=dict(type="bool", default=False), + state_filter=dict(type="str", default="opened", choices=["opened", "closed", "locked", "merged"]), + assignee_ids=dict(type="str"), + reviewer_ids=dict(type="str"), + state=dict(type="str", default="present", choices=["absent", "present"]), ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ['description', 'description_path'], + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], + ["description", "description_path"], ], required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ["api_username", "api_password"], ], + required_one_of=[["api_username", 
"api_token", "api_oauth_token", "api_job_token"]], required_if=[ - ['state', 'present', ['source_branch', 'target_branch', 'title'], True], - ['state', 'absent', ['source_branch', 'target_branch', 'title'], True], + ["state", "present", ["source_branch", "target_branch", "title"], True], + ["state", "absent", ["source_branch", "target_branch", "title"], True], ], - supports_check_mode=True + supports_check_mode=True, ) # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) - project = module.params['project'] - source_branch = module.params['source_branch'] - target_branch = module.params['target_branch'] - title = module.params['title'] - description = module.params['description'] - labels = module.params['labels'] - description_path = module.params['description_path'] - remove_source_branch = module.params['remove_source_branch'] - state_filter = module.params['state_filter'] - assignee_ids = module.params['assignee_ids'] - reviewer_ids = module.params['reviewer_ids'] - state = module.params['state'] + project = module.params["project"] + source_branch = module.params["source_branch"] + target_branch = module.params["target_branch"] + title = module.params["title"] + description = module.params["description"] + labels = module.params["labels"] + description_path = module.params["description_path"] + remove_source_branch = module.params["remove_source_branch"] + state_filter = module.params["state_filter"] + assignee_ids = module.params["assignee_ids"] + reviewer_ids = module.params["reviewer_ids"] + state = module.params["state"] gitlab_version = gitlab.__version__ - if LooseVersion(gitlab_version) < LooseVersion('2.3.0'): + if LooseVersion(gitlab_version) < LooseVersion("2.3.0"): module.fail_json( msg=f"community.general.gitlab_merge_request requires python-gitlab Python module >= 2.3.0 (installed version: [{gitlab_version}])." - " Please upgrade python-gitlab to version 2.3.0 or above." + " Please upgrade python-gitlab to version 2.3.0 or above." 
) this_project = find_project(gitlab_instance, project) @@ -360,10 +370,10 @@ def main(): if state == "present": if description_path: try: - with open(description_path, 'rb') as f: - description = to_text(f.read(), errors='surrogate_or_strict') + with open(description_path, "rb") as f: + description = to_text(f.read(), errors="surrogate_or_strict") except IOError as e: - module.fail_json(msg=f'Cannot open {description_path}: {e}') + module.fail_json(msg=f"Cannot open {description_path}: {e}") # sorting necessary in order to properly detect changes, as we don't want to get false positive # results due to differences in ids ordering; see `mr_has_changed()` @@ -386,30 +396,34 @@ def main(): mr = this_gitlab.create_mr(options) module.exit_json( - changed=True, msg=f"Created the Merge Request {title} from branch {source_branch} to branch {target_branch}.", - mr=mr.asdict() + changed=True, + msg=f"Created the Merge Request {title} from branch {source_branch} to branch {target_branch}.", + mr=mr.asdict(), ) else: if this_gitlab.mr_has_changed(this_mr, options): mr = this_gitlab.update_mr(this_mr, options) module.exit_json( - changed=True, msg=f"Merge Request {title} from branch {source_branch} to branch {target_branch} updated.", - mr=mr + changed=True, + msg=f"Merge Request {title} from branch {source_branch} to branch {target_branch} updated.", + mr=mr, ) else: module.exit_json( - changed=False, msg=f"Merge Request {title} from branch {source_branch} to branch {target_branch} already exist", - mr=this_mr.asdict() + changed=False, + msg=f"Merge Request {title} from branch {source_branch} to branch {target_branch} already exists", + mr=this_mr.asdict(), ) elif this_mr and state == "absent": mr = this_gitlab.delete_mr(this_mr) module.exit_json( - changed=True, msg=f"Merge Request {title} from branch {source_branch} to branch {target_branch} deleted.", - mr=mr + changed=True, + msg=f"Merge Request {title} from branch {source_branch} to branch {target_branch} deleted.", + mr=mr, ) else: module.exit_json(changed=False, msg="No changes are needed.", mr=this_mr.asdict()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_milestone.py b/plugins/modules/gitlab_milestone.py index 616cdbde5bf..07de63fd88d 100644 --- a/plugins/modules/gitlab_milestone.py +++ b/plugins/modules/gitlab_milestone.py @@ -205,13 +205,16 @@ from ansible.module_utils.api import basic_auth_argument_spec from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, ensure_gitlab_package, find_group, find_project + auth_argument_spec, + gitlab_authentication, + ensure_gitlab_package, + find_group, + find_project, ) from datetime import datetime class GitlabMilestones: - def __init__(self, module, gitlab_instance, group_id, project_id): self._gitlab = gitlab_instance self.gitlab_object = group_id if group_id else project_id @@ -233,17 +236,17 @@ def create_milestone(self, var_obj): return True, True var = { - "title": var_obj.get('title'), + "title": var_obj.get("title"), } - if var_obj.get('description') is not None: - var["description"] = var_obj.get('description') + if var_obj.get("description") is not None: + var["description"] = var_obj.get("description") - if var_obj.get('start_date') is not None: - var["start_date"] = self.check_date(var_obj.get('start_date')) + if var_obj.get("start_date") is not None: + var["start_date"] = self.check_date(var_obj.get("start_date")) - if var_obj.get('due_date') is not None: -
var["due_date"] = self.check_date(var_obj.get('due_date')) + if var_obj.get("due_date") is not None: + var["due_date"] = self.check_date(var_obj.get("due_date")) _obj = self.gitlab_object.milestones.create(var) return True, _obj.asdict() @@ -251,16 +254,16 @@ def create_milestone(self, var_obj): def update_milestone(self, var_obj): if self._module.check_mode: return True, True - _milestone = self.gitlab_object.milestones.get(self.get_milestone_id(var_obj.get('title'))) + _milestone = self.gitlab_object.milestones.get(self.get_milestone_id(var_obj.get("title"))) - if var_obj.get('description') is not None: - _milestone.description = var_obj.get('description') + if var_obj.get("description") is not None: + _milestone.description = var_obj.get("description") - if var_obj.get('start_date') is not None: - _milestone.start_date = var_obj.get('start_date') + if var_obj.get("start_date") is not None: + _milestone.start_date = var_obj.get("start_date") - if var_obj.get('due_date') is not None: - _milestone.due_date = var_obj.get('due_date') + if var_obj.get("due_date") is not None: + _milestone.due_date = var_obj.get("due_date") # save returns None _milestone.save() @@ -276,7 +279,7 @@ def get_milestone_id(self, _title): def check_date(self, _date): try: - datetime.strptime(_date, '%Y-%m-%d') + datetime.strptime(_date, "%Y-%m-%d") except ValueError: self._module.fail_json(msg=f"milestone's date '{_date}' not in correct format.") return _date @@ -284,7 +287,7 @@ def check_date(self, _date): def delete_milestone(self, var_obj): if self._module.check_mode: return True, True - _milestone = self.gitlab_object.milestones.get(self.get_milestone_id(var_obj.get('title'))) + _milestone = self.gitlab_object.milestones.get(self.get_milestone_id(var_obj.get("title"))) # delete returns None _milestone.delete() return True, _milestone.asdict() @@ -302,16 +305,16 @@ def compare(requested_milestones, existing_milestones, state): updated = list() added = list() - if state == 'present': + if state == "present": _existing_milestones = list() for item in existing_milestones: - _existing_milestones.append({'title': item.get('title')}) + _existing_milestones.append({"title": item.get("title")}) for var in requested_milestones: if var in existing_milestones: untouched.append(var) else: - compare_item = {'title': var.get('title')} + compare_item = {"title": var.get("title")} if compare_item in _existing_milestones: updated.append(var) else: @@ -330,42 +333,42 @@ def native_python_main(this_gitlab, purge, requested_milestones, state, module): # filter out and enrich before compare for item in requested_milestones: # add defaults when not present - if item.get('description') is None: - item['description'] = "" - if item.get('due_date') is None: - item['due_date'] = None - if item.get('start_date') is None: - item['start_date'] = None + if item.get("description") is None: + item["description"] = "" + if item.get("due_date") is None: + item["due_date"] = None + if item.get("start_date") is None: + item["start_date"] = None for item in milestones_before: # remove field only from server - item.pop('id') - item.pop('iid') - item.pop('created_at') - item.pop('expired') - item.pop('state') - item.pop('updated_at') - item.pop('web_url') + item.pop("id") + item.pop("iid") + item.pop("created_at") + item.pop("expired") + item.pop("state") + item.pop("updated_at") + item.pop("web_url") # group milestone has group_id, while project has project_id - if 'group_id' in item: - item.pop('group_id') - if 'project_id' in item: - 
item.pop('project_id') + if "group_id" in item: + item.pop("group_id") + if "project_id" in item: + item.pop("project_id") - if state == 'present': + if state == "present": add_or_update = [x for x in requested_milestones if x not in milestones_before] for item in add_or_update: try: _rv, _obj = this_gitlab.create_milestone(item) if _rv: - return_value['added'].append(item) - return_obj['added'].append(_obj) + return_value["added"].append(item) + return_obj["added"].append(_obj) except Exception: # create raises exception with following error message when milestone already exists _rv, _obj = this_gitlab.update_milestone(item) if _rv: - return_value['updated'].append(item) - return_obj['updated'].append(_obj) + return_value["updated"].append(item) + return_obj["updated"].append(_obj) if purge: # re-fetch @@ -374,30 +377,30 @@ def native_python_main(this_gitlab, purge, requested_milestones, state, module): for item in milestones_before: _rv, _obj = this_gitlab.delete_milestone(item) if _rv: - return_value['removed'].append(item) - return_obj['removed'].append(_obj) + return_value["removed"].append(item) + return_obj["removed"].append(_obj) - elif state == 'absent': + elif state == "absent": if not purge: - _milestone_titles_requested = [x['title'] for x in requested_milestones] - remove_requested = [x for x in milestones_before if x['title'] in _milestone_titles_requested] + _milestone_titles_requested = [x["title"] for x in requested_milestones] + remove_requested = [x for x in milestones_before if x["title"] in _milestone_titles_requested] for item in remove_requested: _rv, _obj = this_gitlab.delete_milestone(item) if _rv: - return_value['removed'].append(item) - return_obj['removed'].append(_obj) + return_value["removed"].append(item) + return_obj["removed"].append(_obj) else: for item in milestones_before: _rv, _obj = this_gitlab.delete_milestone(item) if _rv: - return_value['removed'].append(item) - return_obj['removed'].append(_obj) + return_value["removed"].append(item) + return_obj["removed"].append(_obj) if module.check_mode: _untouched, _updated, _added = compare(requested_milestones, milestones_before, state) - return_value = dict(added=_added, updated=_updated, removed=return_value['removed'], untouched=_untouched) + return_value = dict(added=_added, updated=_updated, removed=return_value["removed"], untouched=_untouched) - if any(return_value[x] for x in ['added', 'removed', 'updated']): + if any(return_value[x] for x in ["added", "removed", "updated"]): change = True milestones_after = [x.asdict() for x in this_gitlab.list_all_milestones()] @@ -409,47 +412,48 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update( - project=dict(type='str'), - group=dict(type='str'), - purge=dict(type='bool', default=False), - milestones=dict(type='list', elements='dict', default=[], - options=dict( - title=dict(type='str', required=True), - description=dict(type='str'), - due_date=dict(type='str'), - start_date=dict(type='str')) - ), - state=dict(type='str', default="present", choices=["absent", "present"]), + project=dict(type="str"), + group=dict(type="str"), + purge=dict(type="bool", default=False), + milestones=dict( + type="list", + elements="dict", + default=[], + options=dict( + title=dict(type="str", required=True), + description=dict(type="str"), + due_date=dict(type="str"), + start_date=dict(type="str"), + ), + ), + state=dict(type="str", default="present", choices=["absent", "present"]), ) module = AnsibleModule( 
argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ['project', 'group'], + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], + ["project", "group"], ], required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], - ['project', 'group'] + ["api_username", "api_password"], ], - supports_check_mode=True + required_one_of=[["api_username", "api_token", "api_oauth_token", "api_job_token"], ["project", "group"]], + supports_check_mode=True, ) ensure_gitlab_package(module) - gitlab_project = module.params['project'] - gitlab_group = module.params['group'] - purge = module.params['purge'] - milestone_list = module.params['milestones'] - state = module.params['state'] + gitlab_project = module.params["project"] + gitlab_group = module.params["group"] + purge = module.params["purge"] + milestone_list = module.params["milestones"] + state = module.params["state"] - gitlab_instance = gitlab_authentication(module, min_version='3.2.0') + gitlab_instance = gitlab_authentication(module, min_version="3.2.0") # find_project can return None, but the other must exist gitlab_project_id = find_project(gitlab_instance, gitlab_project) @@ -464,23 +468,25 @@ def main(): if gitlab_group and not gitlab_group_id: module.fail_json(msg=f"group '{gitlab_group}' not found.") - this_gitlab = GitlabMilestones(module=module, gitlab_instance=gitlab_instance, group_id=gitlab_group_id, - project_id=gitlab_project_id) + this_gitlab = GitlabMilestones( + module=module, gitlab_instance=gitlab_instance, group_id=gitlab_group_id, project_id=gitlab_project_id + ) - change, raw_return_value, before, after, _obj = native_python_main(this_gitlab, purge, milestone_list, state, - module) + change, raw_return_value, before, after, _obj = native_python_main( + this_gitlab, purge, milestone_list, state, module + ) if not module.check_mode: - raw_return_value['untouched'] = [x for x in before if x in after] + raw_return_value["untouched"] = [x for x in before if x in after] - added = [x.get('title') for x in raw_return_value['added']] - updated = [x.get('title') for x in raw_return_value['updated']] - removed = [x.get('title') for x in raw_return_value['removed']] - untouched = [x.get('title') for x in raw_return_value['untouched']] + added = [x.get("title") for x in raw_return_value["added"]] + updated = [x.get("title") for x in raw_return_value["updated"]] + removed = [x.get("title") for x in raw_return_value["removed"]] + untouched = [x.get("title") for x in raw_return_value["untouched"]] return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched) module.exit_json(changed=change, milestones=return_value, milestones_obj=_obj) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_project.py b/plugins/modules/gitlab_project.py index 744a360f703..bec0c28f6df 100644 --- a/plugins/modules/gitlab_project.py +++ b/plugins/modules/gitlab_project.py @@ -411,7 +411,11 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, find_group, find_project, gitlab_authentication, gitlab + 
auth_argument_spec, + find_group, + find_project, + gitlab_authentication, + gitlab, ) from ansible_collections.community.general.plugins.module_utils.version import LooseVersion @@ -423,83 +427,88 @@ def __init__(self, module, gitlab_instance): self._gitlab = gitlab_instance self.project_object = None - ''' + """ @param project_name Name of the project @param namespace Namespace Object (User or Group) @param options Options of the project - ''' + """ + def create_or_update_project(self, module, project_name, namespace, options): changed = False project_options = { - 'allow_merge_on_skipped_pipeline': options['allow_merge_on_skipped_pipeline'], - 'builds_access_level': options['builds_access_level'], - 'build_timeout': options['build_timeout'], - 'ci_config_path': options['ci_config_path'], - 'container_expiration_policy': options['container_expiration_policy'], - 'container_registry_access_level': options['container_registry_access_level'], - 'description': options['description'], - 'environments_access_level': options['environments_access_level'], - 'feature_flags_access_level': options['feature_flags_access_level'], - 'forking_access_level': options['forking_access_level'], - 'infrastructure_access_level': options['infrastructure_access_level'], - 'issues_access_level': options['issues_access_level'], - 'issues_enabled': options['issues_enabled'], - 'lfs_enabled': options['lfs_enabled'], - 'merge_method': options['merge_method'], - 'merge_requests_enabled': options['merge_requests_enabled'], - 'model_registry_access_level': options['model_registry_access_level'], - 'monitor_access_level': options['monitor_access_level'], - 'name': project_name, - 'only_allow_merge_if_all_discussions_are_resolved': options['only_allow_merge_if_all_discussions_are_resolved'], - 'only_allow_merge_if_pipeline_succeeds': options['only_allow_merge_if_pipeline_succeeds'], - 'packages_enabled': options['packages_enabled'], - 'pages_access_level': options['pages_access_level'], - 'releases_access_level': options['releases_access_level'], - 'remove_source_branch_after_merge': options['remove_source_branch_after_merge'], - 'repository_access_level': options['repository_access_level'], - 'security_and_compliance_access_level': options['security_and_compliance_access_level'], - 'service_desk_enabled': options['service_desk_enabled'], - 'shared_runners_enabled': options['shared_runners_enabled'], - 'snippets_enabled': options['snippets_enabled'], - 'squash_option': options['squash_option'], - 'visibility': options['visibility'], - 'wiki_enabled': options['wiki_enabled'], + "allow_merge_on_skipped_pipeline": options["allow_merge_on_skipped_pipeline"], + "builds_access_level": options["builds_access_level"], + "build_timeout": options["build_timeout"], + "ci_config_path": options["ci_config_path"], + "container_expiration_policy": options["container_expiration_policy"], + "container_registry_access_level": options["container_registry_access_level"], + "description": options["description"], + "environments_access_level": options["environments_access_level"], + "feature_flags_access_level": options["feature_flags_access_level"], + "forking_access_level": options["forking_access_level"], + "infrastructure_access_level": options["infrastructure_access_level"], + "issues_access_level": options["issues_access_level"], + "issues_enabled": options["issues_enabled"], + "lfs_enabled": options["lfs_enabled"], + "merge_method": options["merge_method"], + "merge_requests_enabled": options["merge_requests_enabled"], + 
"model_registry_access_level": options["model_registry_access_level"], + "monitor_access_level": options["monitor_access_level"], + "name": project_name, + "only_allow_merge_if_all_discussions_are_resolved": options[ + "only_allow_merge_if_all_discussions_are_resolved" + ], + "only_allow_merge_if_pipeline_succeeds": options["only_allow_merge_if_pipeline_succeeds"], + "packages_enabled": options["packages_enabled"], + "pages_access_level": options["pages_access_level"], + "releases_access_level": options["releases_access_level"], + "remove_source_branch_after_merge": options["remove_source_branch_after_merge"], + "repository_access_level": options["repository_access_level"], + "security_and_compliance_access_level": options["security_and_compliance_access_level"], + "service_desk_enabled": options["service_desk_enabled"], + "shared_runners_enabled": options["shared_runners_enabled"], + "snippets_enabled": options["snippets_enabled"], + "squash_option": options["squash_option"], + "visibility": options["visibility"], + "wiki_enabled": options["wiki_enabled"], } # topics was introduced on gitlab >=14 and replace tag_list. We get current gitlab version # and check if less than 14. If yes we use tag_list instead topics if LooseVersion(self._gitlab.version()[0]) < LooseVersion("14"): - project_options['tag_list'] = options['topics'] + project_options["tag_list"] = options["topics"] else: - project_options['topics'] = options['topics'] + project_options["topics"] = options["topics"] # Because we have already call userExists in main() if self.project_object is None: - if options['default_branch'] and not options['initialize_with_readme']: + if options["default_branch"] and not options["initialize_with_readme"]: module.fail_json(msg="Param default_branch needs param initialize_with_readme set to true") - project_options.update({ - 'path': options['path'], - 'import_url': options['import_url'], - }) - if options['initialize_with_readme']: - project_options['initialize_with_readme'] = options['initialize_with_readme'] - if options['default_branch']: - project_options['default_branch'] = options['default_branch'] + project_options.update( + { + "path": options["path"], + "import_url": options["import_url"], + } + ) + if options["initialize_with_readme"]: + project_options["initialize_with_readme"] = options["initialize_with_readme"] + if options["default_branch"]: + project_options["default_branch"] = options["default_branch"] project_options = self.get_options_with_value(project_options) project = self.create_project(namespace, project_options) # add avatar to project - if options['avatar_path']: + if options["avatar_path"]: try: - project.avatar = open(options['avatar_path'], 'rb') + project.avatar = open(options["avatar_path"], "rb") except IOError as e: self._module.fail_json(msg=f"Cannot open {options['avatar_path']}: {e}") changed = True else: - if options['default_branch']: - project_options['default_branch'] = options['default_branch'] + if options["default_branch"]: + project_options["default_branch"] = options["default_branch"] changed, project = self.update_project(self.project_object, project_options) self.project_object = project @@ -514,53 +523,56 @@ def create_or_update_project(self, module, project_name, namespace, options): return True return False - ''' + """ @param namespace Namespace Object (User or Group) @param arguments Attributes of the project - ''' + """ + def create_project(self, namespace, arguments): if self._module.check_mode: return True - arguments['namespace_id'] = 
namespace.id - if 'container_expiration_policy' in arguments: - arguments['container_expiration_policy_attributes'] = arguments['container_expiration_policy'] + arguments["namespace_id"] = namespace.id + if "container_expiration_policy" in arguments: + arguments["container_expiration_policy_attributes"] = arguments["container_expiration_policy"] try: project = self._gitlab.projects.create(arguments) - except (gitlab.exceptions.GitlabCreateError) as e: + except gitlab.exceptions.GitlabCreateError as e: self._module.fail_json(msg=f"Failed to create project: {e} ") return project - ''' + """ @param arguments Attributes of the project - ''' + """ + def get_options_with_value(self, arguments): ret_arguments = {k: v for k, v in arguments.items() if v is not None} return ret_arguments - ''' + """ @param project Project Object @param arguments Attributes of the project - ''' + """ + def update_project(self, project, arguments): changed = False for arg_key, arg_value in arguments.items(): if arg_value is not None: if getattr(project, arg_key, None) != arg_value: - if arg_key == 'container_expiration_policy': + if arg_key == "container_expiration_policy": old_val = getattr(project, arg_key, {}) final_val = {key: value for key, value in arg_value.items() if value is not None} - if final_val.get('older_than') == '0d': - final_val['older_than'] = None - if final_val.get('keep_n') == 0: - final_val['keep_n'] = None + if final_val.get("older_than") == "0d": + final_val["older_than"] = None + if final_val.get("keep_n") == 0: + final_val["keep_n"] = None if all(old_val.get(key) == value for key, value in final_val.items()): continue - setattr(project, 'container_expiration_policy_attributes', final_val) + setattr(project, "container_expiration_policy_attributes", final_val) else: setattr(project, arg_key, arg_value) changed = True @@ -575,10 +587,11 @@ def delete_project(self): return project.delete() - ''' + """ @param namespace User/Group object @param name Name of the project - ''' + """ + def exists_project(self, namespace, path): # When project exists, object will be stored in self.project_object. 
project = find_project(self._gitlab, f"{namespace.full_path}/{path}") @@ -591,123 +604,128 @@ def exists_project(self, namespace, path): def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - allow_merge_on_skipped_pipeline=dict(type='bool'), - avatar_path=dict(type='path'), - builds_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - build_timeout=dict(type='int'), - ci_config_path=dict(type='str'), - container_expiration_policy=dict(type='dict', options=dict( - cadence=dict(type='str', choices=["1d", "7d", "14d", "1month", "3month"]), - enabled=dict(type='bool'), - keep_n=dict(type='int', choices=[0, 1, 5, 10, 25, 50, 100]), - older_than=dict(type='str', choices=["0d", "7d", "14d", "30d", "90d"]), - name_regex=dict(type='str'), - name_regex_keep=dict(type='str'), - )), - container_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - default_branch=dict(type='str'), - description=dict(type='str'), - environments_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - feature_flags_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - forking_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - group=dict(type='str'), - import_url=dict(type='str'), - infrastructure_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - initialize_with_readme=dict(type='bool', default=False), - issues_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - issues_enabled=dict(type='bool', default=True), - lfs_enabled=dict(default=False, type='bool'), - merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]), - merge_requests_enabled=dict(type='bool', default=True), - model_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - monitor_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - name=dict(type='str', required=True), - only_allow_merge_if_all_discussions_are_resolved=dict(type='bool'), - only_allow_merge_if_pipeline_succeeds=dict(type='bool'), - packages_enabled=dict(type='bool'), - pages_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - path=dict(type='str'), - releases_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - remove_source_branch_after_merge=dict(type='bool'), - repository_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - security_and_compliance_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - service_desk_enabled=dict(type='bool'), - shared_runners_enabled=dict(type='bool'), - snippets_enabled=dict(default=True, type='bool'), - squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']), - state=dict(type='str', default="present", choices=["absent", "present"]), - topics=dict(type='list', elements='str'), - username=dict(type='str'), - visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]), - wiki_enabled=dict(type='bool', default=True), - )) + argument_spec.update( + dict( + allow_merge_on_skipped_pipeline=dict(type="bool"), + avatar_path=dict(type="path"), + builds_access_level=dict(type="str", choices=["private", "disabled", "enabled"]), + build_timeout=dict(type="int"), + ci_config_path=dict(type="str"), + container_expiration_policy=dict( + type="dict", + 
options=dict( + cadence=dict(type="str", choices=["1d", "7d", "14d", "1month", "3month"]), + enabled=dict(type="bool"), + keep_n=dict(type="int", choices=[0, 1, 5, 10, 25, 50, 100]), + older_than=dict(type="str", choices=["0d", "7d", "14d", "30d", "90d"]), + name_regex=dict(type="str"), + name_regex_keep=dict(type="str"), + ), + ), + container_registry_access_level=dict(type="str", choices=["private", "disabled", "enabled"]), + default_branch=dict(type="str"), + description=dict(type="str"), + environments_access_level=dict(type="str", choices=["private", "disabled", "enabled"]), + feature_flags_access_level=dict(type="str", choices=["private", "disabled", "enabled"]), + forking_access_level=dict(type="str", choices=["private", "disabled", "enabled"]), + group=dict(type="str"), + import_url=dict(type="str"), + infrastructure_access_level=dict(type="str", choices=["private", "disabled", "enabled"]), + initialize_with_readme=dict(type="bool", default=False), + issues_access_level=dict(type="str", choices=["private", "disabled", "enabled"]), + issues_enabled=dict(type="bool", default=True), + lfs_enabled=dict(default=False, type="bool"), + merge_method=dict(type="str", default="merge", choices=["merge", "rebase_merge", "ff"]), + merge_requests_enabled=dict(type="bool", default=True), + model_registry_access_level=dict(type="str", choices=["private", "disabled", "enabled"]), + monitor_access_level=dict(type="str", choices=["private", "disabled", "enabled"]), + name=dict(type="str", required=True), + only_allow_merge_if_all_discussions_are_resolved=dict(type="bool"), + only_allow_merge_if_pipeline_succeeds=dict(type="bool"), + packages_enabled=dict(type="bool"), + pages_access_level=dict(type="str", choices=["private", "disabled", "enabled"]), + path=dict(type="str"), + releases_access_level=dict(type="str", choices=["private", "disabled", "enabled"]), + remove_source_branch_after_merge=dict(type="bool"), + repository_access_level=dict(type="str", choices=["private", "disabled", "enabled"]), + security_and_compliance_access_level=dict(type="str", choices=["private", "disabled", "enabled"]), + service_desk_enabled=dict(type="bool"), + shared_runners_enabled=dict(type="bool"), + snippets_enabled=dict(default=True, type="bool"), + squash_option=dict(type="str", choices=["never", "always", "default_off", "default_on"]), + state=dict(type="str", default="present", choices=["absent", "present"]), + topics=dict(type="list", elements="str"), + username=dict(type="str"), + visibility=dict( + type="str", default="private", choices=["internal", "private", "public"], aliases=["visibility_level"] + ), + wiki_enabled=dict(type="bool", default=True), + ) + ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ['group', 'username'], - ['issues_access_level', 'issues_enabled'], + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], + ["group", "username"], + ["issues_access_level", "issues_enabled"], ], required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ["api_username", "api_password"], ], + required_one_of=[["api_username", "api_token", "api_oauth_token", "api_job_token"]], 
supports_check_mode=True, ) # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) - allow_merge_on_skipped_pipeline = module.params['allow_merge_on_skipped_pipeline'] - avatar_path = module.params['avatar_path'] - builds_access_level = module.params['builds_access_level'] - build_timeout = module.params['build_timeout'] - ci_config_path = module.params['ci_config_path'] - container_expiration_policy = module.params['container_expiration_policy'] - container_registry_access_level = module.params['container_registry_access_level'] - default_branch = module.params['default_branch'] - environments_access_level = module.params['environments_access_level'] - feature_flags_access_level = module.params['feature_flags_access_level'] - forking_access_level = module.params['forking_access_level'] - group_identifier = module.params['group'] - import_url = module.params['import_url'] - infrastructure_access_level = module.params['infrastructure_access_level'] - initialize_with_readme = module.params['initialize_with_readme'] - issues_access_level = module.params['issues_access_level'] - issues_enabled = module.params['issues_enabled'] - lfs_enabled = module.params['lfs_enabled'] - merge_method = module.params['merge_method'] - merge_requests_enabled = module.params['merge_requests_enabled'] - model_registry_access_level = module.params['model_registry_access_level'] - monitor_access_level = module.params['monitor_access_level'] - only_allow_merge_if_all_discussions_are_resolved = module.params['only_allow_merge_if_all_discussions_are_resolved'] - only_allow_merge_if_pipeline_succeeds = module.params['only_allow_merge_if_pipeline_succeeds'] - packages_enabled = module.params['packages_enabled'] - pages_access_level = module.params['pages_access_level'] - project_description = module.params['description'] - project_name = module.params['name'] - project_path = module.params['path'] - releases_access_level = module.params['releases_access_level'] - remove_source_branch_after_merge = module.params['remove_source_branch_after_merge'] - repository_access_level = module.params['repository_access_level'] - security_and_compliance_access_level = module.params['security_and_compliance_access_level'] - service_desk_enabled = module.params['service_desk_enabled'] - shared_runners_enabled = module.params['shared_runners_enabled'] - snippets_enabled = module.params['snippets_enabled'] - squash_option = module.params['squash_option'] - state = module.params['state'] - topics = module.params['topics'] - username = module.params['username'] - visibility = module.params['visibility'] - wiki_enabled = module.params['wiki_enabled'] + allow_merge_on_skipped_pipeline = module.params["allow_merge_on_skipped_pipeline"] + avatar_path = module.params["avatar_path"] + builds_access_level = module.params["builds_access_level"] + build_timeout = module.params["build_timeout"] + ci_config_path = module.params["ci_config_path"] + container_expiration_policy = module.params["container_expiration_policy"] + container_registry_access_level = module.params["container_registry_access_level"] + default_branch = module.params["default_branch"] + environments_access_level = module.params["environments_access_level"] + feature_flags_access_level = module.params["feature_flags_access_level"] + forking_access_level = module.params["forking_access_level"] + group_identifier = module.params["group"] + import_url = module.params["import_url"] + infrastructure_access_level = 
module.params["infrastructure_access_level"] + initialize_with_readme = module.params["initialize_with_readme"] + issues_access_level = module.params["issues_access_level"] + issues_enabled = module.params["issues_enabled"] + lfs_enabled = module.params["lfs_enabled"] + merge_method = module.params["merge_method"] + merge_requests_enabled = module.params["merge_requests_enabled"] + model_registry_access_level = module.params["model_registry_access_level"] + monitor_access_level = module.params["monitor_access_level"] + only_allow_merge_if_all_discussions_are_resolved = module.params["only_allow_merge_if_all_discussions_are_resolved"] + only_allow_merge_if_pipeline_succeeds = module.params["only_allow_merge_if_pipeline_succeeds"] + packages_enabled = module.params["packages_enabled"] + pages_access_level = module.params["pages_access_level"] + project_description = module.params["description"] + project_name = module.params["name"] + project_path = module.params["path"] + releases_access_level = module.params["releases_access_level"] + remove_source_branch_after_merge = module.params["remove_source_branch_after_merge"] + repository_access_level = module.params["repository_access_level"] + security_and_compliance_access_level = module.params["security_and_compliance_access_level"] + service_desk_enabled = module.params["service_desk_enabled"] + shared_runners_enabled = module.params["shared_runners_enabled"] + snippets_enabled = module.params["snippets_enabled"] + squash_option = module.params["squash_option"] + state = module.params["state"] + topics = module.params["topics"] + username = module.params["username"] + visibility = module.params["visibility"] + wiki_enabled = module.params["wiki_enabled"] # Set project_path to project_name if it is empty. 
if project_path is None: @@ -742,58 +760,69 @@ def main(): module.fail_json(msg="Failed to find the namespace for the project") project_exists = gitlab_project.exists_project(namespace, project_path) - if state == 'absent': + if state == "absent": if project_exists: gitlab_project.delete_project() module.exit_json(changed=True, msg=f"Successfully deleted project {project_name}") module.exit_json(changed=False, msg="Project deleted or does not exist") - if state == 'present': - - if gitlab_project.create_or_update_project(module, project_name, namespace, { - "allow_merge_on_skipped_pipeline": allow_merge_on_skipped_pipeline, - "avatar_path": avatar_path, - "builds_access_level": builds_access_level, - "build_timeout": build_timeout, - "ci_config_path": ci_config_path, - "container_expiration_policy": container_expiration_policy, - "container_registry_access_level": container_registry_access_level, - "default_branch": default_branch, - "description": project_description, - "environments_access_level": environments_access_level, - "feature_flags_access_level": feature_flags_access_level, - "forking_access_level": forking_access_level, - "import_url": import_url, - "infrastructure_access_level": infrastructure_access_level, - "initialize_with_readme": initialize_with_readme, - "issues_access_level": issues_access_level, - "issues_enabled": issues_enabled, - "lfs_enabled": lfs_enabled, - "merge_method": merge_method, - "merge_requests_enabled": merge_requests_enabled, - "model_registry_access_level": model_registry_access_level, - "monitor_access_level": monitor_access_level, - "only_allow_merge_if_all_discussions_are_resolved": only_allow_merge_if_all_discussions_are_resolved, - "only_allow_merge_if_pipeline_succeeds": only_allow_merge_if_pipeline_succeeds, - "packages_enabled": packages_enabled, - "pages_access_level": pages_access_level, - "path": project_path, - "releases_access_level": releases_access_level, - "remove_source_branch_after_merge": remove_source_branch_after_merge, - "repository_access_level": repository_access_level, - "security_and_compliance_access_level": security_and_compliance_access_level, - "service_desk_enabled": service_desk_enabled, - "shared_runners_enabled": shared_runners_enabled, - "snippets_enabled": snippets_enabled, - "squash_option": squash_option, - "topics": topics, - "visibility": visibility, - "wiki_enabled": wiki_enabled, - }): - - module.exit_json(changed=True, msg=f"Successfully created or updated the project {project_name}", project=gitlab_project.project_object._attrs) - module.exit_json(changed=False, msg=f"No need to update the project {project_name}", project=gitlab_project.project_object._attrs) - - -if __name__ == '__main__': + if state == "present": + if gitlab_project.create_or_update_project( + module, + project_name, + namespace, + { + "allow_merge_on_skipped_pipeline": allow_merge_on_skipped_pipeline, + "avatar_path": avatar_path, + "builds_access_level": builds_access_level, + "build_timeout": build_timeout, + "ci_config_path": ci_config_path, + "container_expiration_policy": container_expiration_policy, + "container_registry_access_level": container_registry_access_level, + "default_branch": default_branch, + "description": project_description, + "environments_access_level": environments_access_level, + "feature_flags_access_level": feature_flags_access_level, + "forking_access_level": forking_access_level, + "import_url": import_url, + "infrastructure_access_level": infrastructure_access_level, + "initialize_with_readme": 
initialize_with_readme, + "issues_access_level": issues_access_level, + "issues_enabled": issues_enabled, + "lfs_enabled": lfs_enabled, + "merge_method": merge_method, + "merge_requests_enabled": merge_requests_enabled, + "model_registry_access_level": model_registry_access_level, + "monitor_access_level": monitor_access_level, + "only_allow_merge_if_all_discussions_are_resolved": only_allow_merge_if_all_discussions_are_resolved, + "only_allow_merge_if_pipeline_succeeds": only_allow_merge_if_pipeline_succeeds, + "packages_enabled": packages_enabled, + "pages_access_level": pages_access_level, + "path": project_path, + "releases_access_level": releases_access_level, + "remove_source_branch_after_merge": remove_source_branch_after_merge, + "repository_access_level": repository_access_level, + "security_and_compliance_access_level": security_and_compliance_access_level, + "service_desk_enabled": service_desk_enabled, + "shared_runners_enabled": shared_runners_enabled, + "snippets_enabled": snippets_enabled, + "squash_option": squash_option, + "topics": topics, + "visibility": visibility, + "wiki_enabled": wiki_enabled, + }, + ): + module.exit_json( + changed=True, + msg=f"Successfully created or updated the project {project_name}", + project=gitlab_project.project_object._attrs, + ) + module.exit_json( + changed=False, + msg=f"No need to update the project {project_name}", + project=gitlab_project.project_object._attrs, + ) + + +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_project_access_token.py b/plugins/modules/gitlab_project_access_token.py index 74102af3fd8..679e260db88 100644 --- a/plugins/modules/gitlab_project_access_token.py +++ b/plugins/modules/gitlab_project_access_token.py @@ -158,7 +158,10 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, find_project, gitlab_authentication, gitlab + auth_argument_spec, + find_project, + gitlab_authentication, + gitlab, ) ACCESS_LEVELS = dict(guest=10, planner=15, reporter=20, developer=30, maintainer=40, owner=50) @@ -170,10 +173,11 @@ def __init__(self, module, gitlab_instance): self._gitlab = gitlab_instance self.access_token_object = None - ''' + """ @param project Project Object @param arguments Attributes of the access_token - ''' + """ + def create_access_token(self, project, arguments): changed = False if self._module.check_mode: @@ -182,17 +186,18 @@ def create_access_token(self, project, arguments): try: self.access_token_object = project.access_tokens.create(arguments) changed = True - except (gitlab.exceptions.GitlabCreateError) as e: + except gitlab.exceptions.GitlabCreateError as e: self._module.fail_json(msg=f"Failed to create access token: {e}") return changed - ''' + """ @param project Project object @param name of the access token - ''' + """ + def find_access_token(self, project, name): - access_tokens = [x for x in project.access_tokens.list(all=True) if not getattr(x, 'revoked', False)] + access_tokens = [x for x in project.access_tokens.list(all=True) if not getattr(x, "revoked", False)] for access_token in access_tokens: if access_token.name == name: self.access_token_object = access_token @@ -207,19 +212,19 @@ def revoke_access_token(self): try: self.access_token_object.delete() changed = True - except (gitlab.exceptions.GitlabCreateError) as e: + except gitlab.exceptions.GitlabCreateError as e: self._module.fail_json(msg=f"Failed to revoke access token: {e}") return changed def 
access_tokens_equal(self): - if self.access_token_object.name != self._module.params['name']: + if self.access_token_object.name != self._module.params["name"]: return False - if self.access_token_object.scopes != self._module.params['scopes']: + if self.access_token_object.scopes != self._module.params["scopes"]: return False - if self.access_token_object.access_level != ACCESS_LEVELS[self._module.params['access_level']]: + if self.access_token_object.access_level != ACCESS_LEVELS[self._module.params["access_level"]]: return False - if self.access_token_object.expires_at != self._module.params['expires_at']: + if self.access_token_object.expires_at != self._module.params["expires_at"]: return False return True @@ -227,60 +232,66 @@ def access_tokens_equal(self): def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - state=dict(type='str', default="present", choices=["absent", "present"]), - project=dict(type='str', required=True), - name=dict(type='str', required=True), - scopes=dict(type='list', - required=True, - aliases=['scope'], - elements='str', - choices=['api', - 'read_api', - 'read_registry', - 'write_registry', - 'read_repository', - 'write_repository', - 'create_runner', - 'manage_runner', - 'ai_features', - 'k8s_proxy', - 'self_rotate']), - access_level=dict(type='str', default='maintainer', choices=['guest', 'planner', 'reporter', 'developer', 'maintainer', 'owner']), - expires_at=dict(type='str', required=True), - recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change']) - )) + argument_spec.update( + dict( + state=dict(type="str", default="present", choices=["absent", "present"]), + project=dict(type="str", required=True), + name=dict(type="str", required=True), + scopes=dict( + type="list", + required=True, + aliases=["scope"], + elements="str", + choices=[ + "api", + "read_api", + "read_registry", + "write_registry", + "read_repository", + "write_repository", + "create_runner", + "manage_runner", + "ai_features", + "k8s_proxy", + "self_rotate", + ], + ), + access_level=dict( + type="str", + default="maintainer", + choices=["guest", "planner", "reporter", "developer", "maintainer", "owner"], + ), + expires_at=dict(type="str", required=True), + recreate=dict(type="str", default="never", choices=["never", "always", "state_change"]), + ) + ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'] - ], - required_together=[ - ['api_username', 'api_password'] - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], ], - supports_check_mode=True + required_together=[["api_username", "api_password"]], + required_one_of=[["api_username", "api_token", "api_oauth_token", "api_job_token"]], + supports_check_mode=True, ) - state = module.params['state'] - project_identifier = module.params['project'] - name = module.params['name'] - scopes = module.params['scopes'] - access_level_str = module.params['access_level'] - expires_at = module.params['expires_at'] - recreate = module.params['recreate'] + state = module.params["state"] + project_identifier = module.params["project"] + 
name = module.params["name"] + scopes = module.params["scopes"] + access_level_str = module.params["access_level"] + expires_at = module.params["expires_at"] + recreate = module.params["recreate"] access_level = ACCESS_LEVELS[access_level_str] try: - datetime.strptime(expires_at, '%Y-%m-%d') + datetime.strptime(expires_at, "%Y-%m-%d") except ValueError: module.fail_json(msg="Argument expires_at is not in required format YYYY-MM-DD") @@ -297,36 +308,62 @@ def main(): if gitlab_access_token.access_token_object is not None: gitlab_access_token_exists = True - if state == 'absent': + if state == "absent": if gitlab_access_token_exists: gitlab_access_token.revoke_access_token() module.exit_json(changed=True, msg=f"Successfully deleted access token {name}") else: module.exit_json(changed=False, msg="Access token does not exists") - if state == 'present': + if state == "present": if gitlab_access_token_exists: if gitlab_access_token.access_tokens_equal(): - if recreate == 'always': + if recreate == "always": gitlab_access_token.revoke_access_token() - gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) - module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) + gitlab_access_token.create_access_token( + project, + {"name": name, "scopes": scopes, "access_level": access_level, "expires_at": expires_at}, + ) + module.exit_json( + changed=True, + msg="Successfully recreated access token", + access_token=gitlab_access_token.access_token_object._attrs, + ) else: - module.exit_json(changed=False, msg="Access token already exists", access_token=gitlab_access_token.access_token_object._attrs) + module.exit_json( + changed=False, + msg="Access token already exists", + access_token=gitlab_access_token.access_token_object._attrs, + ) else: - if recreate == 'never': - module.fail_json(msg="Access token already exists and its state is different. It can not be updated without recreating.") + if recreate == "never": + module.fail_json( + msg="Access token already exists and its state is different. It cannot be updated without recreating."
+ ) else: gitlab_access_token.revoke_access_token() - gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) - module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) + gitlab_access_token.create_access_token( + project, + {"name": name, "scopes": scopes, "access_level": access_level, "expires_at": expires_at}, + ) + module.exit_json( + changed=True, + msg="Successfully recreated access token", + access_token=gitlab_access_token.access_token_object._attrs, + ) else: - gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + gitlab_access_token.create_access_token( + project, {"name": name, "scopes": scopes, "access_level": access_level, "expires_at": expires_at} + ) if module.check_mode: module.exit_json(changed=True, msg="Successfully created access token", access_token={}) else: - module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) + module.exit_json( + changed=True, + msg="Successfully created access token", + access_token=gitlab_access_token.access_token_object._attrs, + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_project_badge.py b/plugins/modules/gitlab_project_badge.py index e3e301dcc5b..b10198142a4 100644 --- a/plugins/modules/gitlab_project_badge.py +++ b/plugins/modules/gitlab_project_badge.py @@ -94,7 +94,10 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, find_project, list_all_kwargs + auth_argument_spec, + gitlab_authentication, + find_project, + list_all_kwargs, ) @@ -149,18 +152,15 @@ def absent_strategy(module, gl, project, wished_badge): return changed, None -state_strategy = { - "present": present_strategy, - "absent": absent_strategy -} +state_strategy = {"present": present_strategy, "absent": absent_strategy} def core(module): # check prerequisites and connect to gitlab server gl = gitlab_authentication(module) - gitlab_project = module.params['project'] - state = module.params['state'] + gitlab_project = module.params["project"] + state = module.params["state"] project = find_project(gl, gitlab_project) # project doesn't exist @@ -180,27 +180,29 @@ def core(module): def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - project=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - link_url=dict(type='str', required=True), - image_url=dict(type='str', required=True), - )) + argument_spec.update( + dict( + project=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["present", "absent"]), + link_url=dict(type="str", required=True), + image_url=dict(type="str", required=True), + ) + ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", 
"api_job_token"], ], required_together=[ - ['api_username', 'api_password'], + ["api_username", "api_password"], ], required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ["api_username", "api_token", "api_oauth_token", "api_job_token"], ], supports_check_mode=True, ) @@ -208,5 +210,5 @@ def main(): core(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_project_members.py b/plugins/modules/gitlab_project_members.py index 6a89c1af6fe..88a5a91053d 100644 --- a/plugins/modules/gitlab_project_members.py +++ b/plugins/modules/gitlab_project_members.py @@ -162,7 +162,9 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, gitlab + auth_argument_spec, + gitlab_authentication, + gitlab, ) @@ -211,8 +213,7 @@ def is_user_a_member(self, members, gitlab_user_id): # add user to a project def add_member_to_project(self, gitlab_user_id, gitlab_project_id, access_level): project = self._gitlab.projects.get(gitlab_project_id) - add_member = project.members.create( - {'user_id': gitlab_user_id, 'access_level': access_level}) + add_member = project.members.create({"user_id": gitlab_user_id, "access_level": access_level}) # remove user from a project def remove_user_from_project(self, gitlab_user_id, gitlab_project_id): @@ -236,45 +237,49 @@ def update_user_access_level(self, members, gitlab_user_id, access_level): def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - project=dict(type='str', required=True), - gitlab_user=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent']), - access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), - purge_users=dict(type='list', elements='str', choices=[ - 'guest', 'reporter', 'developer', 'maintainer', 'owner']), - gitlab_users_access=dict( - type='list', - elements='dict', - options=dict( - name=dict(type='str', required=True), - access_level=dict(type='str', choices=[ - 'guest', 'reporter', 'developer', 'maintainer', 'owner'], required=True), - ) - ), - )) + argument_spec.update( + dict( + project=dict(type="str", required=True), + gitlab_user=dict(type="list", elements="str"), + state=dict(type="str", default="present", choices=["present", "absent"]), + access_level=dict(type="str", choices=["guest", "reporter", "developer", "maintainer", "owner"]), + purge_users=dict( + type="list", elements="str", choices=["guest", "reporter", "developer", "maintainer", "owner"] + ), + gitlab_users_access=dict( + type="list", + elements="dict", + options=dict( + name=dict(type="str", required=True), + access_level=dict( + type="str", choices=["guest", "reporter", "developer", "maintainer", "owner"], required=True + ), + ), + ), + ) + ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ['gitlab_user', 'gitlab_users_access'], - ['access_level', 'gitlab_users_access'], + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], + ["gitlab_user", "gitlab_users_access"], + 
["access_level", "gitlab_users_access"], ], required_together=[ - ['api_username', 'api_password'], - ['gitlab_user', 'access_level'], + ["api_username", "api_password"], + ["gitlab_user", "access_level"], ], required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], - ['gitlab_user', 'gitlab_users_access'], + ["api_username", "api_token", "api_oauth_token", "api_job_token"], + ["gitlab_user", "gitlab_users_access"], ], required_if=[ - ['state', 'present', ['access_level', 'gitlab_users_access'], True], + ["state", "present", ["access_level", "gitlab_users_access"], True], ], supports_check_mode=True, ) @@ -283,17 +288,17 @@ def main(): gl = gitlab_authentication(module) access_level_int = { - 'guest': gitlab.const.GUEST_ACCESS, - 'reporter': gitlab.const.REPORTER_ACCESS, - 'developer': gitlab.const.DEVELOPER_ACCESS, - 'maintainer': gitlab.const.MAINTAINER_ACCESS, - 'owner': gitlab.const.OWNER_ACCESS, + "guest": gitlab.const.GUEST_ACCESS, + "reporter": gitlab.const.REPORTER_ACCESS, + "developer": gitlab.const.DEVELOPER_ACCESS, + "maintainer": gitlab.const.MAINTAINER_ACCESS, + "owner": gitlab.const.OWNER_ACCESS, } - gitlab_project = module.params['project'] - state = module.params['state'] - access_level = module.params['access_level'] - purge_users = module.params['purge_users'] + gitlab_project = module.params["project"] + state = module.params["state"] + access_level = module.params["access_level"] + purge_users = module.params["purge_users"] if purge_users: purge_users = [access_level_int[level] for level in purge_users] @@ -307,29 +312,32 @@ def main(): module.fail_json(msg=f"project '{gitlab_project}' not found.") members = [] - if module.params['gitlab_user'] is not None: + if module.params["gitlab_user"] is not None: gitlab_users_access = [] - gitlab_users = module.params['gitlab_user'] + gitlab_users = module.params["gitlab_user"] for gl_user in gitlab_users: gitlab_users_access.append( - {'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None}) - elif module.params['gitlab_users_access'] is not None: - gitlab_users_access = module.params['gitlab_users_access'] + {"name": gl_user, "access_level": access_level_int[access_level] if access_level else None} + ) + elif module.params["gitlab_users_access"] is not None: + gitlab_users_access = module.params["gitlab_users_access"] for user_level in gitlab_users_access: - user_level['access_level'] = access_level_int[user_level['access_level']] + user_level["access_level"] = access_level_int[user_level["access_level"]] if len(gitlab_users_access) == 1 and not purge_users: # only single user given - members = [project.get_member_in_a_project( - gitlab_project_id, project.get_user_id(gitlab_users_access[0]['name']))] + members = [ + project.get_member_in_a_project(gitlab_project_id, project.get_user_id(gitlab_users_access[0]["name"])) + ] if members[0] is None: members = [] elif len(gitlab_users_access) > 1 or purge_users: # list of users given members = project.get_members_in_a_project(gitlab_project_id) else: - module.exit_json(changed='OK', result="Nothing to do, please give at least one user or set purge_users true.", - result_data=[]) + module.exit_json( + changed="OK", result="Nothing to do, please give at least one user or set purge_users true.", result_data=[] + ) changed = False error = False @@ -337,67 +345,115 @@ def main(): changed_data = [] for gitlab_user in gitlab_users_access: - gitlab_user_id = project.get_user_id(gitlab_user['name']) + gitlab_user_id = 
project.get_user_id(gitlab_user["name"]) # user doesn't exist if not gitlab_user_id: - if state == 'absent': + if state == "absent": changed_users.append(f"user '{gitlab_user['name']}' not found, and thus also not part of the project") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', - 'msg': f"user '{gitlab_user['name']}' not found, and thus also not part of the project"}) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "OK", + "msg": f"user '{gitlab_user['name']}' not found, and thus also not part of the project", + } + ) else: error = True changed_users.append(f"user '{gitlab_user['name']}' not found.") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': f"user '{gitlab_user['name']}' not found."}) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "FAILED", + "msg": f"user '{gitlab_user['name']}' not found.", + } + ) continue is_user_a_member = project.is_user_a_member(members, gitlab_user_id) # check if the user is a member in the project if not is_user_a_member: - if state == 'present': + if state == "present": # add user to the project try: if not module.check_mode: - project.add_member_to_project(gitlab_user_id, gitlab_project_id, gitlab_user['access_level']) + project.add_member_to_project(gitlab_user_id, gitlab_project_id, gitlab_user["access_level"]) changed = True changed_users.append(f"Successfully added user '{gitlab_user['name']}' to project") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', - 'msg': f"Successfully added user '{gitlab_user['name']}' to project"}) - except (gitlab.exceptions.GitlabCreateError) as e: + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "CHANGED", + "msg": f"Successfully added user '{gitlab_user['name']}' to project", + } + ) + except gitlab.exceptions.GitlabCreateError as e: error = True changed_users.append(f"Failed to updated the access level for the user, '{gitlab_user['name']}'") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': f"Not allowed to add the access level for the member, {gitlab_user['name']}: {e}"}) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "FAILED", + "msg": f"Not allowed to add the access level for the member, {gitlab_user['name']}: {e}", + } + ) # state as absent else: - changed_users.append(f"User, '{gitlab_user['name']}', is not a member in the project. No change to report") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', - 'msg': f"User, '{gitlab_user['name']}', is not a member in the project. No change to report"}) + changed_users.append( + f"User, '{gitlab_user['name']}', is not a member in the project. No change to report" + ) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "OK", + "msg": f"User, '{gitlab_user['name']}', is not a member in the project. No change to report", + } + ) # in case that a user is a member else: - if state == 'present': + if state == "present": # compare the access level user_access_level = project.get_user_access_level(members, gitlab_user_id) - if user_access_level == gitlab_user['access_level']: - changed_users.append(f"User, '{gitlab_user['name']}', is already a member in the project. No change to report") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', - 'msg': f"User, '{gitlab_user['name']}', is already a member in the project. 
No change to report"}) + if user_access_level == gitlab_user["access_level"]: + changed_users.append( + f"User, '{gitlab_user['name']}', is already a member in the project. No change to report" + ) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "OK", + "msg": f"User, '{gitlab_user['name']}', is already a member in the project. No change to report", + } + ) else: # update the access level for the user try: if not module.check_mode: - project.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level']) + project.update_user_access_level(members, gitlab_user_id, gitlab_user["access_level"]) changed = True - changed_users.append(f"Successfully updated the access level for the user, '{gitlab_user['name']}'") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', - 'msg': f"Successfully updated the access level for the user, '{gitlab_user['name']}'"}) - except (gitlab.exceptions.GitlabUpdateError) as e: + changed_users.append( + f"Successfully updated the access level for the user, '{gitlab_user['name']}'" + ) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "CHANGED", + "msg": f"Successfully updated the access level for the user, '{gitlab_user['name']}'", + } + ) + except gitlab.exceptions.GitlabUpdateError as e: error = True - changed_users.append(f"Failed to updated the access level for the user, '{gitlab_user['name']}'") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': f"Not allowed to update the access level for the member, {gitlab_user['name']}: {e}"}) + changed_users.append( + f"Failed to updated the access level for the user, '{gitlab_user['name']}'" + ) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "FAILED", + "msg": f"Not allowed to update the access level for the member, {gitlab_user['name']}: {e}", + } + ) else: # remove the user from the project try: @@ -405,44 +461,70 @@ def main(): project.remove_user_from_project(gitlab_user_id, gitlab_project_id) changed = True changed_users.append(f"Successfully removed user, '{gitlab_user['name']}', from the project") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', - 'msg': f"Successfully removed user, '{gitlab_user['name']}', from the project"}) - except (gitlab.exceptions.GitlabDeleteError) as e: + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "CHANGED", + "msg": f"Successfully removed user, '{gitlab_user['name']}', from the project", + } + ) + except gitlab.exceptions.GitlabDeleteError as e: error = True changed_users.append(f"Failed to removed user, '{gitlab_user['name']}', from the project") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': f"Failed to remove user, '{gitlab_user['name']}' from the project: {e}"}) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "FAILED", + "msg": f"Failed to remove user, '{gitlab_user['name']}' from the project: {e}", + } + ) # if state = present and purge_users set delete users which are in members having give access level but not in gitlab_users - if state == 'present' and purge_users: + if state == "present" and purge_users: uppercase_names_in_gitlab_users_access = [] for name in gitlab_users_access: - uppercase_names_in_gitlab_users_access.append(name['name'].upper()) + uppercase_names_in_gitlab_users_access.append(name["name"].upper()) for member in members: - if member.access_level in purge_users and 
member.username.upper() not in uppercase_names_in_gitlab_users_access: + if ( + member.access_level in purge_users + and member.username.upper() not in uppercase_names_in_gitlab_users_access + ): try: if not module.check_mode: project.remove_user_from_project(member.id, gitlab_project_id) changed = True - changed_users.append(f"Successfully removed user '{member.username}', from project. Was not in given list") - changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED', - 'msg': f"Successfully removed user '{member.username}', from project. Was not in given list"}) - except (gitlab.exceptions.GitlabDeleteError) as e: + changed_users.append( + f"Successfully removed user '{member.username}', from project. Was not in given list" + ) + changed_data.append( + { + "gitlab_user": member.username, + "result": "CHANGED", + "msg": f"Successfully removed user '{member.username}', from project. Was not in given list", + } + ) + except gitlab.exceptions.GitlabDeleteError as e: error = True changed_users.append(f"Failed to removed user, '{gitlab_user['name']}', from the project") - changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', - 'msg': f"Failed to remove user, '{gitlab_user['name']}' from the project: {e}"}) + changed_data.append( + { + "gitlab_user": gitlab_user["name"], + "result": "FAILED", + "msg": f"Failed to remove user, '{gitlab_user['name']}' from the project: {e}", + } + ) if len(gitlab_users_access) == 1 and error: # if single user given and an error occurred return error for list errors will be per user module.fail_json(msg=f"FAILED: '{changed_users[0]} '", result_data=changed_data) elif error: - module.fail_json( - msg='FAILED: At least one given user/permission could not be set', result_data=changed_data) + module.fail_json(msg="FAILED: At least one given user/permission could not be set", result_data=changed_data) - module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data) + module.exit_json( + changed=changed, msg="Successfully set memberships", result="\n".join(changed_users), result_data=changed_data + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_project_variable.py b/plugins/modules/gitlab_project_variable.py index ccc6e9cbf8f..2fded1eb595 100644 --- a/plugins/modules/gitlab_project_variable.py +++ b/plugins/modules/gitlab_project_variable.py @@ -239,16 +239,18 @@ from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, filter_returned_variables, vars_to_variables, - list_all_kwargs + auth_argument_spec, + gitlab_authentication, + filter_returned_variables, + vars_to_variables, + list_all_kwargs, ) class GitlabProjectVariables: - def __init__(self, module, gitlab_instance): self.repo = gitlab_instance - self.project = self.get_project(module.params['project']) + self.project = self.get_project(module.params["project"]) self._module = module def get_project(self, project_name): @@ -262,18 +264,18 @@ def create_variable(self, var_obj): return True var = { - "key": var_obj.get('key'), - "value": var_obj.get('value'), - "description": var_obj.get('description'), - "masked": var_obj.get('masked'), - "masked_and_hidden": var_obj.get('hidden'), - "protected": var_obj.get('protected'), - "raw": var_obj.get('raw'), - "variable_type": var_obj.get('variable_type'), + "key": var_obj.get("key"), + "value": var_obj.get("value"), + "description": 
var_obj.get("description"), + "masked": var_obj.get("masked"), + "masked_and_hidden": var_obj.get("hidden"), + "protected": var_obj.get("protected"), + "raw": var_obj.get("raw"), + "variable_type": var_obj.get("variable_type"), } - if var_obj.get('environment_scope') is not None: - var["environment_scope"] = var_obj.get('environment_scope') + if var_obj.get("environment_scope") is not None: + var["environment_scope"] = var_obj.get("environment_scope") self.project.variables.create(var) return True @@ -288,7 +290,9 @@ def update_variable(self, var_obj): def delete_variable(self, var_obj): if self._module.check_mode: return True - self.project.variables.delete(var_obj.get('key'), filter={'environment_scope': var_obj.get('environment_scope')}) + self.project.variables.delete( + var_obj.get("key"), filter={"environment_scope": var_obj.get("environment_scope")} + ) return True @@ -304,16 +308,16 @@ def compare(requested_variables, existing_variables, state): updated = list() added = list() - if state == 'present': + if state == "present": existing_key_scope_vars = list() for item in existing_variables: - existing_key_scope_vars.append({'key': item.get('key'), 'environment_scope': item.get('environment_scope')}) + existing_key_scope_vars.append({"key": item.get("key"), "environment_scope": item.get("environment_scope")}) for var in requested_variables: if var in existing_variables: untouched.append(var) else: - compare_item = {'key': var.get('name'), 'environment_scope': var.get('environment_scope')} + compare_item = {"key": var.get("name"), "environment_scope": var.get("environment_scope")} if compare_item in existing_key_scope_vars: updated.append(var) else: @@ -323,7 +327,6 @@ def compare(requested_variables, existing_variables, state): def native_python_main(this_gitlab, purge, requested_variables, state, module): - change = False return_value = dict(added=[], updated=[], removed=[], untouched=[]) @@ -335,34 +338,34 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): # filter out and enrich before compare for item in requested_variables: - item['key'] = item.pop('name') - item['value'] = str(item.get('value')) - if item.get('protected') is None: - item['protected'] = False - if item.get('raw') is None: - item['raw'] = False - if item.get('masked') is None: - item['masked'] = False - if item.get('hidden') is None: - item['hidden'] = False - if item.get('environment_scope') is None: - item['environment_scope'] = '*' - if item.get('variable_type') is None: - item['variable_type'] = 'env_var' + item["key"] = item.pop("name") + item["value"] = str(item.get("value")) + if item.get("protected") is None: + item["protected"] = False + if item.get("raw") is None: + item["raw"] = False + if item.get("masked") is None: + item["masked"] = False + if item.get("hidden") is None: + item["hidden"] = False + if item.get("environment_scope") is None: + item["environment_scope"] = "*" + if item.get("variable_type") is None: + item["variable_type"] = "env_var" if module.check_mode: untouched, updated, added = compare(requested_variables, existing_variables, state) - if state == 'present': + if state == "present": add_or_update = [x for x in requested_variables if x not in existing_variables] for item in add_or_update: try: if this_gitlab.create_variable(item): - return_value['added'].append(item) + return_value["added"].append(item) except Exception: if this_gitlab.update_variable(item): - return_value['updated'].append(item) + return_value["updated"].append(item) if purge: # 
refetch and filter @@ -372,11 +375,11 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): remove = [x for x in existing_variables if x not in requested_variables] for item in remove: if this_gitlab.delete_variable(item): - return_value['removed'].append(item) + return_value["removed"].append(item) - elif state == 'absent': + elif state == "absent": # value, type, and description do not matter on removing variables. - keys_ignored_on_deletion = ['value', 'variable_type', 'description'] + keys_ignored_on_deletion = ["value", "variable_type", "description"] for key in keys_ignored_on_deletion: for item in existing_variables: item.pop(key) @@ -387,17 +390,17 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): remove_requested = [x for x in requested_variables if x in existing_variables] for item in remove_requested: if this_gitlab.delete_variable(item): - return_value['removed'].append(item) + return_value["removed"].append(item) else: for item in existing_variables: if this_gitlab.delete_variable(item): - return_value['removed'].append(item) + return_value["removed"].append(item) if module.check_mode: - return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched) + return_value = dict(added=added, updated=updated, removed=return_value["removed"], untouched=untouched) - if any(return_value[x] for x in ['added', 'removed', 'updated']): + if any(return_value[x] for x in ["added", "removed", "updated"]): change = True gitlab_keys = this_gitlab.list_all_project_variables() @@ -410,59 +413,62 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update( - project=dict(type='str', required=True), - purge=dict(type='bool', default=False), - vars=dict(type='dict', default=dict(), no_log=True), + project=dict(type="str", required=True), + purge=dict(type="bool", default=False), + vars=dict(type="dict", default=dict(), no_log=True), # please mind whenever changing the variables dict to also change module_utils/gitlab.py's # KNOWN dict in filter_returned_variables or bad evil will happen - variables=dict(type='list', elements='dict', default=list(), options=dict( - name=dict(type='str', required=True), - value=dict(type='str', no_log=True), - description=dict(type='str'), - masked=dict(type='bool', default=False), - hidden=dict(type='bool', default=False), - protected=dict(type='bool', default=False), - raw=dict(type='bool', default=False), - environment_scope=dict(type='str', default='*'), - variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]), - )), - state=dict(type='str', default="present", choices=["absent", "present"]), + variables=dict( + type="list", + elements="dict", + default=list(), + options=dict( + name=dict(type="str", required=True), + value=dict(type="str", no_log=True), + description=dict(type="str"), + masked=dict(type="bool", default=False), + hidden=dict(type="bool", default=False), + protected=dict(type="bool", default=False), + raw=dict(type="bool", default=False), + environment_scope=dict(type="str", default="*"), + variable_type=dict(type="str", default="env_var", choices=["env_var", "file"]), + ), + ), + state=dict(type="str", default="present", choices=["absent", "present"]), ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 
'api_oauth_token'], - ['api_token', 'api_job_token'], - ['vars', 'variables'], + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], + ["vars", "variables"], ], required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ["api_username", "api_password"], ], - supports_check_mode=True + required_one_of=[["api_username", "api_token", "api_oauth_token", "api_job_token"]], + supports_check_mode=True, ) # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) - purge = module.params['purge'] - var_list = module.params['vars'] - state = module.params['state'] + purge = module.params["purge"] + var_list = module.params["vars"] + state = module.params["state"] if var_list: variables = vars_to_variables(var_list, module) else: - variables = module.params['variables'] + variables = module.params["variables"] - if state == 'present': - if any(x['value'] is None for x in variables): - module.fail_json(msg='value parameter is required for all variables in state present') + if state == "present": + if any(x["value"] is None for x in variables): + module.fail_json(msg="value parameter is required for all variables in state present") this_gitlab = GitlabProjectVariables(module=module, gitlab_instance=gitlab_instance) @@ -470,25 +476,25 @@ def main(): # postprocessing for item in after: - item.pop('project_id') - item['name'] = item.pop('key') + item.pop("project_id") + item["name"] = item.pop("key") for item in before: - item.pop('project_id') - item['name'] = item.pop('key') + item.pop("project_id") + item["name"] = item.pop("key") - untouched_key_name = 'key' + untouched_key_name = "key" if not module.check_mode: - untouched_key_name = 'name' - raw_return_value['untouched'] = [x for x in before if x in after] + untouched_key_name = "name" + raw_return_value["untouched"] = [x for x in before if x in after] - added = [x.get('key') for x in raw_return_value['added']] - updated = [x.get('key') for x in raw_return_value['updated']] - removed = [x.get('key') for x in raw_return_value['removed']] - untouched = [x.get(untouched_key_name) for x in raw_return_value['untouched']] + added = [x.get("key") for x in raw_return_value["added"]] + updated = [x.get("key") for x in raw_return_value["updated"]] + removed = [x.get("key") for x in raw_return_value["removed"]] + untouched = [x.get(untouched_key_name) for x in raw_return_value["untouched"]] return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched) module.exit_json(changed=change, project_variable=return_value) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_protected_branch.py b/plugins/modules/gitlab_protected_branch.py index fbd0ac3cf2e..c17b41959c0 100644 --- a/plugins/modules/gitlab_protected_branch.py +++ b/plugins/modules/gitlab_protected_branch.py @@ -89,20 +89,21 @@ from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, gitlab + auth_argument_spec, + gitlab_authentication, + gitlab, ) class GitlabProtectedBranch: - def __init__(self, module, project, gitlab_instance): self.repo = gitlab_instance self._module = module self.project = self.get_project(project) 
self.ACCESS_LEVEL = { - 'nobody': gitlab.const.NO_ACCESS, - 'developer': gitlab.const.DEVELOPER_ACCESS, - 'maintainer': gitlab.const.MAINTAINER_ACCESS + "nobody": gitlab.const.NO_ACCESS, + "developer": gitlab.const.DEVELOPER_ACCESS, + "maintainer": gitlab.const.MAINTAINER_ACCESS, } def get_project(self, project_name): @@ -116,9 +117,9 @@ def protected_branch_exist(self, name): def create_or_update_protected_branch(self, name, options): protected_branch_options = { - 'name': name, - 'allow_force_push': options['allow_force_push'], - 'code_owner_approval_required': options['code_owner_approval_required'], + "name": name, + "allow_force_push": options["allow_force_push"], + "code_owner_approval_required": options["code_owner_approval_required"], } protected_branch = self.protected_branch_exist(name=name) changed = False @@ -132,8 +133,8 @@ def create_or_update_protected_branch(self, name, options): protected_branch.save() else: # Set immutable options only on (re)creation - protected_branch_options['merge_access_level'] = options['merge_access_levels'] - protected_branch_options['push_access_level'] = options['push_access_level'] + protected_branch_options["merge_access_level"] = options["merge_access_levels"] + protected_branch_options["push_access_level"] = options["push_access_level"] if protected_branch: # Exists, but couldn't update. So, delete first self.delete_protected_branch(name) @@ -145,12 +146,13 @@ def create_or_update_protected_branch(self, name, options): def can_update(self, protected_branch, options): # these keys are not set on update the same way they are on creation - configured_merge = options['merge_access_levels'] - configured_push = options['push_access_level'] - current_merge = protected_branch.merge_access_levels[0]['access_level'] - current_push = protected_branch.push_access_levels[0]['access_level'] - return ((configured_merge is None or current_merge == configured_merge) and - (configured_push is None or current_push == configured_push)) + configured_merge = options["merge_access_levels"] + configured_push = options["push_access_level"] + current_merge = protected_branch.merge_access_levels[0]["access_level"] + current_push = protected_branch.push_access_levels[0]["access_level"] + return (configured_merge is None or current_merge == configured_merge) and ( + configured_push is None or current_push == configured_push + ) def delete_protected_branch(self, name): if self._module.check_mode: @@ -162,47 +164,45 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update( - project=dict(type='str', required=True), - name=dict(type='str', required=True), - merge_access_levels=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), - push_access_level=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), - allow_force_push=dict(type='bool'), - code_owner_approval_required=dict(type='bool'), - state=dict(type='str', default="present", choices=["absent", "present"]), + project=dict(type="str", required=True), + name=dict(type="str", required=True), + merge_access_levels=dict(type="str", default="maintainer", choices=["maintainer", "developer", "nobody"]), + push_access_level=dict(type="str", default="maintainer", choices=["maintainer", "developer", "nobody"]), + allow_force_push=dict(type="bool"), + code_owner_approval_required=dict(type="bool"), + state=dict(type="str", default="present", choices=["absent", "present"]), ) module = AnsibleModule( 
argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], ], required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ["api_username", "api_password"], ], - supports_check_mode=True + required_one_of=[["api_username", "api_token", "api_oauth_token", "api_job_token"]], + supports_check_mode=True, ) # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) - project = module.params['project'] - name = module.params['name'] - merge_access_levels = module.params['merge_access_levels'] - push_access_level = module.params['push_access_level'] - state = module.params['state'] + project = module.params["project"] + name = module.params["name"] + merge_access_levels = module.params["merge_access_levels"] + push_access_level = module.params["push_access_level"] + state = module.params["state"] gitlab_version = gitlab.__version__ - if LooseVersion(gitlab_version) < LooseVersion('2.3.0'): + if LooseVersion(gitlab_version) < LooseVersion("2.3.0"): module.fail_json( msg=f"community.general.gitlab_protected_branch requires python-gitlab Python module >= 2.3.0 (installed version: [{gitlab_version}])." - " Please upgrade python-gitlab to version 2.3.0 or above." + " Please upgrade python-gitlab to version 2.3.0 or above." ) this_gitlab = GitlabProtectedBranch(module=module, project=project, gitlab_instance=gitlab_instance) @@ -223,5 +223,5 @@ def main(): module.exit_json(changed=False, msg="No changes are needed.") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_runner.py b/plugins/modules/gitlab_runner.py index ed39fa76a85..69eea8dbee5 100644 --- a/plugins/modules/gitlab_runner.py +++ b/plugins/modules/gitlab_runner.py @@ -261,7 +261,10 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, gitlab, list_all_kwargs + auth_argument_spec, + gitlab_authentication, + gitlab, + list_all_kwargs, ) @@ -281,7 +284,7 @@ def __init__(self, module, gitlab_instance, group=None, project=None): self._runners_endpoint = project.runners.list elif group: self._runners_endpoint = group.runners.list - elif module.params['owned']: + elif module.params["owned"]: self._runners_endpoint = gitlab_instance.runners.list else: self._runners_endpoint = gitlab_instance.runners.all @@ -290,36 +293,36 @@ def create_or_update_runner(self, description, options): changed = False arguments = { - 'locked': options['locked'], - 'run_untagged': options['run_untagged'], - 'maximum_timeout': options['maximum_timeout'], - 'tag_list': options['tag_list'], + "locked": options["locked"], + "run_untagged": options["run_untagged"], + "maximum_timeout": options["maximum_timeout"], + "tag_list": options["tag_list"], } - if options.get('paused') is not None: - arguments['paused'] = options['paused'] + if options.get("paused") is not None: + arguments["paused"] = options["paused"] else: - arguments['active'] = options['active'] + arguments["active"] = options["active"] - if options.get('access_level') is 
not None: - arguments['access_level'] = options['access_level'] + if options.get("access_level") is not None: + arguments["access_level"] = options["access_level"] # Because we have already call userExists in main() if self.runner_object is None: - arguments['description'] = description - if options.get('registration_token') is not None: - arguments['token'] = options['registration_token'] - elif options.get('group') is not None: - arguments['runner_type'] = 'group_type' - arguments['group_id'] = options['group'] - elif options.get('project') is not None: - arguments['runner_type'] = 'project_type' - arguments['project_id'] = options['project'] + arguments["description"] = description + if options.get("registration_token") is not None: + arguments["token"] = options["registration_token"] + elif options.get("group") is not None: + arguments["runner_type"] = "group_type" + arguments["group_id"] = options["group"] + elif options.get("project") is not None: + arguments["runner_type"] = "project_type" + arguments["project_id"] = options["project"] else: - arguments['runner_type'] = 'instance_type' + arguments["runner_type"] = "instance_type" - access_level_on_creation = self._module.params['access_level_on_creation'] + access_level_on_creation = self._module.params["access_level_on_creation"] if not access_level_on_creation: - arguments.pop('access_level', None) + arguments.pop("access_level", None) runner = self.create_runner(arguments) changed = True @@ -337,32 +340,36 @@ def create_or_update_runner(self, description, options): self.runner_object = runner return changed - ''' + """ @param arguments Attributes of the runner - ''' + """ + def create_runner(self, arguments): if self._module.check_mode: + class MockRunner: def __init__(self): self._attrs = {} + return MockRunner() try: - if arguments.get('token') is not None: + if arguments.get("token") is not None: runner = self._gitlab.runners.create(arguments) - elif LooseVersion(gitlab.__version__) < LooseVersion('4.0.0'): + elif LooseVersion(gitlab.__version__) < LooseVersion("4.0.0"): self._module.fail_json(msg="New runner creation workflow requires python-gitlab 4.0.0 or higher") else: runner = self._gitlab.user.runners.create(arguments) - except (gitlab.exceptions.GitlabCreateError) as e: + except gitlab.exceptions.GitlabCreateError as e: self._module.fail_json(msg=f"Failed to create runner: {e}") return runner - ''' + """ @param runner Runner object @param arguments Attributes of the runner - ''' + """ + def update_runner(self, runner, arguments): changed = False @@ -383,9 +390,10 @@ def update_runner(self, runner, arguments): return (changed, runner) - ''' + """ @param description Description of the runner - ''' + """ + def find_runner(self, description): runners = self._runners_endpoint(**list_all_kwargs) @@ -396,12 +404,13 @@ def find_runner(self, description): if runner.description == description: return self._gitlab.runners.get(runner.id) else: - if runner['description'] == description: - return self._gitlab.runners.get(runner['id']) + if runner["description"] == description: + return self._gitlab.runners.get(runner["id"]) - ''' + """ @param description Description of the runner - ''' + """ + def exists_runner(self, description): # When runner exists, object will be stored in self.runner_object. 
runner = self.find_runner(description) @@ -423,41 +432,43 @@ def delete_runner(self): def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - description=dict(type='str', required=True, aliases=["name"]), - active=dict(type='bool', default=True), - paused=dict(type='bool', default=False), - owned=dict(type='bool', default=False), - tag_list=dict(type='list', elements='str', default=[]), - run_untagged=dict(type='bool', default=True), - locked=dict(type='bool', default=False), - access_level=dict(type='str', choices=["not_protected", "ref_protected"]), - access_level_on_creation=dict(type='bool', default=True), - maximum_timeout=dict(type='int', default=3600), - registration_token=dict(type='str', no_log=True), - project=dict(type='str'), - group=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), - )) + argument_spec.update( + dict( + description=dict(type="str", required=True, aliases=["name"]), + active=dict(type="bool", default=True), + paused=dict(type="bool", default=False), + owned=dict(type="bool", default=False), + tag_list=dict(type="list", elements="str", default=[]), + run_untagged=dict(type="bool", default=True), + locked=dict(type="bool", default=False), + access_level=dict(type="str", choices=["not_protected", "ref_protected"]), + access_level_on_creation=dict(type="bool", default=True), + maximum_timeout=dict(type="int", default=3600), + registration_token=dict(type="str", no_log=True), + project=dict(type="str"), + group=dict(type="str"), + state=dict(type="str", default="present", choices=["absent", "present"]), + ) + ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ['project', 'owned'], - ['group', 'owned'], - ['project', 'group'], - ['active', 'paused'], + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], + ["project", "owned"], + ["group", "owned"], + ["project", "group"], + ["active", "paused"], ], required_together=[ - ['api_username', 'api_password'], + ["api_username", "api_password"], ], required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ["api_username", "api_token", "api_oauth_token", "api_job_token"], ], supports_check_mode=True, ) @@ -465,18 +476,18 @@ def main(): # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) - state = module.params['state'] - runner_description = module.params['description'] - runner_active = module.params['active'] - runner_paused = module.params['paused'] - tag_list = module.params['tag_list'] - run_untagged = module.params['run_untagged'] - runner_locked = module.params['locked'] - access_level = module.params['access_level'] - maximum_timeout = module.params['maximum_timeout'] - registration_token = module.params['registration_token'] - project = module.params['project'] - group = module.params['group'] + state = module.params["state"] + runner_description = module.params["description"] + runner_active = module.params["active"] + runner_paused = module.params["paused"] + tag_list = module.params["tag_list"] + run_untagged = module.params["run_untagged"] + runner_locked = module.params["locked"] + access_level 
= module.params["access_level"] + maximum_timeout = module.params["maximum_timeout"] + registration_token = module.params["registration_token"] + project = module.params["project"] + group = module.params["group"] gitlab_project = None gitlab_group = None @@ -485,24 +496,24 @@ def main(): try: gitlab_project = gitlab_instance.projects.get(project) except gitlab.exceptions.GitlabGetError as e: - module.fail_json(msg=f'No such a project {project}', exception=e) + module.fail_json(msg=f"No such a project {project}", exception=e) elif group: try: gitlab_group = gitlab_instance.groups.get(group) except gitlab.exceptions.GitlabGetError as e: - module.fail_json(msg=f'No such a group {group}', exception=e) + module.fail_json(msg=f"No such a group {group}", exception=e) gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_group, gitlab_project) runner_exists = gitlab_runner.exists_runner(runner_description) - if state == 'absent': + if state == "absent": if runner_exists: gitlab_runner.delete_runner() module.exit_json(changed=True, msg=f"Successfully deleted runner {runner_description}") else: module.exit_json(changed=False, msg="Runner deleted or does not exists") - if state == 'present': + if state == "present": runner_values = { "active": runner_active, "tag_list": tag_list, @@ -518,12 +529,18 @@ def main(): # the paused attribute for runners is available since 14.8 runner_values["paused"] = runner_paused if gitlab_runner.create_or_update_runner(runner_description, runner_values): - module.exit_json(changed=True, runner=gitlab_runner.runner_object._attrs, - msg=f"Successfully created or updated the runner {runner_description}") + module.exit_json( + changed=True, + runner=gitlab_runner.runner_object._attrs, + msg=f"Successfully created or updated the runner {runner_description}", + ) else: - module.exit_json(changed=False, runner=gitlab_runner.runner_object._attrs, - msg=f"No need to update the runner {runner_description}") + module.exit_json( + changed=False, + runner=gitlab_runner.runner_object._attrs, + msg=f"No need to update the runner {runner_description}", + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gitlab_user.py b/plugins/modules/gitlab_user.py index 98289814312..54cb27dafd5 100644 --- a/plugins/modules/gitlab_user.py +++ b/plugins/modules/gitlab_user.py @@ -221,7 +221,11 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, find_group, gitlab_authentication, gitlab, list_all_kwargs + auth_argument_spec, + find_group, + gitlab_authentication, + gitlab, + list_all_kwargs, ) @@ -231,63 +235,64 @@ def __init__(self, module, gitlab_instance): self._gitlab = gitlab_instance self.user_object = None self.ACCESS_LEVEL = { - 'guest': gitlab.const.GUEST_ACCESS, - 'reporter': gitlab.const.REPORTER_ACCESS, - 'developer': gitlab.const.DEVELOPER_ACCESS, - 'master': gitlab.const.MAINTAINER_ACCESS, - 'maintainer': gitlab.const.MAINTAINER_ACCESS, - 'owner': gitlab.const.OWNER_ACCESS, + "guest": gitlab.const.GUEST_ACCESS, + "reporter": gitlab.const.REPORTER_ACCESS, + "developer": gitlab.const.DEVELOPER_ACCESS, + "master": gitlab.const.MAINTAINER_ACCESS, + "maintainer": gitlab.const.MAINTAINER_ACCESS, + "owner": gitlab.const.OWNER_ACCESS, } - ''' + """ @param username Username of the user @param options User options - ''' + """ + def create_or_update_user(self, username, options): changed = False potentionally_changed = False # Because we have 
already call userExists in main() if self.user_object is None: - user = self.create_user({ - 'name': options['name'], - 'username': username, - 'password': options['password'], - 'reset_password': options['reset_password'], - 'email': options['email'], - 'skip_confirmation': not options['confirm'], - 'admin': options['isadmin'], - 'external': options['external'], - 'identities': options['identities'], - }) + user = self.create_user( + { + "name": options["name"], + "username": username, + "password": options["password"], + "reset_password": options["reset_password"], + "email": options["email"], + "skip_confirmation": not options["confirm"], + "admin": options["isadmin"], + "external": options["external"], + "identities": options["identities"], + } + ) changed = True else: changed, user = self.update_user( - self.user_object, { + self.user_object, + { # add "normal" parameters here, put uncheckable # params in the dict below - 'name': {'value': options['name']}, - 'email': {'value': options['email']}, - + "name": {"value": options["name"]}, + "email": {"value": options["email"]}, # note: for some attributes like this one the key # from reading back from server is unfortunately # different to the one needed for pushing/writing, # in that case use the optional setter key - 'is_admin': { - 'value': options['isadmin'], 'setter': 'admin' - }, - 'external': {'value': options['external']}, - 'identities': {'value': options['identities']}, + "is_admin": {"value": options["isadmin"], "setter": "admin"}, + "external": {"value": options["external"]}, + "identities": {"value": options["identities"]}, }, { # put "uncheckable" params here, this means params # which the gitlab does accept for setting but does # not return any information about it - 'skip_reconfirmation': {'value': not options['confirm']}, - 'password': {'value': options['password']}, - 'reset_password': {'value': options['reset_password']}, - 'overwrite_identities': {'value': options['overwrite_identities']}, - } + "skip_reconfirmation": {"value": not options["confirm"]}, + "password": {"value": options["password"]}, + "reset_password": {"value": options["reset_password"]}, + "overwrite_identities": {"value": options["overwrite_identities"]}, + }, ) # note: as we unfortunately have some uncheckable parameters @@ -297,16 +302,20 @@ def create_or_update_user(self, username, options): potentionally_changed = True # Assign ssh keys - if options['sshkey_name'] and options['sshkey_file']: - key_changed = self.add_ssh_key_to_user(user, { - 'name': options['sshkey_name'], - 'file': options['sshkey_file'], - 'expires_at': options['sshkey_expires_at']}) + if options["sshkey_name"] and options["sshkey_file"]: + key_changed = self.add_ssh_key_to_user( + user, + { + "name": options["sshkey_name"], + "file": options["sshkey_file"], + "expires_at": options["sshkey_expires_at"], + }, + ) changed = changed or key_changed # Assign group - if options['group_path']: - group_changed = self.assign_user_to_group(user, options['group_path'], options['access_level']) + if options["group_path"]: + group_changed = self.assign_user_to_group(user, options["group_path"], options["access_level"]) changed = changed or group_changed self.user_object = user @@ -323,50 +332,51 @@ def create_or_update_user(self, username, options): else: return False - ''' + """ @param group User object - ''' + """ + def get_user_id(self, user): if user is not None: return user.id return None - ''' + """ @param user User object @param sshkey_name Name of the ssh key - ''' + """ + def 
ssh_key_exists(self, user, sshkey_name): - return any( - k.title == sshkey_name - for k in user.keys.list(**list_all_kwargs) - ) + return any(k.title == sshkey_name for k in user.keys.list(**list_all_kwargs)) - ''' + """ @param user User object @param sshkey Dict containing sshkey infos {"name": "", "file": "", "expires_at": ""} - ''' + """ + def add_ssh_key_to_user(self, user, sshkey): - if not self.ssh_key_exists(user, sshkey['name']): + if not self.ssh_key_exists(user, sshkey["name"]): if self._module.check_mode: return True try: parameter = { - 'title': sshkey['name'], - 'key': sshkey['file'], + "title": sshkey["name"], + "key": sshkey["file"], } - if sshkey['expires_at'] is not None: - parameter['expires_at'] = sshkey['expires_at'] + if sshkey["expires_at"] is not None: + parameter["expires_at"] = sshkey["expires_at"] user.keys.create(parameter) except gitlab.exceptions.GitlabCreateError as e: self._module.fail_json(msg=f"Failed to assign sshkey to user: {e}") return True return False - ''' + """ @param group Group object @param user_id Id of the user to find - ''' + """ + def find_member(self, group, user_id): try: member = group.members.get(user_id) @@ -374,30 +384,33 @@ def find_member(self, group, user_id): return None return member - ''' + """ @param group Group object @param user_id Id of the user to check - ''' + """ + def member_exists(self, group, user_id): member = self.find_member(group, user_id) return member is not None - ''' + """ @param group Group object @param user_id Id of the user to check @param access_level GitLab access_level to check - ''' + """ + def member_as_good_access_level(self, group, user_id, access_level): member = self.find_member(group, user_id) return member.access_level == access_level - ''' + """ @param user User object @param group_path Complete path of the Group including parent group path. 
/ @param access_level GitLab access_level to assign - ''' + """ + def assign_user_to_group(self, user, group_identifier, access_level): group = find_group(self._gitlab, group_identifier) @@ -415,67 +428,70 @@ def assign_user_to_group(self, user, group_identifier, access_level): return True else: try: - group.members.create({ - 'user_id': self.get_user_id(user), - 'access_level': self.ACCESS_LEVEL[access_level]}) + group.members.create( + {"user_id": self.get_user_id(user), "access_level": self.ACCESS_LEVEL[access_level]} + ) except gitlab.exceptions.GitlabCreateError as e: self._module.fail_json(msg=f"Failed to assign user to group: {e}") return True return False - ''' + """ @param user User object @param arguments User attributes - ''' + """ + def update_user(self, user, arguments, uncheckable_args): changed = False for arg_key, arg_value in arguments.items(): - av = arg_value['value'] + av = arg_value["value"] if av is not None: if arg_key == "identities": - changed = self.add_identities(user, av, uncheckable_args['overwrite_identities']['value']) + changed = self.add_identities(user, av, uncheckable_args["overwrite_identities"]["value"]) elif getattr(user, arg_key) != av: - setattr(user, arg_value.get('setter', arg_key), av) + setattr(user, arg_value.get("setter", arg_key), av) changed = True for arg_key, arg_value in uncheckable_args.items(): - av = arg_value['value'] + av = arg_value["value"] if av is not None: - setattr(user, arg_value.get('setter', arg_key), av) + setattr(user, arg_value.get("setter", arg_key), av) return (changed, user) - ''' + """ @param arguments User attributes - ''' + """ + def create_user(self, arguments): if self._module.check_mode: return True identities = None - if 'identities' in arguments: - identities = arguments['identities'] - del arguments['identities'] + if "identities" in arguments: + identities = arguments["identities"] + del arguments["identities"] try: user = self._gitlab.users.create(arguments) if identities: self.add_identities(user, identities) - except (gitlab.exceptions.GitlabCreateError) as e: + except gitlab.exceptions.GitlabCreateError as e: self._module.fail_json(msg=f"Failed to create user: {e}") return user - ''' + """ @param user User object @param identities List of identities to be added/updated @param overwrite_identities Overwrite user identities with identities passed to this module - ''' + """ + def add_identities(self, user, identities, overwrite_identities=False): changed = False if overwrite_identities: @@ -483,41 +499,41 @@ def add_identities(self, user, identities, overwrite_identities=False): for identity in identities: if identity not in user.identities: - setattr(user, 'provider', identity['provider']) - setattr(user, 'extern_uid', identity['extern_uid']) + setattr(user, "provider", identity["provider"]) + setattr(user, "extern_uid", identity["extern_uid"]) if not self._module.check_mode: user.save() changed = True return changed - ''' + """ @param user User object @param identities List of identities to be added/updated - ''' + """ + def delete_identities(self, user, identities): changed = False for identity in user.identities: if identity not in identities: if not self._module.check_mode: - user.identityproviders.delete(identity['provider']) + user.identityproviders.delete(identity["provider"]) changed = True return changed - ''' + """ @param username Username of the user - ''' + """ + def find_user(self, username): return next( - ( - user for user in self._gitlab.users.list(search=username, **list_all_kwargs) - if 
user.username == username - ), - None + (user for user in self._gitlab.users.list(search=username, **list_all_kwargs) if user.username == username), + None, ) - ''' + """ @param username Username of the user - ''' + """ + def exists_user(self, username): # When user exists, object will be stored in self.user_object. user = self.find_user(username) @@ -526,12 +542,13 @@ def exists_user(self, username): return True return False - ''' + """ @param username Username of the user - ''' + """ + def is_active(self, username): user = self.find_user(username) - return user.attributes['state'] == 'active' + return user.attributes["state"] == "active" def delete_user(self): if self._module.check_mode: @@ -568,65 +585,65 @@ def sanitize_arguments(arguments): def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - name=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present", "blocked", "unblocked"]), - username=dict(type='str', required=True), - password=dict(type='str', no_log=True), - reset_password=dict(type='bool', default=False, no_log=False), - email=dict(type='str'), - sshkey_name=dict(type='str'), - sshkey_file=dict(type='str', no_log=False), - sshkey_expires_at=dict(type='str', no_log=False), - group=dict(type='str'), - access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]), - confirm=dict(type='bool', default=True), - isadmin=dict(type='bool', default=False), - external=dict(type='bool', default=False), - identities=dict(type='list', elements='dict'), - overwrite_identities=dict(type='bool', default=False), - )) + argument_spec.update( + dict( + name=dict(type="str"), + state=dict(type="str", default="present", choices=["absent", "present", "blocked", "unblocked"]), + username=dict(type="str", required=True), + password=dict(type="str", no_log=True), + reset_password=dict(type="bool", default=False, no_log=False), + email=dict(type="str"), + sshkey_name=dict(type="str"), + sshkey_file=dict(type="str", no_log=False), + sshkey_expires_at=dict(type="str", no_log=False), + group=dict(type="str"), + access_level=dict( + type="str", default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"] + ), + confirm=dict(type="bool", default=True), + isadmin=dict(type="bool", default=False), + external=dict(type="bool", default=False), + identities=dict(type="list", elements="dict"), + overwrite_identities=dict(type="bool", default=False), + ) + ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], + ["api_username", "api_token"], + ["api_username", "api_oauth_token"], + ["api_username", "api_job_token"], + ["api_token", "api_oauth_token"], + ["api_token", "api_job_token"], ], required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ["api_username", "api_password"], ], + required_one_of=[["api_username", "api_token", "api_oauth_token", "api_job_token"]], supports_check_mode=True, - required_if=( - ('state', 'present', ['name', 'email']), - ) + required_if=(("state", "present", ["name", "email"]),), ) # check prerequisites and connect to gitlab server gitlab_instance = gitlab_authentication(module) - user_name = 
module.params['name'] - state = module.params['state'] - user_username = module.params['username'].lower() - user_password = module.params['password'] - user_reset_password = module.params['reset_password'] - user_email = module.params['email'] - user_sshkey_name = module.params['sshkey_name'] - user_sshkey_file = module.params['sshkey_file'] - user_sshkey_expires_at = module.params['sshkey_expires_at'] - group_path = module.params['group'] - access_level = module.params['access_level'] - confirm = module.params['confirm'] - user_isadmin = module.params['isadmin'] - user_external = module.params['external'] - user_identities = module.params['identities'] - overwrite_identities = module.params['overwrite_identities'] + user_name = module.params["name"] + state = module.params["state"] + user_username = module.params["username"].lower() + user_password = module.params["password"] + user_reset_password = module.params["reset_password"] + user_email = module.params["email"] + user_sshkey_name = module.params["sshkey_name"] + user_sshkey_file = module.params["sshkey_file"] + user_sshkey_expires_at = module.params["sshkey_expires_at"] + group_path = module.params["group"] + access_level = module.params["access_level"] + confirm = module.params["confirm"] + user_isadmin = module.params["isadmin"] + user_external = module.params["external"] + user_identities = module.params["identities"] + overwrite_identities = module.params["overwrite_identities"] gitlab_user = GitLabUser(module, gitlab_instance) user_exists = gitlab_user.exists_user(user_username) @@ -635,48 +652,57 @@ def main(): else: user_is_active = False - if state == 'absent': + if state == "absent": if user_exists: gitlab_user.delete_user() module.exit_json(changed=True, msg=f"Successfully deleted user {user_username}") else: module.exit_json(changed=False, msg="User deleted or does not exists") - if state == 'blocked': + if state == "blocked": if user_exists and user_is_active: gitlab_user.block_user() module.exit_json(changed=True, msg=f"Successfully blocked user {user_username}") else: module.exit_json(changed=False, msg="User already blocked or does not exists") - if state == 'unblocked': + if state == "unblocked": if user_exists and not user_is_active: gitlab_user.unblock_user() module.exit_json(changed=True, msg=f"Successfully unblocked user {user_username}") else: module.exit_json(changed=False, msg="User is not blocked or does not exists") - if state == 'present': - if gitlab_user.create_or_update_user(user_username, { - "name": user_name, - "password": user_password, - "reset_password": user_reset_password, - "email": user_email, - "sshkey_name": user_sshkey_name, - "sshkey_file": user_sshkey_file, - "sshkey_expires_at": user_sshkey_expires_at, - "group_path": group_path, - "access_level": access_level, - "confirm": confirm, - "isadmin": user_isadmin, - "external": user_external, - "identities": user_identities, - "overwrite_identities": overwrite_identities, - }): - module.exit_json(changed=True, msg=f"Successfully created or updated the user {user_username}", user=gitlab_user.user_object._attrs) + if state == "present": + if gitlab_user.create_or_update_user( + user_username, + { + "name": user_name, + "password": user_password, + "reset_password": user_reset_password, + "email": user_email, + "sshkey_name": user_sshkey_name, + "sshkey_file": user_sshkey_file, + "sshkey_expires_at": user_sshkey_expires_at, + "group_path": group_path, + "access_level": access_level, + "confirm": confirm, + "isadmin": user_isadmin, + "external": 
user_external, + "identities": user_identities, + "overwrite_identities": overwrite_identities, + }, + ): + module.exit_json( + changed=True, + msg=f"Successfully created or updated the user {user_username}", + user=gitlab_user.user_object._attrs, + ) else: - module.exit_json(changed=False, msg=f"No need to update the user {user_username}", user=gitlab_user.user_object._attrs) + module.exit_json( + changed=False, msg=f"No need to update the user {user_username}", user=gitlab_user.user_object._attrs + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/grove.py b/plugins/modules/grove.py index 4a2eb18fd54..a5b5568f88e 100644 --- a/plugins/modules/grove.py +++ b/plugins/modules/grove.py @@ -70,7 +70,7 @@ from ansible.module_utils.urls import fetch_url -BASE_URL = 'https://grove.io/api/notice/%s/' +BASE_URL = "https://grove.io/api/notice/%s/" # ============================================================== # do_notify_grove @@ -81,15 +81,16 @@ def do_notify_grove(module, channel_token, service, message, url=None, icon_url= my_data = dict(service=service, message=message) if url is not None: - my_data['url'] = url + my_data["url"] = url if icon_url is not None: - my_data['icon_url'] = icon_url + my_data["icon_url"] = icon_url data = urlencode(my_data) response, info = fetch_url(module, my_url, data=data) - if info['status'] != 200: + if info["status"] != 200: module.fail_json(msg=f"failed to send notification: {info['msg']}") + # ============================================================== # main @@ -97,20 +98,20 @@ def do_notify_grove(module, channel_token, service, message, url=None, icon_url= def main(): module = AnsibleModule( argument_spec=dict( - channel_token=dict(type='str', required=True, no_log=True), - message_content=dict(type='str', required=True), - service=dict(type='str', default='ansible'), - url=dict(type='str'), - icon_url=dict(type='str'), - validate_certs=dict(default=True, type='bool'), + channel_token=dict(type="str", required=True, no_log=True), + message_content=dict(type="str", required=True), + service=dict(type="str", default="ansible"), + url=dict(type="str"), + icon_url=dict(type="str"), + validate_certs=dict(default=True, type="bool"), ) ) - channel_token = module.params['channel_token'] - service = module.params['service'] - message = module.params['message_content'] - url = module.params['url'] - icon_url = module.params['icon_url'] + channel_token = module.params["channel_token"] + service = module.params["service"] + message = module.params["message_content"] + url = module.params["url"] + icon_url = module.params["icon_url"] do_notify_grove(module, channel_token, service, message, url, icon_url) @@ -118,5 +119,5 @@ def main(): module.exit_json(msg="OK") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/gunicorn.py b/plugins/modules/gunicorn.py index 9726163ccf9..86a6eff26c9 100644 --- a/plugins/modules/gunicorn.py +++ b/plugins/modules/gunicorn.py @@ -107,9 +107,9 @@ def search_existing_config(config, option): - ''' search in config file for specified option ''' + """search in config file for specified option""" if config and os.path.isfile(config): - with open(config, 'r') as f: + with open(config, "r") as f: for line in f: if option in line: return line @@ -117,36 +117,35 @@ def search_existing_config(config, option): def remove_tmp_file(file_path): - ''' remove temporary files ''' + """remove temporary files""" if os.path.isfile(file_path): os.remove(file_path) def 
main(): - # available gunicorn options on module gunicorn_options = { - 'config': '-c', - 'chdir': '--chdir', - 'worker': '-k', - 'user': '-u', + "config": "-c", + "chdir": "--chdir", + "worker": "-k", + "user": "-u", } module = AnsibleModule( argument_spec=dict( - app=dict(required=True, type='str', aliases=['name']), - venv=dict(type='path', aliases=['virtualenv']), - config=dict(type='path', aliases=['conf']), - chdir=dict(type='path'), - pid=dict(type='path'), - user=dict(type='str'), - worker=dict(type='str', choices=['sync', 'eventlet', 'gevent', 'tornado ', 'gthread', 'gaiohttp']), + app=dict(required=True, type="str", aliases=["name"]), + venv=dict(type="path", aliases=["virtualenv"]), + config=dict(type="path", aliases=["conf"]), + chdir=dict(type="path"), + pid=dict(type="path"), + user=dict(type="str"), + worker=dict(type="str", choices=["sync", "eventlet", "gevent", "tornado ", "gthread", "gaiohttp"]), ) ) # temporary files in case no option provided - tmp_error_log = os.path.join(module.tmpdir, 'gunicorn.temp.error.log') - tmp_pid_file = os.path.join(module.tmpdir, 'gunicorn.temp.pid') + tmp_error_log = os.path.join(module.tmpdir, "gunicorn.temp.error.log") + tmp_pid_file = os.path.join(module.tmpdir, "gunicorn.temp.pid") # remove temp file if exists remove_tmp_file(tmp_pid_file) @@ -154,15 +153,15 @@ def main(): # obtain app name and venv params = module.params - app = params['app'] - venv = params['venv'] - pid = params['pid'] + app = params["app"] + venv = params["venv"] + pid = params["pid"] # use venv path if exists if venv: gunicorn_command = f"{venv}/bin/gunicorn" else: - gunicorn_command = module.get_bin_path('gunicorn') + gunicorn_command = module.get_bin_path("gunicorn") # to daemonize the process options = ["-D"] @@ -174,19 +173,19 @@ def main(): options.append(gunicorn_options[option]) options.append(param) - error_log = search_existing_config(params['config'], 'errorlog') + error_log = search_existing_config(params["config"], "errorlog") if not error_log: # place error log somewhere in case of fail options.append("--error-logfile") options.append(tmp_error_log) - pid_file = search_existing_config(params['config'], 'pid') - if not params['pid'] and not pid_file: + pid_file = search_existing_config(params["config"], "pid") + if not params["pid"] and not pid_file: pid = tmp_pid_file # add option for pid file if not found on config file if not pid_file: - options.append('--pid') + options.append("--pid") options.append(pid) # put args together @@ -197,31 +196,31 @@ def main(): # wait for gunicorn to dump to log time.sleep(0.5) if os.path.isfile(pid): - with open(pid, 'r') as f: + with open(pid, "r") as f: result = f.readline().strip() - if not params['pid']: + if not params["pid"]: os.remove(pid) module.exit_json(changed=True, pid=result, debug=" ".join(args)) else: # if user defined own error log, check that if error_log: - error = f'Please check your {error_log.strip()}' + error = f"Please check your {error_log.strip()}" else: if os.path.isfile(tmp_error_log): - with open(tmp_error_log, 'r') as f: + with open(tmp_error_log, "r") as f: error = f.read() # delete tmp log os.remove(tmp_error_log) else: error = "Log not found" - module.fail_json(msg=f'Failed to start gunicorn. {error}', error=err) + module.fail_json(msg=f"Failed to start gunicorn. 
{error}", error=err) else: - module.fail_json(msg=f'Failed to start gunicorn {err}', error=err) + module.fail_json(msg=f"Failed to start gunicorn {err}", error=err) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/haproxy.py b/plugins/modules/haproxy.py index a6b345555d0..1be422b739d 100644 --- a/plugins/modules/haproxy.py +++ b/plugins/modules/haproxy.py @@ -217,7 +217,7 @@ DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock" RECV_SIZE = 1024 -ACTION_CHOICES = ['enabled', 'disabled', 'drain'] +ACTION_CHOICES = ["enabled", "disabled", "drain"] WAIT_RETRIES = 25 WAIT_INTERVAL = 5 @@ -242,19 +242,19 @@ class HAProxy: def __init__(self, module): self.module = module - self.state = self.module.params['state'] - self.host = self.module.params['host'] - self.backend = self.module.params['backend'] - self.weight = self.module.params['weight'] - self.socket = self.module.params['socket'] - self.shutdown_sessions = self.module.params['shutdown_sessions'] - self.fail_on_not_found = self.module.params['fail_on_not_found'] - self.agent = self.module.params['agent'] - self.health = self.module.params['health'] - self.wait = self.module.params['wait'] - self.wait_retries = self.module.params['wait_retries'] - self.wait_interval = self.module.params['wait_interval'] - self._drain = self.module.params['drain'] + self.state = self.module.params["state"] + self.host = self.module.params["host"] + self.backend = self.module.params["backend"] + self.weight = self.module.params["weight"] + self.socket = self.module.params["socket"] + self.shutdown_sessions = self.module.params["shutdown_sessions"] + self.fail_on_not_found = self.module.params["fail_on_not_found"] + self.agent = self.module.params["agent"] + self.health = self.module.params["health"] + self.wait = self.module.params["wait"] + self.wait_retries = self.module.params["wait_retries"] + self.wait_interval = self.module.params["wait_interval"] + self._drain = self.module.params["drain"] self.command_results = {} def execute(self, cmd, timeout=200, capture_output=True): @@ -264,15 +264,15 @@ def execute(self, cmd, timeout=200, capture_output=True): """ self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.client.connect(self.socket) - self.client.sendall(to_bytes(f'{cmd}\n')) + self.client.sendall(to_bytes(f"{cmd}\n")) - result = b'' - buf = b'' + result = b"" + buf = b"" buf = self.client.recv(RECV_SIZE) while buf: result += buf buf = self.client.recv(RECV_SIZE) - result = to_text(result, errors='surrogate_or_strict') + result = to_text(result, errors="surrogate_or_strict") if capture_output: self.capture_command_output(cmd, result.strip()) @@ -283,32 +283,32 @@ def capture_command_output(self, cmd, output): """ Capture the output for a command """ - if 'command' not in self.command_results: - self.command_results['command'] = [] - self.command_results['command'].append(cmd) - if 'output' not in self.command_results: - self.command_results['output'] = [] - self.command_results['output'].append(output) + if "command" not in self.command_results: + self.command_results["command"] = [] + self.command_results["command"].append(cmd) + if "output" not in self.command_results: + self.command_results["output"] = [] + self.command_results["output"].append(output) def discover_all_backends(self): """ Discover all entries with svname = 'BACKEND' and return a list of their corresponding pxnames """ - data = self.execute('show stat', 200, False).lstrip('# ') + data = self.execute("show stat", 200, 
False).lstrip("# ") r = csv.DictReader(data.splitlines()) - return tuple(d['pxname'] for d in r if d['svname'] == 'BACKEND') + return tuple(d["pxname"] for d in r if d["svname"] == "BACKEND") def discover_version(self): """ Attempt to extract the haproxy version. Return a tuple containing major and minor version. """ - data = self.execute('show info', 200, False) + data = self.execute("show info", 200, False) lines = data.splitlines() - line = [x for x in lines if 'Version:' in x] + line = [x for x in lines if "Version:" in x] try: - version_values = line[0].partition(':')[2].strip().split('.', 3) + version_values = line[0].partition(":")[2].strip().split(".", 3) version = (int(version_values[0]), int(version_values[1])) except (ValueError, TypeError, IndexError): version = None @@ -331,8 +331,7 @@ def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None): # Fail when backends were not found state = self.get_state_for(backend, svname) if (self.fail_on_not_found) and state is None: - self.module.fail_json( - msg=f"The specified backend '{backend}/{svname}' was not found!") + self.module.fail_json(msg=f"The specified backend '{backend}/{svname}' was not found!") if state is not None: self.execute(Template(cmd).substitute(pxname=backend, svname=svname)) @@ -344,13 +343,13 @@ def get_state_for(self, pxname, svname): Find the state of specific services. When pxname is not set, get all backends for a specific host. Returns a list of dictionaries containing the status and weight for those services. """ - data = self.execute('show stat', 200, False).lstrip('# ') + data = self.execute("show stat", 200, False).lstrip("# ") r = csv.DictReader(data.splitlines()) def unpack_state(d): - return {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']} + return {"status": d["status"], "weight": d["weight"], "scur": d["scur"]} - state = tuple(unpack_state(d) for d in r if (pxname is None or d['pxname'] == pxname) and d['svname'] == svname) + state = tuple(unpack_state(d) for d in r if (pxname is None or d["pxname"] == pxname) and d["svname"] == svname) return state or None def wait_until_status(self, pxname, svname, status): @@ -365,12 +364,14 @@ def wait_until_status(self, pxname, svname, status): # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here # When using track we get a status like this: MAINT (via pxname/svname) so we need to do substring matching - if status in state[0]['status']: - if not self._drain or state[0]['scur'] == '0': + if status in state[0]["status"]: + if not self._drain or state[0]["scur"] == "0": return True time.sleep(self.wait_interval) - self.module.fail_json(msg=f"server {pxname}/{svname} not status '{status}' after {self.wait_retries} retries. Aborting.") + self.module.fail_json( + msg=f"server {pxname}/{svname} not status '{status}' after {self.wait_retries} retries. Aborting." 
+ ) def enabled(self, host, backend, weight): """ @@ -385,7 +386,7 @@ def enabled(self, host, backend, weight): cmd += "; enable health $pxname/$svname" if weight: cmd += f"; set weight $pxname/$svname {weight}" - self.execute_for_backends(cmd, backend, host, 'UP') + self.execute_for_backends(cmd, backend, host, "UP") def disabled(self, host, backend, shutdown_sessions): """ @@ -401,9 +402,9 @@ def disabled(self, host, backend, shutdown_sessions): cmd += "; disable server $pxname/$svname" if shutdown_sessions: cmd += "; shutdown sessions server $pxname/$svname" - self.execute_for_backends(cmd, backend, host, 'MAINT') + self.execute_for_backends(cmd, backend, host, "MAINT") - def drain(self, host, backend, status='DRAIN'): + def drain(self, host, backend, status="DRAIN"): """ Drain action, sets the server to DRAIN mode. In this mode, the server will not accept any new connections @@ -423,47 +424,46 @@ def act(self): Figure out what you want to do from ansible, and then do it. """ # Get the state before the run - self.command_results['state_before'] = self.get_state_for(self.backend, self.host) + self.command_results["state_before"] = self.get_state_for(self.backend, self.host) # toggle enable/disable server - if self.state == 'enabled': + if self.state == "enabled": self.enabled(self.host, self.backend, self.weight) - elif self.state == 'disabled' and self._drain: - self.drain(self.host, self.backend, status='MAINT') - elif self.state == 'disabled': + elif self.state == "disabled" and self._drain: + self.drain(self.host, self.backend, status="MAINT") + elif self.state == "disabled": self.disabled(self.host, self.backend, self.shutdown_sessions) - elif self.state == 'drain': + elif self.state == "drain": self.drain(self.host, self.backend) else: self.module.fail_json(msg=f"unknown state specified: '{self.state}'") # Get the state after the run - self.command_results['state_after'] = self.get_state_for(self.backend, self.host) + self.command_results["state_after"] = self.get_state_for(self.backend, self.host) # Report change status - self.command_results['changed'] = (self.command_results['state_before'] != self.command_results['state_after']) + self.command_results["changed"] = self.command_results["state_before"] != self.command_results["state_after"] self.module.exit_json(**self.command_results) def main(): - # load ansible module object module = AnsibleModule( argument_spec=dict( - state=dict(type='str', required=True, choices=ACTION_CHOICES), - host=dict(type='str', required=True), - backend=dict(type='str'), - weight=dict(type='str'), - socket=dict(type='path', default=DEFAULT_SOCKET_LOCATION), - shutdown_sessions=dict(type='bool', default=False), - fail_on_not_found=dict(type='bool', default=False), - health=dict(type='bool', default=False), - agent=dict(type='bool', default=False), - wait=dict(type='bool', default=False), - wait_retries=dict(type='int', default=WAIT_RETRIES), - wait_interval=dict(type='int', default=WAIT_INTERVAL), - drain=dict(type='bool', default=False), + state=dict(type="str", required=True, choices=ACTION_CHOICES), + host=dict(type="str", required=True), + backend=dict(type="str"), + weight=dict(type="str"), + socket=dict(type="path", default=DEFAULT_SOCKET_LOCATION), + shutdown_sessions=dict(type="bool", default=False), + fail_on_not_found=dict(type="bool", default=False), + health=dict(type="bool", default=False), + agent=dict(type="bool", default=False), + wait=dict(type="bool", default=False), + wait_retries=dict(type="int", default=WAIT_RETRIES), + 
wait_interval=dict(type="int", default=WAIT_INTERVAL), + drain=dict(type="bool", default=False), ), ) @@ -474,5 +474,5 @@ def main(): ansible_haproxy.act() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/heroku_collaborator.py b/plugins/modules/heroku_collaborator.py index d310d854aba..cdd16f11d34 100644 --- a/plugins/modules/heroku_collaborator.py +++ b/plugins/modules/heroku_collaborator.py @@ -85,27 +85,27 @@ def add_or_delete_heroku_collaborator(module, client): - user = module.params['user'] - state = module.params['state'] + user = module.params["user"] + state = module.params["state"] affected_apps = [] result_state = False - for app in module.params['apps']: + for app in module.params["apps"]: if app not in client.apps(): - module.fail_json(msg=f'App {app} does not exist') + module.fail_json(msg=f"App {app} does not exist") heroku_app = client.apps()[app] heroku_collaborator_list = [collaborator.user.email for collaborator in heroku_app.collaborators()] - if state == 'absent' and user in heroku_collaborator_list: + if state == "absent" and user in heroku_collaborator_list: if not module.check_mode: heroku_app.remove_collaborator(user) affected_apps += [app] result_state = True - elif state == 'present' and user not in heroku_collaborator_list: + elif state == "present" and user not in heroku_collaborator_list: if not module.check_mode: - heroku_app.add_collaborator(user_id_or_email=user, silent=module.params['suppress_invitation']) + heroku_app.add_collaborator(user_id_or_email=user, silent=module.params["suppress_invitation"]) affected_apps += [app] result_state = True @@ -115,15 +115,12 @@ def add_or_delete_heroku_collaborator(module, client): def main(): argument_spec = HerokuHelper.heroku_argument_spec() argument_spec.update( - user=dict(required=True, type='str'), - apps=dict(required=True, type='list', elements='str'), - suppress_invitation=dict(default=False, type='bool'), - state=dict(default='present', type='str', choices=['present', 'absent']), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True + user=dict(required=True, type="str"), + apps=dict(required=True, type="list", elements="str"), + suppress_invitation=dict(default=False, type="bool"), + state=dict(default="present", type="str", choices=["present", "absent"]), ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) client = HerokuHelper(module).get_heroku_client() @@ -131,5 +128,5 @@ def main(): module.exit_json(changed=has_changed, msg=msg) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/hg.py b/plugins/modules/hg.py index 4961a87298a..ed42b1687a2 100644 --- a/plugins/modules/hg.py +++ b/plugins/modules/hg.py @@ -106,7 +106,7 @@ def _command(self, args_list): return (rc, out, err) def _list_untracked(self): - args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print'] + args = ["purge", "--config", "extensions.purge=", "-R", self.dest, "--print"] return self._command(args) def get_revision(self): @@ -119,22 +119,22 @@ def get_revision(self): Read the full description via hg id --help """ - (rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest]) + (rc, out, err) = self._command(["id", "-b", "-i", "-t", "-R", self.dest]) if rc != 0: self.module.fail_json(msg=err) else: - return to_native(out).strip('\n') + return to_native(out).strip("\n") def get_remote_revision(self): - (rc, out, err) = self._command(['id', self.repo]) + (rc, 
out, err) = self._command(["id", self.repo]) if rc != 0: self.module.fail_json(msg=err) else: - return to_native(out).strip('\n') + return to_native(out).strip("\n") def has_local_mods(self): now = self.get_revision() - if '+' in now: + if "+" in now: return True else: return False @@ -144,7 +144,7 @@ def discard(self): if not before: return False - args = ['update', '-C', '-R', self.dest, '-r', '.'] + args = ["update", "-C", "-R", self.dest, "-r", "."] (rc, out, err) = self._command(args) if rc != 0: self.module.fail_json(msg=err) @@ -160,8 +160,8 @@ def purge(self): self.module.fail_json(msg=err1) # there are some untrackd files - if out1 != '': - args = ['purge', '--config', 'extensions.purge=', '-R', self.dest] + if out1 != "": + args = ["purge", "--config", "extensions.purge=", "-R", self.dest] (rc2, out2, err2) = self._command(args) if rc2 != 0: self.module.fail_json(msg=err2) @@ -183,18 +183,17 @@ def cleanup(self, force, purge): return False def pull(self): - return self._command( - ['pull', '-R', self.dest, self.repo]) + return self._command(["pull", "-R", self.dest, self.repo]) def update(self): if self.revision is not None: - return self._command(['update', '-r', self.revision, '-R', self.dest]) - return self._command(['update', '-R', self.dest]) + return self._command(["update", "-r", self.revision, "-R", self.dest]) + return self._command(["update", "-R", self.dest]) def clone(self): if self.revision is not None: - return self._command(['clone', self.repo, self.dest, '-r', self.revision]) - return self._command(['clone', self.repo, self.dest]) + return self._command(["clone", self.repo, self.dest, "-r", self.revision]) + return self._command(["clone", self.repo, self.dest]) @property def at_revision(self): @@ -205,7 +204,7 @@ def at_revision(self): if self.revision is None or len(self.revision) < 7: # Assume it is a rev number, tag, or branch return False - (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest]) + (rc, out, err) = self._command(["--debug", "id", "-i", "-R", self.dest]) if rc != 0: self.module.fail_json(msg=err) if out.startswith(self.revision): @@ -215,32 +214,33 @@ def at_revision(self): # =========================================== + def main(): module = AnsibleModule( argument_spec=dict( - repo=dict(type='str', required=True, aliases=['name']), - dest=dict(type='path'), - revision=dict(type='str', aliases=['version']), - force=dict(type='bool', default=False), - purge=dict(type='bool', default=False), - update=dict(type='bool', default=True), - clone=dict(type='bool', default=True), - executable=dict(type='str'), + repo=dict(type="str", required=True, aliases=["name"]), + dest=dict(type="path"), + revision=dict(type="str", aliases=["version"]), + force=dict(type="bool", default=False), + purge=dict(type="bool", default=False), + update=dict(type="bool", default=True), + clone=dict(type="bool", default=True), + executable=dict(type="str"), ), ) - repo = module.params['repo'] - dest = module.params['dest'] - revision = module.params['revision'] - force = module.params['force'] - purge = module.params['purge'] - update = module.params['update'] - clone = module.params['clone'] - hg_path = module.params['executable'] or module.get_bin_path('hg', True) + repo = module.params["repo"] + dest = module.params["dest"] + revision = module.params["revision"] + force = module.params["force"] + purge = module.params["purge"] + update = module.params["update"] + clone = module.params["clone"] + hg_path = module.params["executable"] or 
module.get_bin_path("hg", True) if dest is not None: - hgrc = os.path.join(dest, '.hg/hgrc') + hgrc = os.path.join(dest, ".hg/hgrc") # initial states - before = '' + before = "" changed = False cleaned = False @@ -292,5 +292,5 @@ def main(): module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/homebrew.py b/plugins/modules/homebrew.py index 2216ee683f2..004d723408c 100644 --- a/plugins/modules/homebrew.py +++ b/plugins/modules/homebrew.py @@ -186,29 +186,33 @@ # exceptions -------------------------------------------------------------- {{{ class HomebrewException(Exception): pass + + # /exceptions ------------------------------------------------------------- }}} # utils ------------------------------------------------------------------- {{{ def _create_regex_group_complement(s): - lines = (line.strip() for line in s.split('\n') if line.strip()) - chars = [_f for _f in (line.split('#')[0].strip() for line in lines) if _f] + lines = (line.strip() for line in s.split("\n") if line.strip()) + chars = [_f for _f in (line.split("#")[0].strip() for line in lines) if _f] group = rf"[^{''.join(chars)}]" return re.compile(group) def _check_package_in_json(json_output, package_type): return bool(json_output.get(package_type, []) and json_output[package_type][0].get("installed")) + + # /utils ------------------------------------------------------------------ }}} class Homebrew: - '''A class to manage Homebrew packages.''' + """A class to manage Homebrew packages.""" # class validations -------------------------------------------- {{{ @classmethod def valid_state(cls, state): - ''' + """ A valid state is one of: - None - installed @@ -217,26 +221,23 @@ def valid_state(cls, state): - linked - unlinked - absent - ''' + """ if state is None: return True else: - return ( - isinstance(state, str) - and state.lower() in ( - 'installed', - 'upgraded', - 'head', - 'linked', - 'unlinked', - 'absent', - ) + return isinstance(state, str) and state.lower() in ( + "installed", + "upgraded", + "head", + "linked", + "unlinked", + "absent", ) @classmethod def valid_module(cls, module): - '''A valid module is an instance of AnsibleModule.''' + """A valid module is an instance of AnsibleModule.""" return isinstance(module, AnsibleModule) @@ -252,7 +253,7 @@ def module(self, module): if not self.valid_module(module): self._module = None self.failed = True - self.message = f'Invalid module: {module}.' + self.message = f"Invalid module: {module}." raise HomebrewException(self.message) else: @@ -268,12 +269,12 @@ def path(self, path): if not HomebrewValidate.valid_path(path): self._path = [] self.failed = True - self.message = f'Invalid path: {path}.' + self.message = f"Invalid path: {path}." raise HomebrewException(self.message) else: if isinstance(path, str): - self._path = path.split(':') + self._path = path.split(":") else: self._path = path @@ -288,7 +289,7 @@ def brew_path(self, brew_path): if not HomebrewValidate.valid_brew_path(brew_path): self._brew_path = None self.failed = True - self.message = f'Invalid brew_path: {brew_path}.' + self.message = f"Invalid brew_path: {brew_path}." 
raise HomebrewException(self.message) else: @@ -306,21 +307,34 @@ def params(self, params): # /class properties -------------------------------------------- }}} - def __init__(self, module, path, packages=None, state=None, - update_homebrew=False, upgrade_all=False, - install_options=None, upgrade_options=None, - force_formula=False): + def __init__( + self, + module, + path, + packages=None, + state=None, + update_homebrew=False, + upgrade_all=False, + install_options=None, + upgrade_options=None, + force_formula=False, + ): if not install_options: install_options = list() if not upgrade_options: upgrade_options = list() self._setup_status_vars() - self._setup_instance_vars(module=module, path=path, packages=packages, - state=state, update_homebrew=update_homebrew, - upgrade_all=upgrade_all, - install_options=install_options, - upgrade_options=upgrade_options, - force_formula=force_formula) + self._setup_instance_vars( + module=module, + path=path, + packages=packages, + state=state, + update_homebrew=update_homebrew, + upgrade_all=upgrade_all, + install_options=install_options, + upgrade_options=upgrade_options, + force_formula=force_formula, + ) self._prep() @@ -330,7 +344,7 @@ def _setup_status_vars(self): self.changed = False self.changed_pkgs = [] self.unchanged_pkgs = [] - self.message = '' + self.message = "" def _setup_instance_vars(self, **kwargs): self.installed_packages = set() @@ -345,19 +359,19 @@ def _prep_brew_path(self): if not self.module: self.brew_path = None self.failed = True - self.message = 'AnsibleModule not set.' + self.message = "AnsibleModule not set." raise HomebrewException(self.message) self.brew_path = self.module.get_bin_path( - 'brew', + "brew", required=True, opt_dirs=self.path, ) if not self.brew_path: self.brew_path = None self.failed = True - self.message = 'Unable to locate homebrew executable.' - raise HomebrewException('Unable to locate homebrew executable.') + self.message = "Unable to locate homebrew executable." + raise HomebrewException("Unable to locate homebrew executable.") return self.brew_path @@ -407,7 +421,7 @@ def _extract_package_name(self, package_detail, is_cask): package_names.update(package_detail.get("aliases", [])) package_names.update(package_detail.get("oldnames", [])) package_names.update(package_detail.get("old_tokens", [])) - if package_detail['tap']: + if package_detail["tap"]: # names so far, with tap prefix added to each tapped_names = {f"{package_detail['tap']}/{x}" for x in package_names} package_names.update(tapped_names) @@ -416,7 +430,9 @@ def _extract_package_name(self, package_detail, is_cask): package_names = package_names & set(self.packages) if len(package_names) != 1: self.failed = True - self.message = f"Package names for {name} are missing or ambiguous: {', '.join((str(p) for p in package_names))}" + self.message = ( + f"Package names for {name} are missing or ambiguous: {', '.join((str(p) for p in package_names))}" + ) raise HomebrewException(self.message) # Then make sure the user provided name resurface. 
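The `_extract_package_name` hunk above boils down to one small disambiguation step that is easy to miss among the quote changes: gather every name brew knows for a package, add tap-prefixed variants, and insist that exactly one of them is a name the user actually asked for. A minimal standalone sketch of that idea follows; the helper name `resolve_requested_name` and the sample metadata are hypothetical, and the real module builds `package_detail` from brew's JSON output and raises `HomebrewException` rather than `ValueError`.

def resolve_requested_name(package_detail, requested):
    # Canonical name plus every alias / old name brew reports for the package.
    names = {package_detail["name"]}
    names.update(package_detail.get("aliases", []))
    names.update(package_detail.get("oldnames", []))
    names.update(package_detail.get("old_tokens", []))
    if package_detail.get("tap"):
        # A tapped package should also match its fully qualified form,
        # e.g. "git" should also match as "homebrew/core/git".
        names |= {f"{package_detail['tap']}/{n}" for n in names}
    # Keep only the names the user requested; exactly one must remain.
    matches = names & set(requested)
    if len(matches) != 1:
        raise ValueError(f"missing or ambiguous: {sorted(matches)}")
    return matches.pop()

# Usage with made-up metadata:
detail = {"name": "python@3.12", "aliases": ["python3"], "tap": "homebrew/core"}
assert resolve_requested_name(detail, ["python3"]) == "python3"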
@@ -472,72 +488,74 @@ def _run(self): if self.packages: self._validate_packages_names() self._get_packages_info() - if self.state == 'installed': + if self.state == "installed": return self._install_packages() - elif self.state == 'upgraded': + elif self.state == "upgraded": return self._upgrade_packages() - elif self.state == 'head': + elif self.state == "head": return self._install_packages() - elif self.state == 'linked': + elif self.state == "linked": return self._link_packages() - elif self.state == 'unlinked': + elif self.state == "unlinked": return self._unlink_packages() - elif self.state == 'absent': + elif self.state == "absent": return self._uninstall_packages() # updated -------------------------------- {{{ def _update_homebrew(self): if self.module.check_mode: self.changed = True - self.message = 'Homebrew would be updated.' + self.message = "Homebrew would be updated." raise HomebrewException(self.message) - rc, out, err = self.module.run_command([ - self.brew_path, - 'update', - ]) + rc, out, err = self.module.run_command( + [ + self.brew_path, + "update", + ] + ) if rc == 0: if out and isinstance(out, str): already_updated = any( - re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) - for s in out.split('\n') - if s + re.search(r"Already up-to-date.", s.strip(), re.IGNORECASE) for s in out.split("\n") if s ) if not already_updated: self.changed = True - self.message = 'Homebrew updated successfully.' + self.message = "Homebrew updated successfully." else: - self.message = 'Homebrew already up-to-date.' + self.message = "Homebrew already up-to-date." return True else: self.failed = True self.message = err.strip() raise HomebrewException(self.message) + # /updated ------------------------------- }}} # _upgrade_all --------------------------- {{{ def _upgrade_all(self): if self.module.check_mode: self.changed = True - self.message = 'Homebrew packages would be upgraded.' + self.message = "Homebrew packages would be upgraded." raise HomebrewException(self.message) - cmd = [self.brew_path, 'upgrade'] + self.upgrade_options + cmd = [self.brew_path, "upgrade"] + self.upgrade_options rc, out, err = self.module.run_command(cmd) if rc == 0: if not out: - self.message = 'Homebrew packages already upgraded.' + self.message = "Homebrew packages already upgraded." else: self.changed = True - self.message = 'Homebrew upgraded.' + self.message = "Homebrew upgraded." 
return True else: self.failed = True self.message = err.strip() raise HomebrewException(self.message) + # /_upgrade_all -------------------------- }}} # installed ------------------------------ {{{ @@ -546,7 +564,9 @@ def _install_packages(self): if len(packages_to_install) == 0: self.unchanged_pkgs.extend(self.packages) - self.message = f"Package{'s' if len(self.packages) > 1 else ''} already installed: {', '.join(self.packages)}" + self.message = ( + f"Package{'s' if len(self.packages) > 1 else ''} already installed: {', '.join(self.packages)}" + ) return True if self.module.check_mode: @@ -554,22 +574,17 @@ def _install_packages(self): self.message = f"Package{'s' if len(packages_to_install) > 1 else ''} would be installed: {', '.join(packages_to_install)}" raise HomebrewException(self.message) - if self.state == 'head': - head = '--HEAD' + if self.state == "head": + head = "--HEAD" else: head = None if self.force_formula: - formula = '--formula' + formula = "--formula" else: formula = None - opts = ( - [self.brew_path, 'install'] - + self.install_options - + list(packages_to_install) - + [head, formula] - ) + opts = [self.brew_path, "install"] + self.install_options + list(packages_to_install) + [head, formula] cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) @@ -577,26 +592,26 @@ def _install_packages(self): self.changed_pkgs.extend(packages_to_install) self.unchanged_pkgs.extend(self.installed_packages) self.changed = True - self.message = f"Package{'s' if len(packages_to_install) > 1 else ''} installed: {', '.join(packages_to_install)}" + self.message = ( + f"Package{'s' if len(packages_to_install) > 1 else ''} installed: {', '.join(packages_to_install)}" + ) return True else: self.failed = True self.message = err.strip() raise HomebrewException(self.message) + # /installed ----------------------------- }}} # upgraded ------------------------------- {{{ def _upgrade_all_packages(self): - opts = ( - [self.brew_path, 'upgrade'] - + self.install_options - ) + opts = [self.brew_path, "upgrade"] + self.install_options cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if rc == 0: self.changed = True - self.message = 'All packages upgraded.' + self.message = "All packages upgraded." 
return True else: self.failed = True @@ -617,7 +632,9 @@ def _upgrade_packages(self): if len(packages_to_install_or_upgrade) == 0: self.unchanged_pkgs.extend(self.packages) - self.message = f"Package{'s' if len(self.packages) > 1 else ''} already upgraded: {', '.join(self.packages)}" + self.message = ( + f"Package{'s' if len(self.packages) > 1 else ''} already upgraded: {', '.join(self.packages)}" + ) return True if self.module.check_mode: @@ -625,18 +642,11 @@ def _upgrade_packages(self): self.message = f"Package{'s' if len(packages_to_install_or_upgrade) > 1 else ''} would be upgraded: {', '.join(packages_to_install_or_upgrade)}" raise HomebrewException(self.message) - for command, packages in [ - ("install", packages_to_install), - ("upgrade", packages_to_upgrade) - ]: + for command, packages in [("install", packages_to_install), ("upgrade", packages_to_upgrade)]: if not packages: continue - opts = ( - [self.brew_path, command] - + self.install_options - + list(packages) - ) + opts = [self.brew_path, command] + self.install_options + list(packages) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) @@ -649,6 +659,7 @@ def _upgrade_packages(self): self.unchanged_pkgs.extend(set(self.packages) - packages_to_install_or_upgrade) self.changed = True self.message = f"Package{'s' if len(packages_to_install_or_upgrade) > 1 else ''} upgraded: {', '.join(packages_to_install_or_upgrade)}" + # /upgraded ------------------------------ }}} # uninstalled ---------------------------- {{{ @@ -657,7 +668,9 @@ def _uninstall_packages(self): if len(packages_to_uninstall) == 0: self.unchanged_pkgs.extend(self.packages) - self.message = f"Package{'s' if len(self.packages) > 1 else ''} already uninstalled: {', '.join(self.packages)}" + self.message = ( + f"Package{'s' if len(self.packages) > 1 else ''} already uninstalled: {', '.join(self.packages)}" + ) return True if self.module.check_mode: @@ -665,11 +678,7 @@ def _uninstall_packages(self): self.message = f"Package{'s' if len(packages_to_uninstall) > 1 else ''} would be uninstalled: {', '.join(packages_to_uninstall)}" raise HomebrewException(self.message) - opts = ( - [self.brew_path, 'uninstall', '--force'] - + self.install_options - + list(packages_to_uninstall) - ) + opts = [self.brew_path, "uninstall", "--force"] + self.install_options + list(packages_to_uninstall) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) @@ -683,6 +692,7 @@ def _uninstall_packages(self): self.failed = True self.message = err.strip() raise HomebrewException(self.message) + # /uninstalled ----------------------------- }}} # linked --------------------------------- {{{ @@ -690,7 +700,9 @@ def _link_packages(self): missing_packages = set(self.packages) - self.installed_packages if missing_packages: self.failed = True - self.message = f"Package{'s' if len(missing_packages) > 1 else ''} not installed: {', '.join(missing_packages)}." + self.message = ( + f"Package{'s' if len(missing_packages) > 1 else ''} not installed: {', '.join(missing_packages)}." 
+ ) raise HomebrewException(self.message) if self.module.check_mode: @@ -698,11 +710,7 @@ def _link_packages(self): self.message = f"Package{'s' if len(self.packages) > 1 else ''} would be linked: {', '.join(self.packages)}" raise HomebrewException(self.message) - opts = ( - [self.brew_path, 'link'] - + self.install_options - + self.packages - ) + opts = [self.brew_path, "link"] + self.install_options + self.packages cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) @@ -713,8 +721,11 @@ def _link_packages(self): return True else: self.failed = True - self.message = f"Package{'s' if len(self.packages) > 1 else ''} could not be linked: {', '.join(self.packages)}." + self.message = ( + f"Package{'s' if len(self.packages) > 1 else ''} could not be linked: {', '.join(self.packages)}." + ) raise HomebrewException(self.message) + # /linked -------------------------------- }}} # unlinked ------------------------------- {{{ @@ -722,19 +733,19 @@ def _unlink_packages(self): missing_packages = set(self.packages) - self.installed_packages if missing_packages: self.failed = True - self.message = f"Package{'s' if len(missing_packages) > 1 else ''} not installed: {', '.join(missing_packages)}." + self.message = ( + f"Package{'s' if len(missing_packages) > 1 else ''} not installed: {', '.join(missing_packages)}." + ) raise HomebrewException(self.message) if self.module.check_mode: self.changed = True - self.message = f"Package{'s' if len(self.packages) > 1 else ''} would be unlinked: {', '.join(self.packages)}" + self.message = ( + f"Package{'s' if len(self.packages) > 1 else ''} would be unlinked: {', '.join(self.packages)}" + ) raise HomebrewException(self.message) - opts = ( - [self.brew_path, 'unlink'] - + self.install_options - + self.packages - ) + opts = [self.brew_path, "unlink"] + self.install_options + self.packages cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) @@ -745,8 +756,11 @@ def _unlink_packages(self): return True else: self.failed = True - self.message = f"Package{'s' if len(self.packages) > 1 else ''} could not be unlinked: {', '.join(self.packages)}." + self.message = ( + f"Package{'s' if len(self.packages) > 1 else ''} could not be unlinked: {', '.join(self.packages)}." 
+ ) raise HomebrewException(self.message) + # /unlinked ------------------------------ }}} # /commands ---------------------------------------------------- }}} @@ -756,106 +770,110 @@ def main(): argument_spec=dict( name=dict( aliases=["pkg", "package", "formula"], - type='list', - elements='str', + type="list", + elements="str", ), path=dict( default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", - type='path', + type="path", ), state=dict( default="present", choices=[ - "present", "installed", - "latest", "upgraded", "head", - "linked", "unlinked", - "absent", "removed", "uninstalled", + "present", + "installed", + "latest", + "upgraded", + "head", + "linked", + "unlinked", + "absent", + "removed", + "uninstalled", ], ), update_homebrew=dict( default=False, - type='bool', + type="bool", ), upgrade_all=dict( default=False, aliases=["upgrade"], - type='bool', + type="bool", ), install_options=dict( - aliases=['options'], - type='list', - elements='str', + aliases=["options"], + type="list", + elements="str", ), upgrade_options=dict( - type='list', - elements='str', + type="list", + elements="str", ), force_formula=dict( default=False, - type='bool', + type="bool", ), ), supports_check_mode=True, ) - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C") p = module.params - if p['name']: - packages = [package_name.lower() for package_name in p['name']] + if p["name"]: + packages = [package_name.lower() for package_name in p["name"]] else: packages = None - path = p['path'] + path = p["path"] if path: - path = path.split(':') - - state = p['state'] - if state in ('present', 'installed'): - state = 'installed' - if state in ('head', ): - state = 'head' - if state in ('latest', 'upgraded'): - state = 'upgraded' - if state == 'linked': - state = 'linked' - if state == 'unlinked': - state = 'unlinked' - if state in ('absent', 'removed', 'uninstalled'): - state = 'absent' - - force_formula = p['force_formula'] - update_homebrew = p['update_homebrew'] + path = path.split(":") + + state = p["state"] + if state in ("present", "installed"): + state = "installed" + if state in ("head",): + state = "head" + if state in ("latest", "upgraded"): + state = "upgraded" + if state == "linked": + state = "linked" + if state == "unlinked": + state = "unlinked" + if state in ("absent", "removed", "uninstalled"): + state = "absent" + + force_formula = p["force_formula"] + update_homebrew = p["update_homebrew"] if not update_homebrew: - module.run_command_environ_update.update( - dict(HOMEBREW_NO_AUTO_UPDATE="True") - ) - upgrade_all = p['upgrade_all'] - p['install_options'] = p['install_options'] or [] - install_options = [f'--{install_option}' - for install_option in p['install_options']] - - p['upgrade_options'] = p['upgrade_options'] or [] - upgrade_options = [f'--{upgrade_option}' - for upgrade_option in p['upgrade_options']] - brew = Homebrew(module=module, path=path, packages=packages, - state=state, update_homebrew=update_homebrew, - upgrade_all=upgrade_all, install_options=install_options, - upgrade_options=upgrade_options, force_formula=force_formula) + module.run_command_environ_update.update(dict(HOMEBREW_NO_AUTO_UPDATE="True")) + upgrade_all = p["upgrade_all"] + p["install_options"] = p["install_options"] or [] + install_options = [f"--{install_option}" for install_option in p["install_options"]] + + p["upgrade_options"] = p["upgrade_options"] or [] 
+ upgrade_options = [f"--{upgrade_option}" for upgrade_option in p["upgrade_options"]] + brew = Homebrew( + module=module, + path=path, + packages=packages, + state=state, + update_homebrew=update_homebrew, + upgrade_all=upgrade_all, + install_options=install_options, + upgrade_options=upgrade_options, + force_formula=force_formula, + ) (failed, changed, message) = brew.run() changed_pkgs = brew.changed_pkgs unchanged_pkgs = brew.unchanged_pkgs if failed: module.fail_json(msg=message) - module.exit_json( - changed=changed, - msg=message, - unchanged_pkgs=unchanged_pkgs, - changed_pkgs=changed_pkgs - ) + module.exit_json(changed=changed, msg=message, unchanged_pkgs=unchanged_pkgs, changed_pkgs=changed_pkgs) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/homebrew_cask.py b/plugins/modules/homebrew_cask.py index 95389631d9c..48964fd2ee9 100644 --- a/plugins/modules/homebrew_cask.py +++ b/plugins/modules/homebrew_cask.py @@ -163,30 +163,34 @@ # exceptions -------------------------------------------------------------- {{{ class HomebrewCaskException(Exception): pass + + # /exceptions ------------------------------------------------------------- }}} # utils ------------------------------------------------------------------- {{{ def _create_regex_group_complement(s): - lines = (line.strip() for line in s.split('\n') if line.strip()) - chars = [_f for _f in (line.split('#')[0].strip() for line in lines) if _f] + lines = (line.strip() for line in s.split("\n") if line.strip()) + chars = [_f for _f in (line.split("#")[0].strip() for line in lines) if _f] group = rf"[^{''.join(chars)}]" return re.compile(group) + + # /utils ------------------------------------------------------------------ }}} class HomebrewCask: - '''A class to manage Homebrew casks.''' + """A class to manage Homebrew casks.""" # class regexes ------------------------------------------------ {{{ - VALID_CASK_CHARS = r''' + VALID_CASK_CHARS = r""" \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) . 
# dots / # slash (for taps) \- # dashes @ # at symbol \+ # plus symbol - ''' + """ INVALID_CASK_REGEX = _create_regex_group_complement(VALID_CASK_CHARS) # /class regexes ----------------------------------------------- }}} @@ -194,40 +198,35 @@ class HomebrewCask: # class validations -------------------------------------------- {{{ @classmethod def valid_cask(cls, cask): - '''A valid cask is either None or alphanumeric + backslashes.''' + """A valid cask is either None or alphanumeric + backslashes.""" if cask is None: return True - return ( - isinstance(cask, str) - and not cls.INVALID_CASK_REGEX.search(cask) - ) + return isinstance(cask, str) and not cls.INVALID_CASK_REGEX.search(cask) @classmethod def valid_state(cls, state): - ''' + """ A valid state is one of: - installed - absent - ''' + """ if state is None: return True else: - return ( - isinstance(state, str) - and state.lower() in ( - 'installed', - 'absent', - ) + return isinstance(state, str) and state.lower() in ( + "installed", + "absent", ) @classmethod def valid_module(cls, module): - '''A valid module is an instance of AnsibleModule.''' + """A valid module is an instance of AnsibleModule.""" return isinstance(module, AnsibleModule) + # /class validations ------------------------------------------- }}} # class properties --------------------------------------------- {{{ @@ -240,7 +239,7 @@ def module(self, module): if not self.valid_module(module): self._module = None self.failed = True - self.message = f'Invalid module: {module}.' + self.message = f"Invalid module: {module}." raise HomebrewCaskException(self.message) else: @@ -256,12 +255,12 @@ def path(self, path): if not HomebrewValidate.valid_path(path): self._path = [] self.failed = True - self.message = f'Invalid path: {path}.' + self.message = f"Invalid path: {path}." raise HomebrewCaskException(self.message) else: if isinstance(path, str): - self._path = path.split(':') + self._path = path.split(":") else: self._path = path @@ -276,7 +275,7 @@ def brew_path(self, brew_path): if not HomebrewValidate.valid_brew_path(brew_path): self._brew_path = None self.failed = True - self.message = f'Invalid brew_path: {brew_path}.' + self.message = f"Invalid brew_path: {brew_path}." raise HomebrewCaskException(self.message) else: @@ -301,7 +300,7 @@ def current_cask(self, cask): if not self.valid_cask(cask): self._current_cask = None self.failed = True - self.message = f'Invalid cask: {cask}.' + self.message = f"Invalid cask: {cask}." 
raise HomebrewCaskException(self.message) else: @@ -321,20 +320,34 @@ def brew_version(self, brew_version): # /class properties -------------------------------------------- }}} - def __init__(self, module, path=path, casks=None, state=None, - sudo_password=None, update_homebrew=False, - install_options=None, accept_external_apps=False, - upgrade_all=False, greedy=False): + def __init__( + self, + module, + path=path, + casks=None, + state=None, + sudo_password=None, + update_homebrew=False, + install_options=None, + accept_external_apps=False, + upgrade_all=False, + greedy=False, + ): if not install_options: install_options = list() self._setup_status_vars() - self._setup_instance_vars(module=module, path=path, casks=casks, - state=state, sudo_password=sudo_password, - update_homebrew=update_homebrew, - install_options=install_options, - accept_external_apps=accept_external_apps, - upgrade_all=upgrade_all, - greedy=greedy, ) + self._setup_instance_vars( + module=module, + path=path, + casks=casks, + state=state, + sudo_password=sudo_password, + update_homebrew=update_homebrew, + install_options=install_options, + accept_external_apps=accept_external_apps, + upgrade_all=upgrade_all, + greedy=greedy, + ) self._prep() @@ -344,7 +357,7 @@ def _setup_status_vars(self): self.changed = False self.changed_count = 0 self.unchanged_count = 0 - self.message = '' + self.message = "" def _setup_instance_vars(self, **kwargs): for key, val in kwargs.items(): @@ -357,24 +370,25 @@ def _prep_brew_path(self): if not self.module: self.brew_path = None self.failed = True - self.message = 'AnsibleModule not set.' + self.message = "AnsibleModule not set." raise HomebrewCaskException(self.message) self.brew_path = self.module.get_bin_path( - 'brew', + "brew", required=True, opt_dirs=self.path, ) if not self.brew_path: self.brew_path = None self.failed = True - self.message = 'Unable to locate homebrew executable.' - raise HomebrewCaskException('Unable to locate homebrew executable.') + self.message = "Unable to locate homebrew executable." + raise HomebrewCaskException("Unable to locate homebrew executable.") return self.brew_path def _status(self): return (self.failed, self.changed, self.message) + # /prep -------------------------------------------------------- }}} def run(self): @@ -395,11 +409,11 @@ def _current_cask_is_outdated(self): return False if self._brew_cask_command_is_deprecated(): - base_opts = [self.brew_path, 'outdated', '--cask'] + base_opts = [self.brew_path, "outdated", "--cask"] else: - base_opts = [self.brew_path, 'cask', 'outdated'] + base_opts = [self.brew_path, "cask", "outdated"] - cask_is_outdated_command = base_opts + (['--greedy'] if self.greedy else []) + [self.current_cask] + cask_is_outdated_command = base_opts + (["--greedy"] if self.greedy else []) + [self.current_cask] rc, out, err = self.module.run_command(cask_is_outdated_command) @@ -408,7 +422,7 @@ def _current_cask_is_outdated(self): def _current_cask_is_installed(self): if not self.valid_cask(self.current_cask): self.failed = True - self.message = f'Invalid cask: {self.current_cask}.' + self.message = f"Invalid cask: {self.current_cask}." 
raise HomebrewCaskException(self.message) if self._brew_cask_command_is_deprecated(): @@ -425,7 +439,7 @@ def _get_brew_version(self): if self.brew_version: return self.brew_version - cmd = [self.brew_path, '--version'] + cmd = [self.brew_path, "--version"] dummy, out, dummy = self.module.run_command(cmd, check_rc=True) @@ -438,7 +452,8 @@ def _get_brew_version(self): def _brew_cask_command_is_deprecated(self): # The `brew cask` replacements were fully available in 2.6.0 (https://brew.sh/2020/12/01/homebrew-2.6.0/) - return LooseVersion(self._get_brew_version()) >= LooseVersion('2.6.0') + return LooseVersion(self._get_brew_version()) >= LooseVersion("2.6.0") + # /checks ------------------------------------------------------ }}} # commands ----------------------------------------------------- {{{ @@ -447,11 +462,11 @@ def _run(self): return self._upgrade_all() if self.casks: - if self.state == 'installed': + if self.state == "installed": return self._install_casks() - elif self.state == 'upgraded': + elif self.state == "upgraded": return self._upgrade_casks() - elif self.state == 'absent': + elif self.state == "absent": return self._uninstall_casks() self.failed = True @@ -460,65 +475,64 @@ def _run(self): # sudo_password fix ---------------------- {{{ def _run_command_with_sudo_password(self, cmd): - rc, out, err = '', '', '' + rc, out, err = "", "", "" with tempfile.NamedTemporaryFile() as sudo_askpass_file: sudo_askpass_file.write(b"#!/bin/sh\n\necho '%s'\n" % to_bytes(self.sudo_password)) os.chmod(sudo_askpass_file.name, 0o700) sudo_askpass_file.file.close() - rc, out, err = self.module.run_command( - cmd, - environ_update={'SUDO_ASKPASS': sudo_askpass_file.name} - ) + rc, out, err = self.module.run_command(cmd, environ_update={"SUDO_ASKPASS": sudo_askpass_file.name}) self.module.add_cleanup_file(sudo_askpass_file.name) return (rc, out, err) + # /sudo_password fix --------------------- }}} # updated -------------------------------- {{{ def _update_homebrew(self): - rc, out, err = self.module.run_command([ - self.brew_path, - 'update', - ]) + rc, out, err = self.module.run_command( + [ + self.brew_path, + "update", + ] + ) if rc == 0: if out and isinstance(out, str): already_updated = any( - re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) - for s in out.split('\n') - if s + re.search(r"Already up-to-date.", s.strip(), re.IGNORECASE) for s in out.split("\n") if s ) if not already_updated: self.changed = True - self.message = 'Homebrew updated successfully.' + self.message = "Homebrew updated successfully." else: - self.message = 'Homebrew already up-to-date.' + self.message = "Homebrew already up-to-date." return True else: self.failed = True self.message = err.strip() raise HomebrewCaskException(self.message) + # /updated ------------------------------- }}} # _upgrade_all --------------------------- {{{ def _upgrade_all(self): if self.module.check_mode: self.changed = True - self.message = 'Casks would be upgraded.' + self.message = "Casks would be upgraded." 
raise HomebrewCaskException(self.message) if self._brew_cask_command_is_deprecated(): - cmd = [self.brew_path, 'upgrade', '--cask'] + cmd = [self.brew_path, "upgrade", "--cask"] else: - cmd = [self.brew_path, 'cask', 'upgrade'] + cmd = [self.brew_path, "cask", "upgrade"] if self.greedy: - cmd = cmd + ['--greedy'] + cmd = cmd + ["--greedy"] - rc, out, err = '', '', '' + rc, out, err = "", "", "" if self.sudo_password: rc, out, err = self._run_command_with_sudo_password(cmd) @@ -528,50 +542,51 @@ def _upgrade_all(self): if rc == 0: # 'brew upgrade --cask' does not output anything if no casks are upgraded if not out.strip(): - self.message = 'Homebrew casks already upgraded.' + self.message = "Homebrew casks already upgraded." # handle legacy 'brew cask upgrade' - elif re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE): - self.message = 'Homebrew casks already upgraded.' + elif re.search(r"==> No Casks to upgrade", out.strip(), re.IGNORECASE): + self.message = "Homebrew casks already upgraded." else: self.changed = True - self.message = 'Homebrew casks upgraded.' + self.message = "Homebrew casks upgraded." return True else: self.failed = True self.message = err.strip() raise HomebrewCaskException(self.message) + # /_upgrade_all -------------------------- }}} # installed ------------------------------ {{{ def _install_current_cask(self): if not self.valid_cask(self.current_cask): self.failed = True - self.message = f'Invalid cask: {self.current_cask}.' + self.message = f"Invalid cask: {self.current_cask}." raise HomebrewCaskException(self.message) - if '--force' not in self.install_options and self._current_cask_is_installed(): + if "--force" not in self.install_options and self._current_cask_is_installed(): self.unchanged_count += 1 - self.message = f'Cask already installed: {self.current_cask}' + self.message = f"Cask already installed: {self.current_cask}" return True if self.module.check_mode: self.changed = True - self.message = f'Cask would be installed: {self.current_cask}' + self.message = f"Cask would be installed: {self.current_cask}" raise HomebrewCaskException(self.message) if self._brew_cask_command_is_deprecated(): - base_opts = [self.brew_path, 'install', '--cask'] + base_opts = [self.brew_path, "install", "--cask"] else: - base_opts = [self.brew_path, 'cask', 'install'] + base_opts = [self.brew_path, "cask", "install"] opts = base_opts + [self.current_cask] + self.install_options cmd = [opt for opt in opts if opt] - rc, out, err = '', '', '' + rc, out, err = "", "", "" if self.sudo_password: rc, out, err = self._run_command_with_sudo_password(cmd) @@ -581,11 +596,11 @@ def _install_current_cask(self): if self._current_cask_is_installed(): self.changed_count += 1 self.changed = True - self.message = f'Cask installed: {self.current_cask}' + self.message = f"Cask installed: {self.current_cask}" return True elif self.accept_external_apps and re.search(r"Error: It seems there is already an App at", err): self.unchanged_count += 1 - self.message = f'Cask already installed: {self.current_cask}' + self.message = f"Cask already installed: {self.current_cask}" return True else: self.failed = True @@ -598,40 +613,41 @@ def _install_casks(self): self._install_current_cask() return True + # /installed ----------------------------- }}} # upgraded ------------------------------- {{{ def _upgrade_current_cask(self): - command = 'upgrade' + command = "upgrade" if not self.valid_cask(self.current_cask): self.failed = True - self.message = f'Invalid cask: {self.current_cask}.' 
+ self.message = f"Invalid cask: {self.current_cask}." raise HomebrewCaskException(self.message) if not self._current_cask_is_installed(): - command = 'install' + command = "install" if self._current_cask_is_installed() and not self._current_cask_is_outdated(): - self.message = f'Cask is already upgraded: {self.current_cask}' + self.message = f"Cask is already upgraded: {self.current_cask}" self.unchanged_count += 1 return True if self.module.check_mode: self.changed = True - self.message = f'Cask would be upgraded: {self.current_cask}' + self.message = f"Cask would be upgraded: {self.current_cask}" raise HomebrewCaskException(self.message) if self._brew_cask_command_is_deprecated(): - base_opts = [self.brew_path, command, '--cask'] + base_opts = [self.brew_path, command, "--cask"] else: - base_opts = [self.brew_path, 'cask', command] + base_opts = [self.brew_path, "cask", command] opts = base_opts + self.install_options + [self.current_cask] cmd = [opt for opt in opts if opt] - rc, out, err = '', '', '' + rc, out, err = "", "", "" if self.sudo_password: rc, out, err = self._run_command_with_sudo_password(cmd) @@ -641,7 +657,7 @@ def _upgrade_current_cask(self): if self._current_cask_is_installed() and not self._current_cask_is_outdated(): self.changed_count += 1 self.changed = True - self.message = f'Cask upgraded: {self.current_cask}' + self.message = f"Cask upgraded: {self.current_cask}" return True else: self.failed = True @@ -654,35 +670,36 @@ def _upgrade_casks(self): self._upgrade_current_cask() return True + # /upgraded ------------------------------ }}} # uninstalled ---------------------------- {{{ def _uninstall_current_cask(self): if not self.valid_cask(self.current_cask): self.failed = True - self.message = f'Invalid cask: {self.current_cask}.' + self.message = f"Invalid cask: {self.current_cask}." 
raise HomebrewCaskException(self.message) if not self._current_cask_is_installed(): self.unchanged_count += 1 - self.message = f'Cask already uninstalled: {self.current_cask}' + self.message = f"Cask already uninstalled: {self.current_cask}" return True if self.module.check_mode: self.changed = True - self.message = f'Cask would be uninstalled: {self.current_cask}' + self.message = f"Cask would be uninstalled: {self.current_cask}" raise HomebrewCaskException(self.message) if self._brew_cask_command_is_deprecated(): - base_opts = [self.brew_path, 'uninstall', '--cask'] + base_opts = [self.brew_path, "uninstall", "--cask"] else: - base_opts = [self.brew_path, 'cask', 'uninstall'] + base_opts = [self.brew_path, "cask", "uninstall"] opts = base_opts + [self.current_cask] + self.install_options cmd = [opt for opt in opts if opt] - rc, out, err = '', '', '' + rc, out, err = "", "", "" if self.sudo_password: rc, out, err = self._run_command_with_sudo_password(cmd) @@ -692,7 +709,7 @@ def _uninstall_current_cask(self): if not self._current_cask_is_installed(): self.changed_count += 1 self.changed = True - self.message = f'Cask uninstalled: {self.current_cask}' + self.message = f"Cask uninstalled: {self.current_cask}" return True else: self.failed = True @@ -705,6 +722,7 @@ def _uninstall_casks(self): self._uninstall_current_cask() return True + # /uninstalled --------------------------- }}} # /commands ---------------------------------------------------- }}} @@ -714,19 +732,23 @@ def main(): argument_spec=dict( name=dict( aliases=["pkg", "package", "cask"], - type='list', - elements='str', + type="list", + elements="str", ), path=dict( default="/usr/local/bin:/opt/homebrew/bin", - type='path', + type="path", ), state=dict( default="present", choices=[ - "present", "installed", - "latest", "upgraded", - "absent", "removed", "uninstalled", + "present", + "installed", + "latest", + "upgraded", + "absent", + "removed", + "uninstalled", ], ), sudo_password=dict( @@ -735,70 +757,73 @@ def main(): ), update_homebrew=dict( default=False, - type='bool', + type="bool", ), install_options=dict( - aliases=['options'], - type='list', - elements='str', + aliases=["options"], + type="list", + elements="str", ), accept_external_apps=dict( default=False, - type='bool', + type="bool", ), upgrade_all=dict( default=False, aliases=["upgrade"], - type='bool', + type="bool", ), greedy=dict( default=False, - type='bool', + type="bool", ), ), supports_check_mode=True, ) - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C") p = module.params - if p['name']: - casks = p['name'] + if p["name"]: + casks = p["name"] else: casks = None - path = p['path'] + path = p["path"] if path: - path = path.split(':') - - state = p['state'] - if state in ('present', 'installed'): - state = 'installed' - if state in ('latest', 'upgraded'): - state = 'upgraded' - if state in ('absent', 'removed', 'uninstalled'): - state = 'absent' - - sudo_password = p['sudo_password'] - - update_homebrew = p['update_homebrew'] - upgrade_all = p['upgrade_all'] - greedy = p['greedy'] - p['install_options'] = p['install_options'] or [] - install_options = [f'--{install_option}' - for install_option in p['install_options']] - - accept_external_apps = p['accept_external_apps'] - - brew_cask = HomebrewCask(module=module, path=path, casks=casks, - state=state, sudo_password=sudo_password, - update_homebrew=update_homebrew, - 
install_options=install_options, - accept_external_apps=accept_external_apps, - upgrade_all=upgrade_all, - greedy=greedy, - ) + path = path.split(":") + + state = p["state"] + if state in ("present", "installed"): + state = "installed" + if state in ("latest", "upgraded"): + state = "upgraded" + if state in ("absent", "removed", "uninstalled"): + state = "absent" + + sudo_password = p["sudo_password"] + + update_homebrew = p["update_homebrew"] + upgrade_all = p["upgrade_all"] + greedy = p["greedy"] + p["install_options"] = p["install_options"] or [] + install_options = [f"--{install_option}" for install_option in p["install_options"]] + + accept_external_apps = p["accept_external_apps"] + + brew_cask = HomebrewCask( + module=module, + path=path, + casks=casks, + state=state, + sudo_password=sudo_password, + update_homebrew=update_homebrew, + install_options=install_options, + accept_external_apps=accept_external_apps, + upgrade_all=upgrade_all, + greedy=greedy, + ) (failed, changed, message) = brew_cask.run() if failed: module.fail_json(msg=message) @@ -806,5 +831,5 @@ def main(): module.exit_json(changed=changed, msg=message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/homebrew_services.py b/plugins/modules/homebrew_services.py index b3778ee5ed5..53e3d66973f 100644 --- a/plugins/modules/homebrew_services.py +++ b/plugins/modules/homebrew_services.py @@ -100,9 +100,7 @@ # Stores validated arguments for an instance of an action. # See DOCUMENTATION string for argument-specific information. - HomebrewServiceArgs = namedtuple( - "HomebrewServiceArgs", ["name", "state", "brew_path"] - ) + HomebrewServiceArgs = namedtuple("HomebrewServiceArgs", ["name", "state", "brew_path"]) # Stores the state of a Homebrew service. HomebrewServiceState = namedtuple("HomebrewServiceState", ["running", "pid"]) @@ -112,14 +110,10 @@ # Stores validated arguments for an instance of an action. # See DOCUMENTATION string for argument-specific information. - HomebrewServiceArgs = NamedTuple( - "HomebrewServiceArgs", [("name", str), ("state", str), ("brew_path", str)] - ) + HomebrewServiceArgs = NamedTuple("HomebrewServiceArgs", [("name", str), ("state", str), ("brew_path", str)]) # Stores the state of a Homebrew service. - HomebrewServiceState = NamedTuple( - "HomebrewServiceState", [("running", bool), ("pid", Optional[int])] - ) + HomebrewServiceState = NamedTuple("HomebrewServiceState", [("running", bool), ("pid", Optional[int])]) def _brew_service_state(args, module): @@ -139,9 +133,7 @@ def _exit_with_state(args, module, changed=False, message=None): # type: (HomebrewServiceArgs, AnsibleModule, bool, Optional[str]) -> None state = _brew_service_state(args, module) if message is None: - message = ( - f"Running: {state.running}, Changed: {changed}, PID: {state.pid}" - ) + message = f"Running: {state.running}, Changed: {changed}, PID: {state.pid}" module.exit_json(msg=message, pid=state.pid, running=state.running, changed=changed) @@ -199,9 +191,7 @@ def restart_service(args, module): # type: (HomebrewServiceArgs, AnsibleModule) -> None """Restart the requested brew service. 
This always results in a change.""" if module.check_mode: - _exit_with_state( - args, module, changed=True, message="Service would be restarted" - ) + _exit_with_state(args, module, changed=True, message="Service would be restarted") restart_cmd = [args.brew_path, "services", "restart", args.name] rc, stdout, stderr = module.run_command(restart_cmd, check_rc=True) @@ -229,9 +219,7 @@ def main(): supports_check_mode=True, ) - module.run_command_environ_update = dict( - LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C" - ) + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C") # Pre-validate arguments. service_args = validate_and_load_arguments(module) diff --git a/plugins/modules/homebrew_tap.py b/plugins/modules/homebrew_tap.py index b46478b9cff..d7855e3317f 100644 --- a/plugins/modules/homebrew_tap.py +++ b/plugins/modules/homebrew_tap.py @@ -84,59 +84,63 @@ def a_valid_tap(tap): - '''Returns True if the tap is valid.''' - regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$') + """Returns True if the tap is valid.""" + regex = re.compile(r"^([\w-]+)/(homebrew-)?([\w-]+)$") return regex.match(tap) def already_tapped(module, brew_path, tap): - '''Returns True if already tapped.''' + """Returns True if already tapped.""" - rc, out, err = module.run_command([ - brew_path, - 'tap', - ]) + rc, out, err = module.run_command( + [ + brew_path, + "tap", + ] + ) - taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_] - tap_name = re.sub('homebrew-', '', tap.lower()) + taps = [tap_.strip().lower() for tap_ in out.split("\n") if tap_] + tap_name = re.sub("homebrew-", "", tap.lower()) return tap_name in taps def add_tap(module, brew_path, tap, url=None): - '''Adds a single tap.''' - failed, changed, msg = False, False, '' + """Adds a single tap.""" + failed, changed, msg = False, False, "" if not a_valid_tap(tap): failed = True - msg = f'not a valid tap: {tap}' + msg = f"not a valid tap: {tap}" elif not already_tapped(module, brew_path, tap): if module.check_mode: module.exit_json(changed=True) - rc, out, err = module.run_command([ - brew_path, - 'tap', - tap, - url, - ]) + rc, out, err = module.run_command( + [ + brew_path, + "tap", + tap, + url, + ] + ) if rc == 0: changed = True - msg = f'successfully tapped: {tap}' + msg = f"successfully tapped: {tap}" else: failed = True - msg = f'failed to tap: {tap} due to {err}' + msg = f"failed to tap: {tap} due to {err}" else: - msg = f'already tapped: {tap}' + msg = f"already tapped: {tap}" return (failed, changed, msg) def add_taps(module, brew_path, taps): - '''Adds one or more taps.''' - failed, changed, unchanged, added, msg = False, False, 0, 0, '' + """Adds one or more taps.""" + failed, changed, unchanged, added, msg = False, False, 0, 0, "" for tap in taps: (failed, changed, msg) = add_tap(module, brew_path, tap) @@ -152,46 +156,48 @@ def add_taps(module, brew_path, taps): msg = msg % (added, unchanged) elif added: changed = True - msg = f'added: {added}, unchanged: {unchanged}' + msg = f"added: {added}, unchanged: {unchanged}" else: - msg = f'added: {added}, unchanged: {unchanged}' + msg = f"added: {added}, unchanged: {unchanged}" return (failed, changed, msg) def remove_tap(module, brew_path, tap): - '''Removes a single tap.''' - failed, changed, msg = False, False, '' + """Removes a single tap.""" + failed, changed, msg = False, False, "" if not a_valid_tap(tap): failed = True - msg = f'not a valid tap: {tap}' + msg = f"not a valid tap: {tap}" elif already_tapped(module, brew_path, tap): if 
module.check_mode: module.exit_json(changed=True) - rc, out, err = module.run_command([ - brew_path, - 'untap', - tap, - ]) + rc, out, err = module.run_command( + [ + brew_path, + "untap", + tap, + ] + ) if not already_tapped(module, brew_path, tap): changed = True - msg = f'successfully untapped: {tap}' + msg = f"successfully untapped: {tap}" else: failed = True - msg = f'failed to untap: {tap} due to {err}' + msg = f"failed to untap: {tap} due to {err}" else: - msg = f'already untapped: {tap}' + msg = f"already untapped: {tap}" return (failed, changed, msg) def remove_taps(module, brew_path, taps): - '''Removes one or more taps.''' - failed, changed, unchanged, removed, msg = False, False, 0, 0, '' + """Removes one or more taps.""" + failed, changed, unchanged, removed, msg = False, False, 0, 0, "" for tap in taps: (failed, changed, msg) = remove_tap(module, brew_path, tap) @@ -207,9 +213,9 @@ def remove_taps(module, brew_path, taps): msg = msg % (removed, unchanged) elif removed: changed = True - msg = f'removed: {removed}, unchanged: {unchanged}' + msg = f"removed: {removed}, unchanged: {unchanged}" else: - msg = f'removed: {removed}, unchanged: {unchanged}' + msg = f"removed: {removed}, unchanged: {unchanged}" return (failed, changed, msg) @@ -217,31 +223,31 @@ def remove_taps(module, brew_path, taps): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(aliases=['tap'], type='list', required=True, elements='str'), + name=dict(aliases=["tap"], type="list", required=True, elements="str"), url=dict(), - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), path=dict( default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", - type='path', + type="path", ), ), supports_check_mode=True, ) - path = module.params['path'] + path = module.params["path"] if path: - path = path.split(':') + path = path.split(":") brew_path = module.get_bin_path( - 'brew', + "brew", required=True, opt_dirs=path, ) - taps = module.params['name'] - url = module.params['url'] + taps = module.params["name"] + url = module.params["url"] - if module.params['state'] == 'present': + if module.params["state"] == "present": if url is None: # No tap URL provided explicitly, continue with bulk addition # of all the taps. 
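For context on the homebrew_tap hunks above: already_tapped() normalizes both sides before the membership test, which is why 'user/homebrew-repo' and 'user/repo' resolve to the same tap. A minimal standalone sketch of just that normalization, assuming a hypothetical installed list standing in for the lines printed by 'brew tap':

import re

def is_tapped(installed, tap):
    # Mirror the module: lowercase the installed list, then lowercase the
    # queried tap and strip the optional "homebrew-" prefix before comparing.
    taps = [t.strip().lower() for t in installed if t]
    return re.sub("homebrew-", "", tap.lower()) in taps

print(is_tapped(["user/repo"], "User/homebrew-Repo"))  # True

Because the re.sub() pattern is unanchored, the prefix is dropped wherever it occurs in the name, matching the module's slightly permissive comparison.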
@@ -260,7 +266,7 @@ def main(): else: module.exit_json(changed=changed, msg=msg) - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": failed, changed, msg = remove_taps(module, brew_path, taps) if failed: @@ -269,5 +275,5 @@ def main(): module.exit_json(changed=changed, msg=msg) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/homectl.py b/plugins/modules/homectl.py index 140839efa21..5cc6724bba5 100644 --- a/plugins/modules/homectl.py +++ b/plugins/modules/homectl.py @@ -286,6 +286,7 @@ LEGACYCRYPT_IMPORT_ERROR: str | None try: import legacycrypt + if not HAS_CRYPT: crypt = legacycrypt except ImportError: @@ -299,45 +300,45 @@ class Homectl: def __init__(self, module): self.module = module - self.state = module.params['state'] - self.name = module.params['name'] - self.password = module.params['password'] - self.storage = module.params['storage'] - self.disksize = module.params['disksize'] - self.resize = module.params['resize'] - self.realname = module.params['realname'] - self.realm = module.params['realm'] - self.email = module.params['email'] - self.location = module.params['location'] - self.iconname = module.params['iconname'] - self.homedir = module.params['homedir'] - self.imagepath = module.params['imagepath'] - self.uid = module.params['uid'] - self.gid = module.params['gid'] - self.umask = module.params['umask'] - self.memberof = module.params['memberof'] - self.skeleton = module.params['skeleton'] - self.shell = module.params['shell'] - self.environment = module.params['environment'] - self.timezone = module.params['timezone'] - self.locked = module.params['locked'] - self.passwordhint = module.params['passwordhint'] - self.sshkeys = module.params['sshkeys'] - self.language = module.params['language'] - self.notbefore = module.params['notbefore'] - self.notafter = module.params['notafter'] - self.mountopts = module.params['mountopts'] + self.state = module.params["state"] + self.name = module.params["name"] + self.password = module.params["password"] + self.storage = module.params["storage"] + self.disksize = module.params["disksize"] + self.resize = module.params["resize"] + self.realname = module.params["realname"] + self.realm = module.params["realm"] + self.email = module.params["email"] + self.location = module.params["location"] + self.iconname = module.params["iconname"] + self.homedir = module.params["homedir"] + self.imagepath = module.params["imagepath"] + self.uid = module.params["uid"] + self.gid = module.params["gid"] + self.umask = module.params["umask"] + self.memberof = module.params["memberof"] + self.skeleton = module.params["skeleton"] + self.shell = module.params["shell"] + self.environment = module.params["environment"] + self.timezone = module.params["timezone"] + self.locked = module.params["locked"] + self.passwordhint = module.params["passwordhint"] + self.sshkeys = module.params["sshkeys"] + self.language = module.params["language"] + self.notbefore = module.params["notbefore"] + self.notafter = module.params["notafter"] + self.mountopts = module.params["mountopts"] self.result = {} # Cannot run homectl commands if service is not active def homed_service_active(self): is_active = True - cmd = ['systemctl', 'show', 'systemd-homed.service', '-p', 'ActiveState'] + cmd = ["systemctl", "show", "systemd-homed.service", "-p", "ActiveState"] rc, show_service_stdout, stderr = self.module.run_command(cmd) if rc == 0: - state = show_service_stdout.rsplit('=')[1] - if state.strip() != 
'active': + state = show_service_stdout.rsplit("=")[1] + if state.strip() != "active": is_active = False return is_active @@ -349,17 +350,17 @@ def user_exists(self): if rc == 0: exists = True # User exists now compare password given with current hashed password stored in the user metadata. - if self.state != 'absent': # Don't need checking on remove user - stored_pwhash = json.loads(stdout)['privileged']['hashedPassword'][0] + if self.state != "absent": # Don't need checking on remove user + stored_pwhash = json.loads(stdout)["privileged"]["hashedPassword"][0] if self._check_password(stored_pwhash): valid_pw = True return exists, valid_pw def create_user(self): record = self.create_json_record(create=True) - cmd = [self.module.get_bin_path('homectl', True)] - cmd.append('create') - cmd.append('--identity=-') # Read the user record from standard input. + cmd = [self.module.get_bin_path("homectl", True)] + cmd.append("create") + cmd.append("--identity=-") # Read the user record from standard input. return self.module.run_command(cmd, data=record) def _hash_password(self, password): @@ -373,31 +374,31 @@ def _check_password(self, pwhash): return pwhash == hash def remove_user(self): - cmd = [self.module.get_bin_path('homectl', True)] - cmd.append('remove') + cmd = [self.module.get_bin_path("homectl", True)] + cmd.append("remove") cmd.append(self.name) return self.module.run_command(cmd) def prepare_modify_user_command(self): record = self.create_json_record() - cmd = [self.module.get_bin_path('homectl', True)] - cmd.append('update') + cmd = [self.module.get_bin_path("homectl", True)] + cmd.append("update") cmd.append(self.name) - cmd.append('--identity=-') # Read the user record from standard input. + cmd.append("--identity=-") # Read the user record from standard input. # Resize disksize now resize = true # This is not valid in user record (json) and requires it to be passed on command. if self.disksize and self.resize: - cmd.append('--and-resize') - cmd.append('true') - self.result['changed'] = True + cmd.append("--and-resize") + cmd.append("true") + self.result["changed"] = True return cmd, record def get_user_metadata(self): - cmd = [self.module.get_bin_path('homectl', True)] - cmd.append('inspect') + cmd = [self.module.get_bin_path("homectl", True)] + cmd.append("inspect") cmd.append(self.name) - cmd.append('-j') - cmd.append('--no-pager') + cmd.append("-j") + cmd.append("--no-pager") rc, stdout, stderr = self.module.run_command(cmd) return rc, stdout, stderr @@ -405,172 +406,172 @@ def get_user_metadata(self): def create_json_record(self, create=False): record = {} user_metadata = {} - self.result['changed'] = False + self.result["changed"] = False # Get the current user record if not creating a new user record. if not create: rc, user_metadata, stderr = self.get_user_metadata() user_metadata = json.loads(user_metadata) # Remove elements that are not meant to be updated from record. # These are always part of the record when a user exists. - user_metadata.pop('signature', None) - user_metadata.pop('binding', None) - user_metadata.pop('status', None) + user_metadata.pop("signature", None) + user_metadata.pop("binding", None) + user_metadata.pop("status", None) # Let last change Usec be updated by homed when command runs. - user_metadata.pop('lastChangeUSec', None) + user_metadata.pop("lastChangeUSec", None) # Now only change fields that are called on leaving what's currently in the record intact. 
record = user_metadata - record['userName'] = self.name - record['secret'] = {'password': [self.password]} + record["userName"] = self.name + record["secret"] = {"password": [self.password]} if create: password_hash = self._hash_password(self.password) - record['privileged'] = {'hashedPassword': [password_hash]} - self.result['changed'] = True + record["privileged"] = {"hashedPassword": [password_hash]} + self.result["changed"] = True if self.uid and self.gid and create: - record['uid'] = self.uid - record['gid'] = self.gid - self.result['changed'] = True + record["uid"] = self.uid + record["gid"] = self.gid + self.result["changed"] = True if self.memberof: - member_list = list(self.memberof.split(',')) - if member_list != record.get('memberOf', [None]): - record['memberOf'] = member_list - self.result['changed'] = True + member_list = list(self.memberof.split(",")) + if member_list != record.get("memberOf", [None]): + record["memberOf"] = member_list + self.result["changed"] = True if self.realname: - if self.realname != record.get('realName'): - record['realName'] = self.realname - self.result['changed'] = True + if self.realname != record.get("realName"): + record["realName"] = self.realname + self.result["changed"] = True # Cannot update storage unless were creating a new user. # See 'Fields in the binding section' at https://systemd.io/USER_RECORD/ if self.storage and create: - record['storage'] = self.storage - self.result['changed'] = True + record["storage"] = self.storage + self.result["changed"] = True # Cannot update homedir unless were creating a new user. # See 'Fields in the binding section' at https://systemd.io/USER_RECORD/ if self.homedir and create: - record['homeDirectory'] = self.homedir - self.result['changed'] = True + record["homeDirectory"] = self.homedir + self.result["changed"] = True # Cannot update imagepath unless were creating a new user. 
# See 'Fields in the binding section' at https://systemd.io/USER_RECORD/ if self.imagepath and create: - record['imagePath'] = self.imagepath - self.result['changed'] = True + record["imagePath"] = self.imagepath + self.result["changed"] = True if self.disksize: # convert human readable to bytes - if self.disksize != record.get('diskSize'): - record['diskSize'] = human_to_bytes(self.disksize) - self.result['changed'] = True + if self.disksize != record.get("diskSize"): + record["diskSize"] = human_to_bytes(self.disksize) + self.result["changed"] = True if self.realm: - if self.realm != record.get('realm'): - record['realm'] = self.realm - self.result['changed'] = True + if self.realm != record.get("realm"): + record["realm"] = self.realm + self.result["changed"] = True if self.email: - if self.email != record.get('emailAddress'): - record['emailAddress'] = self.email - self.result['changed'] = True + if self.email != record.get("emailAddress"): + record["emailAddress"] = self.email + self.result["changed"] = True if self.location: - if self.location != record.get('location'): - record['location'] = self.location - self.result['changed'] = True + if self.location != record.get("location"): + record["location"] = self.location + self.result["changed"] = True if self.iconname: - if self.iconname != record.get('iconName'): - record['iconName'] = self.iconname - self.result['changed'] = True + if self.iconname != record.get("iconName"): + record["iconName"] = self.iconname + self.result["changed"] = True if self.skeleton: - if self.skeleton != record.get('skeletonDirectory'): - record['skeletonDirectory'] = self.skeleton - self.result['changed'] = True + if self.skeleton != record.get("skeletonDirectory"): + record["skeletonDirectory"] = self.skeleton + self.result["changed"] = True if self.shell: - if self.shell != record.get('shell'): - record['shell'] = self.shell - self.result['changed'] = True + if self.shell != record.get("shell"): + record["shell"] = self.shell + self.result["changed"] = True if self.umask: - if self.umask != record.get('umask'): - record['umask'] = self.umask - self.result['changed'] = True + if self.umask != record.get("umask"): + record["umask"] = self.umask + self.result["changed"] = True if self.environment: - if self.environment != record.get('environment', [None]): - record['environment'] = list(self.environment.split(',')) - self.result['changed'] = True + if self.environment != record.get("environment", [None]): + record["environment"] = list(self.environment.split(",")) + self.result["changed"] = True if self.timezone: - if self.timezone != record.get('timeZone'): - record['timeZone'] = self.timezone - self.result['changed'] = True + if self.timezone != record.get("timeZone"): + record["timeZone"] = self.timezone + self.result["changed"] = True if self.locked: - if self.locked != record.get('locked'): - record['locked'] = self.locked - self.result['changed'] = True + if self.locked != record.get("locked"): + record["locked"] = self.locked + self.result["changed"] = True if self.passwordhint: - if self.passwordhint != record.get('privileged', {}).get('passwordHint'): - record['privileged']['passwordHint'] = self.passwordhint - self.result['changed'] = True + if self.passwordhint != record.get("privileged", {}).get("passwordHint"): + record["privileged"]["passwordHint"] = self.passwordhint + self.result["changed"] = True if self.sshkeys: - if self.sshkeys != record.get('privileged', {}).get('sshAuthorizedKeys'): - record['privileged']['sshAuthorizedKeys'] = 
list(self.sshkeys.split(','))
- self.result['changed'] = True
+ if self.sshkeys != record.get("privileged", {}).get("sshAuthorizedKeys"):
+ record["privileged"]["sshAuthorizedKeys"] = list(self.sshkeys.split(","))
+ self.result["changed"] = True
if self.language:
- if self.locked != record.get('preferredLanguage'):
- record['preferredLanguage'] = self.language
- self.result['changed'] = True
+ if self.language != record.get("preferredLanguage"):
+ record["preferredLanguage"] = self.language
+ self.result["changed"] = True
if self.notbefore:
- if self.locked != record.get('notBeforeUSec'):
- record['notBeforeUSec'] = self.notbefore
- self.result['changed'] = True
+ if self.notbefore != record.get("notBeforeUSec"):
+ record["notBeforeUSec"] = self.notbefore
+ self.result["changed"] = True
if self.notafter:
- if self.locked != record.get('notAfterUSec'):
- record['notAfterUSec'] = self.notafter
- self.result['changed'] = True
+ if self.notafter != record.get("notAfterUSec"):
+ record["notAfterUSec"] = self.notafter
+ self.result["changed"] = True
if self.mountopts:
- opts = list(self.mountopts.split(','))
- if 'nosuid' in opts:
- if record.get('mountNoSuid') is not True:
- record['mountNoSuid'] = True
- self.result['changed'] = True
+ opts = list(self.mountopts.split(","))
+ if "nosuid" in opts:
+ if record.get("mountNoSuid") is not True:
+ record["mountNoSuid"] = True
+ self.result["changed"] = True
else:
- if record.get('mountNoSuid') is not False:
- record['mountNoSuid'] = False
- self.result['changed'] = True
-
- if 'nodev' in opts:
- if record.get('mountNoDevices') is not True:
- record['mountNoDevices'] = True
- self.result['changed'] = True
+ if record.get("mountNoSuid") is not False:
+ record["mountNoSuid"] = False
+ self.result["changed"] = True
+
+ if "nodev" in opts:
+ if record.get("mountNoDevices") is not True:
+ record["mountNoDevices"] = True
+ self.result["changed"] = True
else:
- if record.get('mountNoDevices') is not False:
- record['mountNoDevices'] = False
- self.result['changed'] = True
-
- if 'noexec' in opts:
- if record.get('mountNoExecute') is not True:
- record['mountNoExecute'] = True
- self.result['changed'] = True
+ if record.get("mountNoDevices") is not False:
+ record["mountNoDevices"] = False
+ self.result["changed"] = True
+
+ if "noexec" in opts:
+ if record.get("mountNoExecute") is not True:
+ record["mountNoExecute"] = True
+ self.result["changed"] = True
else:
- if record.get('mountNoExecute') is not False:
- record['mountNoExecute'] = False
- self.result['changed'] = True
+ if record.get("mountNoExecute") is not False:
+ record["mountNoExecute"] = False
+ self.result["changed"] = True
return jsonify(record)
@@ -578,58 +579,57 @@ def create_json_record(self, create=False):
def main():
module = AnsibleModule(
argument_spec=dict(
- state=dict(type='str', default='present', choices=['absent', 'present']),
- name=dict(type='str', required=True, aliases=['user', 'username']),
- password=dict(type='str', no_log=True),
- storage=dict(type='str', choices=['classic', 'luks', 'directory', 'subvolume', 'fscrypt', 'cifs']),
- disksize=dict(type='str'),
- resize=dict(type='bool', default=False),
- realname=dict(type='str', aliases=['comment']),
- realm=dict(type='str'),
- email=dict(type='str'),
- location=dict(type='str'),
- iconname=dict(type='str'),
- homedir=dict(type='path'),
- imagepath=dict(type='path'),
- uid=dict(type='int'),
- gid=dict(type='int'),
- umask=dict(type='int'),
- environment=dict(type='str', aliases=['setenv']),
- timezone=dict(type='str'),
- memberof=dict(type='str', aliases=['groups']),
- skeleton=dict(type='path', aliases=['skel']),
- shell=dict(type='str'),
- locked=dict(type='bool'),
- passwordhint=dict(type='str', no_log=True),
- sshkeys=dict(type='str', no_log=True),
- language=dict(type='str'),
- notbefore=dict(type='int'),
- notafter=dict(type='int'),
- mountopts=dict(type='str'),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ name=dict(type="str", required=True, aliases=["user", "username"]),
+ password=dict(type="str", no_log=True),
+ storage=dict(type="str", choices=["classic", "luks", "directory", "subvolume", "fscrypt", "cifs"]),
+ disksize=dict(type="str"),
+ resize=dict(type="bool", default=False),
+ realname=dict(type="str", aliases=["comment"]),
+ realm=dict(type="str"),
+ email=dict(type="str"),
+ location=dict(type="str"),
+ iconname=dict(type="str"),
+ homedir=dict(type="path"),
+ imagepath=dict(type="path"),
+ uid=dict(type="int"),
+ gid=dict(type="int"),
+ umask=dict(type="int"),
+ environment=dict(type="str", aliases=["setenv"]),
+ timezone=dict(type="str"),
+ memberof=dict(type="str", aliases=["groups"]),
+ skeleton=dict(type="path", aliases=["skel"]),
+ shell=dict(type="str"),
+ locked=dict(type="bool"),
+ passwordhint=dict(type="str", no_log=True),
+ sshkeys=dict(type="str", no_log=True),
+ language=dict(type="str"),
+ notbefore=dict(type="int"),
+ notafter=dict(type="int"),
+ mountopts=dict(type="str"),
),
supports_check_mode=True,
required_if=[
- ('state', 'present', ['password']),
- ('resize', True, ['disksize']),
- ]
+ ("state", "present", ["password"]),
+ ("resize", True, ["disksize"]),
+ ],
)
if not HAS_CRYPT and not HAS_LEGACYCRYPT:
module.fail_json(
- msg=missing_required_lib('crypt (part of standard library up to Python 3.12) or legacycrypt (PyPI)'),
+ msg=missing_required_lib("crypt (part of standard library up to Python 3.12) or legacycrypt (PyPI)"),
exception=CRYPT_IMPORT_ERROR,
)
homectl = Homectl(module)
- homectl.result['state'] = homectl.state
+ homectl.result["state"] = homectl.state
# First we need to make sure homed service is active
if not homectl.homed_service_active():
- module.fail_json(msg='systemd-homed.service is not active')
+ module.fail_json(msg="systemd-homed.service is not active")
# handle removing user
- if homectl.state == 'absent':
+ if homectl.state == "absent":
user_exists, valid_pwhash = homectl.user_exists()
if user_exists:
if module.check_mode:
@@ -637,15 +637,15 @@ def main():
rc, stdout, stderr = homectl.remove_user()
if rc != 0:
module.fail_json(name=homectl.name, msg=stderr, rc=rc)
- homectl.result['changed'] = True
- homectl.result['rc'] = rc
- homectl.result['msg'] = f'User {homectl.name} removed!'
+ homectl.result["changed"] = True
+ homectl.result["rc"] = rc
+ homectl.result["msg"] = f"User {homectl.name} removed!"
else:
- homectl.result['changed'] = False
- homectl.result['msg'] = 'User does not exist!'
+ homectl.result["changed"] = False
+ homectl.result["msg"] = "User does not exist!"
# Handle adding a user
- if homectl.state == 'present':
+ if homectl.state == "present":
user_exists, valid_pwhash = homectl.user_exists()
if not user_exists:
if module.check_mode:
@@ -654,35 +654,35 @@ def main():
if rc != 0:
module.fail_json(name=homectl.name, msg=stderr, rc=rc)
rc, user_metadata, stderr = homectl.get_user_metadata()
- homectl.result['data'] = json.loads(user_metadata)
- homectl.result['rc'] = rc
- homectl.result['msg'] = f'User {homectl.name} created!'
+ homectl.result["data"] = json.loads(user_metadata) + homectl.result["rc"] = rc + homectl.result["msg"] = f"User {homectl.name} created!" else: if valid_pwhash: # Run this to see if changed would be True or False which is useful for check_mode cmd, record = homectl.prepare_modify_user_command() else: # User gave wrong password fail with message - homectl.result['changed'] = False - homectl.result['msg'] = 'User exists but password is incorrect!' + homectl.result["changed"] = False + homectl.result["msg"] = "User exists but password is incorrect!" module.fail_json(**homectl.result) if module.check_mode: module.exit_json(**homectl.result) # Now actually modify the user if changed was set to true at any point. - if homectl.result['changed']: + if homectl.result["changed"]: rc, stdout, stderr = module.run_command(cmd, data=record) if rc != 0: module.fail_json(name=homectl.name, msg=stderr, rc=rc, changed=False) rc, user_metadata, stderr = homectl.get_user_metadata() - homectl.result['data'] = json.loads(user_metadata) - homectl.result['rc'] = rc - if homectl.result['changed']: - homectl.result['msg'] = f'User {homectl.name} modified' + homectl.result["data"] = json.loads(user_metadata) + homectl.result["rc"] = rc + if homectl.result["changed"]: + homectl.result["msg"] = f"User {homectl.name} modified" module.exit_json(**homectl.result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/honeybadger_deployment.py b/plugins/modules/honeybadger_deployment.py index 54488a4772b..4b6de2b650c 100644 --- a/plugins/modules/honeybadger_deployment.py +++ b/plugins/modules/honeybadger_deployment.py @@ -78,8 +78,8 @@ # Module execution. # -def main(): +def main(): module = AnsibleModule( argument_spec=dict( token=dict(required=True, no_log=True), @@ -87,10 +87,10 @@ def main(): user=dict(required=False), repo=dict(), revision=dict(), - url=dict(default='https://api.honeybadger.io/v1/deploys'), - validate_certs=dict(default=True, type='bool'), + url=dict(default="https://api.honeybadger.io/v1/deploys"), + validate_certs=dict(default=True, type="bool"), ), - supports_check_mode=True + supports_check_mode=True, ) params = {} @@ -109,7 +109,7 @@ def main(): params["api_key"] = module.params["token"] - url = module.params.get('url') + url = module.params.get("url") # If we're in check mode, just exit pretending like we succeeded if module.check_mode: @@ -119,13 +119,13 @@ def main(): data = urlencode(params) response, info = fetch_url(module, url, data=data) except Exception as e: - module.fail_json(msg=f'Unable to notify Honeybadger: {e}', exception=traceback.format_exc()) + module.fail_json(msg=f"Unable to notify Honeybadger: {e}", exception=traceback.format_exc()) else: - if info['status'] == 201: + if info["status"] == 201: module.exit_json(changed=True) else: module.fail_json(msg=f"HTTP result code: {info['status']} connecting to {url}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/hpilo_boot.py b/plugins/modules/hpilo_boot.py index 2f3db742295..6829fda644a 100644 --- a/plugins/modules/hpilo_boot.py +++ b/plugins/modules/hpilo_boot.py @@ -119,6 +119,7 @@ HPILO_IMP_ERR = None try: import hpilo + HAS_HPILO = True except ImportError: HPILO_IMP_ERR = traceback.format_exc() @@ -128,45 +129,47 @@ # Suppress warnings from hpilo -warnings.simplefilter('ignore') +warnings.simplefilter("ignore") def main(): - module = AnsibleModule( argument_spec=dict( - host=dict(type='str', required=True), - login=dict(type='str', 
default='Administrator'), - password=dict(type='str', default='admin', no_log=True), - media=dict(type='str', choices=['cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb']), - image=dict(type='str'), - state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']), - force=dict(type='bool', default=False), - idempotent_boot_once=dict(type='bool', default=False), - ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']), + host=dict(type="str", required=True), + login=dict(type="str", default="Administrator"), + password=dict(type="str", default="admin", no_log=True), + media=dict(type="str", choices=["cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb"]), + image=dict(type="str"), + state=dict( + type="str", + default="boot_once", + choices=["boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff"], + ), + force=dict(type="bool", default=False), + idempotent_boot_once=dict(type="bool", default=False), + ssl_version=dict(type="str", default="TLSv1", choices=["SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2"]), ) ) if not HAS_HPILO: - module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR) - - host = module.params['host'] - login = module.params['login'] - password = module.params['password'] - media = module.params['media'] - image = module.params['image'] - state = module.params['state'] - force = module.params['force'] - idempotent_boot_once = module.params['idempotent_boot_once'] + module.fail_json(msg=missing_required_lib("python-hpilo"), exception=HPILO_IMP_ERR) + + host = module.params["host"] + login = module.params["login"] + password = module.params["password"] + media = module.params["media"] + image = module.params["image"] + state = module.params["state"] + force = module.params["force"] + idempotent_boot_once = module.params["idempotent_boot_once"] ssl_version = getattr(hpilo.ssl, f"PROTOCOL_{module.params.get('ssl_version').upper().replace('V', 'v')}") ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version) changed = False status = {} - power_status = 'UNKNOWN' - - if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'): + power_status = "UNKNOWN" + if media and state in ("boot_always", "boot_once", "connect", "disconnect", "no_boot"): # Workaround for: Error communicating with iLO: Problem manipulating EV try: ilo.set_one_time_boot(media) @@ -179,21 +182,20 @@ def main(): ilo.insert_virtual_media(media, image) changed = True - if media == 'cdrom': - ilo.set_vm_status('cdrom', state, True) + if media == "cdrom": + ilo.set_vm_status("cdrom", state, True) status = ilo.get_vm_status() changed = True - elif media in ('floppy', 'usb'): + elif media in ("floppy", "usb"): ilo.set_vf_status(state, True) status = ilo.get_vf_status() changed = True # Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot - if state in ('boot_once', 'boot_always') or force: - + if state in ("boot_once", "boot_always") or force: power_status = ilo.get_host_power_status() - if power_status == 'ON': + if power_status == "ON": if not force and not idempotent_boot_once: # module.deprecate( # 'The failure of the module when the server is already powered on is being deprecated.' 
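One easy-to-misread detail in the hpilo_boot changes above: the ssl_version option is mapped to an ssl protocol constant by uppercasing the whole string and then lowering every 'V' again, so 'TLSv1_2' becomes 'PROTOCOL_TLSv1_2' before the getattr() lookup on hpilo.ssl. A small sketch of just that string transformation, independent of any iLO connection (the helper name is invented for illustration):

def ssl_constant_name(option):
    # 'SSLv23' -> 'PROTOCOL_SSLv23', 'TLSv1_2' -> 'PROTOCOL_TLSv1_2'
    return f"PROTOCOL_{option.upper().replace('V', 'v')}"

for opt in ("SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2"):
    print(opt, "->", ssl_constant_name(opt))

The upper()/replace() round-trip makes the option value effectively case-insensitive while still producing the exact mixed-case constant names the ssl module defines.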
@@ -201,30 +203,29 @@ def main(): # version='11.0.0', # collection_name='community.general' # ) - module.fail_json(msg=f'HP iLO ({host}) reports that the server is already powered on !') + module.fail_json(msg=f"HP iLO ({host}) reports that the server is already powered on !") elif not force and idempotent_boot_once: pass elif force: ilo.warm_boot_server() - # ilo.cold_boot_server() + # ilo.cold_boot_server() changed = True else: ilo.press_pwr_btn() -# ilo.reset_server() -# ilo.set_host_power(host_power=True) + # ilo.reset_server() + # ilo.set_host_power(host_power=True) changed = True - elif state in ('poweroff'): - + elif state in ("poweroff"): power_status = ilo.get_host_power_status() - if not power_status == 'OFF': + if not power_status == "OFF": ilo.hold_pwr_btn() -# ilo.set_host_power(host_power=False) + # ilo.set_host_power(host_power=False) changed = True module.exit_json(changed=changed, power=power_status, **status) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/hpilo_info.py b/plugins/modules/hpilo_info.py index bfa33994d15..20ec59bbd5e 100644 --- a/plugins/modules/hpilo_info.py +++ b/plugins/modules/hpilo_info.py @@ -133,6 +133,7 @@ HPILO_IMP_ERR = None try: import hpilo + HAS_HPILO = True except ImportError: HPILO_IMP_ERR = traceback.format_exc() @@ -143,46 +144,42 @@ # Suppress warnings from hpilo -warnings.simplefilter('ignore') +warnings.simplefilter("ignore") -def parse_flat_interface(entry, non_numeric='hw_eth_ilo'): +def parse_flat_interface(entry, non_numeric="hw_eth_ilo"): try: infoname = f"hw_eth{int(entry['Port']) - 1}" except Exception: infoname = non_numeric - info = { - 'macaddress': entry['MAC'].replace('-', ':'), - 'macaddress_dash': entry['MAC'] - } + info = {"macaddress": entry["MAC"].replace("-", ":"), "macaddress_dash": entry["MAC"]} return (infoname, info) def main(): - module = AnsibleModule( argument_spec=dict( - host=dict(type='str', required=True), - login=dict(type='str', default='Administrator'), - password=dict(type='str', default='admin', no_log=True), - ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']), + host=dict(type="str", required=True), + login=dict(type="str", default="Administrator"), + password=dict(type="str", default="admin", no_log=True), + ssl_version=dict(type="str", default="TLSv1", choices=["SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2"]), ), supports_check_mode=True, ) if not HAS_HPILO: - module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR) + module.fail_json(msg=missing_required_lib("python-hpilo"), exception=HPILO_IMP_ERR) - host = module.params['host'] - login = module.params['login'] - password = module.params['password'] + host = module.params["host"] + login = module.params["login"] + password = module.params["password"] ssl_version = getattr(hpilo.ssl, f"PROTOCOL_{module.params.get('ssl_version').upper().replace('V', 'v')}") ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version) info = { - 'module_hw': True, + "module_hw": True, } # TODO: Count number of CPUs, DIMMs and total memory @@ -193,73 +190,67 @@ def main(): module.fail_json(msg=to_native(e)) for entry in data: - if 'type' not in entry: + if "type" not in entry: continue - elif entry['type'] == 0: # BIOS Information - info['hw_bios_version'] = entry['Family'] - info['hw_bios_date'] = entry['Date'] - elif entry['type'] == 1: # System Information - info['hw_uuid'] = entry['UUID'] - info['hw_system_serial'] = 
entry['Serial Number'].rstrip() - info['hw_product_name'] = entry['Product Name'] - info['hw_product_uuid'] = entry['cUUID'] - elif entry['type'] == 209: # Embedded NIC MAC Assignment - if 'fields' in entry: - for (name, value) in [(e['name'], e['value']) for e in entry['fields']]: - if name.startswith('Port'): + elif entry["type"] == 0: # BIOS Information + info["hw_bios_version"] = entry["Family"] + info["hw_bios_date"] = entry["Date"] + elif entry["type"] == 1: # System Information + info["hw_uuid"] = entry["UUID"] + info["hw_system_serial"] = entry["Serial Number"].rstrip() + info["hw_product_name"] = entry["Product Name"] + info["hw_product_uuid"] = entry["cUUID"] + elif entry["type"] == 209: # Embedded NIC MAC Assignment + if "fields" in entry: + for name, value in [(e["name"], e["value"]) for e in entry["fields"]]: + if name.startswith("Port"): try: infoname = f"hw_eth{int(value) - 1}" except Exception: - infoname = 'hw_eth_ilo' - elif name.startswith('MAC'): - info[infoname] = { - 'macaddress': value.replace('-', ':'), - 'macaddress_dash': value - } + infoname = "hw_eth_ilo" + elif name.startswith("MAC"): + info[infoname] = {"macaddress": value.replace("-", ":"), "macaddress_dash": value} else: - (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo') + (infoname, entry_info) = parse_flat_interface(entry, "hw_eth_ilo") info[infoname] = entry_info - elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info - for (name, value) in [(e['name'], e['value']) for e in entry['fields']]: - if name.startswith('Port'): + elif entry["type"] == 209: # HPQ NIC iSCSI MAC Info + for name, value in [(e["name"], e["value"]) for e in entry["fields"]]: + if name.startswith("Port"): try: infoname = f"hw_iscsi{int(value) - 1}" except Exception: - infoname = 'hw_iscsi_ilo' - elif name.startswith('MAC'): - info[infoname] = { - 'macaddress': value.replace('-', ':'), - 'macaddress_dash': value - } - elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format) - (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo') + infoname = "hw_iscsi_ilo" + elif name.startswith("MAC"): + info[infoname] = {"macaddress": value.replace("-", ":"), "macaddress_dash": value} + elif entry["type"] == 233: # Embedded NIC MAC Assignment (Alternate data format) + (infoname, entry_info) = parse_flat_interface(entry, "hw_eth_ilo") info[infoname] = entry_info # Collect health (RAM/CPU data) health = ilo.get_embedded_health() - info['hw_health'] = health + info["hw_health"] = health - memory_details_summary = health.get('memory', {}).get('memory_details_summary') + memory_details_summary = health.get("memory", {}).get("memory_details_summary") # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8 if memory_details_summary: - info['hw_memory_details_summary'] = memory_details_summary - info['hw_memory_total'] = 0 + info["hw_memory_details_summary"] = memory_details_summary + info["hw_memory_total"] = 0 for cpu, details in memory_details_summary.items(): - cpu_total_memory_size = details.get('total_memory_size') + cpu_total_memory_size = details.get("total_memory_size") if cpu_total_memory_size: - ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size) + ram = re.search(r"(\d+)\s+(\w+)", cpu_total_memory_size) if ram: - if ram.group(2) == 'GB': - info['hw_memory_total'] = info['hw_memory_total'] + int(ram.group(1)) + if ram.group(2) == "GB": + info["hw_memory_total"] = info["hw_memory_total"] + int(ram.group(1)) # reformat into a text friendly format - info['hw_memory_total'] = 
f"{info['hw_memory_total']} GB" + info["hw_memory_total"] = f"{info['hw_memory_total']} GB" # Report host state - info['host_power_status'] = power_state or 'UNKNOWN' + info["host_power_status"] = power_state or "UNKNOWN" module.exit_json(**info) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/hponcfg.py b/plugins/modules/hponcfg.py index a17a905916e..00ccb2b1afe 100644 --- a/plugins/modules/hponcfg.py +++ b/plugins/modules/hponcfg.py @@ -84,10 +84,10 @@ class HPOnCfg(ModuleHelper): module = dict( argument_spec=dict( - src=dict(type='path', required=True, aliases=['path']), - minfw=dict(type='str'), - executable=dict(default='hponcfg', type='str'), - verbose=dict(default=False, type='bool'), + src=dict(type="path", required=True, aliases=["path"]), + minfw=dict(type="str"), + executable=dict(default="hponcfg", type="str"), + verbose=dict(default=False, type="bool"), ) ) command_args_formats = dict( @@ -103,7 +103,7 @@ def __run__(self): self.command_args_formats, check_rc=True, ) - runner(['src', 'verbose', 'minfw']).run() + runner(["src", "verbose", "minfw"]).run() # Consider every action a change (not idempotent yet!) self.changed = True @@ -113,5 +113,5 @@ def main(): HPOnCfg.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/htpasswd.py b/plugins/modules/htpasswd.py index e8d7b918a25..e13dbdef56a 100644 --- a/plugins/modules/htpasswd.py +++ b/plugins/modules/htpasswd.py @@ -128,16 +128,16 @@ def create_missing_directories(dest): def present(dest, username, password, hash_scheme, create, check_mode): - """ Ensures user is present + """Ensures user is present - Returns (msg, changed) """ + Returns (msg, changed)""" if hash_scheme in apache_hashes: context = htpasswd_context else: context = CryptContext(schemes=[hash_scheme] + apache_hashes) if not os.path.exists(dest): if not create: - raise ValueError(f'Destination {dest} does not exist') + raise ValueError(f"Destination {dest} does not exist") if check_mode: return (f"Create {dest}", True) create_missing_directories(dest) @@ -160,9 +160,9 @@ def present(dest, username, password, hash_scheme, create, check_mode): def absent(dest, username, check_mode): - """ Ensures user is absent + """Ensures user is absent - Returns (msg, changed) """ + Returns (msg, changed)""" ht = HtpasswdFile(dest, new=False) if username not in ht.users(): @@ -175,10 +175,8 @@ def absent(dest, username, check_mode): def check_file_attrs(module, changed, message): - file_args = module.load_file_common_arguments(module.params) if module.set_fs_attributes_if_different(file_args, False): - if changed: message += " and " changed = True @@ -189,24 +187,21 @@ def check_file_attrs(module, changed, message): def main(): arg_spec = dict( - path=dict(type='path', required=True, aliases=["dest", "destfile"]), - name=dict(type='str', required=True, aliases=["username"]), - password=dict(type='str', no_log=True), - hash_scheme=dict(type='str', default="apr_md5_crypt", aliases=["crypt_scheme"]), - state=dict(type='str', default="present", choices=["present", "absent"]), - create=dict(type='bool', default=True), - + path=dict(type="path", required=True, aliases=["dest", "destfile"]), + name=dict(type="str", required=True, aliases=["username"]), + password=dict(type="str", no_log=True), + hash_scheme=dict(type="str", default="apr_md5_crypt", aliases=["crypt_scheme"]), + state=dict(type="str", default="present", choices=["present", "absent"]), + create=dict(type="bool", default=True), ) - 
module = AnsibleModule(argument_spec=arg_spec, - add_file_common_args=True, - supports_check_mode=True) - - path = module.params['path'] - username = module.params['name'] - password = module.params['password'] - hash_scheme = module.params['hash_scheme'] - state = module.params['state'] - create = module.params['create'] + module = AnsibleModule(argument_spec=arg_spec, add_file_common_args=True, supports_check_mode=True) + + path = module.params["path"] + username = module.params["name"] + password = module.params["password"] + hash_scheme = module.params["hash_scheme"] + state = module.params["state"] + create = module.params["create"] check_mode = module.check_mode deps.validate(module) @@ -237,9 +232,9 @@ def main(): pass try: - if state == 'present': + if state == "present": (msg, changed) = present(path, username, password, hash_scheme, create, check_mode) - elif state == 'absent': + elif state == "absent": if not os.path.exists(path): module.warn(f"{path} does not exist") module.exit_json(msg=f"{username} not present", changed=False) @@ -254,5 +249,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/hwc_ecs_instance.py b/plugins/modules/hwc_ecs_instance.py index 69dd90dbfdc..e2ba681b349 100644 --- a/plugins/modules/hwc_ecs_instance.py +++ b/plugins/modules/hwc_ecs_instance.py @@ -472,51 +472,64 @@ """ from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value, wait_to_finish) + Config, + HwcClientException, + HwcModule, + are_different_dicts, + build_path, + get_region, + is_empty_value, + navigate_value, + wait_to_finish, +) def build_module(): return HwcModule( argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='30m', type='str'), - update=dict(default='30m', type='str'), - delete=dict(default='30m', type='str'), - ), default=dict()), - availability_zone=dict(type='str', required=True), - flavor_name=dict(type='str', required=True), - image_id=dict(type='str', required=True), - name=dict(type='str', required=True), + state=dict(default="present", choices=["present", "absent"], type="str"), + timeouts=dict( + type="dict", + options=dict( + create=dict(default="30m", type="str"), + update=dict(default="30m", type="str"), + delete=dict(default="30m", type="str"), + ), + default=dict(), + ), + availability_zone=dict(type="str", required=True), + flavor_name=dict(type="str", required=True), + image_id=dict(type="str", required=True), + name=dict(type="str", required=True), nics=dict( - type='list', required=True, elements='dict', + type="list", + required=True, + elements="dict", + options=dict(ip_address=dict(type="str", required=True), subnet_id=dict(type="str", required=True)), + ), + root_volume=dict( + type="dict", + required=True, options=dict( - ip_address=dict(type='str', required=True), - subnet_id=dict(type='str', required=True) + volume_type=dict(type="str", required=True), size=dict(type="int"), snapshot_id=dict(type="str") ), ), - root_volume=dict(type='dict', required=True, options=dict( - volume_type=dict(type='str', required=True), - size=dict(type='int'), - snapshot_id=dict(type='str') - )), - vpc_id=dict(type='str', required=True), - admin_pass=dict(type='str', no_log=True), - data_volumes=dict(type='list', elements='dict', 
options=dict( - volume_id=dict(type='str', required=True), - device=dict(type='str') - )), - description=dict(type='str'), - eip_id=dict(type='str'), - enable_auto_recovery=dict(type='bool'), - enterprise_project_id=dict(type='str'), - security_groups=dict(type='list', elements='str'), - server_metadata=dict(type='dict'), - server_tags=dict(type='dict'), - ssh_key_name=dict(type='str'), - user_data=dict(type='str') + vpc_id=dict(type="str", required=True), + admin_pass=dict(type="str", no_log=True), + data_volumes=dict( + type="list", + elements="dict", + options=dict(volume_id=dict(type="str", required=True), device=dict(type="str")), + ), + description=dict(type="str"), + eip_id=dict(type="str"), + enable_auto_recovery=dict(type="bool"), + enterprise_project_id=dict(type="str"), + security_groups=dict(type="list", elements="str"), + server_metadata=dict(type="dict"), + server_tags=dict(type="dict"), + ssh_key_name=dict(type="str"), + user_data=dict(type="str"), ), supports_check_mode=True, ) @@ -530,11 +543,11 @@ def main(): try: _init(config) - is_exist = module.params['id'] + is_exist = module.params["id"] result = None changed = False - if module.params['state'] == 'present': + if module.params["state"] == "present": if not is_exist: if not module.check_mode: create(config) @@ -553,12 +566,11 @@ def main(): result = build_state(inputv, resp, array_index) set_readonly_options(inputv, result) if are_different_dicts(inputv, result): - raise Exception("Update resource failed, " - "some attributes are not updated") + raise Exception("Update resource failed, some attributes are not updated") changed = True - result['id'] = module.params.get('id') + result["id"] = module.params.get("id") else: result = dict() if is_exist: @@ -570,13 +582,13 @@ def main(): module.fail_json(msg=str(ex)) else: - result['changed'] = changed + result["changed"] = changed module.exit_json(**result) def _init(config): module = config.module - if module.params['id']: + if module.params["id"]: return v = search_resource(config) @@ -585,7 +597,7 @@ def _init(config): raise Exception(f"Found more than one resource({', '.join([navigate_value(i, ['id']) for i in v])})") if n == 1: - module.params['id'] = navigate_value(v[0], ["id"]) + module.params["id"] = navigate_value(v[0], ["id"]) def user_input_parameters(module): @@ -614,7 +626,7 @@ def user_input_parameters(module): def create(config): module = config.module client = config.client(get_region(module), "ecs", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["create"].rstrip("m")) opts = user_input_parameters(module) opts["ansible_module"] = module @@ -634,14 +646,14 @@ def create(config): break else: raise Exception("Can't find the sub job") - module.params['id'] = navigate_value(obj, ["entities", "server_id"]) + module.params["id"] = navigate_value(obj, ["entities", "server_id"]) def update(config, expect_state, current_state): module = config.module expect_state["current_state"] = current_state current_state["current_state"] = current_state - timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["update"].rstrip("m")) client = config.client(get_region(module), "ecs", "project") params = build_delete_nics_parameters(expect_state) @@ -669,7 +681,7 @@ def update(config, expect_state, current_state): def delete(config): module = config.module client = config.client(get_region(module), "ecs", "project") - timeout = 60 * 
int(module.params['timeouts']['delete'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["delete"].rstrip("m")) opts = user_input_parameters(module) opts["ansible_module"] = module @@ -746,13 +758,11 @@ def _build_query_link(opts): v = navigate_value(opts, ["enterprise_project_id"]) if v or v in [False, 0]: - query_params.append( - f"enterprise_project_id={str(v) if v else str(v).lower()}") + query_params.append(f"enterprise_project_id={str(v) if v else str(v).lower()}") v = navigate_value(opts, ["name"]) if v or v in [False, 0]: - query_params.append( - f"name={str(v) if v else str(v).lower()}") + query_params.append(f"name={str(v) if v else str(v).lower()}") query_link = "?limit=10&offset={offset}" if query_params: @@ -770,7 +780,7 @@ def search_resource(config): link = f"cloudservers/detail{query_link}" result = [] - p = {'offset': 1} + p = {"offset": 1} while True: url = link.format(**p) r = send_list_request(module, client, url) @@ -786,7 +796,7 @@ def search_resource(config): if len(result) > 1: break - p['offset'] += 1 + p["offset"] += 1 return result @@ -964,8 +974,7 @@ def expand_create_nics(d, array_index): req = [] - v = navigate_value( - d, ["nics"], new_ai) + v = navigate_value(d, ["nics"], new_ai) if not v: return req @@ -1227,10 +1236,7 @@ def _query_status(): return None, "" try: - return wait_to_finish( - ["SUCCESS"], - ["RUNNING", "INIT"], - _query_status, timeout) + return wait_to_finish(["SUCCESS"], ["RUNNING", "INIT"], _query_status, timeout) except Exception as ex: module.fail_json(msg=f"module(hwc_ecs_instance): error waiting to be done, error= {ex}") @@ -1243,11 +1249,7 @@ def multi_invoke_delete_volume(config, opts, client, timeout): current = opts["current_state"]["data_volumes"] if expect and current: v = [i["volume_id"] for i in expect] - opts1 = { - "data_volumes": [ - i for i in current if i["volume_id"] not in v - ] - } + opts1 = {"data_volumes": [i for i in current if i["volume_id"] not in v]} loop_val = navigate_value(opts1, ["data_volumes"]) if not loop_val: @@ -1266,11 +1268,7 @@ def multi_invoke_attach_data_disk(config, opts, client, timeout): current = opts["current_state"]["data_volumes"] if expect and current: v = [i["volume_id"] for i in current] - opts1 = { - "data_volumes": [ - i for i in expect if i["volume_id"] not in v - ] - } + opts1 = {"data_volumes": [i for i in expect if i["volume_id"] not in v]} loop_val = navigate_value(opts1, ["data_volumes"]) if not loop_val: @@ -1300,13 +1298,11 @@ def fill_read_resp_body(body): result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig") - result["OS-EXT-AZ:availability_zone"] = body.get( - "OS-EXT-AZ:availability_zone") + result["OS-EXT-AZ:availability_zone"] = body.get("OS-EXT-AZ:availability_zone") result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname") - result["OS-EXT-SRV-ATTR:instance_name"] = body.get( - "OS-EXT-SRV-ATTR:instance_name") + result["OS-EXT-SRV-ATTR:instance_name"] = body.get("OS-EXT-SRV-ATTR:instance_name") result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data") @@ -1338,8 +1334,7 @@ def fill_read_resp_body(body): result["name"] = body.get("name") - v = fill_read_resp_os_extended_volumes_volumes_attached( - body.get("os-extended-volumes:volumes_attached")) + v = fill_read_resp_os_extended_volumes_volumes_attached(body.get("os-extended-volumes:volumes_attached")) result["os-extended-volumes:volumes_attached"] = v v = fill_read_resp_root_volume(body.get("root_volume")) @@ -1462,8 +1457,7 @@ def fill_read_auto_recovery_resp_body(body): 
def flatten_options(response, array_index): r = dict() - v = navigate_value( - response, ["read", "OS-EXT-AZ:availability_zone"], array_index) + v = navigate_value(response, ["read", "OS-EXT-AZ:availability_zone"], array_index) r["availability_zone"] = v v = navigate_value(response, ["read", "config_drive"], array_index) @@ -1484,22 +1478,19 @@ def flatten_options(response, array_index): v = flatten_enable_auto_recovery(response, array_index) r["enable_auto_recovery"] = v - v = navigate_value( - response, ["read", "enterprise_project_id"], array_index) + v = navigate_value(response, ["read", "enterprise_project_id"], array_index) r["enterprise_project_id"] = v v = navigate_value(response, ["read", "flavor", "id"], array_index) r["flavor_name"] = v - v = navigate_value( - response, ["read", "OS-EXT-SRV-ATTR:hostname"], array_index) + v = navigate_value(response, ["read", "OS-EXT-SRV-ATTR:hostname"], array_index) r["host_name"] = v v = navigate_value(response, ["read", "image", "id"], array_index) r["image_id"] = v - v = navigate_value( - response, ["read", "metadata", "image_name"], array_index) + v = navigate_value(response, ["read", "metadata", "image_name"], array_index) r["image_name"] = v v = navigate_value(response, ["read", "name"], array_index) @@ -1508,15 +1499,13 @@ def flatten_options(response, array_index): v = flatten_nics(response, array_index) r["nics"] = v - v = navigate_value( - response, ["read", "OS-EXT-STS:power_state"], array_index) + v = navigate_value(response, ["read", "OS-EXT-STS:power_state"], array_index) r["power_state"] = v v = flatten_root_volume(response, array_index) r["root_volume"] = v - v = navigate_value( - response, ["read", "OS-EXT-SRV-ATTR:instance_name"], array_index) + v = navigate_value(response, ["read", "OS-EXT-SRV-ATTR:instance_name"], array_index) r["server_alias"] = v v = flatten_server_tags(response, array_index) @@ -1528,8 +1517,7 @@ def flatten_options(response, array_index): v = navigate_value(response, ["read", "status"], array_index) r["status"] = v - v = navigate_value( - response, ["read", "OS-EXT-SRV-ATTR:user_data"], array_index) + v = navigate_value(response, ["read", "OS-EXT-SRV-ATTR:user_data"], array_index) r["user_data"] = v v = navigate_value(response, ["read", "metadata", "vpc_id"], array_index) @@ -1539,8 +1527,7 @@ def flatten_options(response, array_index): def flatten_data_volumes(d, array_index): - v = navigate_value(d, ["read", "os-extended-volumes:volumes_attached"], - array_index) + v = navigate_value(d, ["read", "os-extended-volumes:volumes_attached"], array_index) if not v: return None n = len(v) @@ -1555,12 +1542,10 @@ def flatten_data_volumes(d, array_index): val = dict() - v = navigate_value( - d, ["read", "os-extended-volumes:volumes_attached", "device"], new_ai) + v = navigate_value(d, ["read", "os-extended-volumes:volumes_attached", "device"], new_ai) val["device"] = v - v = navigate_value( - d, ["read", "os-extended-volumes:volumes_attached", "id"], new_ai) + v = navigate_value(d, ["read", "os-extended-volumes:volumes_attached", "id"], new_ai) val["volume_id"] = v for v in val.values(): @@ -1572,14 +1557,12 @@ def flatten_data_volumes(d, array_index): def flatten_enable_auto_recovery(d, array_index): - v = navigate_value(d, ["read_auto_recovery", "support_auto_recovery"], - array_index) + v = navigate_value(d, ["read_auto_recovery", "support_auto_recovery"], array_index) return v == "true" def flatten_nics(d, array_index): - v = navigate_value(d, ["read", "address"], - array_index) + v = navigate_value(d, 
["read", "address"], array_index) if not v: return None n = len(v) @@ -1597,8 +1580,7 @@ def flatten_nics(d, array_index): v = navigate_value(d, ["read", "address", "addr"], new_ai) val["ip_address"] = v - v = navigate_value( - d, ["read", "address", "OS-EXT-IPS:port_id"], new_ai) + v = navigate_value(d, ["read", "address", "OS-EXT-IPS:port_id"], new_ai) val["port_id"] = v for v in val.values(): @@ -1681,8 +1663,7 @@ def adjust_data_volumes(parent_input, parent_cur): result.append(cv[i]) if len(result) != lcv: - raise Exception("adjust property(data_volumes) failed, " - "the array number is not equal") + raise Exception("adjust property(data_volumes) failed, the array number is not equal") parent_cur["data_volumes"] = result @@ -1725,8 +1706,7 @@ def adjust_nics(parent_input, parent_cur): result.append(cv[i]) if len(result) != lcv: - raise Exception("adjust property(nics) failed, " - "the array number is not equal") + raise Exception("adjust property(nics) failed, the array number is not equal") parent_cur["nics"] = result @@ -1736,11 +1716,9 @@ def set_unreadable_options(opts, states): states["eip_id"] = opts.get("eip_id") - set_unread_nics( - opts.get("nics"), states.get("nics")) + set_unread_nics(opts.get("nics"), states.get("nics")) - set_unread_root_volume( - opts.get("root_volume"), states.get("root_volume")) + set_unread_root_volume(opts.get("root_volume"), states.get("root_volume")) states["security_groups"] = opts.get("security_groups") @@ -1803,13 +1781,11 @@ def set_readonly_options(opts, states): opts["image_name"] = states.get("image_name") - set_readonly_nics( - opts.get("nics"), states.get("nics")) + set_readonly_nics(opts.get("nics"), states.get("nics")) opts["power_state"] = states.get("power_state") - set_readonly_root_volume( - opts.get("root_volume"), states.get("root_volume")) + set_readonly_root_volume(opts.get("root_volume"), states.get("root_volume")) opts["server_alias"] = states.get("server_alias") @@ -1860,7 +1836,6 @@ def set_readonly_root_volume(inputv, curv): def send_list_request(module, client, url): - r = None try: r = client.get(url) @@ -1972,13 +1947,11 @@ def fill_list_resp_body(body): result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig") - result["OS-EXT-AZ:availability_zone"] = body.get( - "OS-EXT-AZ:availability_zone") + result["OS-EXT-AZ:availability_zone"] = body.get("OS-EXT-AZ:availability_zone") result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname") - result["OS-EXT-SRV-ATTR:instance_name"] = body.get( - "OS-EXT-SRV-ATTR:instance_name") + result["OS-EXT-SRV-ATTR:instance_name"] = body.get("OS-EXT-SRV-ATTR:instance_name") result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data") @@ -2076,5 +2049,5 @@ def adjust_list_api_tags(parent_input, parent_cur): parent_cur["tags"] = result -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/hwc_evs_disk.py b/plugins/modules/hwc_evs_disk.py index efa84d03607..6f131f0f486 100644 --- a/plugins/modules/hwc_evs_disk.py +++ b/plugins/modules/hwc_evs_disk.py @@ -295,33 +295,44 @@ """ from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value, wait_to_finish) + Config, + HwcClientException, + HwcModule, + are_different_dicts, + build_path, + get_region, + is_empty_value, + navigate_value, + wait_to_finish, +) def build_module(): return HwcModule( argument_spec=dict( - 
state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='30m', type='str'), - update=dict(default='30m', type='str'), - delete=dict(default='30m', type='str'), - ), default=dict()), - availability_zone=dict(type='str', required=True), - name=dict(type='str', required=True), - volume_type=dict(type='str', required=True), - backup_id=dict(type='str'), - description=dict(type='str'), - enable_full_clone=dict(type='bool'), - enable_scsi=dict(type='bool'), - enable_share=dict(type='bool'), - encryption_id=dict(type='str'), - enterprise_project_id=dict(type='str'), - image_id=dict(type='str'), - size=dict(type='int'), - snapshot_id=dict(type='str') + state=dict(default="present", choices=["present", "absent"], type="str"), + timeouts=dict( + type="dict", + options=dict( + create=dict(default="30m", type="str"), + update=dict(default="30m", type="str"), + delete=dict(default="30m", type="str"), + ), + default=dict(), + ), + availability_zone=dict(type="str", required=True), + name=dict(type="str", required=True), + volume_type=dict(type="str", required=True), + backup_id=dict(type="str"), + description=dict(type="str"), + enable_full_clone=dict(type="bool"), + enable_scsi=dict(type="bool"), + enable_share=dict(type="bool"), + encryption_id=dict(type="str"), + enterprise_project_id=dict(type="str"), + image_id=dict(type="str"), + size=dict(type="int"), + snapshot_id=dict(type="str"), ), supports_check_mode=True, ) @@ -335,11 +346,11 @@ def main(): try: _init(config) - is_exist = module.params.get('id') + is_exist = module.params.get("id") result = None changed = False - if module.params['state'] == 'present': + if module.params["state"] == "present": if not is_exist: if not module.check_mode: create(config) @@ -358,12 +369,11 @@ def main(): result = build_state(inputv, resp, array_index) set_readonly_options(inputv, result) if are_different_dicts(inputv, result): - raise Exception("Update resource failed, " - "some attributes are not updated") + raise Exception("Update resource failed, some attributes are not updated") changed = True - result['id'] = module.params.get('id') + result["id"] = module.params.get("id") else: result = dict() if is_exist: @@ -375,13 +385,13 @@ def main(): module.fail_json(msg=str(ex)) else: - result['changed'] = changed + result["changed"] = changed module.exit_json(**result) def _init(config): module = config.module - if module.params.get('id'): + if module.params.get("id"): return v = search_resource(config) @@ -390,7 +400,7 @@ def _init(config): raise Exception(f"find more than one resources({', '.join([navigate_value(i, ['id']) for i in v])})") if n == 1: - module.params['id'] = navigate_value(v[0], ["id"]) + module.params["id"] = navigate_value(v[0], ["id"]) def user_input_parameters(module): @@ -414,7 +424,7 @@ def user_input_parameters(module): def create(config): module = config.module client = config.client(get_region(module), "volumev3", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["create"].rstrip("m")) opts = user_input_parameters(module) opts["ansible_module"] = module @@ -424,7 +434,7 @@ def create(config): client1 = config.client(get_region(module), "volume", "project") client1.endpoint = client1.endpoint.replace("/v2/", "/v1/") obj = async_wait(config, r, client1, timeout) - module.params['id'] = navigate_value(obj, ["entities", "volume_id"]) + module.params["id"] = navigate_value(obj, 
["entities", "volume_id"]) def update(config, expect_state, current_state): @@ -432,7 +442,7 @@ def update(config, expect_state, current_state): expect_state["current_state"] = current_state current_state["current_state"] = current_state client = config.client(get_region(module), "evs", "project") - timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["update"].rstrip("m")) params = build_update_parameters(expect_state) params1 = build_update_parameters(current_state) @@ -453,7 +463,7 @@ def update(config, expect_state, current_state): def delete(config): module = config.module client = config.client(get_region(module), "evs", "project") - timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["delete"].rstrip("m")) r = send_delete_request(module, None, client) @@ -485,18 +495,15 @@ def _build_query_link(opts): v = navigate_value(opts, ["enable_share"]) if v or v in [False, 0]: - query_params.append( - f"multiattach={str(v) if v else str(v).lower()}") + query_params.append(f"multiattach={str(v) if v else str(v).lower()}") v = navigate_value(opts, ["name"]) if v or v in [False, 0]: - query_params.append( - f"name={str(v) if v else str(v).lower()}") + query_params.append(f"name={str(v) if v else str(v).lower()}") v = navigate_value(opts, ["availability_zone"]) if v or v in [False, 0]: - query_params.append( - f"availability_zone={str(v) if v else str(v).lower()}") + query_params.append(f"availability_zone={str(v) if v else str(v).lower()}") query_link = "?limit=10&offset={start}" if query_params: @@ -514,7 +521,7 @@ def search_resource(config): link = f"os-vendor-volumes/detail{query_link}" result = [] - p = {'start': 0} + p = {"start": 0} while True: url = link.format(**p) r = send_list_request(module, client, url) @@ -528,7 +535,7 @@ def search_resource(config): if len(result) > 1: break - p['start'] += len(r) + p["start"] += len(r) return result @@ -737,10 +744,7 @@ def _query_status(): return None, "" try: - return wait_to_finish( - ["SUCCESS"], - ["RUNNING", "INIT"], - _query_status, timeout) + return wait_to_finish(["SUCCESS"], ["RUNNING", "INIT"], _query_status, timeout) except Exception as ex: module.fail_json(msg=f"module(hwc_evs_disk): error waiting to be done, error= {ex}") @@ -881,16 +885,13 @@ def flatten_options(response, array_index): v = navigate_value(response, ["read", "multiattach"], array_index) r["enable_share"] = v - v = navigate_value( - response, ["read", "metadata", "__system__cmkid"], array_index) + v = navigate_value(response, ["read", "metadata", "__system__cmkid"], array_index) r["encryption_id"] = v - v = navigate_value( - response, ["read", "enterprise_project_id"], array_index) + v = navigate_value(response, ["read", "enterprise_project_id"], array_index) r["enterprise_project_id"] = v - v = navigate_value( - response, ["read", "volume_image_metadata", "id"], array_index) + v = navigate_value(response, ["read", "volume_image_metadata", "id"], array_index) r["image_id"] = v v = flatten_is_bootable(response, array_index) @@ -924,8 +925,7 @@ def flatten_options(response, array_index): def flatten_attachments(d, array_index): - v = navigate_value(d, ["read", "attachments"], - array_index) + v = navigate_value(d, ["read", "attachments"], array_index) if not v: return None n = len(v) @@ -961,16 +961,14 @@ def flatten_attachments(d, array_index): def flatten_enable_full_clone(d, array_index): - v = navigate_value(d, ["read", "metadata", 
"full_clone"], - array_index) + v = navigate_value(d, ["read", "metadata", "full_clone"], array_index) if v is None: return v return True if v == "0" else False def flatten_enable_scsi(d, array_index): - v = navigate_value(d, ["read", "metadata", "hw:passthrough"], - array_index) + v = navigate_value(d, ["read", "metadata", "hw:passthrough"], array_index) if v is None: return v return True if v in ["true", "True"] else False @@ -984,8 +982,7 @@ def flatten_is_bootable(d, array_index): def flatten_is_readonly(d, array_index): - v = navigate_value(d, ["read", "metadata", "readonly"], - array_index) + v = navigate_value(d, ["read", "metadata", "readonly"], array_index) if v is None: return v return True if v in ["true", "True"] else False @@ -1014,7 +1011,6 @@ def set_readonly_options(opts, states): def send_list_request(module, client, url): - r = None try: r = client.get(url) @@ -1157,5 +1153,5 @@ def fill_list_resp_volume_image_metadata(value): return result -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/hwc_network_vpc.py b/plugins/modules/hwc_network_vpc.py index d2b0e1b9cfe..9ff2367b472 100644 --- a/plugins/modules/hwc_network_vpc.py +++ b/plugins/modules/hwc_network_vpc.py @@ -129,11 +129,18 @@ # Imports ############################################################################### -from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException, - HwcClientException404, HwcModule, - are_different_dicts, is_empty_value, - wait_to_finish, get_region, - build_path, navigate_value) +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, + HwcClientException, + HwcClientException404, + HwcModule, + are_different_dicts, + is_empty_value, + wait_to_finish, + get_region, + build_path, + navigate_value, +) import re ############################################################################### @@ -146,44 +153,47 @@ def main(): module = HwcModule( argument_spec=dict( - state=dict( - default='present', choices=['present', 'absent'], type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='15m', type='str'), - update=dict(default='15m', type='str'), - delete=dict(default='15m', type='str'), - ), default=dict()), - name=dict(required=True, type='str'), - cidr=dict(required=True, type='str') + state=dict(default="present", choices=["present", "absent"], type="str"), + timeouts=dict( + type="dict", + options=dict( + create=dict(default="15m", type="str"), + update=dict(default="15m", type="str"), + delete=dict(default="15m", type="str"), + ), + default=dict(), + ), + name=dict(required=True, type="str"), + cidr=dict(required=True, type="str"), ), supports_check_mode=True, ) - config = Config(module, 'vpc') + config = Config(module, "vpc") - state = module.params['state'] + state = module.params["state"] if (not module.params.get("id")) and module.params.get("name"): - module.params['id'] = get_id_by_name(config) + module.params["id"] = get_id_by_name(config) fetch = None link = self_link(module) # the link will include Nones if required format parameters are missed - if not re.search('/None/|/None$', link): + if not re.search("/None/|/None$", link): client = config.client(get_region(module), "vpc", "project") fetch = fetch_resource(module, client, link) if fetch: - fetch = fetch.get('vpc') + fetch = fetch.get("vpc") changed = False if fetch: - if state == 'present': + if state == "present": expect = _get_editable_properties(module) current_state = 
response_to_hash(module, fetch) current = {"cidr": current_state["cidr"]} if are_different_dicts(expect, current): if not module.check_mode: fetch = update(config, self_link(module)) - fetch = response_to_hash(module, fetch.get('vpc')) + fetch = response_to_hash(module, fetch.get("vpc")) changed = True else: fetch = current_state @@ -193,15 +203,15 @@ def main(): fetch = {} changed = True else: - if state == 'present': + if state == "present": if not module.check_mode: fetch = create(config, "vpcs") - fetch = response_to_hash(module, fetch.get('vpc')) + fetch = response_to_hash(module, fetch.get("vpc")) changed = True else: fetch = {} - fetch.update({'changed': changed}) + fetch.update({"changed": changed}) module.exit_json(**fetch) @@ -217,14 +227,14 @@ def create(config, link): msg = f"module(hwc_network_vpc): error creating resource, error: {ex}" module.fail_json(msg=msg) - wait_done = wait_for_operation(config, 'create', r) + wait_done = wait_for_operation(config, "create", r) v = "" try: - v = navigate_value(wait_done, ['vpc', 'id']) + v = navigate_value(wait_done, ["vpc", "id"]) except Exception as ex: module.fail_json(msg=str(ex)) - url = build_path(module, 'vpcs/{op_id}', {'op_id': v}) + url = build_path(module, "vpcs/{op_id}", {"op_id": v}) return fetch_resource(module, client, url) @@ -239,7 +249,7 @@ def update(config, link): msg = f"module(hwc_network_vpc): error updating resource, error: {ex}" module.fail_json(msg=msg) - wait_for_operation(config, 'update', r) + wait_for_operation(config, "update", r) return fetch_resource(module, client, link) @@ -283,22 +293,18 @@ def get_id_by_name(config): pass if r is None: return None - r = r.get('vpcs', []) - ids = [ - i.get('id') for i in r if i.get('name', '') == name - ] + r = r.get("vpcs", []) + ids = [i.get("id") for i in r if i.get("name", "") == name] if not ids: return None elif len(ids) == 1: return ids[0] else: - module.fail_json( - msg="Multiple resources with same name are found.") + module.fail_json(msg="Multiple resources with same name are found.") elif none_values: - module.fail_json( - msg="Can not find id by name because url includes None.") + module.fail_json(msg="Can not find id by name because url includes None.") else: - p = {'marker': ''} + p = {"marker": ""} ids = set() while True: r = None @@ -308,17 +314,16 @@ def get_id_by_name(config): pass if r is None: break - r = r.get('vpcs', []) + r = r.get("vpcs", []) if r == []: break for i in r: - if i.get('name') == name: - ids.add(i.get('id')) + if i.get("name") == name: + ids.add(i.get("id")) if len(ids) >= 2: - module.fail_json( - msg="Multiple resources with same name are found.") + module.fail_json(msg="Multiple resources with same name are found.") - p['marker'] = r[-1].get('id') + p["marker"] = r[-1].get("id") return ids.pop() if ids else None @@ -330,11 +335,11 @@ def self_link(module): def resource_to_create(module): params = dict() - v = module.params.get('cidr') + v = module.params.get("cidr") if not is_empty_value(v): params["cidr"] = v - v = module.params.get('name') + v = module.params.get("name") if not is_empty_value(v): params["name"] = v @@ -349,7 +354,7 @@ def resource_to_create(module): def resource_to_update(module): params = dict() - v = module.params.get('cidr') + v = module.params.get("cidr") if not is_empty_value(v): params["cidr"] = v @@ -368,17 +373,16 @@ def _get_editable_properties(module): def response_to_hash(module, response): - """ Remove unnecessary properties from the response. 
-        This is for doing comparisons with Ansible's current parameters.
+    """Remove unnecessary properties from the response.
+    This is for doing comparisons with Ansible's current parameters.
     """
     return {
-        'id': response.get('id'),
-        'name': response.get('name'),
-        'cidr': response.get('cidr'),
-        'status': response.get('status'),
-        'routes': VpcRoutesArray(
-            response.get('routes', []), module).from_response(),
-        'enable_shared_snat': response.get('enable_shared_snat')
+        "id": response.get("id"),
+        "name": response.get("name"),
+        "cidr": response.get("cidr"),
+        "status": response.get("status"),
+        "routes": VpcRoutesArray(response.get("routes", []), module).from_response(),
+        "enable_shared_snat": response.get("enable_shared_snat"),
     }
@@ -386,29 +390,27 @@ def wait_for_operation(config, op_type, op_result):
     module = config.module
     op_id = ""
     try:
-        op_id = navigate_value(op_result, ['vpc', 'id'])
+        op_id = navigate_value(op_result, ["vpc", "id"])
     except Exception as ex:
         module.fail_json(msg=str(ex))
 
-    url = build_path(module, "vpcs/{op_id}", {'op_id': op_id})
-    timeout = 60 * int(module.params['timeouts'][op_type].rstrip('m'))
+    url = build_path(module, "vpcs/{op_id}", {"op_id": op_id})
+    timeout = 60 * int(module.params["timeouts"][op_type].rstrip("m"))
     states = {
-        'create': {
-            'allowed': ['CREATING', 'DONW', 'OK'],
-            'complete': ['OK'],
+        "create": {
+            "allowed": ["CREATING", "DONW", "OK"],
+            "complete": ["OK"],
+        },
+        "update": {
+            "allowed": ["PENDING_UPDATE", "DONW", "OK"],
+            "complete": ["OK"],
         },
-        'update': {
-            'allowed': ['PENDING_UPDATE', 'DONW', 'OK'],
-            'complete': ['OK'],
-        }
     }
 
-    return wait_for_completion(url, timeout, states[op_type]['allowed'],
-                               states[op_type]['complete'], config)
+    return wait_for_completion(url, timeout, states[op_type]["allowed"], states[op_type]["complete"], config)
 
 
-def wait_for_completion(op_uri, timeout, allowed_states,
-                        complete_states, config):
+def wait_for_completion(op_uri, timeout, allowed_states, complete_states, config):
     module = config.module
     client = config.client(get_region(module), "vpc", "project")
 
@@ -421,21 +423,19 @@ def _refresh_status():
 
         status = ""
         try:
-            status = navigate_value(r, ['vpc', 'status'])
+            status = navigate_value(r, ["vpc", "status"])
         except Exception:
             return None, ""
 
         return r, status
 
     try:
-        return wait_to_finish(complete_states, allowed_states,
-                              _refresh_status, timeout)
+        return wait_to_finish(complete_states, allowed_states, _refresh_status, timeout)
     except Exception as ex:
         module.fail_json(msg=str(ex))
 
 
 def wait_for_delete(module, client, link):
-
     def _refresh_status():
         try:
             client.get(link)
@@ -447,7 +447,7 @@ def _refresh_status():
 
         return True, "Pending"
 
-    timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+    timeout = 60 * int(module.params["timeouts"]["delete"].rstrip("m"))
     try:
         return wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
     except Exception as ex:
@@ -475,17 +475,11 @@ def from_response(self):
         return items
 
     def _request_for_item(self, item):
-        return {
-            'destination': item.get('destination'),
-            'nexthop': item.get('next_hop')
-        }
+        return {"destination": item.get("destination"), "nexthop": item.get("next_hop")}
 
     def _response_from_item(self, item):
-        return {
-            'destination': item.get('destination'),
-            'next_hop': item.get('nexthop')
-        }
+        return {"destination": item.get("destination"), "next_hop": item.get("nexthop")}
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/hwc_smn_topic.py b/plugins/modules/hwc_smn_topic.py
index 6fb94a832c6..33bbf478e2e 100644
--- a/plugins/modules/hwc_smn_topic.py
+++ b/plugins/modules/hwc_smn_topic.py
@@ -101,10 +101,16 @@
 # Imports
 ###############################################################################
 
-from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException,
-                                                                                  HwcModule, navigate_value,
-                                                                                  are_different_dicts, is_empty_value,
-                                                                                  build_path, get_region)
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+    Config,
+    HwcClientException,
+    HwcModule,
+    navigate_value,
+    are_different_dicts,
+    is_empty_value,
+    build_path,
+    get_region,
+)
 import re
 
 ###############################################################################
@@ -117,34 +123,33 @@ def main():
     module = HwcModule(
         argument_spec=dict(
-            state=dict(default='present', choices=['present', 'absent'],
-                       type='str'),
-            display_name=dict(type='str'),
-            name=dict(required=True, type='str')
+            state=dict(default="present", choices=["present", "absent"], type="str"),
+            display_name=dict(type="str"),
+            name=dict(required=True, type="str"),
         ),
         supports_check_mode=True,
     )
 
     config = Config(module, "smn")
 
-    state = module.params['state']
+    state = module.params["state"]
 
     if not module.params.get("id"):
-        module.params['id'] = get_resource_id(config)
+        module.params["id"] = get_resource_id(config)
 
     fetch = None
     link = self_link(module)
     # the link will include Nones if required format parameters are missed
-    if not re.search('/None/|/None$', link):
+    if not re.search("/None/|/None$", link):
         client = config.client(get_region(module), "smn", "project")
         fetch = fetch_resource(module, client, link)
     changed = False
 
     if fetch:
-        if state == 'present':
+        if state == "present":
             expect = _get_resource_editable_properties(module)
             current_state = response_to_hash(module, fetch)
-            current = {'display_name': current_state['display_name']}
+            current = {"display_name": current_state["display_name"]}
             if are_different_dicts(expect, current):
                 if not module.check_mode:
                     fetch = update(config)
@@ -158,7 +163,7 @@ def main():
             fetch = {}
             changed = True
     else:
-        if state == 'present':
+        if state == "present":
             if not module.check_mode:
                 fetch = create(config)
                 fetch = response_to_hash(module, fetch)
@@ -166,7 +171,7 @@ def main():
         else:
             fetch = {}
 
-    fetch.update({'changed': changed})
+    fetch.update({"changed": changed})
 
     module.exit_json(**fetch)
@@ -226,12 +231,12 @@ def get_resource(config, result):
 
     v = ""
     try:
-        v = navigate_value(result, ['topic_urn'])
+        v = navigate_value(result, ["topic_urn"])
     except Exception as ex:
         module.fail_json(msg=str(ex))
 
-    d = {'topic_urn': v}
-    url = build_path(module, 'notifications/topics/{topic_urn}', d)
+    d = {"topic_urn": v}
+    url = build_path(module, "notifications/topics/{topic_urn}", d)
 
     return fetch_resource(module, client, url)
@@ -244,8 +249,8 @@ def get_resource_id(config):
     query_link = "?offset={offset}&limit=10"
     link += query_link
 
-    p = {'offset': 0}
-    v = module.params.get('name')
+    p = {"offset": 0}
+    v = module.params.get("name")
     ids = set()
     while True:
         r = None
@@ -255,16 +260,16 @@ def get_resource_id(config):
             pass
         if r is None:
             break
-        r = r.get('topics', [])
+        r = r.get("topics", [])
         if r == []:
             break
         for i in r:
-            if i.get('name') == v:
-                ids.add(i.get('topic_urn'))
+            if i.get("name") == v:
+                ids.add(i.get("topic_urn"))
             if len(ids) >= 2:
                 module.fail_json(msg="Multiple resources are found")
 
-        p['offset'] += 1
+        p["offset"] += 1
 
     return ids.pop() if ids else None
@@ -276,11 +281,11 @@ def self_link(module):
 
 
 def
create_resource_opts(module): params = dict() - v = module.params.get('display_name') + v = module.params.get("display_name") if not is_empty_value(v): params["display_name"] = v - v = module.params.get('name') + v = module.params.get("name") if not is_empty_value(v): params["name"] = v @@ -290,7 +295,7 @@ def create_resource_opts(module): def update_resource_opts(module): params = dict() - v = module.params.get('display_name') + v = module.params.get("display_name") if not is_empty_value(v): params["display_name"] = v @@ -305,15 +310,15 @@ def _get_resource_editable_properties(module): def response_to_hash(module, response): """Remove unnecessary properties from the response. - This is for doing comparisons with Ansible's current parameters. + This is for doing comparisons with Ansible's current parameters. """ return { - 'create_time': response.get('create_time'), - 'display_name': response.get('display_name'), - 'name': response.get('name'), - 'push_policy': _push_policy_convert_from_response(response.get('push_policy')), - 'topic_urn': response.get('topic_urn'), - 'update_time': response.get('update_time') + "create_time": response.get("create_time"), + "display_name": response.get("display_name"), + "name": response.get("name"), + "push_policy": _push_policy_convert_from_response(response.get("push_policy")), + "topic_urn": response.get("topic_urn"), + "update_time": response.get("update_time"), } @@ -324,5 +329,5 @@ def _push_policy_convert_from_response(value): }.get(int(value)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/hwc_vpc_eip.py b/plugins/modules/hwc_vpc_eip.py index 163a9244be7..a3dcb7e718e 100644 --- a/plugins/modules/hwc_vpc_eip.py +++ b/plugins/modules/hwc_vpc_eip.py @@ -228,31 +228,45 @@ """ from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcClientException404, HwcModule, - are_different_dicts, build_path, get_region, is_empty_value, - navigate_value, wait_to_finish) + Config, + HwcClientException, + HwcClientException404, + HwcModule, + are_different_dicts, + build_path, + get_region, + is_empty_value, + navigate_value, + wait_to_finish, +) def build_module(): return HwcModule( argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='5m', type='str'), - update=dict(default='5m', type='str'), - ), default=dict()), - type=dict(type='str', required=True), - dedicated_bandwidth=dict(type='dict', options=dict( - charge_mode=dict(type='str', required=True), - name=dict(type='str', required=True), - size=dict(type='int', required=True) - )), - enterprise_project_id=dict(type='str'), - ip_version=dict(type='int'), - ipv4_address=dict(type='str'), - port_id=dict(type='str'), - shared_bandwidth_id=dict(type='str') + state=dict(default="present", choices=["present", "absent"], type="str"), + timeouts=dict( + type="dict", + options=dict( + create=dict(default="5m", type="str"), + update=dict(default="5m", type="str"), + ), + default=dict(), + ), + type=dict(type="str", required=True), + dedicated_bandwidth=dict( + type="dict", + options=dict( + charge_mode=dict(type="str", required=True), + name=dict(type="str", required=True), + size=dict(type="int", required=True), + ), + ), + enterprise_project_id=dict(type="str"), + ip_version=dict(type="int"), + ipv4_address=dict(type="str"), + port_id=dict(type="str"), + shared_bandwidth_id=dict(type="str"), ), 
supports_check_mode=True, ) @@ -266,7 +280,7 @@ def main(): try: resource = None - if module.params['id']: + if module.params["id"]: resource = True else: v = search_resource(config) @@ -275,11 +289,11 @@ def main(): if len(v) == 1: resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) + module.params["id"] = navigate_value(resource, ["id"]) result = {} changed = False - if module.params['state'] == 'present': + if module.params["state"] == "present": if resource is None: if not module.check_mode: create(config) @@ -293,7 +307,7 @@ def main(): changed = True result = read_resource(config) - result['id'] = module.params.get('id') + result["id"] = module.params.get("id") else: if resource: if not module.check_mode: @@ -304,7 +318,7 @@ def main(): module.fail_json(msg=str(ex)) else: - result['changed'] = changed + result["changed"] = changed module.exit_json(**result) @@ -323,19 +337,19 @@ def user_input_parameters(module): def create(config): module = config.module client = config.client(get_region(module), "vpc", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["create"].rstrip("m")) opts = user_input_parameters(module) params = build_create_parameters(opts) r = send_create_request(module, params, client) obj = async_wait_create(config, r, client, timeout) - module.params['id'] = navigate_value(obj, ["publicip", "id"]) + module.params["id"] = navigate_value(obj, ["publicip", "id"]) def update(config): module = config.module client = config.client(get_region(module), "vpc", "project") - timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["update"].rstrip("m")) opts = user_input_parameters(module) params = build_update_parameters(opts) @@ -367,7 +381,7 @@ def _refresh_status(): return True, "Pending" - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["create"].rstrip("m")) try: wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) except Exception as ex: @@ -413,7 +427,7 @@ def search_resource(config): link = f"publicips{query_link}" result = [] - p = {'marker': ''} + p = {"marker": ""} while True: url = link.format(**p) r = send_list_request(module, client, url) @@ -428,7 +442,7 @@ def search_resource(config): if len(result) > 1: break - p['marker'] = r[-1].get('id') + p["marker"] = r[-1].get("id") return result @@ -455,23 +469,15 @@ def expand_create_bandwidth(d, array_index): v = navigate_value(d, ["dedicated_bandwidth"], array_index) sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index) if v and sbwid: - raise Exception("don't input shared_bandwidth_id and " - "dedicated_bandwidth at same time") + raise Exception("don't input shared_bandwidth_id and dedicated_bandwidth at same time") if not (v or sbwid): - raise Exception("must input shared_bandwidth_id or " - "dedicated_bandwidth") + raise Exception("must input shared_bandwidth_id or dedicated_bandwidth") if sbwid: - return { - "id": sbwid, - "share_type": "WHOLE"} + return {"id": sbwid, "share_type": "WHOLE"} - return { - "charge_mode": v["charge_mode"], - "name": v["name"], - "share_type": "PER", - "size": v["size"]} + return {"charge_mode": v["charge_mode"], "name": v["name"], "share_type": "PER", "size": v["size"]} def expand_create_publicip(d, array_index): @@ -527,10 +533,7 @@ def _query_status(): return None, "" try: - return wait_to_finish( - ["ACTIVE", "DOWN"], - None, - _query_status, 
timeout) + return wait_to_finish(["ACTIVE", "DOWN"], None, _query_status, timeout) except Exception as ex: module.fail_json(msg=f"module(hwc_vpc_eip): error waiting for api(create) to be done, error= {ex}") @@ -585,10 +588,7 @@ def _query_status(): return None, "" try: - return wait_to_finish( - ["ACTIVE", "DOWN"], - None, - _query_status, timeout) + return wait_to_finish(["ACTIVE", "DOWN"], None, _query_status, timeout) except Exception as ex: module.fail_json(msg=f"module(hwc_vpc_eip): error waiting for api(update) to be done, error= {ex}") @@ -665,8 +665,7 @@ def update_properties(module, response, array_index, exclude_output=False): v = flatten_dedicated_bandwidth(response, array_index, v, exclude_output) r["dedicated_bandwidth"] = v - v = navigate_value(response, ["read", "enterprise_project_id"], - array_index) + v = navigate_value(response, ["read", "enterprise_project_id"], array_index) r["enterprise_project_id"] = v v = navigate_value(response, ["read", "ip_version"], array_index) @@ -676,16 +675,14 @@ def update_properties(module, response, array_index, exclude_output=False): r["ipv4_address"] = v if not exclude_output: - v = navigate_value(response, ["read", "public_ipv6_address"], - array_index) + v = navigate_value(response, ["read", "public_ipv6_address"], array_index) r["ipv6_address"] = v v = navigate_value(response, ["read", "port_id"], array_index) r["port_id"] = v if not exclude_output: - v = navigate_value(response, ["read", "private_ip_address"], - array_index) + v = navigate_value(response, ["read", "private_ip_address"], array_index) r["private_ip_address"] = v v = r.get("shared_bandwidth_id") @@ -732,7 +729,6 @@ def flatten_shared_bandwidth_id(d, array_index, current_value, exclude_output): def send_list_request(module, client, url): - r = None try: r = client.get(url) @@ -791,8 +787,7 @@ def expand_list_bandwidth_id(d, array_index): v = navigate_value(d, ["dedicated_bandwidth"], array_index) sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index) if v and sbwid: - raise Exception("don't input shared_bandwidth_id and " - "dedicated_bandwidth at same time") + raise Exception("don't input shared_bandwidth_id and dedicated_bandwidth at same time") return sbwid @@ -833,5 +828,5 @@ def fill_list_resp_body(body): return result -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/hwc_vpc_peering_connect.py b/plugins/modules/hwc_vpc_peering_connect.py index 624642d7b52..019f7ec58fd 100644 --- a/plugins/modules/hwc_vpc_peering_connect.py +++ b/plugins/modules/hwc_vpc_peering_connect.py @@ -134,26 +134,38 @@ """ from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcClientException404, HwcModule, - are_different_dicts, build_path, get_region, is_empty_value, - navigate_value, wait_to_finish) + Config, + HwcClientException, + HwcClientException404, + HwcModule, + are_different_dicts, + build_path, + get_region, + is_empty_value, + navigate_value, + wait_to_finish, +) def build_module(): return HwcModule( argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='15m', type='str'), - ), default=dict()), - local_vpc_id=dict(type='str', required=True), - name=dict(type='str', required=True), - peering_vpc=dict(type='dict', required=True, options=dict( - vpc_id=dict(type='str', required=True), - project_id=dict(type='str') - )), - description=dict(type='str') + 
state=dict(default="present", choices=["present", "absent"], type="str"), + timeouts=dict( + type="dict", + options=dict( + create=dict(default="15m", type="str"), + ), + default=dict(), + ), + local_vpc_id=dict(type="str", required=True), + name=dict(type="str", required=True), + peering_vpc=dict( + type="dict", + required=True, + options=dict(vpc_id=dict(type="str", required=True), project_id=dict(type="str")), + ), + description=dict(type="str"), ), supports_check_mode=True, ) @@ -167,7 +179,7 @@ def main(): try: resource = None - if module.params['id']: + if module.params["id"]: resource = True else: v = search_resource(config) @@ -176,11 +188,11 @@ def main(): if len(v) == 1: resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) + module.params["id"] = navigate_value(resource, ["id"]) result = {} changed = False - if module.params['state'] == 'present': + if module.params["state"] == "present": if resource is None: if not module.check_mode: create(config) @@ -194,7 +206,7 @@ def main(): changed = True result = read_resource(config) - result['id'] = module.params.get('id') + result["id"] = module.params.get("id") else: if resource: if not module.check_mode: @@ -205,7 +217,7 @@ def main(): module.fail_json(msg=str(ex)) else: - result['changed'] = changed + result["changed"] = changed module.exit_json(**result) @@ -221,13 +233,13 @@ def user_input_parameters(module): def create(config): module = config.module client = config.client(get_region(module), "network", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["create"].rstrip("m")) opts = user_input_parameters(module) params = build_create_parameters(opts) r = send_create_request(module, params, client) obj = async_wait_create(config, r, client, timeout) - module.params['id'] = navigate_value(obj, ["peering", "id"]) + module.params["id"] = navigate_value(obj, ["peering", "id"]) def update(config): @@ -259,7 +271,7 @@ def _refresh_status(): return True, "Pending" - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["create"].rstrip("m")) try: wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) except Exception as ex: @@ -305,7 +317,7 @@ def search_resource(config): link = f"v2.0/vpc/peerings{query_link}" result = [] - p = {'marker': ''} + p = {"marker": ""} while True: url = link.format(**p) r = send_list_request(module, client, url) @@ -320,7 +332,7 @@ def search_resource(config): if len(result) > 1: break - p['marker'] = r[-1].get('id') + p["marker"] = r[-1].get("id") return result @@ -413,10 +425,7 @@ def _query_status(): return None, "" try: - return wait_to_finish( - ["ACTIVE"], - ["PENDING_ACCEPTANCE"], - _query_status, timeout) + return wait_to_finish(["ACTIVE"], ["PENDING_ACCEPTANCE"], _query_status, timeout) except Exception as ex: module.fail_json(msg=f"module(hwc_vpc_peering_connect): error waiting for api(create) to be done, error= {ex}") @@ -529,8 +538,7 @@ def update_properties(module, response, array_index, exclude_output=False): v = navigate_value(response, ["read", "description"], array_index) r["description"] = v - v = navigate_value(response, ["read", "request_vpc_info", "vpc_id"], - array_index) + v = navigate_value(response, ["read", "request_vpc_info", "vpc_id"], array_index) r["local_vpc_id"] = v v = navigate_value(response, ["read", "name"], array_index) @@ -550,8 +558,7 @@ def flatten_peering_vpc(d, array_index, current_value, 
exclude_output): result = dict() has_init_value = False - v = navigate_value(d, ["read", "accept_vpc_info", "tenant_id"], - array_index) + v = navigate_value(d, ["read", "accept_vpc_info", "tenant_id"], array_index) result["project_id"] = v v = navigate_value(d, ["read", "accept_vpc_info", "vpc_id"], array_index) @@ -567,7 +574,6 @@ def flatten_peering_vpc(d, array_index, current_value, exclude_output): def send_list_request(module, client, url): - r = None try: r = client.get(url) @@ -675,5 +681,5 @@ def fill_list_resp_request_vpc_info(value): return result -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/hwc_vpc_port.py b/plugins/modules/hwc_vpc_port.py index e273782bdac..926fc3c05c6 100644 --- a/plugins/modules/hwc_vpc_port.py +++ b/plugins/modules/hwc_vpc_port.py @@ -196,35 +196,43 @@ """ from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcClientException404, HwcModule, - are_different_dicts, build_path, get_region, is_empty_value, - navigate_value, wait_to_finish) + Config, + HwcClientException, + HwcClientException404, + HwcModule, + are_different_dicts, + build_path, + get_region, + is_empty_value, + navigate_value, + wait_to_finish, +) def build_module(): return HwcModule( argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - timeouts=dict(type='dict', options=dict( - create=dict(default='15m', type='str'), - ), default=dict()), - subnet_id=dict(type='str', required=True), - admin_state_up=dict(type='bool'), - allowed_address_pairs=dict( - type='list', elements='dict', + state=dict(default="present", choices=["present", "absent"], type="str"), + timeouts=dict( + type="dict", options=dict( - ip_address=dict(type='str'), - mac_address=dict(type='str') + create=dict(default="15m", type="str"), ), + default=dict(), + ), + subnet_id=dict(type="str", required=True), + admin_state_up=dict(type="bool"), + allowed_address_pairs=dict( + type="list", + elements="dict", + options=dict(ip_address=dict(type="str"), mac_address=dict(type="str")), ), - extra_dhcp_opts=dict(type='list', elements='dict', options=dict( - name=dict(type='str'), - value=dict(type='str') - )), - ip_address=dict(type='str'), - name=dict(type='str'), - security_groups=dict(type='list', elements='str') + extra_dhcp_opts=dict( + type="list", elements="dict", options=dict(name=dict(type="str"), value=dict(type="str")) + ), + ip_address=dict(type="str"), + name=dict(type="str"), + security_groups=dict(type="list", elements="str"), ), supports_check_mode=True, ) @@ -238,7 +246,7 @@ def main(): try: resource = None - if module.params['id']: + if module.params["id"]: resource = True else: v = search_resource(config) @@ -247,11 +255,11 @@ def main(): if len(v) == 1: resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) + module.params["id"] = navigate_value(resource, ["id"]) result = {} changed = False - if module.params['state'] == 'present': + if module.params["state"] == "present": if resource is None: if not module.check_mode: create(config) @@ -265,7 +273,7 @@ def main(): changed = True result = read_resource(config) - result['id'] = module.params.get('id') + result["id"] = module.params.get("id") else: if resource: if not module.check_mode: @@ -276,7 +284,7 @@ def main(): module.fail_json(msg=str(ex)) else: - result['changed'] = changed + result["changed"] = changed module.exit_json(**result) @@ -295,13 +303,13 @@ def user_input_parameters(module): def 
create(config): module = config.module client = config.client(get_region(module), "vpc", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["create"].rstrip("m")) opts = user_input_parameters(module) params = build_create_parameters(opts) r = send_create_request(module, params, client) obj = async_wait_create(config, r, client, timeout) - module.params['id'] = navigate_value(obj, ["port", "id"]) + module.params["id"] = navigate_value(obj, ["port", "id"]) def update(config): @@ -333,7 +341,7 @@ def _refresh_status(): return True, "Pending" - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["create"].rstrip("m")) try: wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) except Exception as ex: @@ -387,7 +395,7 @@ def search_resource(config): link = f"ports{query_link}" result = [] - p = {'marker': ''} + p = {"marker": ""} while True: url = link.format(**p) r = send_list_request(module, client, url) @@ -402,7 +410,7 @@ def search_resource(config): if len(result) > 1: break - p['marker'] = r[-1].get('id') + p["marker"] = r[-1].get("id") return result @@ -453,8 +461,7 @@ def expand_create_allowed_address_pairs(d, array_index): req = [] - v = navigate_value(d, ["allowed_address_pairs"], - new_array_index) + v = navigate_value(d, ["allowed_address_pairs"], new_array_index) if not v: return req n = len(v) @@ -462,13 +469,11 @@ def expand_create_allowed_address_pairs(d, array_index): new_array_index["allowed_address_pairs"] = i transformed = dict() - v = navigate_value(d, ["allowed_address_pairs", "ip_address"], - new_array_index) + v = navigate_value(d, ["allowed_address_pairs", "ip_address"], new_array_index) if not is_empty_value(v): transformed["ip_address"] = v - v = navigate_value(d, ["allowed_address_pairs", "mac_address"], - new_array_index) + v = navigate_value(d, ["allowed_address_pairs", "mac_address"], new_array_index) if not is_empty_value(v): transformed["mac_address"] = v @@ -485,8 +490,7 @@ def expand_create_extra_dhcp_opts(d, array_index): req = [] - v = navigate_value(d, ["extra_dhcp_opts"], - new_array_index) + v = navigate_value(d, ["extra_dhcp_opts"], new_array_index) if not v: return req n = len(v) @@ -564,10 +568,7 @@ def _query_status(): return None, "" try: - return wait_to_finish( - ["ACTIVE", "DOWN"], - ["BUILD"], - _query_status, timeout) + return wait_to_finish(["ACTIVE", "DOWN"], ["BUILD"], _query_status, timeout) except Exception as ex: module.fail_json(msg=f"module(hwc_vpc_port): error waiting for api(create) to be done, error= {ex}") @@ -606,8 +607,7 @@ def expand_update_allowed_address_pairs(d, array_index): req = [] - v = navigate_value(d, ["allowed_address_pairs"], - new_array_index) + v = navigate_value(d, ["allowed_address_pairs"], new_array_index) if not v: return req n = len(v) @@ -615,13 +615,11 @@ def expand_update_allowed_address_pairs(d, array_index): new_array_index["allowed_address_pairs"] = i transformed = dict() - v = navigate_value(d, ["allowed_address_pairs", "ip_address"], - new_array_index) + v = navigate_value(d, ["allowed_address_pairs", "ip_address"], new_array_index) if not is_empty_value(v): transformed["ip_address"] = v - v = navigate_value(d, ["allowed_address_pairs", "mac_address"], - new_array_index) + v = navigate_value(d, ["allowed_address_pairs", "mac_address"], new_array_index) if not is_empty_value(v): transformed["mac_address"] = v @@ -638,8 +636,7 @@ def 
expand_update_extra_dhcp_opts(d, array_index): req = [] - v = navigate_value(d, ["extra_dhcp_opts"], - new_array_index) + v = navigate_value(d, ["extra_dhcp_opts"], new_array_index) if not v: return req n = len(v) @@ -802,8 +799,7 @@ def update_properties(module, response, array_index, exclude_output=False): v = flatten_extra_dhcp_opts(response, array_index, v, exclude_output) r["extra_dhcp_opts"] = v - v = navigate_value(response, ["read", "fixed_ips", "ip_address"], - array_index) + v = navigate_value(response, ["read", "fixed_ips", "ip_address"], array_index) r["ip_address"] = v if not exclude_output: @@ -822,8 +818,7 @@ def update_properties(module, response, array_index, exclude_output=False): return r -def flatten_allowed_address_pairs(d, array_index, - current_value, exclude_output): +def flatten_allowed_address_pairs(d, array_index, current_value, exclude_output): n = 0 result = current_value has_init_value = True @@ -832,8 +827,7 @@ def flatten_allowed_address_pairs(d, array_index, else: has_init_value = False result = [] - v = navigate_value(d, ["read", "allowed_address_pairs"], - array_index) + v = navigate_value(d, ["read", "allowed_address_pairs"], array_index) if not v: return current_value n = len(v) @@ -849,12 +843,10 @@ def flatten_allowed_address_pairs(d, array_index, if len(result) >= (i + 1) and result[i]: val = result[i] - v = navigate_value(d, ["read", "allowed_address_pairs", "ip_address"], - new_array_index) + v = navigate_value(d, ["read", "allowed_address_pairs", "ip_address"], new_array_index) val["ip_address"] = v - v = navigate_value(d, ["read", "allowed_address_pairs", "mac_address"], - new_array_index) + v = navigate_value(d, ["read", "allowed_address_pairs", "mac_address"], new_array_index) val["mac_address"] = v if len(result) >= (i + 1): @@ -877,8 +869,7 @@ def flatten_extra_dhcp_opts(d, array_index, current_value, exclude_output): else: has_init_value = False result = [] - v = navigate_value(d, ["read", "extra_dhcp_opts"], - array_index) + v = navigate_value(d, ["read", "extra_dhcp_opts"], array_index) if not v: return current_value n = len(v) @@ -894,12 +885,10 @@ def flatten_extra_dhcp_opts(d, array_index, current_value, exclude_output): if len(result) >= (i + 1) and result[i]: val = result[i] - v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_name"], - new_array_index) + v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_name"], new_array_index) val["name"] = v - v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_value"], - new_array_index) + v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_value"], new_array_index) val["value"] = v if len(result) >= (i + 1): @@ -914,7 +903,6 @@ def flatten_extra_dhcp_opts(d, array_index, current_value, exclude_output): def send_list_request(module, client, url): - r = None try: r = client.get(url) @@ -977,20 +965,17 @@ def expand_list_allowed_address_pairs(d, array_index): req = [] - v = navigate_value(d, ["allowed_address_pairs"], - new_array_index) + v = navigate_value(d, ["allowed_address_pairs"], new_array_index) n = len(v) if v else 1 for i in range(n): new_array_index["allowed_address_pairs"] = i transformed = dict() - v = navigate_value(d, ["allowed_address_pairs", "ip_address"], - new_array_index) + v = navigate_value(d, ["allowed_address_pairs", "ip_address"], new_array_index) transformed["ip_address"] = v - v = navigate_value(d, ["allowed_address_pairs", "mac_address"], - new_array_index) + v = navigate_value(d, ["allowed_address_pairs", "mac_address"], new_array_index) 
transformed["mac_address"] = v for v in transformed.values(): @@ -1008,8 +993,7 @@ def expand_list_extra_dhcp_opts(d, array_index): req = [] - v = navigate_value(d, ["extra_dhcp_opts"], - new_array_index) + v = navigate_value(d, ["extra_dhcp_opts"], new_array_index) n = len(v) if v else 1 for i in range(n): @@ -1142,5 +1126,5 @@ def fill_list_resp_fixed_ips(value): return result -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/hwc_vpc_private_ip.py b/plugins/modules/hwc_vpc_private_ip.py index 0381335a566..f451c133e06 100644 --- a/plugins/modules/hwc_vpc_private_ip.py +++ b/plugins/modules/hwc_vpc_private_ip.py @@ -89,17 +89,23 @@ """ from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value) + Config, + HwcClientException, + HwcModule, + are_different_dicts, + build_path, + get_region, + is_empty_value, + navigate_value, +) def build_module(): return HwcModule( argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - subnet_id=dict(type='str', required=True), - ip_address=dict(type='str') + state=dict(default="present", choices=["present", "absent"], type="str"), + subnet_id=dict(type="str", required=True), + ip_address=dict(type="str"), ), supports_check_mode=True, ) @@ -113,7 +119,7 @@ def main(): try: resource = None - if module.params['id']: + if module.params["id"]: resource = True else: v = search_resource(config) @@ -122,11 +128,11 @@ def main(): if len(v) == 1: resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) + module.params["id"] = navigate_value(resource, ["id"]) result = {} changed = False - if module.params['state'] == 'present': + if module.params["state"] == "present": if resource is None: if not module.check_mode: create(config) @@ -136,10 +142,11 @@ def main(): expect = user_input_parameters(module) if are_different_dicts(expect, current): raise Exception( - f"Cannot change option from ({current}) to ({expect})of an existing resource.({module.params.get('id')})") + f"Cannot change option from ({current}) to ({expect})of an existing resource.({module.params.get('id')})" + ) result = read_resource(config) - result['id'] = module.params.get('id') + result["id"] = module.params.get("id") else: if resource: if not module.check_mode: @@ -150,7 +157,7 @@ def main(): module.fail_json(msg=str(ex)) else: - result['changed'] = changed + result["changed"] = changed module.exit_json(**result) @@ -168,8 +175,7 @@ def create(config): params = build_create_parameters(opts) r = send_create_request(module, params, client) - module.params['id'] = navigate_value(r, ["privateips", "id"], - {"privateips": 0}) + module.params["id"] = navigate_value(r, ["privateips", "id"], {"privateips": 0}) def delete(config): @@ -206,7 +212,7 @@ def search_resource(config): link = build_path(module, "subnets/{subnet_id}/privateips") + query_link result = [] - p = {'marker': ''} + p = {"marker": ""} while True: url = link.format(**p) r = send_list_request(module, client, url) @@ -221,7 +227,7 @@ def search_resource(config): if len(result) > 1: break - p['marker'] = r[-1].get('id') + p["marker"] = r[-1].get("id") return result @@ -306,7 +312,6 @@ def update_properties(module, response, array_index, exclude_output=False): def send_list_request(module, client, url): - r = None try: r = client.get(url) @@ -343,5 +348,5 @@ def fill_list_resp_body(body): return result 
diff --git a/plugins/modules/hwc_vpc_route.py b/plugins/modules/hwc_vpc_route.py
index 4d07108fb9d..7040318a5b3 100644
--- a/plugins/modules/hwc_vpc_route.py
+++ b/plugins/modules/hwc_vpc_route.py
@@ -119,20 +119,26 @@
 """

 from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
-    Config, HwcClientException, HwcModule, are_different_dicts, build_path,
-    get_region, is_empty_value, navigate_value)
+    Config,
+    HwcClientException,
+    HwcModule,
+    are_different_dicts,
+    build_path,
+    get_region,
+    is_empty_value,
+    navigate_value,
+)


 def build_module():
     return HwcModule(
         argument_spec=dict(
-            state=dict(default='present', choices=['present', 'absent'],
-                       type='str'),
-            destination=dict(type='str', required=True),
-            next_hop=dict(type='str', required=True),
-            vpc_id=dict(type='str', required=True),
-            type=dict(type='str', default='peering'),
-            id=dict(type='str')
+            state=dict(default="present", choices=["present", "absent"], type="str"),
+            destination=dict(type="str", required=True),
+            next_hop=dict(type="str", required=True),
+            vpc_id=dict(type="str", required=True),
+            type=dict(type="str", default="peering"),
+            id=dict(type="str"),
         ),
         supports_check_mode=True,
     )
@@ -148,11 +154,12 @@ def main():
         resource = None
         if module.params.get("id"):
             resource = get_resource_by_id(config)
-            if module.params['state'] == 'present':
+            if module.params["state"] == "present":
                 opts = user_input_parameters(module)
                 if are_different_dicts(resource, opts):
                     raise Exception(
-                        f"Cannot change option from ({resource}) to ({opts}) for an existing route.({config.module.params['id']})")
+                        f"Cannot change option from ({resource}) to ({opts}) for an existing route ({config.module.params['id']})."
+                    )
         else:
             v = search_resource(config)
             if len(v) > 1:
@@ -160,11 +167,11 @@ def main():

             if len(v) == 1:
                 resource = update_properties(module, {"read": v[0]}, None)
-                module.params['id'] = navigate_value(resource, ["id"])
+                module.params["id"] = navigate_value(resource, ["id"])

         result = {}
         changed = False
-        if module.params['state'] == 'present':
+        if module.params["state"] == "present":
             if resource is None:
                 if not module.check_mode:
                     resource = create(config)
@@ -181,7 +188,7 @@ def main():
         module.fail_json(msg=str(ex))
     else:
-        result['changed'] = changed
+        result["changed"] = changed

         module.exit_json(**result)
@@ -202,7 +209,7 @@ def create(config):
     params = build_create_parameters(opts)
     r = send_create_request(module, params, client)
-    module.params['id'] = navigate_value(r, ["route", "id"])
+    module.params["id"] = navigate_value(r, ["route", "id"])

     result = update_properties(module, {"read": fill_resp_body(r)}, None)
     return result
@@ -259,7 +266,7 @@ def search_resource(config):
     link = f"v2.0/vpc/routes{query_link}"

     result = []
-    p = {'marker': ''}
+    p = {"marker": ""}
     while True:
         url = link.format(**p)
         r = send_list_request(module, client, url)
@@ -274,7 +281,7 @@ def search_resource(config):
         if len(result) > 1:
             break

-        p['marker'] = r[-1].get('id')
+        p["marker"] = r[-1].get("id")

     return result
@@ -380,7 +387,6 @@ def update_properties(module, response, array_index, exclude_output=False):


 def send_list_request(module, client, url):
-    r = None
     try:
         r = client.get(url)
@@ -428,5 +434,5 @@ def fill_list_resp_body(body):
     return result


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
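Editor's note: the main() bodies in these modules follow one idempotency skeleton -- look the resource up, then act only when the observed state differs from the requested one. A minimal sketch of that flow (illustrative only; find/create/delete are hypothetical callables standing in for the module helpers):

def ensure_state(params, find, create, delete, check_mode=False):
    resource = find(params)  # None when nothing matches the given options
    changed = False
    if params["state"] == "present":
        if resource is None:
            if not check_mode:
                create(params)
            changed = True  # check mode still reports the pending change
    else:
        if resource is not None:
            if not check_mode:
                delete(params)
            changed = True
    return changed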
diff --git a/plugins/modules/hwc_vpc_security_group.py b/plugins/modules/hwc_vpc_security_group.py
index d321fae0a02..87be2a663b7 100644
--- a/plugins/modules/hwc_vpc_security_group.py
+++ b/plugins/modules/hwc_vpc_security_group.py
@@ -147,18 +147,24 @@
 """

 from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
-    Config, HwcClientException, HwcModule, are_different_dicts, build_path,
-    get_region, is_empty_value, navigate_value)
+    Config,
+    HwcClientException,
+    HwcModule,
+    are_different_dicts,
+    build_path,
+    get_region,
+    is_empty_value,
+    navigate_value,
+)


 def build_module():
     return HwcModule(
         argument_spec=dict(
-            state=dict(default='present', choices=['present', 'absent'],
-                       type='str'),
-            name=dict(type='str', required=True),
-            enterprise_project_id=dict(type='str'),
-            vpc_id=dict(type='str')
+            state=dict(default="present", choices=["present", "absent"], type="str"),
+            name=dict(type="str", required=True),
+            enterprise_project_id=dict(type="str"),
+            vpc_id=dict(type="str"),
         ),
         supports_check_mode=True,
     )
@@ -174,7 +180,7 @@ def main():
         resource = None
         if module.params.get("id"):
             resource = read_resource(config)
-            if module.params['state'] == 'present':
+            if module.params["state"] == "present":
                 check_resource_option(resource, module)
         else:
             v = search_resource(config)
@@ -183,11 +189,11 @@ def main():

             if len(v) == 1:
                 resource = update_properties(module, {"read": v[0]}, None)
-                module.params['id'] = navigate_value(resource, ["id"])
+                module.params["id"] = navigate_value(resource, ["id"])

         result = {}
         changed = False
-        if module.params['state'] == 'present':
+        if module.params["state"] == "present":
             if resource is None:
                 if not module.check_mode:
                     resource = create(config)
@@ -204,7 +210,7 @@ def main():
         module.fail_json(msg=str(ex))
     else:
-        result['changed'] = changed
+        result["changed"] = changed

         module.exit_json(**result)
@@ -229,7 +235,8 @@ def check_resource_option(resource, module):

     if are_different_dicts(resource, opts):
         raise Exception(
-            f"Cannot change option from ({resource}) to ({opts}) for an existing security group({module.params['id']}).")
+            f"Cannot change option from ({resource}) to ({opts}) for an existing security group ({module.params['id']})."
+        )


 def create(config):
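Editor's note: check_resource_option() above guards immutable resources by diffing the requested options against the live object (hwc_utils.are_different_dicts does the comparison). A simplified stand-in using plain equality, to show the shape of the guard (illustrative only):

def assert_unchanged(current, expect, resource_id):
    # The real modules use are_different_dicts(expect, current) instead of !=
    if current != expect:
        raise Exception(
            f"Cannot change option from ({current}) to ({expect}) "
            f"for an existing security group ({resource_id})."
        )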
@@ -239,7 +246,7 @@ def create(config):
     params = build_create_parameters(opts)
     r = send_create_request(module, params, client)
-    module.params['id'] = navigate_value(r, ["security_group", "id"])
+    module.params["id"] = navigate_value(r, ["security_group", "id"])

     result = update_properties(module, {"read": fill_read_resp_body(r)}, None)
     return result
@@ -291,7 +298,7 @@ def search_resource(config):
     link = f"security-groups{query_link}"

     result = []
-    p = {'marker': ''}
+    p = {"marker": ""}
     while True:
         url = link.format(**p)
         r = send_list_request(module, client, url)
@@ -306,7 +313,7 @@ def search_resource(config):
         if len(result) > 1:
             break

-        p['marker'] = r[-1].get('id')
+        p["marker"] = r[-1].get("id")

     return result
@@ -425,8 +432,7 @@ def fill_read_resp_security_group_rules(value):

 def update_properties(module, response, array_index, exclude_output=False):
     r = user_input_parameters(module)
-    v = navigate_value(response, ["read", "enterprise_project_id"],
-                       array_index)
+    v = navigate_value(response, ["read", "enterprise_project_id"], array_index)
     r["enterprise_project_id"] = v

     v = navigate_value(response, ["read", "name"], array_index)
@@ -455,8 +461,7 @@ def flatten_rules(d, array_index, current_value, exclude_output):
     else:
         has_init_value = False
         result = []
-        v = navigate_value(d, ["read", "security_group_rules"],
-                           array_index)
+        v = navigate_value(d, ["read", "security_group_rules"], array_index)
         if not v:
             return current_value

         n = len(v)
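Editor's note: the flatten_rules() hunk that follows collapses ten near-identical navigate_value calls, one per rule attribute. The same effect could be expressed data-driven; a hedged sketch of that alternative (not what the patch does -- copy_rule_fields and _RULE_FIELDS are hypothetical):

_RULE_FIELDS = (
    "description", "direction", "ethertype", "id",
    "port_range_max", "port_range_min", "protocol",
    "remote_address_group_id", "remote_group_id", "remote_ip_prefix",
)

def copy_rule_fields(d, new_array_index, val, navigate_value):
    # One loop replaces the ten hand-written `if not exclude_output:` blocks below.
    for field in _RULE_FIELDS:
        val[field] = navigate_value(d, ["read", "security_group_rules", field], new_array_index)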
navigate_value(d, ["read", "security_group_rules", "remote_group_id"], new_array_index) val["remote_group_id"] = v if not exclude_output: - v = navigate_value(d, ["read", "security_group_rules", "remote_ip_prefix"], - new_array_index) + v = navigate_value(d, ["read", "security_group_rules", "remote_ip_prefix"], new_array_index) val["remote_ip_prefix"] = v if len(result) >= (i + 1): @@ -534,7 +529,6 @@ def flatten_rules(d, array_index, current_value, exclude_output): def send_list_request(module, client, url): - r = None try: r = client.get(url) @@ -616,5 +610,5 @@ def fill_list_resp_security_group_rules(value): return result -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/hwc_vpc_security_group_rule.py b/plugins/modules/hwc_vpc_security_group_rule.py index 98852f07a03..3aae7fe2be5 100644 --- a/plugins/modules/hwc_vpc_security_group_rule.py +++ b/plugins/modules/hwc_vpc_security_group_rule.py @@ -166,24 +166,30 @@ """ from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( - Config, HwcClientException, HwcModule, are_different_dicts, build_path, - get_region, is_empty_value, navigate_value) + Config, + HwcClientException, + HwcModule, + are_different_dicts, + build_path, + get_region, + is_empty_value, + navigate_value, +) def build_module(): return HwcModule( argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], - type='str'), - direction=dict(type='str', required=True), - security_group_id=dict(type='str', required=True), - description=dict(type='str'), - ethertype=dict(type='str'), - port_range_max=dict(type='int'), - port_range_min=dict(type='int'), - protocol=dict(type='str'), - remote_group_id=dict(type='str'), - remote_ip_prefix=dict(type='str') + state=dict(default="present", choices=["present", "absent"], type="str"), + direction=dict(type="str", required=True), + security_group_id=dict(type="str", required=True), + description=dict(type="str"), + ethertype=dict(type="str"), + port_range_max=dict(type="int"), + port_range_min=dict(type="int"), + protocol=dict(type="str"), + remote_group_id=dict(type="str"), + remote_ip_prefix=dict(type="str"), ), supports_check_mode=True, ) @@ -197,7 +203,7 @@ def main(): try: resource = None - if module.params['id']: + if module.params["id"]: resource = True else: v = search_resource(config) @@ -206,11 +212,11 @@ def main(): if len(v) == 1: resource = v[0] - module.params['id'] = navigate_value(resource, ["id"]) + module.params["id"] = navigate_value(resource, ["id"]) result = {} changed = False - if module.params['state'] == 'present': + if module.params["state"] == "present": if resource is None: if not module.check_mode: create(config) @@ -220,9 +226,10 @@ def main(): expect = user_input_parameters(module) if are_different_dicts(expect, current): raise Exception( - f"Cannot change option from ({current}) to ({expect}) for an existing security group({module.params.get('id')}).") + f"Cannot change option from ({current}) to ({expect}) for an existing security group({module.params.get('id')})." 
@@ -220,9 +226,10 @@ def main():
                 expect = user_input_parameters(module)
                 if are_different_dicts(expect, current):
                     raise Exception(
-                        f"Cannot change option from ({current}) to ({expect}) for an existing security group({module.params.get('id')}).")
+                        f"Cannot change option from ({current}) to ({expect}) for an existing security group ({module.params.get('id')})."
+                    )

                 result = read_resource(config)
-            result['id'] = module.params.get('id')
+            result["id"] = module.params.get("id")
         else:
             if resource:
                 if not module.check_mode:
@@ -233,7 +240,7 @@ def main():
         module.fail_json(msg=str(ex))
     else:
-        result['changed'] = changed
+        result["changed"] = changed

         module.exit_json(**result)
@@ -258,7 +265,7 @@ def create(config):
     params = build_create_parameters(opts)
     r = send_create_request(module, params, client)
-    module.params['id'] = navigate_value(r, ["security_group_rule", "id"])
+    module.params["id"] = navigate_value(r, ["security_group_rule", "id"])


 def delete(config):
@@ -298,7 +305,7 @@ def search_resource(config):
     link = f"security-group-rules{query_link}"

     result = []
-    p = {'marker': ''}
+    p = {"marker": ""}
     while True:
         url = link.format(**p)
         r = send_list_request(module, client, url)
@@ -313,7 +320,7 @@ def search_resource(config):
         if len(result) > 1:
             break

-        p['marker'] = r[-1].get('id')
+        p["marker"] = r[-1].get("id")

     return result
@@ -463,7 +470,6 @@ def update_properties(module, response, array_index, exclude_output=False):


 def send_list_request(module, client, url):
-    r = None
     try:
         r = client.get(url)
@@ -539,5 +545,5 @@ def fill_list_resp_body(body):
     return result


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/hwc_vpc_subnet.py b/plugins/modules/hwc_vpc_subnet.py
index fb8ec4890eb..0aa8751f1a7 100644
--- a/plugins/modules/hwc_vpc_subnet.py
+++ b/plugins/modules/hwc_vpc_subnet.py
@@ -152,27 +152,38 @@
 """

 from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
-    Config, HwcClientException, HwcClientException404, HwcModule,
-    are_different_dicts, build_path, get_region, is_empty_value,
-    navigate_value, wait_to_finish)
+    Config,
+    HwcClientException,
+    HwcClientException404,
+    HwcModule,
+    are_different_dicts,
+    build_path,
+    get_region,
+    is_empty_value,
+    navigate_value,
+    wait_to_finish,
+)


 def build_module():
     return HwcModule(
         argument_spec=dict(
-            state=dict(default='present', choices=['present', 'absent'],
-                       type='str'),
-            timeouts=dict(type='dict', options=dict(
-                create=dict(default='15m', type='str'),
-                update=dict(default='15m', type='str'),
-            ), default=dict()),
-            cidr=dict(type='str', required=True),
-            gateway_ip=dict(type='str', required=True),
-            name=dict(type='str', required=True),
-            vpc_id=dict(type='str', required=True),
-            availability_zone=dict(type='str'),
-            dhcp_enable=dict(type='bool'),
-            dns_address=dict(type='list', elements='str')
+            state=dict(default="present", choices=["present", "absent"], type="str"),
+            timeouts=dict(
+                type="dict",
+                options=dict(
+                    create=dict(default="15m", type="str"),
+                    update=dict(default="15m", type="str"),
+                ),
+                default=dict(),
+            ),
+            cidr=dict(type="str", required=True),
+            gateway_ip=dict(type="str", required=True),
+            name=dict(type="str", required=True),
+            vpc_id=dict(type="str", required=True),
+            availability_zone=dict(type="str"),
+            dhcp_enable=dict(type="bool"),
+            dns_address=dict(type="list", elements="str"),
         ),
         supports_check_mode=True,
     )
@@ -186,7 +197,7 @@ def main():

     try:
         resource = None
-        if module.params.get('id'):
+        if module.params.get("id"):
             resource = True
         else:
             v = search_resource(config)
@@ -195,11 +206,11 @@ def main():

             if len(v) == 1:
                 resource = v[0]
-                module.params['id'] = navigate_value(resource, ["id"])
+                module.params["id"] = navigate_value(resource, ["id"])

         result = {}
         changed = False
-        if module.params['state'] == 'present':
+        if module.params["state"] == "present":
             if resource is None:
                 if not
module.check_mode: create(config) @@ -213,7 +224,7 @@ def main(): changed = True result = read_resource(config) - result['id'] = module.params.get('id') + result["id"] = module.params.get("id") else: if resource: if not module.check_mode: @@ -224,7 +235,7 @@ def main(): module.fail_json(msg=str(ex)) else: - result['changed'] = changed + result["changed"] = changed module.exit_json(**result) @@ -243,19 +254,19 @@ def user_input_parameters(module): def create(config): module = config.module client = config.client(get_region(module), "vpc", "project") - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["create"].rstrip("m")) opts = user_input_parameters(module) params = build_create_parameters(opts) r = send_create_request(module, params, client) obj = async_wait_create(config, r, client, timeout) - module.params['id'] = navigate_value(obj, ["subnet", "id"]) + module.params["id"] = navigate_value(obj, ["subnet", "id"]) def update(config): module = config.module client = config.client(get_region(module), "vpc", "project") - timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["update"].rstrip("m")) opts = user_input_parameters(module) params = build_update_parameters(opts) @@ -283,7 +294,7 @@ def _refresh_status(): return True, "Pending" - timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + timeout = 60 * int(module.params["timeouts"]["create"].rstrip("m")) try: wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) except Exception as ex: @@ -320,7 +331,7 @@ def search_resource(config): link = f"subnets{query_link}" result = [] - p = {'marker': ''} + p = {"marker": ""} while True: url = link.format(**p) r = send_list_request(module, client, url) @@ -335,7 +346,7 @@ def search_resource(config): if len(result) > 1: break - p['marker'] = r[-1].get('id') + p["marker"] = r[-1].get("id") return result @@ -437,10 +448,7 @@ def _query_status(): return None, "" try: - return wait_to_finish( - ["ACTIVE"], - ["UNKNOWN"], - _query_status, timeout) + return wait_to_finish(["ACTIVE"], ["UNKNOWN"], _query_status, timeout) except Exception as ex: module.fail_json(msg=f"module(hwc_vpc_subnet): error waiting for api(create) to be done, error= {ex}") @@ -531,10 +539,7 @@ def _query_status(): return None, "" try: - return wait_to_finish( - ["ACTIVE"], - ["UNKNOWN"], - _query_status, timeout) + return wait_to_finish(["ACTIVE"], ["UNKNOWN"], _query_status, timeout) except Exception as ex: module.fail_json(msg=f"module(hwc_vpc_subnet): error waiting for api(update) to be done, error= {ex}") @@ -624,7 +629,6 @@ def update_properties(module, response, array_index, exclude_output=False): def send_list_request(module, client, url): - r = None try: r = client.get(url) @@ -706,5 +710,5 @@ def fill_list_resp_body(body): return result -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ibm_sa_domain.py b/plugins/modules/ibm_sa_domain.py index db204a1a1e1..ad3ad2378be 100644 --- a/plugins/modules/ibm_sa_domain.py +++ b/plugins/modules/ibm_sa_domain.py @@ -118,15 +118,19 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ - connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import ( + execute_pyxcli_command, + connect_ssl, + 
spectrum_accelerate_spec, + is_pyxcli_installed, +) def main(): argument_spec = spectrum_accelerate_spec() argument_spec.update( dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), domain=dict(required=True), size=dict(), max_dms=dict(), @@ -137,7 +141,7 @@ def main(): max_volumes=dict(), perf_class=dict(), hard_capacity=dict(), - soft_capacity=dict() + soft_capacity=dict(), ) ) @@ -146,19 +150,16 @@ def main(): is_pyxcli_installed(module) xcli_client = connect_ssl(module) - domain = xcli_client.cmd.domain_list( - domain=module.params['domain']).as_single_element - state = module.params['state'] + domain = xcli_client.cmd.domain_list(domain=module.params["domain"]).as_single_element + state = module.params["state"] state_changed = False msg = f"Domain '{module.params['domain']}'" - if state == 'present' and not domain: - state_changed = execute_pyxcli_command( - module, 'domain_create', xcli_client) + if state == "present" and not domain: + state_changed = execute_pyxcli_command(module, "domain_create", xcli_client) msg += " created successfully." - elif state == 'absent' and domain: - state_changed = execute_pyxcli_command( - module, 'domain_delete', xcli_client) + elif state == "absent" and domain: + state_changed = execute_pyxcli_command(module, "domain_delete", xcli_client) msg += " deleted successfully." else: msg += " state unchanged." @@ -166,5 +167,5 @@ def main(): module.exit_json(changed=state_changed, msg=msg) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ibm_sa_host.py b/plugins/modules/ibm_sa_host.py index 17615390f06..3b4d6e153f5 100644 --- a/plugins/modules/ibm_sa_host.py +++ b/plugins/modules/ibm_sa_host.py @@ -83,15 +83,19 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ - connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import ( + execute_pyxcli_command, + connect_ssl, + spectrum_accelerate_spec, + is_pyxcli_installed, +) def main(): argument_spec = spectrum_accelerate_spec() argument_spec.update( dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), host=dict(required=True), cluster=dict(), domain=dict(), @@ -105,20 +109,17 @@ def main(): is_pyxcli_installed(module) xcli_client = connect_ssl(module) - host = xcli_client.cmd.host_list( - host=module.params['host']).as_single_element - state = module.params['state'] + host = xcli_client.cmd.host_list(host=module.params["host"]).as_single_element + state = module.params["state"] state_changed = False - if state == 'present' and not host: - state_changed = execute_pyxcli_command( - module, 'host_define', xcli_client) - elif state == 'absent' and host: - state_changed = execute_pyxcli_command( - module, 'host_delete', xcli_client) + if state == "present" and not host: + state_changed = execute_pyxcli_command(module, "host_define", xcli_client) + elif state == "absent" and host: + state_changed = execute_pyxcli_command(module, "host_delete", xcli_client) module.exit_json(changed=state_changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ibm_sa_host_ports.py b/plugins/modules/ibm_sa_host_ports.py index 4c5b2b2d048..ee47c7c7fba 100644 --- 
a/plugins/modules/ibm_sa_host_ports.py +++ b/plugins/modules/ibm_sa_host_ports.py @@ -79,19 +79,23 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, connect_ssl, - spectrum_accelerate_spec, is_pyxcli_installed) +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import ( + execute_pyxcli_command, + connect_ssl, + spectrum_accelerate_spec, + is_pyxcli_installed, +) def main(): argument_spec = spectrum_accelerate_spec() argument_spec.update( dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), host=dict(required=True), iscsi_name=dict(), fcaddress=dict(), - num_of_visible_targets=dict() + num_of_visible_targets=dict(), ) ) @@ -102,32 +106,27 @@ def main(): # required args ports = [] try: - ports = xcli_client.cmd.host_list_ports( - host=module.params.get('host')).as_list + ports = xcli_client.cmd.host_list_ports(host=module.params.get("host")).as_list except Exception: pass - state = module.params['state'] + state = module.params["state"] port_exists = False - ports = [port.get('port_name') for port in ports] + ports = [port.get("port_name") for port in ports] - fc_ports = (module.params.get('fcaddress') - if module.params.get('fcaddress') else []) - iscsi_ports = (module.params.get('iscsi_name') - if module.params.get('iscsi_name') else []) + fc_ports = module.params.get("fcaddress") if module.params.get("fcaddress") else [] + iscsi_ports = module.params.get("iscsi_name") if module.params.get("iscsi_name") else [] for port in ports: if port in iscsi_ports or port in fc_ports: port_exists = True break state_changed = False - if state == 'present' and not port_exists: - state_changed = execute_pyxcli_command( - module, 'host_add_port', xcli_client) - if state == 'absent' and port_exists: - state_changed = execute_pyxcli_command( - module, 'host_remove_port', xcli_client) + if state == "present" and not port_exists: + state_changed = execute_pyxcli_command(module, "host_add_port", xcli_client) + if state == "absent" and port_exists: + state_changed = execute_pyxcli_command(module, "host_remove_port", xcli_client) module.exit_json(changed=state_changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ibm_sa_pool.py b/plugins/modules/ibm_sa_pool.py index bb7102fa712..bf967b81a3e 100644 --- a/plugins/modules/ibm_sa_pool.py +++ b/plugins/modules/ibm_sa_pool.py @@ -83,20 +83,24 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ - connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import ( + execute_pyxcli_command, + connect_ssl, + spectrum_accelerate_spec, + is_pyxcli_installed, +) def main(): argument_spec = spectrum_accelerate_spec() argument_spec.update( dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), pool=dict(required=True), size=dict(), snapshot_size=dict(), domain=dict(), - perf_class=dict() + perf_class=dict(), ) ) @@ -105,20 +109,17 @@ def main(): is_pyxcli_installed(module) xcli_client = connect_ssl(module) - pool = xcli_client.cmd.pool_list( - pool=module.params['pool']).as_single_element - state = module.params['state'] + pool = 
xcli_client.cmd.pool_list(pool=module.params["pool"]).as_single_element + state = module.params["state"] state_changed = False - if state == 'present' and not pool: - state_changed = execute_pyxcli_command( - module, 'pool_create', xcli_client) - if state == 'absent' and pool: - state_changed = execute_pyxcli_command( - module, 'pool_delete', xcli_client) + if state == "present" and not pool: + state_changed = execute_pyxcli_command(module, "pool_create", xcli_client) + if state == "absent" and pool: + state_changed = execute_pyxcli_command(module, "pool_delete", xcli_client) module.exit_json(changed=state_changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ibm_sa_vol.py b/plugins/modules/ibm_sa_vol.py index 48450084e27..e6a3b32b553 100644 --- a/plugins/modules/ibm_sa_vol.py +++ b/plugins/modules/ibm_sa_vol.py @@ -74,18 +74,22 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ - connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import ( + execute_pyxcli_command, + connect_ssl, + spectrum_accelerate_spec, + is_pyxcli_installed, +) def main(): argument_spec = spectrum_accelerate_spec() argument_spec.update( dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), vol=dict(required=True), pool=dict(), - size=dict() + size=dict(), ) ) @@ -95,20 +99,17 @@ def main(): xcli_client = connect_ssl(module) # required args - volume = xcli_client.cmd.vol_list( - vol=module.params.get('vol')).as_single_element - state = module.params['state'] + volume = xcli_client.cmd.vol_list(vol=module.params.get("vol")).as_single_element + state = module.params["state"] state_changed = False - if state == 'present' and not volume: - state_changed = execute_pyxcli_command( - module, 'vol_create', xcli_client) - elif state == 'absent' and volume: - state_changed = execute_pyxcli_command( - module, 'vol_delete', xcli_client) + if state == "present" and not volume: + state_changed = execute_pyxcli_command(module, "vol_create", xcli_client) + elif state == "absent" and volume: + state_changed = execute_pyxcli_command(module, "vol_delete", xcli_client) module.exit_json(changed=state_changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ibm_sa_vol_map.py b/plugins/modules/ibm_sa_vol_map.py index 03c87ca37b5..74219f9ad60 100644 --- a/plugins/modules/ibm_sa_vol_map.py +++ b/plugins/modules/ibm_sa_vol_map.py @@ -95,20 +95,24 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, - connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed) +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import ( + execute_pyxcli_command, + connect_ssl, + spectrum_accelerate_spec, + is_pyxcli_installed, +) def main(): argument_spec = spectrum_accelerate_spec() argument_spec.update( dict( - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), vol=dict(required=True), lun=dict(), cluster=dict(), host=dict(), - override=dict() + override=dict(), ) ) @@ -119,24 +123,22 @@ def main(): # required args mapping = False try: - mapped_hosts = 
xcli_client.cmd.vol_mapping_list( - vol=module.params.get('vol')).as_list + mapped_hosts = xcli_client.cmd.vol_mapping_list(vol=module.params.get("vol")).as_list for host in mapped_hosts: - if host['host'] == module.params.get("host", ""): + if host["host"] == module.params.get("host", ""): mapping = True except Exception: pass - state = module.params['state'] + state = module.params["state"] state_changed = False - if state == 'present' and not mapping: - state_changed = execute_pyxcli_command(module, 'map_vol', xcli_client) - if state == 'absent' and mapping: - state_changed = execute_pyxcli_command( - module, 'unmap_vol', xcli_client) + if state == "present" and not mapping: + state_changed = execute_pyxcli_command(module, "map_vol", xcli_client) + if state == "absent" and mapping: + state_changed = execute_pyxcli_command(module, "unmap_vol", xcli_client) module.exit_json(changed=state_changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/icinga2_feature.py b/plugins/modules/icinga2_feature.py index 5e6abedd0ce..882e88f4507 100644 --- a/plugins/modules/icinga2_feature.py +++ b/plugins/modules/icinga2_feature.py @@ -65,24 +65,26 @@ class Icinga2FeatureHelper: def __init__(self, module): self.module = module - self._icinga2 = module.get_bin_path('icinga2', True) - self.feature_name = self.module.params['name'] - self.state = self.module.params['state'] + self._icinga2 = module.get_bin_path("icinga2", True) + self.feature_name = self.module.params["name"] + self.state = self.module.params["state"] def _exec(self, args): - cmd = [self._icinga2, 'feature'] + cmd = [self._icinga2, "feature"] rc, out, err = self.module.run_command(cmd + args, check_rc=True) return rc, out def manage(self): rc, out = self._exec(["list"]) if rc != 0: - self.module.fail_json(msg="Unable to list icinga2 features. " - "Ensure icinga2 is installed and present in binary path.") + self.module.fail_json( + msg="Unable to list icinga2 features. Ensure icinga2 is installed and present in binary path." + ) # If feature is already in good state, just exit - if (re.search(f"Disabled features:.* {self.feature_name}[ \n]", out) and self.state == "absent") or \ - (re.search(f"Enabled features:.* {self.feature_name}[ \n]", out) and self.state == "present"): + if (re.search(f"Disabled features:.* {self.feature_name}[ \n]", out) and self.state == "absent") or ( + re.search(f"Enabled features:.* {self.feature_name}[ \n]", out) and self.state == "present" + ): self.module.exit_json(changed=False) if self.module.check_mode: @@ -95,7 +97,9 @@ def manage(self): change_applied = False if self.state == "present": if rc != 0: - self.module.fail_json(msg=f"Failed to {feature_enable_str} feature {self.feature_name}. icinga2 command returned {out}") + self.module.fail_json( + msg=f"Failed to {feature_enable_str} feature {self.feature_name}. 
icinga2 command returned {out}" + ) if re.search("already enabled", out) is None: change_applied = True @@ -114,15 +118,15 @@ def manage(self): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', choices=["present", "absent"], default="present") + name=dict(type="str", required=True), + state=dict(type="str", choices=["present", "absent"], default="present"), ), - supports_check_mode=True + supports_check_mode=True, ) - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C") Icinga2FeatureHelper(module).manage() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/icinga2_host.py b/plugins/modules/icinga2_host.py index f5599ad86d8..9073341e0bf 100644 --- a/plugins/modules/icinga2_host.py +++ b/plugins/modules/icinga2_host.py @@ -149,75 +149,64 @@ class icinga2_api: def __init__(self, module): self.module = module - def call_url(self, path, data='', method='GET'): + def call_url(self, path, data="", method="GET"): headers = { - 'Accept': 'application/json', - 'X-HTTP-Method-Override': method, + "Accept": "application/json", + "X-HTTP-Method-Override": method, } url = f"{self.module.params.get('url')}/{path}" - rsp, info = fetch_url(module=self.module, url=url, data=data, headers=headers, method=method, use_proxy=self.module.params['use_proxy']) - body = '' + rsp, info = fetch_url( + module=self.module, + url=url, + data=data, + headers=headers, + method=method, + use_proxy=self.module.params["use_proxy"], + ) + body = "" if rsp: body = json.loads(rsp.read()) - if info['status'] >= 400: - body = info['body'] - return {'code': info['status'], 'data': body} + if info["status"] >= 400: + body = info["body"] + return {"code": info["status"], "data": body} def check_connection(self): - ret = self.call_url('v1/status') - if ret['code'] == 200: + ret = self.call_url("v1/status") + if ret["code"] == 200: return True return False def exists(self, hostname): data = { - "filter": f"match(\"{hostname}\", host.name)", + "filter": f'match("{hostname}", host.name)', } - ret = self.call_url( - path="v1/objects/hosts", - data=self.module.jsonify(data) - ) - if ret['code'] == 200: - if len(ret['data']['results']) == 1: + ret = self.call_url(path="v1/objects/hosts", data=self.module.jsonify(data)) + if ret["code"] == 200: + if len(ret["data"]["results"]) == 1: return True return False def create(self, hostname, data): - ret = self.call_url( - path=f"v1/objects/hosts/{hostname}", - data=self.module.jsonify(data), - method="PUT" - ) + ret = self.call_url(path=f"v1/objects/hosts/{hostname}", data=self.module.jsonify(data), method="PUT") return ret def delete(self, hostname): data = {"cascade": 1} - ret = self.call_url( - path=f"v1/objects/hosts/{hostname}", - data=self.module.jsonify(data), - method="DELETE" - ) + ret = self.call_url(path=f"v1/objects/hosts/{hostname}", data=self.module.jsonify(data), method="DELETE") return ret def modify(self, hostname, data): - ret = self.call_url( - path=f"v1/objects/hosts/{hostname}", - data=self.module.jsonify(data), - method="POST" - ) + ret = self.call_url(path=f"v1/objects/hosts/{hostname}", data=self.module.jsonify(data), method="POST") return ret def diff(self, hostname, data): - ret = self.call_url( - path=f"v1/objects/hosts/{hostname}", - method="GET" - ) + ret = self.call_url(path=f"v1/objects/hosts/{hostname}", method="GET") 
changed = False - ic_data = ret['data']['results'][0] - for key in data['attrs']: - if key not in ic_data['attrs'].keys(): + ic_data = ret["data"]["results"][0] + for key in data["attrs"]: + if key not in ic_data["attrs"].keys(): changed = True - elif data['attrs'][key] != ic_data['attrs'][key]: + elif data["attrs"][key] != ic_data["attrs"][key]: changed = True return changed @@ -231,20 +220,17 @@ def main(): # add our own arguments argument_spec.update( state=dict(default="present", choices=["absent", "present"]), - name=dict(required=True, aliases=['host']), + name=dict(required=True, aliases=["host"]), zone=dict(), template=dict(), check_command=dict(default="hostalive"), display_name=dict(), ip=dict(), - variables=dict(type='dict'), + variables=dict(type="dict"), ) # Define the main module - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) state = module.params["state"] name = module.params["name"] @@ -266,16 +252,16 @@ def main(): module.fail_json(msg=f"unable to connect to Icinga. Exception message: {e}") data = { - 'templates': template, - 'attrs': { - 'address': ip, - 'display_name': display_name, - 'check_command': check_command, - 'zone': zone, - 'vars.made_by': "ansible" - } + "templates": template, + "attrs": { + "address": ip, + "display_name": display_name, + "check_command": check_command, + "zone": zone, + "vars.made_by": "ansible", + }, } - data['attrs'].update({f"vars.{key}": value for key, value in variables.items()}) + data["attrs"].update({f"vars.{key}": value for key, value in variables.items()}) changed = False if icinga.exists(name): @@ -285,7 +271,7 @@ def main(): else: try: ret = icinga.delete(name) - if ret['code'] == 200: + if ret["code"] == 200: changed = True else: module.fail_json(msg=f"bad return code ({ret['code']}) deleting host: '{ret['data']}'") @@ -297,11 +283,11 @@ def main(): module.exit_json(changed=False, name=name, data=data) # Template attribute is not allowed in modification - del data['templates'] + del data["templates"] ret = icinga.modify(name, data) - if ret['code'] == 200: + if ret["code"] == 200: changed = True else: module.fail_json(msg=f"bad return code ({ret['code']}) modifying host: '{ret['data']}'") @@ -313,7 +299,7 @@ def main(): else: try: ret = icinga.create(name, data) - if ret['code'] == 200: + if ret["code"] == 200: changed = True else: module.fail_json(msg=f"bad return code ({ret['code']}) creating host: '{ret['data']}'") @@ -324,5 +310,5 @@ def main(): # import module snippets -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/idrac_redfish_command.py b/plugins/modules/idrac_redfish_command.py index 1c569e1281c..dc022400aef 100644 --- a/plugins/modules/idrac_redfish_command.py +++ b/plugins/modules/idrac_redfish_command.py @@ -98,12 +98,14 @@ import re from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC +from ansible_collections.community.general.plugins.module_utils.redfish_utils import ( + RedfishUtils, + REDFISH_COMMON_ARGUMENT_SPEC, +) from ansible.module_utils.common.text.converters import to_native class IdracRedfishUtils(RedfishUtils): - def create_bios_config_job(self): result = {} key = "Bios" @@ -111,42 +113,36 @@ def create_bios_config_job(self): # Search for 'key' entry and extract URI from it response = 
self.get_request(self.root_uri + self.systems_uris[0]) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] if key not in data: - return {'ret': False, 'msg': f"Key {key} not found"} + return {"ret": False, "msg": f"Key {key} not found"} bios_uri = data[key]["@odata.id"] # Extract proper URI response = self.get_request(self.root_uri + bios_uri) - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] - set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][ - "@odata.id"] + result["ret"] = True + data = response["data"] + set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"]["@odata.id"] payload = {"TargetSettingsURI": set_bios_attr_uri} - response = self.post_request( - f"{self.root_uri}{self.manager_uri}/{jobs}", payload) - if response['ret'] is False: + response = self.post_request(f"{self.root_uri}{self.manager_uri}/{jobs}", payload) + if response["ret"] is False: return response - response_output = response['resp'].__dict__ + response_output = response["resp"].__dict__ job_id_full = response_output["headers"]["Location"] job_id = re.search("JID_.+", job_id_full).group() - return {'ret': True, 'msg': f"Config job {job_id} created", 'job_id': job_id_full} + return {"ret": True, "msg": f"Config job {job_id} created", "job_id": job_id_full} -CATEGORY_COMMANDS_ALL = { - "Systems": ["CreateBiosConfigJob"], - "Accounts": [], - "Manager": [] -} +CATEGORY_COMMANDS_ALL = {"Systems": ["CreateBiosConfigJob"], "Accounts": [], "Manager": []} def main(): @@ -154,57 +150,58 @@ def main(): return_values = {} argument_spec = dict( category=dict(required=True), - command=dict(required=True, type='list', elements='str'), + command=dict(required=True, type="list", elements="str"), baseuri=dict(required=True), username=dict(), password=dict(no_log=True), auth_token=dict(no_log=True), - timeout=dict(type='int', default=10), - resource_id=dict() + timeout=dict(type="int", default=10), + resource_id=dict(), ) argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( argument_spec, required_together=[ - ('username', 'password'), + ("username", "password"), ], required_one_of=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], mutually_exclusive=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], - supports_check_mode=False + supports_check_mode=False, ) - category = module.params['category'] - command_list = module.params['command'] + category = module.params["category"] + command_list = module.params["command"] # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} + creds = {"user": module.params["username"], "pswd": module.params["password"], "token": module.params["auth_token"]} # timeout - timeout = module.params['timeout'] + timeout = module.params["timeout"] # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] + resource_id = module.params["resource_id"] # Build root URI root_uri = f"https://{module.params['baseuri']}" - rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True) + rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module, resource_id=resource_id, data_modification=True) # Check that Category is valid if category not in 
CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}")) + module.fail_json( + msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}") + ) # Check that all commands are valid for cmd in command_list: # Fail if even one command given is invalid if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}")) + module.fail_json( + msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}") + ) # Organize by Categories / Commands @@ -224,26 +221,26 @@ def main(): rf_utils.data_modification = False result = rf_utils._find_systems_resource() rf_utils.data_modification = True - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) for command in command_list: if command == "CreateBiosConfigJob": # execute only if we find a Managers resource result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) result = rf_utils.create_bios_config_job() - if 'job_id' in result: - return_values['job_id'] = result['job_id'] + if "job_id" in result: + return_values["job_id"] = result["job_id"] # Return data back or fail with proper message - if result['ret'] is True: - del result['ret'] - module.exit_json(changed=True, msg='Action was successful', return_values=return_values) + if result["ret"] is True: + del result["ret"] + module.exit_json(changed=True, msg="Action was successful", return_values=return_values) else: - module.fail_json(msg=to_native(result['msg'])) + module.fail_json(msg=to_native(result["msg"])) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/idrac_redfish_config.py b/plugins/modules/idrac_redfish_config.py index 7eec4d0ce66..16d2584603f 100644 --- a/plugins/modules/idrac_redfish_config.py +++ b/plugins/modules/idrac_redfish_config.py @@ -155,20 +155,18 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.validation import ( - check_mutually_exclusive, - check_required_arguments +from ansible.module_utils.common.validation import check_mutually_exclusive, check_required_arguments +from ansible_collections.community.general.plugins.module_utils.redfish_utils import ( + RedfishUtils, + REDFISH_COMMON_ARGUMENT_SPEC, ) -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.common.text.converters import to_native class IdracRedfishUtils(RedfishUtils): - def set_manager_attributes(self, command): - result = {} - required_arg_spec = {'manager_attributes': {'required': True}} + required_arg_spec = {"manager_attributes": {"required": True}} try: check_required_arguments(required_arg_spec, self.module.params) @@ -181,11 +179,11 @@ def set_manager_attributes(self, command): command_manager_attributes_uri_map = { "SetManagerAttributes": self.manager_uri, "SetLifecycleControllerAttributes": "/redfish/v1/Managers/LifecycleController.Embedded.1", - "SetSystemAttributes": "/redfish/v1/Managers/System.Embedded.1" + "SetSystemAttributes": "/redfish/v1/Managers/System.Embedded.1", } manager_uri = 
command_manager_attributes_uri_map.get(command, self.manager_uri) - attributes = self.module.params['manager_attributes'] + attributes = self.module.params["manager_attributes"] attrs_to_patch = {} attrs_skipped = {} @@ -193,26 +191,24 @@ def set_manager_attributes(self, command): # Search for key entry and extract URI from it response = self.get_request(f"{self.root_uri}{manager_uri}/{key}") - if response['ret'] is False: + if response["ret"] is False: return response - result['ret'] = True - data = response['data'] + result["ret"] = True + data = response["data"] if key not in data: - return {'ret': False, - 'msg': f"{command}: Key {key} not found", - 'warning': ""} + return {"ret": False, "msg": f"{command}: Key {key} not found", "warning": ""} for attr_name, attr_value in attributes.items(): # Check if attribute exists - if attr_name not in data['Attributes']: + if attr_name not in data["Attributes"]: # Skip and proceed to next attribute if this isn't valid attrs_bad.update({attr_name: attr_value}) continue # Find out if value is already set to what we want. If yes, exclude # those attributes - if data['Attributes'][attr_name] == attr_value: + if data["Attributes"][attr_name] == attr_value: attrs_skipped.update({attr_name: attr_value}) else: attrs_to_patch.update({attr_name: attr_value}) @@ -222,30 +218,32 @@ def set_manager_attributes(self, command): warning = f"Incorrect attributes {attrs_bad}" if not attrs_to_patch: - return {'ret': True, 'changed': False, - 'msg': "No changes made. Manager attributes already set.", - 'warning': warning} + return { + "ret": True, + "changed": False, + "msg": "No changes made. Manager attributes already set.", + "warning": warning, + } payload = {"Attributes": attrs_to_patch} response = self.patch_request(f"{self.root_uri}{manager_uri}/{key}", payload) - if response['ret'] is False: + if response["ret"] is False: return response - return {'ret': True, 'changed': True, - 'msg': f"{command}: Modified Manager attributes {attrs_to_patch}", - 'warning': warning} + return { + "ret": True, + "changed": True, + "msg": f"{command}: Modified Manager attributes {attrs_to_patch}", + "warning": warning, + } -CATEGORY_COMMANDS_ALL = { - "Manager": ["SetManagerAttributes", "SetLifecycleControllerAttributes", - "SetSystemAttributes"] -} +CATEGORY_COMMANDS_ALL = {"Manager": ["SetManagerAttributes", "SetLifecycleControllerAttributes", "SetSystemAttributes"]} # list of mutually exclusive commands for a category CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = { - "Manager": [["SetManagerAttributes", "SetLifecycleControllerAttributes", - "SetSystemAttributes"]] + "Manager": [["SetManagerAttributes", "SetLifecycleControllerAttributes", "SetSystemAttributes"]] } @@ -253,66 +251,66 @@ def main(): result = {} argument_spec = dict( category=dict(required=True), - command=dict(required=True, type='list', elements='str'), + command=dict(required=True, type="list", elements="str"), baseuri=dict(required=True), username=dict(), password=dict(no_log=True), auth_token=dict(no_log=True), - manager_attributes=dict(type='dict', default={}), - timeout=dict(type='int', default=10), - resource_id=dict() + manager_attributes=dict(type="dict", default={}), + timeout=dict(type="int", default=10), + resource_id=dict(), ) argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( argument_spec, required_together=[ - ('username', 'password'), + ("username", "password"), ], required_one_of=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], mutually_exclusive=[ - 
('username', 'auth_token'), + ("username", "auth_token"), ], - supports_check_mode=False + supports_check_mode=False, ) - category = module.params['category'] - command_list = module.params['command'] + category = module.params["category"] + command_list = module.params["command"] # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} + creds = {"user": module.params["username"], "pswd": module.params["password"], "token": module.params["auth_token"]} # timeout - timeout = module.params['timeout'] + timeout = module.params["timeout"] # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] + resource_id = module.params["resource_id"] # Build root URI root_uri = f"https://{module.params['baseuri']}" - rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True) + rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module, resource_id=resource_id, data_modification=True) # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}")) + module.fail_json( + msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}") + ) # Check that all commands are valid for cmd in command_list: # Fail if even one command given is invalid if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}")) + module.fail_json( + msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}") + ) # check for mutually exclusive commands try: # check_mutually_exclusive accepts a single list or list of lists that # are groups of terms that should be mutually exclusive with one another # and checks that against a dictionary - check_mutually_exclusive(CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE[category], - dict.fromkeys(command_list, True)) + check_mutually_exclusive(CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE[category], dict.fromkeys(command_list, True)) except TypeError as e: module.fail_json(msg=to_native(e)) @@ -322,22 +320,22 @@ def main(): if category == "Manager": # execute only if we find a Manager resource result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) for command in command_list: if command in ["SetManagerAttributes", "SetLifecycleControllerAttributes", "SetSystemAttributes"]: result = rf_utils.set_manager_attributes(command) # Return data back or fail with proper message - if result['ret'] is True: - if result.get('warning'): - module.warn(to_native(result['warning'])) + if result["ret"] is True: + if result.get("warning"): + module.warn(to_native(result["warning"])) - module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) + module.exit_json(changed=result["changed"], msg=to_native(result["msg"])) else: - module.fail_json(msg=to_native(result['msg'])) + module.fail_json(msg=to_native(result["msg"])) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/idrac_redfish_info.py b/plugins/modules/idrac_redfish_info.py index 6e589255c7c..6aaf9b01dd2 100644 --- a/plugins/modules/idrac_redfish_info.py +++ 
b/plugins/modules/idrac_redfish_info.py @@ -129,33 +129,35 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC +from ansible_collections.community.general.plugins.module_utils.redfish_utils import ( + RedfishUtils, + REDFISH_COMMON_ARGUMENT_SPEC, +) from ansible.module_utils.common.text.converters import to_native class IdracRedfishUtils(RedfishUtils): - def get_manager_attributes(self): result = {} manager_attributes = [] - properties = ['Attributes', 'Id'] + properties = ["Attributes", "Id"] response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] # Manager attributes are supported as part of iDRAC OEM extension # Attributes are supported only on iDRAC9 try: - for members in data['Links']['Oem']['Dell']['DellAttributes']: - attributes_uri = members['@odata.id'] + for members in data["Links"]["Oem"]["Dell"]["DellAttributes"]: + attributes_uri = members["@odata.id"] response = self.get_request(self.root_uri + attributes_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] attributes = {} for prop in properties: @@ -165,57 +167,53 @@ def get_manager_attributes(self): if attributes: manager_attributes.append(attributes) - result['ret'] = True + result["ret"] = True except (AttributeError, KeyError) as e: - result['ret'] = False - result['msg'] = f"Failed to find attribute/key: {e}" + result["ret"] = False + result["msg"] = f"Failed to find attribute/key: {e}" result["entries"] = manager_attributes return result -CATEGORY_COMMANDS_ALL = { - "Manager": ["GetManagerAttributes"] -} +CATEGORY_COMMANDS_ALL = {"Manager": ["GetManagerAttributes"]} def main(): result = {} argument_spec = dict( category=dict(required=True), - command=dict(required=True, type='list', elements='str'), + command=dict(required=True, type="list", elements="str"), baseuri=dict(required=True), username=dict(), password=dict(no_log=True), auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) + timeout=dict(type="int", default=10), ) argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( argument_spec, required_together=[ - ('username', 'password'), + ("username", "password"), ], required_one_of=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], mutually_exclusive=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], supports_check_mode=True, ) - category = module.params['category'] - command_list = module.params['command'] + category = module.params["category"] + command_list = module.params["command"] # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} + creds = {"user": module.params["username"], "pswd": module.params["password"], "token": module.params["auth_token"]} # timeout - timeout = module.params['timeout'] + timeout = module.params["timeout"] # Build root URI root_uri = f"https://{module.params['baseuri']}" @@ -223,33 +221,37 @@ def main(): # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native(f"Invalid Category '{category}'. 
Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}")) + module.fail_json( + msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}") + ) # Check that all commands are valid for cmd in command_list: # Fail if even one command given is invalid if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}")) + module.fail_json( + msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}") + ) # Organize by Categories / Commands if category == "Manager": # execute only if we find a Manager resource result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) for command in command_list: if command == "GetManagerAttributes": result = rf_utils.get_manager_attributes() # Return data back or fail with proper message - if result['ret'] is True: - del result['ret'] + if result["ret"] is True: + del result["ret"] module.exit_json(redfish_facts=result) else: - module.fail_json(msg=to_native(result['msg'])) + module.fail_json(msg=to_native(result["msg"])) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ilo_redfish_command.py b/plugins/modules/ilo_redfish_command.py index 1792a1aa8ff..03b503847b3 100644 --- a/plugins/modules/ilo_redfish_command.py +++ b/plugins/modules/ilo_redfish_command.py @@ -96,9 +96,7 @@ """ # More will be added as module features are expanded -CATEGORY_COMMANDS_ALL = { - "Systems": ["WaitforiLORebootCompletion"] -} +CATEGORY_COMMANDS_ALL = {"Systems": ["WaitforiLORebootCompletion"]} from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC @@ -110,37 +108,35 @@ def main(): result = {} argument_spec = dict( category=dict(required=True, choices=list(CATEGORY_COMMANDS_ALL.keys())), - command=dict(required=True, type='list', elements='str'), + command=dict(required=True, type="list", elements="str"), baseuri=dict(required=True), timeout=dict(type="int", default=60), username=dict(), password=dict(no_log=True), - auth_token=dict(no_log=True) + auth_token=dict(no_log=True), ) argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( argument_spec, required_together=[ - ('username', 'password'), + ("username", "password"), ], required_one_of=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], mutually_exclusive=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], - supports_check_mode=False + supports_check_mode=False, ) - category = module.params['category'] - command_list = module.params['command'] + category = module.params["category"] + command_list = module.params["command"] # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} + creds = {"user": module.params["username"], "pswd": module.params["password"], "token": module.params["auth_token"]} - timeout = module.params['timeout'] + timeout = module.params["timeout"] # Build root URI root_uri = f"https://{module.params['baseuri']}" @@ -148,34 +144,36 @@ def main(): # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: - 
module.fail_json(msg=to_native( - f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}")) + module.fail_json( + msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}") + ) # Check that all commands are valid for cmd in command_list: # Fail if even one command given is invalid if cmd not in CATEGORY_COMMANDS_ALL[category]: module.fail_json( - msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}")) + msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}") + ) if category == "Systems": # execute only if we find a System resource result = rf_utils._find_systems_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) for command in command_list: if command == "WaitforiLORebootCompletion": result[command] = rf_utils.wait_for_ilo_reboot_completion() # Return data back or fail with proper message - if not result[command]['ret']: + if not result[command]["ret"]: module.fail_json(msg=result) - changed = result[command].get('changed', False) + changed = result[command].get("changed", False) module.exit_json(ilo_redfish_command=result, changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ilo_redfish_config.py b/plugins/modules/ilo_redfish_config.py index ed61c7ffd68..55475146139 100644 --- a/plugins/modules/ilo_redfish_config.py +++ b/plugins/modules/ilo_redfish_config.py @@ -113,9 +113,7 @@ sample: "Action was successful" """ -CATEGORY_COMMANDS_ALL = { - "Manager": ["SetTimeZone", "SetDNSserver", "SetDomainName", "SetNTPServers", "SetWINSReg"] -} +CATEGORY_COMMANDS_ALL = {"Manager": ["SetTimeZone", "SetDNSserver", "SetDomainName", "SetNTPServers", "SetWINSReg"]} from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC @@ -126,73 +124,73 @@ def main(): result = {} argument_spec = dict( - category=dict(required=True, choices=list( - CATEGORY_COMMANDS_ALL.keys())), - command=dict(required=True, type='list', elements='str'), + category=dict(required=True, choices=list(CATEGORY_COMMANDS_ALL.keys())), + command=dict(required=True, type="list", elements="str"), baseuri=dict(required=True), username=dict(), password=dict(no_log=True), auth_token=dict(no_log=True), attribute_name=dict(required=True), - attribute_value=dict(type='str'), - timeout=dict(type='int', default=10) + attribute_value=dict(type="str"), + timeout=dict(type="int", default=10), ) argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( argument_spec, required_together=[ - ('username', 'password'), + ("username", "password"), ], required_one_of=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], mutually_exclusive=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], - supports_check_mode=False + supports_check_mode=False, ) - category = module.params['category'] - command_list = module.params['command'] + category = module.params["category"] + command_list = module.params["command"] - creds = {"user": module.params['username'], - "pswd": module.params['password'], - "token": module.params['auth_token']} + creds = {"user": module.params["username"], "pswd": module.params["password"], "token": 
module.params["auth_token"]} - timeout = module.params['timeout'] + timeout = module.params["timeout"] root_uri = f"https://{module.params['baseuri']}" rf_utils = iLORedfishUtils(creds, root_uri, timeout, module) - mgr_attributes = {'mgr_attr_name': module.params['attribute_name'], - 'mgr_attr_value': module.params['attribute_value']} + mgr_attributes = { + "mgr_attr_name": module.params["attribute_name"], + "mgr_attr_value": module.params["attribute_value"], + } changed = False - offending = [ - cmd for cmd in command_list if cmd not in CATEGORY_COMMANDS_ALL[category]] + offending = [cmd for cmd in command_list if cmd not in CATEGORY_COMMANDS_ALL[category]] if offending: - module.fail_json(msg=to_native(f"Invalid Command(s): '{offending}'. Allowed Commands = {CATEGORY_COMMANDS_ALL[category]}")) + module.fail_json( + msg=to_native(f"Invalid Command(s): '{offending}'. Allowed Commands = {CATEGORY_COMMANDS_ALL[category]}") + ) if category == "Manager": resource = rf_utils._find_managers_resource() - if not resource['ret']: - module.fail_json(msg=to_native(resource['msg'])) + if not resource["ret"]: + module.fail_json(msg=to_native(resource["msg"])) dispatch = dict( SetTimeZone=rf_utils.set_time_zone, SetDNSserver=rf_utils.set_dns_server, SetDomainName=rf_utils.set_domain_name, SetNTPServers=rf_utils.set_ntp_server, - SetWINSReg=rf_utils.set_wins_registration + SetWINSReg=rf_utils.set_wins_registration, ) for command in command_list: result[command] = dispatch[command](mgr_attributes) - if 'changed' in result[command]: - changed |= result[command]['changed'] + if "changed" in result[command]: + changed |= result[command]["changed"] module.exit_json(ilo_redfish_config=result, changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ilo_redfish_info.py b/plugins/modules/ilo_redfish_info.py index 9fa7c368539..55cabc3e21b 100644 --- a/plugins/modules/ilo_redfish_info.py +++ b/plugins/modules/ilo_redfish_info.py @@ -103,13 +103,9 @@ returned: always """ -CATEGORY_COMMANDS_ALL = { - "Sessions": ["GetiLOSessions"] -} +CATEGORY_COMMANDS_ALL = {"Sessions": ["GetiLOSessions"]} -CATEGORY_COMMANDS_DEFAULT = { - "Sessions": "GetiLOSessions" -} +CATEGORY_COMMANDS_DEFAULT = {"Sessions": "GetiLOSessions"} from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils @@ -120,59 +116,57 @@ def main(): result = {} category_list = [] argument_spec = dict( - category=dict(required=True, type='list', elements='str'), - command=dict(required=True, type='list', elements='str'), + category=dict(required=True, type="list", elements="str"), + command=dict(required=True, type="list", elements="str"), baseuri=dict(required=True), username=dict(), password=dict(no_log=True), auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) + timeout=dict(type="int", default=10), ) argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( argument_spec, required_together=[ - ('username', 'password'), + ("username", "password"), ], required_one_of=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], mutually_exclusive=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], - supports_check_mode=True + supports_check_mode=True, ) - creds = {"user": module.params['username'], - "pswd": module.params['password'], - "token": module.params['auth_token']} + creds = {"user": module.params["username"], "pswd": module.params["password"], "token": 
module.params["auth_token"]} - timeout = module.params['timeout'] + timeout = module.params["timeout"] root_uri = f"https://{module.params['baseuri']}" rf_utils = iLORedfishUtils(creds, root_uri, timeout, module) # Build Category list - if "all" in module.params['category']: + if "all" in module.params["category"]: for entry in CATEGORY_COMMANDS_ALL: category_list.append(entry) else: # one or more categories specified - category_list = module.params['category'] + category_list = module.params["category"] for category in category_list: command_list = [] # Build Command list for each Category if category in CATEGORY_COMMANDS_ALL: - if not module.params['command']: + if not module.params["command"]: # True if we don't specify a command --> use default command_list.append(CATEGORY_COMMANDS_DEFAULT[category]) - elif "all" in module.params['command']: + elif "all" in module.params["command"]: for entry in CATEGORY_COMMANDS_ALL[category]: command_list.append(entry) # one or more commands else: - command_list = module.params['command'] + command_list = module.params["command"] # Verify that all commands are valid for cmd in command_list: # Fail if even one command given is invalid @@ -191,5 +185,5 @@ def main(): module.exit_json(ilo_redfish_info=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/imc_rest.py b/plugins/modules/imc_rest.py index bcef41da3fa..f389dd4537f 100644 --- a/plugins/modules/imc_rest.py +++ b/plugins/modules/imc_rest.py @@ -271,6 +271,7 @@ LXML_ETREE_IMP_ERR = None try: import lxml.etree + HAS_LXML_ETREE = True except ImportError: LXML_ETREE_IMP_ERR = traceback.format_exc() @@ -279,6 +280,7 @@ XMLJSON_COBRA_IMP_ERR = None try: from xmljson import cobra + HAS_XMLJSON_COBRA = True except ImportError: XMLJSON_COBRA_IMP_ERR = traceback.format_exc() @@ -292,31 +294,31 @@ ) -def imc_response(module, rawoutput, rawinput=''): - ''' Handle IMC returned data ''' +def imc_response(module, rawoutput, rawinput=""): + """Handle IMC returned data""" xmloutput = lxml.etree.fromstring(rawoutput) result = cobra.data(xmloutput) # Handle errors - if xmloutput.get('errorCode') and xmloutput.get('errorDescr'): + if xmloutput.get("errorCode") and xmloutput.get("errorDescr"): if rawinput: - result['input'] = rawinput - result['output'] = rawoutput - result['error_code'] = xmloutput.get('errorCode') - result['error_text'] = xmloutput.get('errorDescr') + result["input"] = rawinput + result["output"] = rawoutput + result["error_code"] = xmloutput.get("errorCode") + result["error_text"] = xmloutput.get("errorDescr") module.fail_json(msg=f"Request failed: {result['error_text']}", **result) return result def logout(module, url, cookie, timeout): - ''' Perform a logout, if needed ''' + """Perform a logout, if needed""" data = f'' resp, auth = fetch_url(module, url, data=data, method="POST", timeout=timeout) def merge(one, two): - ''' Merge two complex nested datastructures into one''' + """Merge two complex nested datastructures into one""" if isinstance(one, dict) and isinstance(two, dict): copy = dict(one) copy.update({key: merge(one.get(key, None), two[key]) for key in two}) @@ -331,34 +333,34 @@ def merge(one, two): def main(): module = AnsibleModule( argument_spec=dict( - hostname=dict(type='str', required=True, aliases=['host', 'ip']), - username=dict(type='str', default='admin', aliases=['user']), - password=dict(type='str', default='password', no_log=True), - content=dict(type='str'), - path=dict(type='path', aliases=['config_file', 'src']), - 
protocol=dict(type='str', default='https', choices=['http', 'https']), - timeout=dict(type='int', default=60), - validate_certs=dict(type='bool', default=True), + hostname=dict(type="str", required=True, aliases=["host", "ip"]), + username=dict(type="str", default="admin", aliases=["user"]), + password=dict(type="str", default="password", no_log=True), + content=dict(type="str"), + path=dict(type="path", aliases=["config_file", "src"]), + protocol=dict(type="str", default="https", choices=["http", "https"]), + timeout=dict(type="int", default=60), + validate_certs=dict(type="bool", default=True), ), supports_check_mode=True, - mutually_exclusive=[['content', 'path']], + mutually_exclusive=[["content", "path"]], ) if not HAS_LXML_ETREE: - module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR) + module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_ETREE_IMP_ERR) if not HAS_XMLJSON_COBRA: - module.fail_json(msg=missing_required_lib('xmljson >= 0.1.8'), exception=XMLJSON_COBRA_IMP_ERR) + module.fail_json(msg=missing_required_lib("xmljson >= 0.1.8"), exception=XMLJSON_COBRA_IMP_ERR) - hostname = module.params['hostname'] - username = module.params['username'] - password = module.params['password'] + hostname = module.params["hostname"] + username = module.params["username"] + password = module.params["password"] - content = module.params['content'] - path = module.params['path'] + content = module.params["content"] + path = module.params["path"] - protocol = module.params['protocol'] - timeout = module.params['timeout'] + protocol = module.params["protocol"] + timeout = module.params["timeout"] result = dict( failed=False, @@ -371,69 +373,69 @@ def main(): if os.path.isfile(path): file_exists = True else: - module.fail_json(msg=f'Cannot find/access path:\n{path}') + module.fail_json(msg=f"Cannot find/access path:\n{path}") start = now() # Perform login first - url = f'{protocol}://{hostname}/nuova' + url = f"{protocol}://{hostname}/nuova" data = f'<aaaLogin inName="{username}" inPassword="{password}"/>' - resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout) - if resp is None or auth['status'] != 200: - result['elapsed'] = (now() - start).seconds + resp, auth = fetch_url(module, url, data=data, method="POST", timeout=timeout) + if resp is None or auth["status"] != 200: + result["elapsed"] = (now() - start).seconds module.fail_json(msg=f"Task failed with error {auth['status']}: {auth['msg']}", **result) result.update(imc_response(module, resp.read())) # Store cookie for future requests - cookie = '' + cookie = "" try: - cookie = result['aaaLogin']['attributes']['outCookie'] + cookie = result["aaaLogin"]["attributes"]["outCookie"] except Exception: - module.fail_json(msg='Could not find cookie in output', **result) + module.fail_json(msg="Could not find cookie in output", **result) try: # Prepare request data if content: rawdata = content elif file_exists: - with open(path, 'r') as config_object: + with open(path, "r") as config_object: rawdata = config_object.read() # Wrap the XML documents in a <root> element - xmldata = lxml.etree.fromstring('<root>%s</root>' % rawdata.replace('\n', '')) + xmldata = lxml.etree.fromstring("<root>%s</root>" % rawdata.replace("\n", "")) # Handle each XML document separately in the same session for xmldoc in list(xmldata): if xmldoc.tag is lxml.etree.Comment: continue # Add cookie to XML - xmldoc.set('cookie', cookie) + xmldoc.set("cookie", cookie) data = lxml.etree.tostring(xmldoc) # Perform actual request - resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout)
- if resp is None or info['status'] != 200: - result['elapsed'] = (now() - start).seconds + resp, info = fetch_url(module, url, data=data, method="POST", timeout=timeout) + if resp is None or info["status"] != 200: + result["elapsed"] = (now() - start).seconds module.fail_json(msg=f"Task failed with error {info['status']}: {info['msg']}", **result) # Merge results with previous results rawoutput = resp.read() result = merge(result, imc_response(module, rawoutput, rawinput=data)) - result['response'] = info['msg'] - result['status'] = info['status'] + result["response"] = info["msg"] + result["status"] = info["status"] # Check for any changes # NOTE: Unfortunately IMC API always report status as 'modified' xmloutput = lxml.etree.fromstring(rawoutput) - results = xmloutput.xpath('/configConfMo/outConfig/*/@status') - result['changed'] = ('modified' in results) + results = xmloutput.xpath("/configConfMo/outConfig/*/@status") + result["changed"] = "modified" in results # Report success - result['elapsed'] = (now() - start).seconds + result["elapsed"] = (now() - start).seconds module.exit_json(**result) finally: logout(module, url, cookie, timeout) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/imgadm.py b/plugins/modules/imgadm.py index b3a2ba0c7e1..c01c226d1a1 100644 --- a/plugins/modules/imgadm.py +++ b/plugins/modules/imgadm.py @@ -130,40 +130,40 @@ class Imgadm: def __init__(self, module): self.module = module self.params = module.params - self.cmd = module.get_bin_path('imgadm', required=True) + self.cmd = module.get_bin_path("imgadm", required=True) self.changed = False - self.uuid = module.params['uuid'] + self.uuid = module.params["uuid"] # Since there are a number of (natural) aliases, prevent having to look # them up every time we operate on `state`. - if self.params['state'] in ['present', 'imported', 'updated']: + if self.params["state"] in ["present", "imported", "updated"]: self.present = True else: self.present = False # Perform basic UUID validation upfront. - if self.uuid and self.uuid != '*': - if not re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', self.uuid, re.IGNORECASE): - module.fail_json(msg='Provided value for uuid option is not a valid UUID.') + if self.uuid and self.uuid != "*": + if not re.match("^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$", self.uuid, re.IGNORECASE): + module.fail_json(msg="Provided value for uuid option is not a valid UUID.") # Helper method to massage stderr def errmsg(self, stderr): - match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr) + match = re.match(r"^imgadm .*?: error \(\w+\): (.*): .*", stderr) if match: return match.groups()[0] else: - return 'Unexpected failure' + return "Unexpected failure" def update_images(self): - if self.uuid == '*': - cmd = [self.cmd, 'update'] + if self.uuid == "*": + cmd = [self.cmd, "update"] else: - cmd = [self.cmd, 'update', self.uuid] + cmd = [self.cmd, "update", self.uuid] (rc, stdout, stderr) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Failed to update images: {self.errmsg(stderr)}') + self.module.fail_json(msg=f"Failed to update images: {self.errmsg(stderr)}") # There is no feedback from imgadm(1M) to determine if anything # was actually changed. So treat this as an 'always-changes' operation. 
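# --- Illustrative sketch, not part of the patch: how the errmsg() helper above
# --- distills imgadm(1M) stderr into a short message. The sample stderr string
# --- below is a hypothetical example of the 'imgadm ...: error (Code): reason:
# --- detail' shape the regex expects.
import re

def errmsg(stderr):
    match = re.match(r"^imgadm .*?: error \(\w+\): (.*): .*", stderr)
    return match.groups()[0] if match else "Unexpected failure"

assert errmsg("imgadm import: error (ImageNotFound): no such image: lookup failed") == "no such image"
assert errmsg("garbage output") == "Unexpected failure"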
@@ -171,21 +171,21 @@ def update_images(self): self.changed = True def manage_sources(self): - force = self.params['force'] - source = self.params['source'] - imgtype = self.params['type'] + force = self.params["force"] + source = self.params["source"] + imgtype = self.params["type"] - cmd = [self.cmd, 'sources'] + cmd = [self.cmd, "sources"] if force: - cmd = cmd + ['-f'] + cmd = cmd + ["-f"] if self.present: - cmd = cmd + ['-a', source, '-t', imgtype] + cmd = cmd + ["-a", source, "-t", imgtype] (rc, stdout, stderr) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Failed to add source: {self.errmsg(stderr)}') + self.module.fail_json(msg=f"Failed to add source: {self.errmsg(stderr)}") # Check the various responses. # Note that trying to add a source with the wrong type is handled @@ -200,11 +200,11 @@ def manage_sources(self): self.changed = True else: # Type is ignored by imgadm(1M) here - cmd += f' -d {source}' + cmd += ["-d", source] (rc, stdout, stderr) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Failed to remove source: {self.errmsg(stderr)}') + self.module.fail_json(msg=f"Failed to remove source: {self.errmsg(stderr)}") regex = f'Do not have image source "{source}", no change' if re.match(regex, stdout): @@ -215,51 +215,51 @@ def manage_sources(self): self.changed = True def manage_images(self): - pool = self.params['pool'] - state = self.params['state'] + pool = self.params["pool"] + state = self.params["state"] - if state == 'vacuumed': + if state == "vacuumed": # Unconditionally pass '--force', otherwise we're prompted with 'y/N' - cmd = [self.cmd, 'vacuum', '-f'] + cmd = [self.cmd, "vacuum", "-f"] (rc, stdout, stderr) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Failed to vacuum images: {self.errmsg(stderr)}') + self.module.fail_json(msg=f"Failed to vacuum images: {self.errmsg(stderr)}") else: - if stdout == '': + if stdout == "": self.changed = False else: self.changed = True if self.present: - cmd = [self.cmd, 'import', '-P', pool, '-q'] + ([self.uuid] if self.uuid else []) + cmd = [self.cmd, "import", "-P", pool, "-q"] + ([self.uuid] if self.uuid else []) (rc, stdout, stderr) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Failed to import image: {self.errmsg(stderr)}') + self.module.fail_json(msg=f"Failed to import image: {self.errmsg(stderr)}") - regex = rf'Image {self.uuid} \(.*\) is already installed, skipping' + regex = rf"Image {self.uuid} \(.*\) is already installed, skipping" if re.match(regex, stdout): self.changed = False - regex = '.*ActiveImageNotFound.*' + regex = ".*ActiveImageNotFound.*" if re.match(regex, stderr): self.changed = False - regex = f'Imported image {self.uuid}.*' + regex = f"Imported image {self.uuid}.*" if re.match(regex, stdout.splitlines()[-1]): self.changed = True else: - cmd = [self.cmd, 'delete', '-P', pool] + ([self.uuid] if self.uuid else []) + cmd = [self.cmd, "delete", "-P", pool] + ([self.uuid] if self.uuid else []) (rc, stdout, stderr) = self.module.run_command(cmd) - regex = '.*ImageNotInstalled.*' + regex = ".*ImageNotInstalled.*" if re.match(regex, stderr): # Even if the 'rc' was non-zero (3), we handled the situation # in order to determine if there was a change.
self.changed = False - regex = f'Deleted image {self.uuid}' + regex = f"Deleted image {self.uuid}" if re.match(regex, stdout): self.changed = True @@ -267,12 +267,12 @@ def manage_images(self): def main(): module = AnsibleModule( argument_spec=dict( - force=dict(type='bool'), - pool=dict(default='zones'), + force=dict(type="bool"), + pool=dict(default="zones"), source=dict(), - state=dict(required=True, choices=['present', 'absent', 'deleted', 'imported', 'updated', 'vacuumed']), - type=dict(default='imgapi', choices=['imgapi', 'docker', 'dsapi']), - uuid=dict() + state=dict(required=True, choices=["present", "absent", "deleted", "imported", "updated", "vacuumed"]), + type=dict(default="imgapi", choices=["imgapi", "docker", "dsapi"]), + uuid=dict(), ), # This module relies largely on imgadm(1M) to enforce idempotency, which does not # provide a "noop" (or equivalent) mode to do a dry-run. @@ -281,30 +281,30 @@ def main(): imgadm = Imgadm(module) - uuid = module.params['uuid'] - source = module.params['source'] - state = module.params['state'] + uuid = module.params["uuid"] + source = module.params["source"] + state = module.params["state"] - result = {'state': state} + result = {"state": state} # Either manage sources or images. if source: - result['source'] = source + result["source"] = source imgadm.manage_sources() else: - result['uuid'] = uuid + result["uuid"] = uuid - if state == 'updated': + if state == "updated": imgadm.update_images() else: # Make sure operate on a single image for the following actions - if (uuid == '*') and (state != 'vacuumed'): + if (uuid == "*") and (state != "vacuumed"): module.fail_json(msg='Can only specify uuid as "*" when updating image(s)') imgadm.manage_images() - result['changed'] = imgadm.changed + result["changed"] = imgadm.changed module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/infinity.py b/plugins/modules/infinity.py index d958ec6d8cd..c3c63bb599d 100644 --- a/plugins/modules/infinity.py +++ b/plugins/modules/infinity.py @@ -159,22 +159,17 @@ def __init__(self, module, server_ip, username, password): self.base_url = f"https://{server_ip}/rest/v1/" def _get_api_call_ansible_handler( - self, - method='get', - resource_url='', - stat_codes=None, - params=None, - payload_data=None): + self, method="get", resource_url="", stat_codes=None, params=None, payload_data=None + ): """ Perform the HTTPS request by using ansible get/delete method """ stat_codes = [200] if stat_codes is None else stat_codes request_url = str(self.base_url) + str(resource_url) response = None - headers = {'Content-Type': 'application/json'} + headers = {"Content-Type": "application/json"} if not request_url: - self.module.exit_json( - msg="When sending Rest api call , the resource URL is empty, please check.") + self.module.exit_json(msg="When sending Rest api call , the resource URL is empty, please check.") if payload_data and not isinstance(payload_data, str): payload_data = json.dumps(payload_data) response_raw = open_url( @@ -186,23 +181,22 @@ def _get_api_call_ansible_handler( url_password=self.auth_pass, validate_certs=False, force_basic_auth=True, - data=payload_data) + data=payload_data, + ) response = response_raw.read() - payload = '' + payload = "" if response_raw.code not in stat_codes: self.module.exit_json( - changed=False, - meta=f" openurl response_raw.code show error and error code is {response_raw.code!r}") + changed=False, meta=f" openurl response_raw.code show error and error code is 
{response_raw.code!r}" + ) else: if isinstance(response, str) and len(response) > 0: payload = response - elif method.lower() == 'delete' and response_raw.code == 204: - payload = 'Delete is done.' + elif method.lower() == "delete" and response_raw.code == 204: + payload = "Delete is done." if isinstance(payload, dict) and "text" in payload: - self.module.exit_json( - changed=False, - meta="when calling rest api, returned data is not json ") + self.module.exit_json(changed=False, meta="when calling rest api, returned data is not json ") raise Exception(payload["text"]) return payload @@ -216,10 +210,9 @@ def get_network(self, network_id, network_name, limit=-1): return the details of a given with given network_id or name """ if network_name is None and network_id is None: - self.module.exit_json( - msg="You must specify one of the options 'network_name' or 'network_id'.") + self.module.exit_json(msg="You must specify one of the options 'network_name' or 'network_id'.") method = "get" - resource_url = '' + resource_url = "" params = {} response = None if network_id: @@ -228,14 +221,11 @@ def get_network(self, network_id, network_name, limit=-1): if network_id is None and network_name: method = "get" resource_url = "search" - params = {"query": json.dumps( - {"name": network_name, "type": "network"})} - response = self._get_api_call_ansible_handler( - method, resource_url, payload_data=json.dumps(params)) + params = {"query": json.dumps({"name": network_name, "type": "network"})} + response = self._get_api_call_ansible_handler(method, resource_url, payload_data=json.dumps(params)) if response and isinstance(response, str): response = json.loads(response) - if response and isinstance(response, list) and len( - response) > 1 and limit == 1: + if response and isinstance(response, list) and len(response) > 1 and limit == 1: response = response[0] response = json.dumps(response) return response @@ -243,26 +233,23 @@ def get_network(self, network_id, network_name, limit=-1): # --------------------------------------------------------------------------- # get_network_id() # --------------------------------------------------------------------------- - def get_network_id(self, network_name="", network_type='lan'): + def get_network_id(self, network_name="", network_type="lan"): """ query network_id from Infinity via rest api based on given network_name """ - method = 'get' - resource_url = 'search' + method = "get" + resource_url = "search" response = None if network_name is None: - self.module.exit_json( - msg="You must specify the option 'network_name'") - params = {"query": json.dumps( - {"name": network_name, "type": "network"})} - response = self._get_api_call_ansible_handler( - method, resource_url, payload_data=json.dumps(params)) + self.module.exit_json(msg="You must specify the option 'network_name'") + params = {"query": json.dumps({"name": network_name, "type": "network"})} + response = self._get_api_call_ansible_handler(method, resource_url, payload_data=json.dumps(params)) network_id = "" if response and isinstance(response, str): response = json.loads(response) if response and isinstance(response, list): response = response[0] - network_id = response['id'] + network_id = response["id"] return network_id # --------------------------------------------------------------------------- @@ -275,20 +262,18 @@ def reserve_next_available_ip(self, network_id=""): return the next available ip address from that given network """ method = "post" - resource_url = '' + resource_url = "" response = None 
- ip_info = '' + ip_info = "" if not network_id: - self.module.exit_json( - msg="You must specify the option 'network_id'.") + self.module.exit_json(msg="You must specify the option 'network_id'.") if network_id: resource_url = f"networks/{network_id}/reserve_ip" response = self._get_api_call_ansible_handler(method, resource_url) - if response and response.find( - "[") >= 0 and response.find("]") >= 0: + if response and response.find("[") >= 0 and response.find("]") >= 0: start_pos = response.find("{") end_pos = response.find("}") - ip_info = response[start_pos: (end_pos + 1)] + ip_info = response[start_pos : (end_pos + 1)] return ip_info # ------------------------- @@ -299,43 +284,37 @@ def release_ip(self, network_id="", ip_address=""): Reserve ip address via Infinity by using rest api """ method = "get" - resource_url = '' + resource_url = "" response = None if ip_address is None or network_id is None: - self.module.exit_json( - msg="You must specify those two options: 'network_id' and 'ip_address'.") + self.module.exit_json(msg="You must specify those two options: 'network_id' and 'ip_address'.") resource_url = f"networks/{network_id}/children" response = self._get_api_call_ansible_handler(method, resource_url) if not response: - self.module.exit_json( - msg=f"There is an error in release ip {ip_address} from network {network_id}.") + self.module.exit_json(msg=f"There is an error in release ip {ip_address} from network {network_id}.") ip_list = json.loads(response) ip_idlist = [] for ip_item in ip_list: - ip_id = ip_item['id'] + ip_id = ip_item["id"] ip_idlist.append(ip_id) - deleted_ip_id = '' + deleted_ip_id = "" for ip_id in ip_idlist: - ip_response = '' + ip_response = "" resource_url = f"ip_addresses/{ip_id}" - ip_response = self._get_api_call_ansible_handler( - method, - resource_url, - stat_codes=[200]) - if ip_response and json.loads( - ip_response)['address'] == str(ip_address): + ip_response = self._get_api_call_ansible_handler(method, resource_url, stat_codes=[200]) + if ip_response and json.loads(ip_response)["address"] == str(ip_address): deleted_ip_id = ip_id break if deleted_ip_id: - method = 'delete' + method = "delete" resource_url = f"ip_addresses/{deleted_ip_id}" - response = self._get_api_call_ansible_handler( - method, resource_url, stat_codes=[204]) + response = self._get_api_call_ansible_handler(method, resource_url, stat_codes=[204]) else: self.module.exit_json( - msg=f" When release ip, could not find the ip address {ip_address} from the given network {network_id}' .") + msg=f" When release ip, could not find the ip address {ip_address} from the given network {network_id}' ." 
+ ) return response @@ -346,26 +325,30 @@ def delete_network(self, network_id="", network_name=""): """ delete network from Infinity by using rest api """ - method = 'delete' - resource_url = '' + method = "delete" + resource_url = "" response = None if network_id is None and network_name is None: - self.module.exit_json( - msg="You must specify one of those options: 'network_id','network_name' .") + self.module.exit_json(msg="You must specify one of those options: 'network_id','network_name' .") if network_id is None and network_name: network_id = self.get_network_id(network_name=network_name) if network_id: resource_url = f"networks/{network_id}" - response = self._get_api_call_ansible_handler( - method, resource_url, stat_codes=[204]) + response = self._get_api_call_ansible_handler(method, resource_url, stat_codes=[204]) return response # reserve_network() # --------------------------------------------------------------------------- - def reserve_network(self, network_id="", - reserved_network_name="", reserved_network_description="", - reserved_network_size="", reserved_network_family='4', - reserved_network_type='lan', reserved_network_address="",): + def reserve_network( + self, + network_id="", + reserved_network_name="", + reserved_network_description="", + reserved_network_size="", + reserved_network_family="4", + reserved_network_type="lan", + reserved_network_address="", + ): """ Reserves the first available network of specified size from a given supernet
network_name (required)
Name of the network
@@ -376,70 +359,71 @@ def reserve_network(self, network_id="",
network_type (required)
Type of network. One of 'supernet', 'lan', 'shared_lan'
""" - method = 'post' - resource_url = '' + method = "post" + resource_url = "" network_info = None if network_id is None or reserved_network_name is None or reserved_network_size is None: self.module.exit_json( - msg="You must specify those options: 'network_id', 'reserved_network_name' and 'reserved_network_size'") + msg="You must specify those options: 'network_id', 'reserved_network_name' and 'reserved_network_size'" + ) if network_id: resource_url = f"networks/{network_id}/reserve_network" if not reserved_network_family: - reserved_network_family = '4' + reserved_network_family = "4" if not reserved_network_type: - reserved_network_type = 'lan' + reserved_network_type = "lan" payload_data = { "network_name": reserved_network_name, - 'description': reserved_network_description, - 'network_size': reserved_network_size, - 'network_family': reserved_network_family, - 'network_type': reserved_network_type, - 'network_location': int(network_id)} + "description": reserved_network_description, + "network_size": reserved_network_size, + "network_family": reserved_network_family, + "network_type": reserved_network_type, + "network_location": int(network_id), + } if reserved_network_address: - payload_data.update({'network_address': reserved_network_address}) + payload_data.update({"network_address": reserved_network_address}) network_info = self._get_api_call_ansible_handler( - method, resource_url, stat_codes=[200, 201], payload_data=payload_data) + method, resource_url, stat_codes=[200, 201], payload_data=payload_data + ) return network_info # --------------------------------------------------------------------------- # release_network() # --------------------------------------------------------------------------- - def release_network( - self, - network_id="", - released_network_name="", - released_network_type='lan'): + def release_network(self, network_id="", released_network_name="", released_network_type="lan"): """ Release the network with name 'released_network_name' from the given supernet network_id """ - method = 'get' + method = "get" response = None if network_id is None or released_network_name is None: self.module.exit_json( - msg="You must specify those options 'network_id', 'reserved_network_name' and 'reserved_network_size'") + msg="You must specify those options 'network_id', 'reserved_network_name' and 'reserved_network_size'" + ) matched_network_id = "" resource_url = f"networks/{network_id}/children" response = self._get_api_call_ansible_handler(method, resource_url) if not response: self.module.exit_json( - msg=f" there is an error in releasing network {network_id} from network {released_network_name}.") + msg=f" there is an error in releasing network {network_id} from network {released_network_name}." 
+ ) if response: response = json.loads(response) for child_net in response: - if child_net['network'] and child_net['network']['network_name'] == released_network_name: - matched_network_id = child_net['network']['network_id'] + if child_net["network"] and child_net["network"]["network_name"] == released_network_name: + matched_network_id = child_net["network"]["network_id"] break response = None if matched_network_id: - method = 'delete' + method = "delete" resource_url = f"networks/{matched_network_id}" - response = self._get_api_call_ansible_handler( - method, resource_url, stat_codes=[204]) + response = self._get_api_call_ansible_handler(method, resource_url, stat_codes=[204]) else: self.module.exit_json( - msg=f" When release network , could not find the network {released_network_name} from the given superent {network_id} ") + msg=f" When release network , could not find the network {released_network_name} from the given superent {network_id} " + ) return response @@ -447,67 +431,76 @@ def release_network( # add_network() # --------------------------------------------------------------------------- def add_network( - self, network_name="", network_address="", - network_size="", network_family='4', - network_type='lan', network_location=-1): + self, + network_name="", + network_address="", + network_size="", + network_family="4", + network_type="lan", + network_location=-1, + ): """ add a new LAN network into a given supernet Fusionlayer Infinity via rest api or default supernet required fields=['network_name', 'network_family', 'network_type', 'network_address','network_size' ] """ - method = 'post' - resource_url = 'networks' + method = "post" + resource_url = "networks" response = None if network_name is None or network_address is None or network_size is None: self.module.exit_json( - msg="You must specify those options 'network_name', 'network_address' and 'network_size'") + msg="You must specify those options 'network_name', 'network_address' and 'network_size'" + ) if not network_family: - network_family = '4' + network_family = "4" if not network_type: - network_type = 'lan' + network_type = "lan" if not network_location: network_location = -1 payload_data = { "network_name": network_name, - 'network_address': network_address, - 'network_size': network_size, - 'network_family': network_family, - 'network_type': network_type, - 'network_location': network_location} + "network_address": network_address, + "network_size": network_size, + "network_family": network_family, + "network_type": network_type, + "network_location": network_location, + } response = self._get_api_call_ansible_handler( - method='post', resource_url=resource_url, - stat_codes=[200], payload_data=payload_data) + method="post", resource_url=resource_url, stat_codes=[200], payload_data=payload_data + ) return response def main(): module = AnsibleModule( argument_spec=dict( - server_ip=dict(type='str', required=True), - username=dict(type='str', required=True), - password=dict(type='str', required=True, no_log=True), - network_id=dict(type='str'), - ip_address=dict(type='str'), - network_name=dict(type='str'), - network_location=dict(type='int', default=-1), - network_family=dict(type='str', default='4', choices=['4', '6', 'dual']), - network_type=dict(type='str', default='lan', choices=['lan', 'shared_lan', 'supernet']), - network_address=dict(type='str'), - network_size=dict(type='str'), - action=dict(type='str', required=True, choices=[ - 'add_network', - 'delete_network', - 'get_network', - 'get_network_id', - 
'release_ip', - 'release_network', - 'reserve_network', - 'reserve_next_available_ip', - ],), - ), - required_together=( - ['username', 'password'], + server_ip=dict(type="str", required=True), + username=dict(type="str", required=True), + password=dict(type="str", required=True, no_log=True), + network_id=dict(type="str"), + ip_address=dict(type="str"), + network_name=dict(type="str"), + network_location=dict(type="int", default=-1), + network_family=dict(type="str", default="4", choices=["4", "6", "dual"]), + network_type=dict(type="str", default="lan", choices=["lan", "shared_lan", "supernet"]), + network_address=dict(type="str"), + network_size=dict(type="str"), + action=dict( + type="str", + required=True, + choices=[ + "add_network", + "delete_network", + "get_network", + "get_network_id", + "release_ip", + "release_network", + "reserve_network", + "reserve_next_available_ip", + ], + ), ), + required_together=(["username", "password"],), ) server_ip = module.params["server_ip"] username = module.params["username"] @@ -522,31 +515,27 @@ def main(): network_size = module.params["network_size"] network_location = module.params["network_location"] my_infinity = Infinity(module, server_ip, username, password) - result = '' + result = "" if action == "reserve_next_available_ip": if network_id: result = my_infinity.reserve_next_available_ip(network_id) if not result: - result = 'There is an error in calling method of reserve_next_available_ip' + result = "There is an error in calling method of reserve_next_available_ip" module.exit_json(changed=False, meta=result) module.exit_json(changed=True, meta=result) elif action == "release_ip": if network_id and released_ip: - result = my_infinity.release_ip( - network_id=network_id, ip_address=released_ip) + result = my_infinity.release_ip(network_id=network_id, ip_address=released_ip) module.exit_json(changed=True, meta=result) elif action == "delete_network": - result = my_infinity.delete_network( - network_id=network_id, network_name=network_name) + result = my_infinity.delete_network(network_id=network_id, network_name=network_name) module.exit_json(changed=True, meta=result) elif action == "get_network_id": - result = my_infinity.get_network_id( - network_name=network_name, network_type=network_type) + result = my_infinity.get_network_id(network_name=network_name, network_type=network_type) module.exit_json(changed=True, meta=result) elif action == "get_network": - result = my_infinity.get_network( - network_id=network_id, network_name=network_name) + result = my_infinity.get_network(network_id=network_id, network_name=network_name) module.exit_json(changed=True, meta=result) elif action == "reserve_network": result = my_infinity.reserve_network( @@ -555,13 +544,13 @@ def main(): reserved_network_size=network_size, reserved_network_family=network_family, reserved_network_type=network_type, - reserved_network_address=network_address) + reserved_network_address=network_address, + ) module.exit_json(changed=True, meta=result) elif action == "release_network": result = my_infinity.release_network( - network_id=network_id, - released_network_name=network_name, - released_network_type=network_type) + network_id=network_id, released_network_name=network_name, released_network_type=network_type + ) module.exit_json(changed=True, meta=result) elif action == "add_network": @@ -571,10 +560,11 @@ def main(): network_address=network_address, network_size=network_size, network_family=network_family, - network_type=network_type) + network_type=network_type, 
+ ) module.exit_json(changed=True, meta=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/influxdb_database.py b/plugins/modules/influxdb_database.py index 600599ab0cf..cd0908984c2 100644 --- a/plugins/modules/influxdb_database.py +++ b/plugins/modules/influxdb_database.py @@ -81,7 +81,7 @@ def find_database(module, client, database_name): try: databases = client.get_list_database() for db in databases: - if db['name'] == database_name: + if db["name"] == database_name: database = db break except requests.exceptions.ConnectionError as e: @@ -112,33 +112,30 @@ def drop_database(module, client, database_name): def main(): argument_spec = InfluxDb.influxdb_argument_spec() argument_spec.update( - database_name=dict(required=True, type='str'), - state=dict(default='present', type='str', choices=['present', 'absent']) - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True + database_name=dict(required=True, type="str"), + state=dict(default="present", type="str", choices=["present", "absent"]), ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) - state = module.params['state'] + state = module.params["state"] influxdb = InfluxDb(module) client = influxdb.connect_to_influxdb() database_name = influxdb.database_name database = find_database(module, client, database_name) - if state == 'present': + if state == "present": if database: module.exit_json(changed=False) else: create_database(module, client, database_name) - if state == 'absent': + if state == "absent": if database: drop_database(module, client, database_name) else: module.exit_json(changed=False) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/influxdb_query.py b/plugins/modules/influxdb_query.py index 1707d401f26..6ea1f6d94a2 100644 --- a/plugins/modules/influxdb_query.py +++ b/plugins/modules/influxdb_query.py @@ -71,7 +71,6 @@ class AnsibleInfluxDBRead(InfluxDb): - def read_by_query(self, query): client = self.connect_to_influxdb() try: @@ -85,19 +84,16 @@ def read_by_query(self, query): def main(): argument_spec = InfluxDb.influxdb_argument_spec() argument_spec.update( - query=dict(type='str', required=True), - database_name=dict(required=True, type='str'), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True + query=dict(type="str", required=True), + database_name=dict(required=True, type="str"), ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) influx = AnsibleInfluxDBRead(module) - query = module.params.get('query') + query = module.params.get("query") results = influx.read_by_query(query) module.exit_json(changed=True, query_results=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/influxdb_retention_policy.py b/plugins/modules/influxdb_retention_policy.py index b49aa1c21ff..78062c99bc6 100644 --- a/plugins/modules/influxdb_retention_policy.py +++ b/plugins/modules/influxdb_retention_policy.py @@ -146,25 +146,25 @@ from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb -VALID_DURATION_REGEX = re.compile(r'^(INF|(\d+(ns|u|µ|ms|s|m|h|d|w)))+$') +VALID_DURATION_REGEX = re.compile(r"^(INF|(\d+(ns|u|µ|ms|s|m|h|d|w)))+$") -DURATION_REGEX = re.compile(r'(\d+)(ns|u|µ|ms|s|m|h|d|w)') -EXTENDED_DURATION_REGEX = re.compile(r'(?:(\d+)(ns|u|µ|ms|m|h|d|w)|(\d+(?:\.\d+)?)(s))') +DURATION_REGEX = re.compile(r"(\d+)(ns|u|µ|ms|s|m|h|d|w)") 
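# --- Illustrative sketch, not part of the patch: how a duration literal such as
# --- "1h30m" can be reduced to nanoseconds with DURATION_REGEX and the
# --- DURATION_UNIT_NANOSECS table below. simple_duration_to_ns() is our own
# --- minimal stand-in for the module's parse_duration_literal(); it ignores
# --- "INF" and the fractional-seconds form handled by EXTENDED_DURATION_REGEX.
import re

DURATION_REGEX = re.compile(r"(\d+)(ns|u|µ|ms|s|m|h|d|w)")
UNIT_NS = {"ns": 1, "u": 1000, "µ": 1000, "ms": 10**6, "s": 10**9,
           "m": 60 * 10**9, "h": 3600 * 10**9, "d": 86400 * 10**9, "w": 7 * 86400 * 10**9}

def simple_duration_to_ns(value):
    # Sum every '<number><unit>' component, e.g. "1h30m" -> 1h + 30m.
    return sum(int(num) * UNIT_NS[unit] for num, unit in DURATION_REGEX.findall(value))

assert simple_duration_to_ns("1h30m") == 5400 * 10**9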
+EXTENDED_DURATION_REGEX = re.compile(r"(?:(\d+)(ns|u|µ|ms|m|h|d|w)|(\d+(?:\.\d+)?)(s))") DURATION_UNIT_NANOSECS = { - 'ns': 1, - 'u': 1000, - 'µ': 1000, - 'ms': 1000 * 1000, - 's': 1000 * 1000 * 1000, - 'm': 1000 * 1000 * 1000 * 60, - 'h': 1000 * 1000 * 1000 * 60 * 60, - 'd': 1000 * 1000 * 1000 * 60 * 60 * 24, - 'w': 1000 * 1000 * 1000 * 60 * 60 * 24 * 7, + "ns": 1, + "u": 1000, + "µ": 1000, + "ms": 1000 * 1000, + "s": 1000 * 1000 * 1000, + "m": 1000 * 1000 * 1000 * 60, + "h": 1000 * 1000 * 1000 * 60 * 60, + "d": 1000 * 1000 * 1000 * 60 * 60 * 24, + "w": 1000 * 1000 * 1000 * 60 * 60 * 24 * 7, } -MINIMUM_VALID_DURATION = 1 * DURATION_UNIT_NANOSECS['h'] -MINIMUM_VALID_SHARD_GROUP_DURATION = 1 * DURATION_UNIT_NANOSECS['h'] +MINIMUM_VALID_DURATION = 1 * DURATION_UNIT_NANOSECS["h"] +MINIMUM_VALID_SHARD_GROUP_DURATION = 1 * DURATION_UNIT_NANOSECS["h"] def check_duration_literal(value): @@ -188,15 +188,15 @@ def parse_duration_literal(value, extended=False): def find_retention_policy(module, client): - database_name = module.params['database_name'] - policy_name = module.params['policy_name'] - hostname = module.params['hostname'] + database_name = module.params["database_name"] + policy_name = module.params["policy_name"] + hostname = module.params["hostname"] retention_policy = None try: retention_policies = client.get_list_retention_policies(database=database_name) for policy in retention_policies: - if policy['name'] == policy_name: + if policy["name"] == policy_name: retention_policy = policy break except requests.exceptions.ConnectionError as e: @@ -204,18 +204,20 @@ def find_retention_policy(module, client): if retention_policy is not None: retention_policy["duration"] = parse_duration_literal(retention_policy["duration"], extended=True) - retention_policy["shardGroupDuration"] = parse_duration_literal(retention_policy["shardGroupDuration"], extended=True) + retention_policy["shardGroupDuration"] = parse_duration_literal( + retention_policy["shardGroupDuration"], extended=True + ) return retention_policy def create_retention_policy(module, client): - database_name = module.params['database_name'] - policy_name = module.params['policy_name'] - duration = module.params['duration'] - replication = module.params['replication'] - default = module.params['default'] - shard_group_duration = module.params['shard_group_duration'] + database_name = module.params["database_name"] + policy_name = module.params["policy_name"] + duration = module.params["duration"] + replication = module.params["replication"] + default = module.params["default"] + shard_group_duration = module.params["shard_group_duration"] if not check_duration_literal(duration): module.fail_json(msg="Failed to parse value of duration") @@ -235,8 +237,9 @@ def create_retention_policy(module, client): if not module.check_mode: try: if shard_group_duration: - client.create_retention_policy(policy_name, duration, replication, database_name, default, - shard_group_duration) + client.create_retention_policy( + policy_name, duration, replication, database_name, default, shard_group_duration + ) else: client.create_retention_policy(policy_name, duration, replication, database_name, default) except exceptions.InfluxDBClientError as e: @@ -245,12 +248,12 @@ def create_retention_policy(module, client): def alter_retention_policy(module, client, retention_policy): - database_name = module.params['database_name'] - policy_name = module.params['policy_name'] - duration = module.params['duration'] - replication = module.params['replication'] - 
default = module.params['default'] - shard_group_duration = module.params['shard_group_duration'] + database_name = module.params["database_name"] + policy_name = module.params["policy_name"] + duration = module.params["duration"] + replication = module.params["replication"] + default = module.params["default"] + shard_group_duration = module.params["shard_group_duration"] changed = False @@ -271,14 +274,17 @@ def alter_retention_policy(module, client, retention_policy): if influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION: module.fail_json(msg="shard_group_duration value must be finite and at least 1h") - if (retention_policy['duration'] != influxdb_duration_format or - retention_policy['shardGroupDuration'] != influxdb_shard_group_duration_format or - retention_policy['replicaN'] != int(replication) or - retention_policy['default'] != default): + if ( + retention_policy["duration"] != influxdb_duration_format + or retention_policy["shardGroupDuration"] != influxdb_shard_group_duration_format + or retention_policy["replicaN"] != int(replication) + or retention_policy["default"] != default + ): if not module.check_mode: try: - client.alter_retention_policy(policy_name, database_name, duration, replication, default, - shard_group_duration) + client.alter_retention_policy( + policy_name, database_name, duration, replication, default, shard_group_duration + ) except exceptions.InfluxDBClientError as e: module.fail_json(msg=e.content) changed = True @@ -286,8 +292,8 @@ def alter_retention_policy(module, client, retention_policy): def drop_retention_policy(module, client): - database_name = module.params['database_name'] - policy_name = module.params['policy_name'] + database_name = module.params["database_name"] + policy_name = module.params["policy_name"] if not module.check_mode: try: @@ -300,41 +306,39 @@ def drop_retention_policy(module, client): def main(): argument_spec = InfluxDb.influxdb_argument_spec() argument_spec.update( - state=dict(default='present', type='str', choices=['present', 'absent']), - database_name=dict(required=True, type='str'), - policy_name=dict(required=True, type='str'), - duration=dict(type='str'), - replication=dict(type='int'), - default=dict(default=False, type='bool'), - shard_group_duration=dict(type='str'), + state=dict(default="present", type="str", choices=["present", "absent"]), + database_name=dict(required=True, type="str"), + policy_name=dict(required=True, type="str"), + duration=dict(type="str"), + replication=dict(type="int"), + default=dict(default=False, type="bool"), + shard_group_duration=dict(type="str"), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - required_if=( - ('state', 'present', ['duration', 'replication']), - ), + required_if=(("state", "present", ["duration", "replication"]),), ) - state = module.params['state'] + state = module.params["state"] influxdb = InfluxDb(module) client = influxdb.connect_to_influxdb() retention_policy = find_retention_policy(module, client) - if state == 'present': + if state == "present": if retention_policy: alter_retention_policy(module, client, retention_policy) else: create_retention_policy(module, client) - if state == 'absent': + if state == "absent": if retention_policy: drop_retention_policy(module, client) else: module.exit_json(changed=False) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/influxdb_user.py b/plugins/modules/influxdb_user.py index b6351a0c27c..05013d218b6 100644 --- 
a/plugins/modules/influxdb_user.py +++ b/plugins/modules/influxdb_user.py @@ -115,7 +115,7 @@ def find_user(module, client, user_name): try: users = client.get_list_users() for user in users: - if user['user'] == user_name: + if user["user"] == user_name: user_result = user break except ConnectionError as e: @@ -134,7 +134,7 @@ def check_user_password(module, client, user_name, user_password): module.fail_json(msg=to_native(e)) finally: # restore previous user - client.switch_user(module.params['username'], module.params['password']) + client.switch_user(module.params["username"], module.params["password"]) return True @@ -171,34 +171,30 @@ def set_user_grants(module, client, user_name, grants): try: current_grants = client.get_list_privileges(user_name) except influx.exceptions.InfluxDBClientError as e: - if not module.check_mode or 'user not found' not in e.content: + if not module.check_mode or "user not found" not in e.content: module.fail_json(msg=e.content) try: parsed_grants = [] # Fix privileges wording for i, v in enumerate(current_grants): - if v['privilege'] != 'NO PRIVILEGES': - if v['privilege'] == 'ALL PRIVILEGES': - v['privilege'] = 'ALL' + if v["privilege"] != "NO PRIVILEGES": + if v["privilege"] == "ALL PRIVILEGES": + v["privilege"] = "ALL" parsed_grants.append(v) # check if the current grants are included in the desired ones for current_grant in parsed_grants: if current_grant not in grants: if not module.check_mode: - client.revoke_privilege(current_grant['privilege'], - current_grant['database'], - user_name) + client.revoke_privilege(current_grant["privilege"], current_grant["database"], user_name) changed = True # check if the desired grants are included in the current ones for grant in grants: if grant not in parsed_grants: if not module.check_mode: - client.grant_privilege(grant['privilege'], - grant['database'], - user_name) + client.grant_privilege(grant["privilege"], grant["database"], user_name) changed = True except influx.exceptions.InfluxDBClientError as e: @@ -213,22 +209,19 @@ def set_user_grants(module, client, user_name, grants): def main(): argument_spec = influx.InfluxDb.influxdb_argument_spec() argument_spec.update( - state=dict(default='present', type='str', choices=['present', 'absent']), - user_name=dict(required=True, type='str'), - user_password=dict(type='str', no_log=True), - admin=dict(default='False', type='bool'), - grants=dict(type='list', elements='dict'), - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True + state=dict(default="present", type="str", choices=["present", "absent"]), + user_name=dict(required=True, type="str"), + user_password=dict(type="str", no_log=True), + admin=dict(default="False", type="bool"), + grants=dict(type="list", elements="dict"), ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) - state = module.params['state'] - user_name = module.params['user_name'] - user_password = module.params['user_password'] - admin = module.params['admin'] - grants = module.params['grants'] + state = module.params["state"] + user_name = module.params["user_name"] + user_password = module.params["user_password"] + admin = module.params["admin"] + grants = module.params["grants"] influxdb = influx.InfluxDb(module) client = influxdb.connect_to_influxdb() @@ -251,18 +244,18 @@ def main(): changed = False - if state == 'present': + if state == "present": if user: if not check_user_password(module, client, user_name, user_password) and user_password is not None: 
set_user_password(module, client, user_name, user_password) changed = True try: - if admin and not user['admin']: + if admin and not user["admin"]: if not module.check_mode: client.grant_admin_privileges(user_name) changed = True - elif not admin and user['admin']: + elif not admin and user["admin"]: if not module.check_mode: client.revoke_admin_privileges(user_name) changed = True @@ -270,7 +263,7 @@ def main(): module.fail_json(msg=to_native(e)) else: - user_password = user_password or '' + user_password = user_password or "" create_user(module, client, user_name, user_password, admin) changed = True @@ -280,12 +273,12 @@ def main(): module.exit_json(changed=changed) - if state == 'absent': + if state == "absent": if user: drop_user(module, client, user_name) else: module.exit_json(changed=False) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/influxdb_write.py b/plugins/modules/influxdb_write.py index d0348aca019..9d1d3ab9d83 100644 --- a/plugins/modules/influxdb_write.py +++ b/plugins/modules/influxdb_write.py @@ -68,7 +68,6 @@ class AnsibleInfluxDBWrite(InfluxDb): - def write_data_point(self, data_points): client = self.connect_to_influxdb() @@ -81,18 +80,18 @@ def write_data_point(self, data_points): def main(): argument_spec = InfluxDb.influxdb_argument_spec() argument_spec.update( - data_points=dict(required=True, type='list', elements='dict'), - database_name=dict(required=True, type='str'), + data_points=dict(required=True, type="list", elements="dict"), + database_name=dict(required=True, type="str"), ) module = AnsibleModule( argument_spec=argument_spec, ) influx = AnsibleInfluxDBWrite(module) - data_points = module.params.get('data_points') + data_points = module.params.get("data_points") influx.write_data_point(data_points) module.exit_json(changed=True) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ini_file.py b/plugins/modules/ini_file.py index 3170071b698..bb47cd7f28c 100644 --- a/plugins/modules/ini_file.py +++ b/plugins/modules/ini_file.py @@ -266,12 +266,12 @@ def match_opt(option, line): option = re.escape(option) - return re.match(f'( |\t)*([#;]?)( |\t)*({option})( |\t)*(=|$)( |\t)*(.*)', line) + return re.match(f"( |\t)*([#;]?)( |\t)*({option})( |\t)*(=|$)( |\t)*(.*)", line) def match_active_opt(option, line): option = re.escape(option) - return re.match(f'()()( |\t)*({option})( |\t)*(=|$)( |\t)*(.*)', line) + return re.match(f"()()( |\t)*({option})( |\t)*(=|$)( |\t)*(.*)", line) def update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg): @@ -287,7 +287,7 @@ def update_section_line(option, changed, section_lines, index, changed_lines, ig section_lines[index] = newline changed = changed or option_changed if option_changed: - msg = 'option changed' + msg = "option changed" changed_lines[index] = 1 return (changed, msg) @@ -304,10 +304,23 @@ def check_section_has_values(section_has_values, section_lines): return True -def do_ini(module, filename, section=None, section_has_values=None, option=None, values=None, - state='present', exclusive=True, backup=False, no_extra_spaces=False, - ignore_spaces=False, create=True, allow_no_value=False, modify_inactive_option=True, follow=False): - +def do_ini( + module, + filename, + section=None, + section_has_values=None, + option=None, + values=None, + state="present", + exclusive=True, + backup=False, + no_extra_spaces=False, + ignore_spaces=False, + create=True, + allow_no_value=False, + 
modify_inactive_option=True, + follow=False, +): if section is not None: section = to_text(section) if option is not None: @@ -319,10 +332,10 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, values = values_unique diff = dict( - before='', - after='', - before_header=f'{filename} (content)', - after_header=f'{filename} (content)', + before="", + after="", + before_header=f"{filename} (content)", + after_header=f"{filename} (content)", ) if follow and os.path.islink(filename): @@ -332,27 +345,27 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, if not os.path.exists(target_filename): if not create: - module.fail_json(rc=257, msg=f'Destination {target_filename} does not exist!') + module.fail_json(rc=257, msg=f"Destination {target_filename} does not exist!") destpath = os.path.dirname(target_filename) if not os.path.exists(destpath) and not module.check_mode: os.makedirs(destpath) ini_lines = [] else: - with io.open(target_filename, 'r', encoding="utf-8-sig") as ini_file: + with io.open(target_filename, "r", encoding="utf-8-sig") as ini_file: ini_lines = [to_text(line) for line in ini_file.readlines()] if module._diff: - diff['before'] = ''.join(ini_lines) + diff["before"] = "".join(ini_lines) changed = False # ini file could be empty if not ini_lines: - ini_lines.append('\n') + ini_lines.append("\n") # last line of file may not contain a trailing newline - if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n': - ini_lines[-1] += '\n' + if ini_lines[-1] == "" or ini_lines[-1][-1] != "\n": + ini_lines[-1] += "\n" changed = True # append fake section lines to simplify the logic @@ -362,10 +375,10 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, fake_section_name = "ad01e11446efb704fcdbdb21f2c43757423d91c5" # Insert it at the beginning - ini_lines.insert(0, f'[{fake_section_name}]') + ini_lines.insert(0, f"[{fake_section_name}]") # At bottom: - ini_lines.append('[') + ini_lines.append("[") # If no section is defined, fake section is used if not section: @@ -373,27 +386,25 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, within_section = not section section_start = section_end = 0 - msg = 'OK' + msg = "OK" if no_extra_spaces: - assignment_format = '%s=%s\n' + assignment_format = "%s=%s\n" else: - assignment_format = '%s = %s\n' + assignment_format = "%s = %s\n" option_no_value_present = False - non_blank_non_comment_pattern = re.compile(to_text(r'^[ \t]*([#;].*)?$')) + non_blank_non_comment_pattern = re.compile(to_text(r"^[ \t]*([#;].*)?$")) before = after = [] section_lines = [] - section_pattern = re.compile(to_text(rf'^\[\s*{re.escape(section.strip())}\s*]')) + section_pattern = re.compile(to_text(rf"^\[\s*{re.escape(section.strip())}\s*]")) for index, line in enumerate(ini_lines): # end of section: - if within_section and line.startswith('['): - if check_section_has_values( - section_has_values, ini_lines[section_start:index] - ): + if within_section and line.startswith("["): + if check_section_has_values(section_has_values, ini_lines[section_start:index]): section_end = index break else: @@ -408,7 +419,7 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, before = ini_lines[0:section_start] section_lines = ini_lines[section_start:section_end] - after = ini_lines[section_end:len(ini_lines)] + after = ini_lines[section_end : len(ini_lines)] # Keep track of changed section_lines changed_lines = [0] * len(section_lines) @@ 
-426,7 +437,7 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, # 3. delete remaining lines where we have a matching option # 4. insert missing option line(s) at the end of the section - if state == 'present' and option: + if state == "present" and option: for index, line in enumerate(section_lines): if match_function(option, line): match = match_function(option, line) @@ -434,27 +445,33 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, matched_value = match.group(8) if not matched_value and allow_no_value: # replace existing option with no value line(s) - newline = f'{option}\n' + newline = f"{option}\n" option_no_value_present = True else: # replace existing option=value line(s) newline = assignment_format % (option, matched_value) - (changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg) + (changed, msg) = update_section_line( + option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg + ) values.remove(matched_value) elif not values and allow_no_value: # replace existing option with no value line(s) - newline = f'{option}\n' - (changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg) + newline = f"{option}\n" + (changed, msg) = update_section_line( + option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg + ) option_no_value_present = True break - if state == 'present' and exclusive and not allow_no_value: + if state == "present" and exclusive and not allow_no_value: # override option with no value to option with value if not allow_no_value if len(values) > 0: for index, line in enumerate(section_lines): if not changed_lines[index] and match_function(option, line): newline = assignment_format % (option, values.pop(0)) - (changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg) + (changed, msg) = update_section_line( + option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg + ) if len(values) == 0: break # remove all remaining option occurrences from the rest of the section @@ -463,9 +480,9 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, del section_lines[index] del changed_lines[index] changed = True - msg = 'option changed' + msg = "option changed" - if state == 'present': + if state == "present": # insert missing option line(s) at the end of the section for index in range(len(section_lines), 0, -1): # search backwards for previous non-blank or non-comment line @@ -478,41 +495,45 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, if element is not None: # insert option=value line section_lines.insert(index, assignment_format % (option, element)) - msg = 'option added' + msg = "option added" changed = True elif element is None and allow_no_value: # insert option with no value line - section_lines.insert(index, f'{option}\n') - msg = 'option added' + section_lines.insert(index, f"{option}\n") + msg = "option added" changed = True elif option and not values and allow_no_value and not option_no_value_present: # insert option with no value line(s) - section_lines.insert(index, f'{option}\n') - msg = 'option added' + section_lines.insert(index, f"{option}\n") + msg = "option added" changed = True break - if state == 'absent': + if state == "absent": if option: if exclusive: # delete all option 
line(s) with given option and ignore value new_section_lines = [line for line in section_lines if not (match_active_opt(option, line))] if section_lines != new_section_lines: changed = True - msg = 'option changed' + msg = "option changed" section_lines = new_section_lines elif not exclusive and len(values) > 0: # delete specified option=value line(s) - new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(8) in values)] + new_section_lines = [ + i + for i in section_lines + if not (match_active_opt(option, i) and match_active_opt(option, i).group(8) in values) + ] if section_lines != new_section_lines: changed = True - msg = 'option changed' + msg = "option changed" section_lines = new_section_lines else: # drop the entire section if section_lines: section_lines = [] - msg = 'section removed' + msg = "section removed" changed = True # reassemble the ini_lines after manipulation @@ -522,32 +543,32 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, del ini_lines[0] del ini_lines[-1:] - if not within_section and state == 'present': - ini_lines.append(f'[{section}]\n') - msg = 'section and option added' + if not within_section and state == "present": + ini_lines.append(f"[{section}]\n") + msg = "section and option added" if section_has_values: for condition in section_has_values: - if condition['option'] != option: - if len(condition['values']) > 0: - for value in condition['values']: - ini_lines.append(assignment_format % (condition['option'], value)) + if condition["option"] != option: + if len(condition["values"]) > 0: + for value in condition["values"]: + ini_lines.append(assignment_format % (condition["option"], value)) elif allow_no_value: ini_lines.append(f"{condition['option']}\n") elif not exclusive: - for value in condition['values']: + for value in condition["values"]: if value not in values: values.append(value) if option and values: for value in values: ini_lines.append(assignment_format % (option, value)) elif option and not values and allow_no_value: - ini_lines.append(f'{option}\n') + ini_lines.append(f"{option}\n") else: - msg = 'only section added' + msg = "only section added" changed = True if module._diff: - diff['after'] = ''.join(ini_lines) + diff["after"] = "".join(ini_lines) backup_file = None if changed and not module.check_mode: @@ -557,7 +578,7 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, encoded_ini_lines = [to_bytes(line) for line in ini_lines] try: tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir) - f = os.fdopen(tmpfd, 'wb') + f = os.fdopen(tmpfd, "wb") f.writelines(encoded_ini_lines) f.close() except IOError: @@ -566,59 +587,64 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None, try: module.atomic_move(tmpfile, os.path.abspath(target_filename)) except IOError: - module.ansible.fail_json(msg=f'Unable to move temporary file {tmpfile} to {target_filename}, IOError', traceback=traceback.format_exc()) + module.ansible.fail_json( + msg=f"Unable to move temporary file {tmpfile} to {target_filename}, IOError", + traceback=traceback.format_exc(), + ) return (changed, backup_file, diff, msg) def main(): - module = AnsibleModule( argument_spec=dict( - path=dict(type='path', required=True, aliases=['dest']), - section=dict(type='str'), - section_has_values=dict(type='list', elements='dict', options=dict( - option=dict(type='str', required=True), - value=dict(type='str'), - values=dict(type='list', 
elements='str') - ), mutually_exclusive=[['value', 'values']]), - option=dict(type='str'), - value=dict(type='str'), - values=dict(type='list', elements='str'), - backup=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['absent', 'present']), - exclusive=dict(type='bool', default=True), - no_extra_spaces=dict(type='bool', default=False), - ignore_spaces=dict(type='bool', default=False), - allow_no_value=dict(type='bool', default=False), - modify_inactive_option=dict(type='bool', default=True), - create=dict(type='bool', default=True), - follow=dict(type='bool', default=False) + path=dict(type="path", required=True, aliases=["dest"]), + section=dict(type="str"), + section_has_values=dict( + type="list", + elements="dict", + options=dict( + option=dict(type="str", required=True), + value=dict(type="str"), + values=dict(type="list", elements="str"), + ), + mutually_exclusive=[["value", "values"]], + ), + option=dict(type="str"), + value=dict(type="str"), + values=dict(type="list", elements="str"), + backup=dict(type="bool", default=False), + state=dict(type="str", default="present", choices=["absent", "present"]), + exclusive=dict(type="bool", default=True), + no_extra_spaces=dict(type="bool", default=False), + ignore_spaces=dict(type="bool", default=False), + allow_no_value=dict(type="bool", default=False), + modify_inactive_option=dict(type="bool", default=True), + create=dict(type="bool", default=True), + follow=dict(type="bool", default=False), ), - mutually_exclusive=[ - ['value', 'values'] - ], + mutually_exclusive=[["value", "values"]], add_file_common_args=True, supports_check_mode=True, ) - path = module.params['path'] - section = module.params['section'] - section_has_values = module.params['section_has_values'] - option = module.params['option'] - value = module.params['value'] - values = module.params['values'] - state = module.params['state'] - exclusive = module.params['exclusive'] - backup = module.params['backup'] - no_extra_spaces = module.params['no_extra_spaces'] - ignore_spaces = module.params['ignore_spaces'] - allow_no_value = module.params['allow_no_value'] - modify_inactive_option = module.params['modify_inactive_option'] - create = module.params['create'] - follow = module.params['follow'] - - if state == 'present' and not allow_no_value and value is None and not values: + path = module.params["path"] + section = module.params["section"] + section_has_values = module.params["section_has_values"] + option = module.params["option"] + value = module.params["value"] + values = module.params["values"] + state = module.params["state"] + exclusive = module.params["exclusive"] + backup = module.params["backup"] + no_extra_spaces = module.params["no_extra_spaces"] + ignore_spaces = module.params["ignore_spaces"] + allow_no_value = module.params["allow_no_value"] + modify_inactive_option = module.params["modify_inactive_option"] + create = module.params["create"] + follow = module.params["follow"] + + if state == "present" and not allow_no_value and value is None and not values: module.fail_json(msg="Parameter 'value(s)' must be defined if state=present and allow_no_value=False.") if value is not None: @@ -628,15 +654,29 @@ def main(): if section_has_values: for condition in section_has_values: - if condition['value'] is not None: - condition['values'] = [condition['value']] - elif condition['values'] is None: - condition['values'] = [] -# raise Exception("section_has_values: {}".format(section_has_values)) + if condition["value"] is not 
None: + condition["values"] = [condition["value"]] + elif condition["values"] is None: + condition["values"] = [] + # raise Exception("section_has_values: {}".format(section_has_values)) (changed, backup_file, diff, msg) = do_ini( - module, path, section, section_has_values, option, values, state, exclusive, backup, - no_extra_spaces, ignore_spaces, create, allow_no_value, modify_inactive_option, follow) + module, + path, + section, + section_has_values, + option, + values, + state, + exclusive, + backup, + no_extra_spaces, + ignore_spaces, + create, + allow_no_value, + modify_inactive_option, + follow, + ) if not module.check_mode and os.path.exists(path): file_args = module.load_file_common_arguments(module.params) @@ -649,11 +689,11 @@ def main(): path=path, ) if backup_file is not None: - results['backup_file'] = backup_file + results["backup_file"] = backup_file # Mission complete module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/installp.py b/plugins/modules/installp.py index 27f723f4b9f..75e7fb4b866 100644 --- a/plugins/modules/installp.py +++ b/plugins/modules/installp.py @@ -102,12 +102,12 @@ def _check_new_pkg(module, package, repository_path): """ if os.path.isdir(repository_path): - installp_cmd = module.get_bin_path('installp', True) + installp_cmd = module.get_bin_path("installp", True) rc, package_result, err = module.run_command([installp_cmd, "-l", "-MR", "-d", repository_path]) if rc != 0: module.fail_json(msg="Failed to run installp.", rc=rc, err=err) - if package == 'all': + if package == "all": pkg_info = "All packages on dir" return True, pkg_info @@ -138,12 +138,12 @@ def _check_installed_pkg(module, package, repository_path): :return: Bool, package data. """ - lslpp_cmd = module.get_bin_path('lslpp', True) + lslpp_cmd = module.get_bin_path("lslpp", True) rc, lslpp_result, err = module.run_command([lslpp_cmd, "-lcq", f"{package}*"]) if rc == 1: - package_state = ' '.join(err.split()[-2:]) - if package_state == 'not installed.': + package_state = " ".join(err.split()[-2:]) + if package_state == "not installed.": return False, None else: module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err) @@ -154,7 +154,7 @@ def _check_installed_pkg(module, package, repository_path): pkg_data = {} full_pkg_data = lslpp_result.splitlines() for line in full_pkg_data: - pkg_name, fileset, level = line.split(':')[0:3] + pkg_name, fileset, level = line.split(":")[0:3] pkg_data[pkg_name] = fileset, level return True, pkg_data @@ -199,7 +199,7 @@ def install(module, installp_cmd, packages, repository_path, accept_license): already_installed_pkgs = {} accept_license_param = { - True: ['-Y'], + True: ["-Y"], False: [], } @@ -228,7 +228,10 @@ def install(module, installp_cmd, packages, repository_path, accept_license): else: if not module.check_mode: rc, out, err = module.run_command( - [installp_cmd, "-a"] + accept_license_param[accept_license] + ["-X", "-d", repository_path, package]) + [installp_cmd, "-a"] + + accept_license_param[accept_license] + + ["-X", "-d", repository_path, package] + ) if rc != 0: module.fail_json(msg="Failed to run installp", rc=rc, err=err) installed_pkgs.append(package) @@ -239,17 +242,17 @@ def install(module, installp_cmd, packages, repository_path, accept_license): if len(installed_pkgs) > 0: installed_msg = f" Installed: {' '.join(installed_pkgs)}." else: - installed_msg = '' + installed_msg = "" if len(not_found_pkgs) > 0: not_found_msg = f" Not found: {' '.join(not_found_pkgs)}." 
else: - not_found_msg = '' + not_found_msg = "" if len(already_installed_pkgs) > 0: already_installed_msg = f" Already installed: {already_installed_pkgs}." else: - already_installed_msg = '' + already_installed_msg = "" if len(installed_pkgs) > 0: changed = True @@ -264,28 +267,28 @@ def install(module, installp_cmd, packages, repository_path, accept_license): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='list', elements='str', required=True, aliases=['pkg']), - repository_path=dict(type='path'), - accept_license=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['absent', 'present']), + name=dict(type="list", elements="str", required=True, aliases=["pkg"]), + repository_path=dict(type="path"), + accept_license=dict(type="bool", default=False), + state=dict(type="str", default="present", choices=["absent", "present"]), ), supports_check_mode=True, ) - name = module.params['name'] - repository_path = module.params['repository_path'] - accept_license = module.params['accept_license'] - state = module.params['state'] + name = module.params["name"] + repository_path = module.params["repository_path"] + accept_license = module.params["accept_license"] + state = module.params["state"] - installp_cmd = module.get_bin_path('installp', True) + installp_cmd = module.get_bin_path("installp", True) - if state == 'present': + if state == "present": if repository_path is None: module.fail_json(msg="repository_path is required to install package") changed, msg = install(module, installp_cmd, name, repository_path, accept_license) - elif state == 'absent': + elif state == "absent": changed, msg = remove(module, installp_cmd, name) else: @@ -294,5 +297,5 @@ def main(): module.exit_json(changed=changed, msg=msg) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/interfaces_file.py b/plugins/modules/interfaces_file.py index facc5b88215..2f3813c05af 100644 --- a/plugins/modules/interfaces_file.py +++ b/plugins/modules/interfaces_file.py @@ -157,25 +157,32 @@ def lineDict(line): - return {'line': line, 'line_type': 'unknown'} + return {"line": line, "line_type": "unknown"} def optionDict(line, iface, option, value, address_family): - return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family} + return { + "line": line, + "iface": iface, + "option": option, + "value": value, + "line_type": "option", + "address_family": address_family, + } def getValueFromLine(s): - spaceRe = re.compile(r'\s+') + spaceRe = re.compile(r"\s+") m = list(spaceRe.finditer(s))[-1] valueEnd = m.start() option = s.split()[0] optionStart = s.find(option) optionLen = len(option) - return s[optionLen + optionStart:].strip() + return s[optionLen + optionStart :].strip() def read_interfaces_file(module, filename): - with open(filename, 'r') as f: + with open(filename, "r") as f: return read_interfaces_lines(module, f) @@ -207,25 +214,28 @@ def read_interfaces_lines(module, line_strings): lines.append(lineDict(line)) currently_processing = "NONE" elif words[0] == "iface": - currif = { - "pre-up": [], - "up": [], - "down": [], - "post-up": [] - } + currif = {"pre-up": [], "up": [], "down": [], "post-up": []} iface_name = words[1] try: - currif['address_family'] = words[2] + currif["address_family"] = words[2] except IndexError: - currif['address_family'] = None - address_family = currif['address_family'] + currif["address_family"] = None + address_family = 
currif["address_family"] try: - currif['method'] = words[3] + currif["method"] = words[3] except IndexError: - currif['method'] = None + currif["method"] = None ifaces[iface_name] = currif - lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif, 'address_family': address_family}) + lines.append( + { + "line": line, + "iface": iface_name, + "line_type": "iface", + "params": currif, + "address_family": address_family, + } + ) currently_processing = "IFACE" elif words[0] == "auto": lines.append(lineDict(line)) @@ -259,19 +269,19 @@ def read_interfaces_lines(module, line_strings): def get_interface_options(iface_lines): - return [i for i in iface_lines if i['line_type'] == 'option'] + return [i for i in iface_lines if i["line_type"] == "option"] def get_target_options(iface_options, option): - return [i for i in iface_options if i['option'] == option] + return [i for i in iface_options if i["option"] == option] def update_existing_option_line(target_option, value): - old_line = target_option['line'] - old_value = target_option['value'] + old_line = target_option["line"] + old_value = target_option["value"] prefix_start = old_line.find(target_option["option"]) optionLen = len(target_option["option"]) - old_value_position = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + optionLen:]) + old_value_position = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + optionLen :]) start = old_value_position.start() + prefix_start + optionLen end = old_value_position.end() + prefix_start + optionLen line = old_line[:start] + value + old_line[end:] @@ -284,8 +294,9 @@ def set_interface_option(module, lines, iface, option, raw_value, state, address iface_lines = [item for item in lines if "iface" in item and item["iface"] == iface] if address_family is not None: - iface_lines = [item for item in iface_lines - if "address_family" in item and item["address_family"] == address_family] + iface_lines = [ + item for item in iface_lines if "address_family" in item and item["address_family"] == address_family + ] if len(iface_lines) < 1: # interface not found @@ -300,24 +311,28 @@ def set_interface_option(module, lines, iface, option, raw_value, state, address changed = True # add new option last_line_dict = iface_lines[-1] - changed, lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family) + changed, lines = addOptionAfterLine( + option, value, iface, lines, last_line_dict, iface_options, address_family + ) else: if option in ["pre-up", "up", "down", "post-up"]: - if len([i for i in target_options if i['value'] == value]) < 1: - changed, lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options, address_family) + if len([i for i in target_options if i["value"] == value]) < 1: + changed, lines = addOptionAfterLine( + option, value, iface, lines, target_options[-1], iface_options, address_family + ) else: # if more than one option found edit the last one - if target_options[-1]['value'] != value: + if target_options[-1]["value"] != value: changed = True target_option = target_options[-1] line = update_existing_option_line(target_option, value) - address_family = target_option['address_family'] + address_family = target_option["address_family"] index = len(lines) - lines[::-1].index(target_option) - 1 lines[index] = optionDict(line, iface, option, value, address_family) elif state == "absent": if len(target_options) >= 1: if option in ["pre-up", 
"up", "down", "post-up"] and value is not None and value != "None": - for target_option in [ito for ito in target_options if ito['value'] == value]: + for target_option in [ito for ito in target_options if ito["value"] == value]: changed = True lines = [ln for ln in lines if ln != target_option] else: @@ -332,18 +347,22 @@ def set_interface_option(module, lines, iface, option, raw_value, state, address def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family): # Changing method of interface is not an addition - if option == 'method': + if option == "method": changed = False for ln in lines: - if ln.get('line_type', '') == 'iface' and ln.get('iface', '') == iface and value != ln.get('params', {}).get('method', ''): - if address_family is not None and ln.get('address_family') != address_family: + if ( + ln.get("line_type", "") == "iface" + and ln.get("iface", "") == iface + and value != ln.get("params", {}).get("method", "") + ): + if address_family is not None and ln.get("address_family") != address_family: continue changed = True - ln['line'] = re.sub(f"{ln.get('params', {}).get('method', '')}$", value, ln.get('line')) - ln['params']['method'] = value + ln["line"] = re.sub(f"{ln.get('params', {}).get('method', '')}$", value, ln.get("line")) + ln["params"]["method"] = value return changed, lines - last_line = last_line_dict['line'] + last_line = last_line_dict["line"] prefix_start = last_line.find(last_line.split()[0]) suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1]) prefix = last_line[:prefix_start] @@ -360,38 +379,37 @@ def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_option def write_changes(module, lines, dest): - tmpfd, tmpfile = tempfile.mkstemp() - with os.fdopen(tmpfd, 'wb') as f: - f.write(to_bytes(''.join(lines), errors='surrogate_or_strict')) + with os.fdopen(tmpfd, "wb") as f: + f.write(to_bytes("".join(lines), errors="surrogate_or_strict")) module.atomic_move(tmpfile, os.path.realpath(dest)) def main(): module = AnsibleModule( argument_spec=dict( - dest=dict(type='path', default='/etc/network/interfaces'), - iface=dict(type='str'), - address_family=dict(type='str'), - option=dict(type='str'), - value=dict(type='str'), - backup=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['absent', 'present']), + dest=dict(type="path", default="/etc/network/interfaces"), + iface=dict(type="str"), + address_family=dict(type="str"), + option=dict(type="str"), + value=dict(type="str"), + backup=dict(type="bool", default=False), + state=dict(type="str", default="present", choices=["absent", "present"]), ), add_file_common_args=True, supports_check_mode=True, required_by=dict( - option=('iface',), + option=("iface",), ), ) - dest = module.params['dest'] - iface = module.params['iface'] - address_family = module.params['address_family'] - option = module.params['option'] - value = module.params['value'] - backup = module.params['backup'] - state = module.params['state'] + dest = module.params["dest"] + iface = module.params["iface"] + address_family = module.params["address_family"] + option = module.params["option"] + value = module.params["value"] + backup = module.params["backup"] + state = module.params["state"] if option is not None and state == "present" and value is None: module.fail_json(msg="Value must be set if option is defined and state is 'present'") @@ -404,15 +422,15 @@ def main(): changed, lines = set_interface_option(module, lines, iface, 
option, value, state, address_family) if changed: - dummy, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d]) + dummy, ifaces = read_interfaces_lines(module, [d["line"] for d in lines if "line" in d]) if changed and not module.check_mode: if backup: module.backup_local(dest) - write_changes(module, [d['line'] for d in lines if 'line' in d], dest) + write_changes(module, [d["line"] for d in lines if "line" in d], dest) module.exit_json(dest=dest, changed=changed, ifaces=ifaces) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ip_netns.py b/plugins/modules/ip_netns.py index 02e89fe3350..f5e4e0c6535 100644 --- a/plugins/modules/ip_netns.py +++ b/plugins/modules/ip_netns.py @@ -55,60 +55,60 @@ class Namespace: - """Interface to network namespaces. """ + """Interface to network namespaces.""" def __init__(self, module): self.module = module - self.name = module.params['name'] - self.state = module.params['state'] + self.name = module.params["name"] + self.state = module.params["state"] def _netns(self, command): - '''Run ip nents command''' - return self.module.run_command(['ip', 'netns'] + command) + """Run ip netns command""" + return self.module.run_command(["ip", "netns"] + command) def exists(self): - '''Check if the namespace already exists''' - rc, out, err = self.module.run_command(['ip', 'netns', 'list']) + """Check if the namespace already exists""" + rc, out, err = self.module.run_command(["ip", "netns", "list"]) if rc != 0: self.module.fail_json(msg=to_text(err)) return self.name in out def add(self): - '''Create network namespace''' - rtc, out, err = self._netns(['add', self.name]) + """Create network namespace""" + rtc, out, err = self._netns(["add", self.name]) if rtc != 0: self.module.fail_json(msg=err) def delete(self): - '''Delete network namespace''' - rtc, out, err = self._netns(['del', self.name]) + """Delete network namespace""" + rtc, out, err = self._netns(["del", self.name]) if rtc != 0: self.module.fail_json(msg=err) def check(self): - '''Run check mode''' + """Run check mode""" changed = False - if self.state == 'present' and self.exists(): + if self.state == "present" and self.exists(): changed = True - elif self.state == 'absent' and self.exists(): + elif self.state == "absent" and self.exists(): changed = True - elif self.state == 'present' and not self.exists(): + elif self.state == "present" and not self.exists(): changed = True self.module.exit_json(changed=changed) def run(self): - '''Make the necessary changes''' + """Make the necessary changes""" changed = False - if self.state == 'absent': + if self.state == "absent": if self.exists(): self.delete() changed = True - elif self.state == 'present': + elif self.state == "present": if not self.exists(): self.add() changed = True @@ -120,8 +120,8 @@ def main(): """Entry point.""" module = AnsibleModule( argument_spec={ - 'name': {'default': None}, - 'state': {'default': 'present', 'choices': ['present', 'absent']}, + "name": {"default": None}, + "state": {"default": "present", "choices": ["present", "absent"]}, }, supports_check_mode=True, ) @@ -133,5 +133,5 @@ def main(): network_namespace.run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_config.py b/plugins/modules/ipa_config.py index 33f3e1aec55..2386a81ab22 100644 --- a/plugins/modules/ipa_config.py +++ b/plugins/modules/ipa_config.py @@ -239,53 +239,63 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, 
port, protocol) def config_show(self): - return self._post_json(method='config_show', name=None) + return self._post_json(method="config_show", name=None) def config_mod(self, name, item): - return self._post_json(method='config_mod', name=name, item=item) - - -def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None, - ipadefaultemaildomain=None, ipadefaultprimarygroup=None, - ipagroupsearchfields=None, ipagroupobjectclasses=None, - ipahomesrootdir=None, ipakrbauthzdata=None, - ipamaxusernamelength=None, ipapwdexpadvnotify=None, - ipasearchrecordslimit=None, ipasearchtimelimit=None, - ipaselinuxusermaporder=None, ipauserauthtype=None, - ipausersearchfields=None, ipauserobjectclasses=None): + return self._post_json(method="config_mod", name=name, item=item) + + +def get_config_dict( + ipaconfigstring=None, + ipadefaultloginshell=None, + ipadefaultemaildomain=None, + ipadefaultprimarygroup=None, + ipagroupsearchfields=None, + ipagroupobjectclasses=None, + ipahomesrootdir=None, + ipakrbauthzdata=None, + ipamaxusernamelength=None, + ipapwdexpadvnotify=None, + ipasearchrecordslimit=None, + ipasearchtimelimit=None, + ipaselinuxusermaporder=None, + ipauserauthtype=None, + ipausersearchfields=None, + ipauserobjectclasses=None, +): config = {} if ipaconfigstring is not None: - config['ipaconfigstring'] = ipaconfigstring + config["ipaconfigstring"] = ipaconfigstring if ipadefaultloginshell is not None: - config['ipadefaultloginshell'] = ipadefaultloginshell + config["ipadefaultloginshell"] = ipadefaultloginshell if ipadefaultemaildomain is not None: - config['ipadefaultemaildomain'] = ipadefaultemaildomain + config["ipadefaultemaildomain"] = ipadefaultemaildomain if ipadefaultprimarygroup is not None: - config['ipadefaultprimarygroup'] = ipadefaultprimarygroup + config["ipadefaultprimarygroup"] = ipadefaultprimarygroup if ipagroupobjectclasses is not None: - config['ipagroupobjectclasses'] = ipagroupobjectclasses + config["ipagroupobjectclasses"] = ipagroupobjectclasses if ipagroupsearchfields is not None: - config['ipagroupsearchfields'] = ','.join(ipagroupsearchfields) + config["ipagroupsearchfields"] = ",".join(ipagroupsearchfields) if ipahomesrootdir is not None: - config['ipahomesrootdir'] = ipahomesrootdir + config["ipahomesrootdir"] = ipahomesrootdir if ipakrbauthzdata is not None: - config['ipakrbauthzdata'] = ipakrbauthzdata + config["ipakrbauthzdata"] = ipakrbauthzdata if ipamaxusernamelength is not None: - config['ipamaxusernamelength'] = str(ipamaxusernamelength) + config["ipamaxusernamelength"] = str(ipamaxusernamelength) if ipapwdexpadvnotify is not None: - config['ipapwdexpadvnotify'] = str(ipapwdexpadvnotify) + config["ipapwdexpadvnotify"] = str(ipapwdexpadvnotify) if ipasearchrecordslimit is not None: - config['ipasearchrecordslimit'] = str(ipasearchrecordslimit) + config["ipasearchrecordslimit"] = str(ipasearchrecordslimit) if ipasearchtimelimit is not None: - config['ipasearchtimelimit'] = str(ipasearchtimelimit) + config["ipasearchtimelimit"] = str(ipasearchtimelimit) if ipaselinuxusermaporder is not None: - config['ipaselinuxusermaporder'] = '$'.join(ipaselinuxusermaporder) + config["ipaselinuxusermaporder"] = "$".join(ipaselinuxusermaporder) if ipauserauthtype is not None: - config['ipauserauthtype'] = ipauserauthtype + config["ipauserauthtype"] = ipauserauthtype if ipauserobjectclasses is not None: - config['ipauserobjectclasses'] = ipauserobjectclasses + config["ipauserobjectclasses"] = ipauserobjectclasses if ipausersearchfields is not None: - 
config['ipausersearchfields'] = ','.join(ipausersearchfields) + config["ipausersearchfields"] = ",".join(ipausersearchfields) return config @@ -296,22 +306,22 @@ def get_config_diff(client, ipa_config, module_config): def ensure(module, client): module_config = get_config_dict( - ipaconfigstring=module.params.get('ipaconfigstring'), - ipadefaultloginshell=module.params.get('ipadefaultloginshell'), - ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'), - ipadefaultprimarygroup=module.params.get('ipadefaultprimarygroup'), - ipagroupobjectclasses=module.params.get('ipagroupobjectclasses'), - ipagroupsearchfields=module.params.get('ipagroupsearchfields'), - ipahomesrootdir=module.params.get('ipahomesrootdir'), - ipakrbauthzdata=module.params.get('ipakrbauthzdata'), - ipamaxusernamelength=module.params.get('ipamaxusernamelength'), - ipapwdexpadvnotify=module.params.get('ipapwdexpadvnotify'), - ipasearchrecordslimit=module.params.get('ipasearchrecordslimit'), - ipasearchtimelimit=module.params.get('ipasearchtimelimit'), - ipaselinuxusermaporder=module.params.get('ipaselinuxusermaporder'), - ipauserauthtype=module.params.get('ipauserauthtype'), - ipausersearchfields=module.params.get('ipausersearchfields'), - ipauserobjectclasses=module.params.get('ipauserobjectclasses'), + ipaconfigstring=module.params.get("ipaconfigstring"), + ipadefaultloginshell=module.params.get("ipadefaultloginshell"), + ipadefaultemaildomain=module.params.get("ipadefaultemaildomain"), + ipadefaultprimarygroup=module.params.get("ipadefaultprimarygroup"), + ipagroupobjectclasses=module.params.get("ipagroupobjectclasses"), + ipagroupsearchfields=module.params.get("ipagroupsearchfields"), + ipahomesrootdir=module.params.get("ipahomesrootdir"), + ipakrbauthzdata=module.params.get("ipakrbauthzdata"), + ipamaxusernamelength=module.params.get("ipamaxusernamelength"), + ipapwdexpadvnotify=module.params.get("ipapwdexpadvnotify"), + ipasearchrecordslimit=module.params.get("ipasearchrecordslimit"), + ipasearchtimelimit=module.params.get("ipasearchtimelimit"), + ipaselinuxusermaporder=module.params.get("ipaselinuxusermaporder"), + ipauserauthtype=module.params.get("ipauserauthtype"), + ipausersearchfields=module.params.get("ipausersearchfields"), + ipauserobjectclasses=module.params.get("ipauserobjectclasses"), ) ipa_config = client.config_show() diff = get_config_diff(client, ipa_config, module_config) @@ -332,61 +342,57 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() argument_spec.update( - ipaconfigstring=dict(type='list', elements='str', - choices=['AllowNThash', - 'KDC:Disable Last Success', - 'KDC:Disable Lockout', - 'KDC:Disable Default Preauth for SPNs'], - aliases=['configstring']), - ipadefaultloginshell=dict(type='str', aliases=['loginshell']), - ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']), - ipadefaultprimarygroup=dict(type='str', aliases=['primarygroup']), - ipagroupobjectclasses=dict(type='list', elements='str', - aliases=['groupobjectclasses']), - ipagroupsearchfields=dict(type='list', elements='str', - aliases=['groupsearchfields']), - ipahomesrootdir=dict(type='str', aliases=['homesrootdir']), - ipakrbauthzdata=dict(type='list', elements='str', - choices=['MS-PAC', 'PAD', 'nfs:NONE'], - aliases=['krbauthzdata']), - ipamaxusernamelength=dict(type='int', aliases=['maxusernamelength']), - ipapwdexpadvnotify=dict(type='int', aliases=['pwdexpadvnotify']), - ipasearchrecordslimit=dict(type='int', aliases=['searchrecordslimit']), - 
ipasearchtimelimit=dict(type='int', aliases=['searchtimelimit']), - ipaselinuxusermaporder=dict(type='list', elements='str', - aliases=['selinuxusermaporder']), - ipauserauthtype=dict(type='list', elements='str', - aliases=['userauthtype'], - choices=["password", "radius", "otp", "pkinit", - "hardened", "idp", "passkey", "disabled"]), - ipausersearchfields=dict(type='list', elements='str', - aliases=['usersearchfields']), - ipauserobjectclasses=dict(type='list', elements='str', - aliases=['userobjectclasses']), + ipaconfigstring=dict( + type="list", + elements="str", + choices=[ + "AllowNThash", + "KDC:Disable Last Success", + "KDC:Disable Lockout", + "KDC:Disable Default Preauth for SPNs", + ], + aliases=["configstring"], + ), + ipadefaultloginshell=dict(type="str", aliases=["loginshell"]), + ipadefaultemaildomain=dict(type="str", aliases=["emaildomain"]), + ipadefaultprimarygroup=dict(type="str", aliases=["primarygroup"]), + ipagroupobjectclasses=dict(type="list", elements="str", aliases=["groupobjectclasses"]), + ipagroupsearchfields=dict(type="list", elements="str", aliases=["groupsearchfields"]), + ipahomesrootdir=dict(type="str", aliases=["homesrootdir"]), + ipakrbauthzdata=dict( + type="list", elements="str", choices=["MS-PAC", "PAD", "nfs:NONE"], aliases=["krbauthzdata"] + ), + ipamaxusernamelength=dict(type="int", aliases=["maxusernamelength"]), + ipapwdexpadvnotify=dict(type="int", aliases=["pwdexpadvnotify"]), + ipasearchrecordslimit=dict(type="int", aliases=["searchrecordslimit"]), + ipasearchtimelimit=dict(type="int", aliases=["searchtimelimit"]), + ipaselinuxusermaporder=dict(type="list", elements="str", aliases=["selinuxusermaporder"]), + ipauserauthtype=dict( + type="list", + elements="str", + aliases=["userauthtype"], + choices=["password", "radius", "otp", "pkinit", "hardened", "idp", "passkey", "disabled"], + ), + ipausersearchfields=dict(type="list", elements="str", aliases=["usersearchfields"]), + ipauserobjectclasses=dict(type="list", elements="str", aliases=["userobjectclasses"]), ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) client = ConfigIPAClient( module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot'] + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], ) try: - client.login( - username=module.params['ipa_user'], - password=module.params['ipa_pass'] - ) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, user = ensure(module, client) module.exit_json(changed=changed, user=user) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_dnsrecord.py b/plugins/modules/ipa_dnsrecord.py index e57ba6415e6..a415ac625cf 100644 --- a/plugins/modules/ipa_dnsrecord.py +++ b/plugins/modules/ipa_dnsrecord.py @@ -207,83 +207,83 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def dnsrecord_find(self, zone_name, record_name): - if record_name == '@': - return self._post_json(method='dnsrecord_show', name=zone_name, item={'idnsname': record_name, 'all': True}) + if record_name == "@": + return self._post_json(method="dnsrecord_show", name=zone_name, item={"idnsname": record_name, "all": True}) else: - return 
self._post_json(method='dnsrecord_find', name=zone_name, item={'idnsname': record_name, 'all': True}) + return self._post_json(method="dnsrecord_find", name=zone_name, item={"idnsname": record_name, "all": True}) def dnsrecord_add(self, zone_name=None, record_name=None, details=None): item = dict(idnsname=record_name) - if details.get('record_ttl'): - item.update(dnsttl=details['record_ttl']) + if details.get("record_ttl"): + item.update(dnsttl=details["record_ttl"]) - for value in details['record_values']: - if details['record_type'] == 'A': + for value in details["record_values"]: + if details["record_type"] == "A": item.update(a_part_ip_address=value) - elif details['record_type'] == 'AAAA': + elif details["record_type"] == "AAAA": item.update(aaaa_part_ip_address=value) - elif details['record_type'] == 'A6': + elif details["record_type"] == "A6": item.update(a6_part_data=value) - elif details['record_type'] == 'CNAME': + elif details["record_type"] == "CNAME": item.update(cname_part_hostname=value) - elif details['record_type'] == 'DNAME': + elif details["record_type"] == "DNAME": item.update(dname_part_target=value) - elif details['record_type'] == 'NS': + elif details["record_type"] == "NS": item.update(ns_part_hostname=value) - elif details['record_type'] == 'PTR': + elif details["record_type"] == "PTR": item.update(ptr_part_hostname=value) - elif details['record_type'] == 'TXT': + elif details["record_type"] == "TXT": item.update(txtrecord=value) - elif details['record_type'] == 'SRV': + elif details["record_type"] == "SRV": item.update(srvrecord=value) - elif details['record_type'] == 'MX': + elif details["record_type"] == "MX": item.update(mxrecord=value) - elif details['record_type'] == 'SSHFP': + elif details["record_type"] == "SSHFP": item.update(sshfprecord=value) - self._post_json(method='dnsrecord_add', name=zone_name, item=item) + self._post_json(method="dnsrecord_add", name=zone_name, item=item) def dnsrecord_mod(self, zone_name=None, record_name=None, details=None): item = get_dnsrecord_dict(details) item.update(idnsname=record_name) - if details.get('record_ttl'): - item.update(dnsttl=details['record_ttl']) - return self._post_json(method='dnsrecord_mod', name=zone_name, item=item) + if details.get("record_ttl"): + item.update(dnsttl=details["record_ttl"]) + return self._post_json(method="dnsrecord_mod", name=zone_name, item=item) def dnsrecord_del(self, zone_name=None, record_name=None, details=None): item = get_dnsrecord_dict(details) item.update(idnsname=record_name) - return self._post_json(method='dnsrecord_del', name=zone_name, item=item) + return self._post_json(method="dnsrecord_del", name=zone_name, item=item) def get_dnsrecord_dict(details=None): module_dnsrecord = dict() - if details['record_type'] == 'A' and details['record_values']: - module_dnsrecord.update(arecord=details['record_values']) - elif details['record_type'] == 'AAAA' and details['record_values']: - module_dnsrecord.update(aaaarecord=details['record_values']) - elif details['record_type'] == 'A6' and details['record_values']: - module_dnsrecord.update(a6record=details['record_values']) - elif details['record_type'] == 'CNAME' and details['record_values']: - module_dnsrecord.update(cnamerecord=details['record_values']) - elif details['record_type'] == 'DNAME' and details['record_values']: - module_dnsrecord.update(dnamerecord=details['record_values']) - elif details['record_type'] == 'NS' and details['record_values']: - module_dnsrecord.update(nsrecord=details['record_values']) - elif 
details['record_type'] == 'PTR' and details['record_values']: - module_dnsrecord.update(ptrrecord=details['record_values']) - elif details['record_type'] == 'TXT' and details['record_values']: - module_dnsrecord.update(txtrecord=details['record_values']) - elif details['record_type'] == 'SRV' and details['record_values']: - module_dnsrecord.update(srvrecord=details['record_values']) - elif details['record_type'] == 'MX' and details['record_values']: - module_dnsrecord.update(mxrecord=details['record_values']) - elif details['record_type'] == 'SSHFP' and details['record_values']: - module_dnsrecord.update(sshfprecord=details['record_values']) - - if details.get('record_ttl'): - module_dnsrecord.update(dnsttl=details['record_ttl']) + if details["record_type"] == "A" and details["record_values"]: + module_dnsrecord.update(arecord=details["record_values"]) + elif details["record_type"] == "AAAA" and details["record_values"]: + module_dnsrecord.update(aaaarecord=details["record_values"]) + elif details["record_type"] == "A6" and details["record_values"]: + module_dnsrecord.update(a6record=details["record_values"]) + elif details["record_type"] == "CNAME" and details["record_values"]: + module_dnsrecord.update(cnamerecord=details["record_values"]) + elif details["record_type"] == "DNAME" and details["record_values"]: + module_dnsrecord.update(dnamerecord=details["record_values"]) + elif details["record_type"] == "NS" and details["record_values"]: + module_dnsrecord.update(nsrecord=details["record_values"]) + elif details["record_type"] == "PTR" and details["record_values"]: + module_dnsrecord.update(ptrrecord=details["record_values"]) + elif details["record_type"] == "TXT" and details["record_values"]: + module_dnsrecord.update(txtrecord=details["record_values"]) + elif details["record_type"] == "SRV" and details["record_values"]: + module_dnsrecord.update(srvrecord=details["record_values"]) + elif details["record_type"] == "MX" and details["record_values"]: + module_dnsrecord.update(mxrecord=details["record_values"]) + elif details["record_type"] == "SSHFP" and details["record_values"]: + module_dnsrecord.update(sshfprecord=details["record_values"]) + + if details.get("record_ttl"): + module_dnsrecord.update(dnsttl=details["record_ttl"]) return module_dnsrecord @@ -294,91 +294,82 @@ def get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord): def ensure(module, client): - zone_name = module.params['zone_name'] - record_name = module.params['record_name'] - record_ttl = module.params.get('record_ttl') - state = module.params['state'] + zone_name = module.params["zone_name"] + record_name = module.params["record_name"] + record_ttl = module.params.get("record_ttl") + state = module.params["state"] ipa_dnsrecord = client.dnsrecord_find(zone_name, record_name) - record_values = module.params['record_values'] - if module.params['record_value'] is not None: - record_values = [module.params['record_value']] + record_values = module.params["record_values"] + if module.params["record_value"] is not None: + record_values = [module.params["record_value"]] module_dnsrecord = dict( - record_type=module.params['record_type'], + record_type=module.params["record_type"], record_values=record_values, - record_ttl=to_native(record_ttl, nonstring='passthru'), + record_ttl=to_native(record_ttl, nonstring="passthru"), ) # ttl is not required to change records - if module_dnsrecord['record_ttl'] is None: - module_dnsrecord.pop('record_ttl') + if module_dnsrecord["record_ttl"] is None: + 
module_dnsrecord.pop("record_ttl") changed = False - if state == 'present': + if state == "present": if not ipa_dnsrecord: changed = True if not module.check_mode: - client.dnsrecord_add(zone_name=zone_name, - record_name=record_name, - details=module_dnsrecord) + client.dnsrecord_add(zone_name=zone_name, record_name=record_name, details=module_dnsrecord) else: diff = get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord) if len(diff) > 0: changed = True if not module.check_mode: - client.dnsrecord_mod(zone_name=zone_name, - record_name=record_name, - details=module_dnsrecord) + client.dnsrecord_mod(zone_name=zone_name, record_name=record_name, details=module_dnsrecord) else: if ipa_dnsrecord: changed = True if not module.check_mode: - client.dnsrecord_del(zone_name=zone_name, - record_name=record_name, - details=module_dnsrecord) + client.dnsrecord_del(zone_name=zone_name, record_name=record_name, details=module_dnsrecord) return changed, client.dnsrecord_find(zone_name, record_name) def main(): - record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'NS', 'PTR', 'TXT', 'SRV', 'MX', 'SSHFP'] + record_types = ["A", "AAAA", "A6", "CNAME", "DNAME", "NS", "PTR", "TXT", "SRV", "MX", "SSHFP"] argument_spec = ipa_argument_spec() argument_spec.update( - zone_name=dict(type='str', required=True), - record_name=dict(type='str', aliases=['name'], required=True), - record_type=dict(type='str', default='A', choices=record_types), - record_value=dict(type='str'), - record_values=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent']), - record_ttl=dict(type='int'), + zone_name=dict(type="str", required=True), + record_name=dict(type="str", aliases=["name"], required=True), + record_type=dict(type="str", default="A", choices=record_types), + record_value=dict(type="str"), + record_values=dict(type="list", elements="str"), + state=dict(type="str", default="present", choices=["present", "absent"]), + record_ttl=dict(type="int"), ) module = AnsibleModule( argument_spec=argument_spec, - mutually_exclusive=[['record_value', 'record_values']], - required_one_of=[['record_value', 'record_values']], - supports_check_mode=True + mutually_exclusive=[["record_value", "record_values"]], + required_one_of=[["record_value", "record_values"]], + supports_check_mode=True, ) client = DNSRecordIPAClient( module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot'] + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], ) try: - client.login( - username=module.params['ipa_user'], - password=module.params['ipa_pass'] - ) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, record = ensure(module, client) module.exit_json(changed=changed, record=record) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_dnszone.py b/plugins/modules/ipa_dnszone.py index daa37202774..edbc5505ec2 100644 --- a/plugins/modules/ipa_dnszone.py +++ b/plugins/modules/ipa_dnszone.py @@ -94,66 +94,61 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def dnszone_find(self, zone_name, details=None): - items = {'all': 'true', - 'idnsname': zone_name, } + items = { + "all": "true", + "idnsname": zone_name, + } if details is not None: items.update(details) - return 
self._post_json( - method='dnszone_find', - name=zone_name, - item=items - ) + return self._post_json(method="dnszone_find", name=zone_name, item=items) def dnszone_add(self, zone_name=None, details=None): items = {} if details is not None: items.update(details) - return self._post_json( - method='dnszone_add', - name=zone_name, - item=items - ) + return self._post_json(method="dnszone_add", name=zone_name, item=items) def dnszone_mod(self, zone_name=None, details=None): items = {} if details is not None: items.update(details) - return self._post_json( - method='dnszone_mod', - name=zone_name, - item=items - ) + return self._post_json(method="dnszone_mod", name=zone_name, item=items) def dnszone_del(self, zone_name=None, record_name=None, details=None): - return self._post_json( - method='dnszone_del', name=zone_name, item={}) + return self._post_json(method="dnszone_del", name=zone_name, item={}) def ensure(module, client): - zone_name = module.params['zone_name'] - state = module.params['state'] - dynamicupdate = module.params['dynamicupdate'] - allowsyncptr = module.params['allowsyncptr'] + zone_name = module.params["zone_name"] + state = module.params["state"] + dynamicupdate = module.params["dynamicupdate"] + allowsyncptr = module.params["allowsyncptr"] changed = False # does zone exist ipa_dnszone = client.dnszone_find(zone_name) - if state == 'present': + if state == "present": if not ipa_dnszone: - changed = True if not module.check_mode: - client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr}) - elif ipa_dnszone['idnsallowdynupdate'][0] != str(dynamicupdate).upper() or \ - ipa_dnszone.get('idnsallowsyncptr') and ipa_dnszone['idnsallowsyncptr'][0] != str(allowsyncptr).upper(): + client.dnszone_add( + zone_name=zone_name, details={"idnsallowdynupdate": dynamicupdate, "idnsallowsyncptr": allowsyncptr} + ) + elif ( + ipa_dnszone["idnsallowdynupdate"][0] != str(dynamicupdate).upper() + or ipa_dnszone.get("idnsallowsyncptr") + and ipa_dnszone["idnsallowsyncptr"][0] != str(allowsyncptr).upper() + ): changed = True if not module.check_mode: - client.dnszone_mod(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr}) + client.dnszone_mod( + zone_name=zone_name, details={"idnsallowdynupdate": dynamicupdate, "idnsallowsyncptr": allowsyncptr} + ) else: changed = False @@ -170,33 +165,32 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() - argument_spec.update(zone_name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - dynamicupdate=dict(type='bool', default=False), - allowsyncptr=dict(type='bool', default=False), - ) + argument_spec.update( + zone_name=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["present", "absent"]), + dynamicupdate=dict(type="bool", default=False), + allowsyncptr=dict(type="bool", default=False), + ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) client = DNSZoneIPAClient( module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot'] + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], ) try: - client.login( - username=module.params['ipa_user'], - password=module.params['ipa_pass'] - ) + 
client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, zone = ensure(module, client) module.exit_json(changed=changed, zone=zone) except Exception as e: module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_getkeytab.py b/plugins/modules/ipa_getkeytab.py index f88e35d0f79..afc6b3960df 100644 --- a/plugins/modules/ipa_getkeytab.py +++ b/plugins/modules/ipa_getkeytab.py @@ -130,41 +130,41 @@ class IPAKeytab: def __init__(self, module, **kwargs): self.module = module - self.path = kwargs['path'] - self.state = kwargs['state'] - self.principal = kwargs['principal'] - self.ipa_host = kwargs['ipa_host'] - self.ldap_uri = kwargs['ldap_uri'] - self.bind_dn = kwargs['bind_dn'] - self.bind_pw = kwargs['bind_pw'] - self.password = kwargs['password'] - self.ca_cert = kwargs['ca_cert'] - self.sasl_mech = kwargs['sasl_mech'] - self.retrieve_mode = kwargs['retrieve_mode'] - self.encryption_types = kwargs['encryption_types'] + self.path = kwargs["path"] + self.state = kwargs["state"] + self.principal = kwargs["principal"] + self.ipa_host = kwargs["ipa_host"] + self.ldap_uri = kwargs["ldap_uri"] + self.bind_dn = kwargs["bind_dn"] + self.bind_pw = kwargs["bind_pw"] + self.password = kwargs["password"] + self.ca_cert = kwargs["ca_cert"] + self.sasl_mech = kwargs["sasl_mech"] + self.retrieve_mode = kwargs["retrieve_mode"] + self.encryption_types = kwargs["encryption_types"] self.runner = CmdRunner( module, - command='ipa-getkeytab', + command="ipa-getkeytab", arg_formats=dict( - retrieve_mode=cmd_runner_fmt.as_bool('--retrieve'), - path=cmd_runner_fmt.as_opt_val('--keytab'), - ipa_host=cmd_runner_fmt.as_opt_val('--server'), - principal=cmd_runner_fmt.as_opt_val('--principal'), - ldap_uri=cmd_runner_fmt.as_opt_val('--ldapuri'), - bind_dn=cmd_runner_fmt.as_opt_val('--binddn'), - bind_pw=cmd_runner_fmt.as_opt_val('--bindpw'), - password=cmd_runner_fmt.as_opt_val('--password'), - ca_cert=cmd_runner_fmt.as_opt_val('--cacert'), - sasl_mech=cmd_runner_fmt.as_opt_val('--mech'), - encryption_types=cmd_runner_fmt.as_opt_val('--enctypes'), - ) + retrieve_mode=cmd_runner_fmt.as_bool("--retrieve"), + path=cmd_runner_fmt.as_opt_val("--keytab"), + ipa_host=cmd_runner_fmt.as_opt_val("--server"), + principal=cmd_runner_fmt.as_opt_val("--principal"), + ldap_uri=cmd_runner_fmt.as_opt_val("--ldapuri"), + bind_dn=cmd_runner_fmt.as_opt_val("--binddn"), + bind_pw=cmd_runner_fmt.as_opt_val("--bindpw"), + password=cmd_runner_fmt.as_opt_val("--password"), + ca_cert=cmd_runner_fmt.as_opt_val("--cacert"), + sasl_mech=cmd_runner_fmt.as_opt_val("--mech"), + encryption_types=cmd_runner_fmt.as_opt_val("--enctypes"), + ), ) def _exec(self, check_rc=True): with self.runner( "retrieve_mode path ipa_host principal ldap_uri bind_dn bind_pw password ca_cert sasl_mech encryption_types", - check_rc=check_rc + check_rc=check_rc, ) as ctx: rc, out, err = ctx.run() return out @@ -172,47 +172,48 @@ def _exec(self, check_rc=True): def main(): arg_spec = dict( - path=dict(type='path', required=True, aliases=["keytab"]), - state=dict(default='present', choices=['present', 'absent']), - principal=dict(type='str', required=True), - ipa_host=dict(type='str'), - ldap_uri=dict(type='str'), - bind_dn=dict(type='str'), - bind_pw=dict(type='str'), - password=dict(type='str', no_log=True), - ca_cert=dict(type='path'), - sasl_mech=dict(type='str', choices=["GSSAPI", "EXTERNAL"]), - retrieve_mode=dict(type='bool'), - encryption_types=dict(type='str'), 
- force=dict(type='bool'), + path=dict(type="path", required=True, aliases=["keytab"]), + state=dict(default="present", choices=["present", "absent"]), + principal=dict(type="str", required=True), + ipa_host=dict(type="str"), + ldap_uri=dict(type="str"), + bind_dn=dict(type="str"), + bind_pw=dict(type="str"), + password=dict(type="str", no_log=True), + ca_cert=dict(type="path"), + sasl_mech=dict(type="str", choices=["GSSAPI", "EXTERNAL"]), + retrieve_mode=dict(type="bool"), + encryption_types=dict(type="str"), + force=dict(type="bool"), ) module = AnsibleModule( argument_spec=arg_spec, - mutually_exclusive=[('ipa_host', 'ldap_uri'), ('retrieve_mode', 'password')], + mutually_exclusive=[("ipa_host", "ldap_uri"), ("retrieve_mode", "password")], supports_check_mode=True, ) - path = module.params['path'] - state = module.params['state'] - force = module.params['force'] - - keytab = IPAKeytab(module, - path=path, - state=state, - principal=module.params['principal'], - ipa_host=module.params['ipa_host'], - ldap_uri=module.params['ldap_uri'], - bind_dn=module.params['bind_dn'], - bind_pw=module.params['bind_pw'], - password=module.params['password'], - ca_cert=module.params['ca_cert'], - sasl_mech=module.params['sasl_mech'], - retrieve_mode=module.params['retrieve_mode'], - encryption_types=module.params['encryption_types'], - ) + path = module.params["path"] + state = module.params["state"] + force = module.params["force"] + + keytab = IPAKeytab( + module, + path=path, + state=state, + principal=module.params["principal"], + ipa_host=module.params["ipa_host"], + ldap_uri=module.params["ldap_uri"], + bind_dn=module.params["bind_dn"], + bind_pw=module.params["bind_pw"], + password=module.params["password"], + ca_cert=module.params["ca_cert"], + sasl_mech=module.params["sasl_mech"], + retrieve_mode=module.params["retrieve_mode"], + encryption_types=module.params["encryption_types"], + ) changed = False - if state == 'present': + if state == "present": if os.path.exists(path): if force and not module.check_mode: try: @@ -227,7 +228,7 @@ def main(): changed = True keytab._exec() - if state == 'absent': + if state == "absent": if os.path.exists(path): changed = True if not module.check_mode: @@ -239,5 +240,5 @@ def main(): module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_group.py b/plugins/modules/ipa_group.py index df3b257cf0c..f5759ff0f39 100644 --- a/plugins/modules/ipa_group.py +++ b/plugins/modules/ipa_group.py @@ -180,91 +180,93 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def group_find(self, name): - return self._post_json(method='group_find', name=None, item={'all': True, 'cn': name}) + return self._post_json(method="group_find", name=None, item={"all": True, "cn": name}) def group_add(self, name, item): - return self._post_json(method='group_add', name=name, item=item) + return self._post_json(method="group_add", name=name, item=item) def group_mod(self, name, item): - return self._post_json(method='group_mod', name=name, item=item) + return self._post_json(method="group_mod", name=name, item=item) def group_del(self, name): - return self._post_json(method='group_del', name=name) + return self._post_json(method="group_del", name=name) def group_add_member(self, name, item): - return self._post_json(method='group_add_member', name=name, item=item) + return self._post_json(method="group_add_member", name=name, item=item) def group_add_member_group(self, name, 
item):
-        return self.group_add_member(name=name, item={'group': item})
+        return self.group_add_member(name=name, item={"group": item})
 
     def group_add_member_user(self, name, item):
-        return self.group_add_member(name=name, item={'user': item})
+        return self.group_add_member(name=name, item={"user": item})
 
     def group_add_member_externaluser(self, name, item):
-        return self.group_add_member(name=name, item={'ipaexternalmember': item})
+        return self.group_add_member(name=name, item={"ipaexternalmember": item})
 
     def group_remove_member(self, name, item):
-        return self._post_json(method='group_remove_member', name=name, item=item)
+        return self._post_json(method="group_remove_member", name=name, item=item)
 
     def group_remove_member_group(self, name, item):
-        return self.group_remove_member(name=name, item={'group': item})
+        return self.group_remove_member(name=name, item={"group": item})
 
     def group_remove_member_user(self, name, item):
-        return self.group_remove_member(name=name, item={'user': item})
+        return self.group_remove_member(name=name, item={"user": item})
 
     def group_remove_member_externaluser(self, name, item):
-        return self.group_remove_member(name=name, item={'ipaexternalmember': item})
+        return self.group_remove_member(name=name, item={"ipaexternalmember": item})
 
 
 def get_group_dict(description=None, external=None, gid=None, nonposix=None):
     group = {}
     if description is not None:
-        group['description'] = description
+        group["description"] = description
     if external is not None:
-        group['external'] = external
+        group["external"] = external
     if gid is not None:
-        group['gidnumber'] = gid
+        group["gidnumber"] = gid
     if nonposix is not None:
-        group['nonposix'] = nonposix
+        group["nonposix"] = nonposix
     return group
 
 
 def get_group_diff(client, ipa_group, module_group):
     data = []
     # With group_add attribute nonposix is passed, whereas with group_mod only posix can be passed.
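A minimal sketch of the translation in the branch below, with made-up group data (editor's illustration, not part of the commit): a module request of nonposix=False against an existing non-posix group is rewritten into the 'posix' flag that group_mod understands, since only non-posix groups can be converted.

    # Hypothetical inputs mirroring the module's own branch logic.
    module_group = {"nonposix": False}
    ipa_group = {"nonposix": True, "objectclass": ["ipausergroup"]}
    if not module_group["nonposix"] and ipa_group.get("nonposix"):
        # group_mod accepts 'posix', not 'nonposix', so swap the key.
        module_group["posix"] = True
        del module_group["nonposix"]
    print(module_group)  # {'posix': True}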
- if 'nonposix' in module_group: + if "nonposix" in module_group: # Only non-posix groups can be changed to posix - if not module_group['nonposix'] and ipa_group.get('nonposix'): - module_group['posix'] = True - del module_group['nonposix'] + if not module_group["nonposix"] and ipa_group.get("nonposix"): + module_group["posix"] = True + del module_group["nonposix"] - if 'external' in module_group: - if module_group['external'] and 'ipaexternalgroup' in ipa_group.get('objectclass'): - del module_group['external'] + if "external" in module_group: + if module_group["external"] and "ipaexternalgroup" in ipa_group.get("objectclass"): + del module_group["external"] return client.get_diff(ipa_data=ipa_group, module_data=module_group) def ensure(module, client): - state = module.params['state'] - name = module.params['cn'] - group = module.params['group'] - user = module.params['user'] - external = module.params['external'] - external_user = module.params['external_user'] - append = module.params['append'] - - module_group = get_group_dict(description=module.params['description'], - external=external, - gid=module.params['gidnumber'], - nonposix=module.params['nonposix']) + state = module.params["state"] + name = module.params["cn"] + group = module.params["group"] + user = module.params["user"] + external = module.params["external"] + external_user = module.params["external_user"] + append = module.params["append"] + + module_group = get_group_dict( + description=module.params["description"], + external=external, + gid=module.params["gidnumber"], + nonposix=module.params["nonposix"], + ) ipa_group = client.group_find(name=name) if not (external or external_user is None): module.fail_json("external_user can only be set if external = True") changed = False - if state == 'present': + if state == "present": if not ipa_group: changed = True if not module.check_mode: @@ -280,22 +282,43 @@ def ensure(module, client): client.group_mod(name=name, item=data) if group is not None: - changed = client.modify_if_diff(name, ipa_group.get('member_group', []), group, - client.group_add_member_group, - client.group_remove_member_group, - append=append) or changed + changed = ( + client.modify_if_diff( + name, + ipa_group.get("member_group", []), + group, + client.group_add_member_group, + client.group_remove_member_group, + append=append, + ) + or changed + ) if user is not None: - changed = client.modify_if_diff(name, ipa_group.get('member_user', []), user, - client.group_add_member_user, - client.group_remove_member_user, - append=append) or changed + changed = ( + client.modify_if_diff( + name, + ipa_group.get("member_user", []), + user, + client.group_add_member_user, + client.group_remove_member_user, + append=append, + ) + or changed + ) if external_user is not None: - changed = client.modify_if_diff(name, ipa_group.get('ipaexternalmember', []), external_user, - client.group_add_member_externaluser, - client.group_remove_member_externaluser, - append=append) or changed + changed = ( + client.modify_if_diff( + name, + ipa_group.get("ipaexternalmember", []), + external_user, + client.group_add_member_externaluser, + client.group_remove_member_externaluser, + append=append, + ) + or changed + ) else: if ipa_group: changed = True @@ -307,33 +330,37 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - external=dict(type='bool'), - external_user=dict(type='list', elements='str'), 
- gidnumber=dict(type='str', aliases=['gid']), - group=dict(type='list', elements='str'), - nonposix=dict(type='bool'), - state=dict(type='str', default='present', choices=['present', 'absent']), - user=dict(type='list', elements='str'), - append=dict(type='bool', default=False)) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - ) - - client = GroupIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) + argument_spec.update( + cn=dict(type="str", required=True, aliases=["name"]), + description=dict(type="str"), + external=dict(type="bool"), + external_user=dict(type="list", elements="str"), + gidnumber=dict(type="str", aliases=["gid"]), + group=dict(type="list", elements="str"), + nonposix=dict(type="bool"), + state=dict(type="str", default="present", choices=["present", "absent"]), + user=dict(type="list", elements="str"), + append=dict(type="bool", default=False), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + client = GroupIPAClient( + module=module, + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], + ) try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, group = ensure(module, client) module.exit_json(changed=changed, group=group) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_hbacrule.py b/plugins/modules/ipa_hbacrule.py index 56ba837b224..3bf0877ffb4 100644 --- a/plugins/modules/ipa_hbacrule.py +++ b/plugins/modules/ipa_hbacrule.py @@ -166,58 +166,63 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def hbacrule_find(self, name): - return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name}) + return self._post_json(method="hbacrule_find", name=None, item={"all": True, "cn": name}) def hbacrule_add(self, name, item): - return self._post_json(method='hbacrule_add', name=name, item=item) + return self._post_json(method="hbacrule_add", name=name, item=item) def hbacrule_mod(self, name, item): - return self._post_json(method='hbacrule_mod', name=name, item=item) + return self._post_json(method="hbacrule_mod", name=name, item=item) def hbacrule_del(self, name): - return self._post_json(method='hbacrule_del', name=name) + return self._post_json(method="hbacrule_del", name=name) def hbacrule_add_host(self, name, item): - return self._post_json(method='hbacrule_add_host', name=name, item=item) + return self._post_json(method="hbacrule_add_host", name=name, item=item) def hbacrule_remove_host(self, name, item): - return self._post_json(method='hbacrule_remove_host', name=name, item=item) + return self._post_json(method="hbacrule_remove_host", name=name, item=item) def hbacrule_add_service(self, name, item): - return self._post_json(method='hbacrule_add_service', name=name, item=item) + return self._post_json(method="hbacrule_add_service", name=name, item=item) def hbacrule_remove_service(self, name, item): - return self._post_json(method='hbacrule_remove_service', name=name, item=item) + return self._post_json(method="hbacrule_remove_service", name=name, item=item) def hbacrule_add_user(self, name, item): - return 
self._post_json(method='hbacrule_add_user', name=name, item=item) + return self._post_json(method="hbacrule_add_user", name=name, item=item) def hbacrule_remove_user(self, name, item): - return self._post_json(method='hbacrule_remove_user', name=name, item=item) + return self._post_json(method="hbacrule_remove_user", name=name, item=item) def hbacrule_add_sourcehost(self, name, item): - return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item) + return self._post_json(method="hbacrule_add_sourcehost", name=name, item=item) def hbacrule_remove_sourcehost(self, name, item): - return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item) + return self._post_json(method="hbacrule_remove_sourcehost", name=name, item=item) -def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None, - sourcehostcategory=None, - usercategory=None): +def get_hbacrule_dict( + description=None, + hostcategory=None, + ipaenabledflag=None, + servicecategory=None, + sourcehostcategory=None, + usercategory=None, +): data = {} if description is not None: - data['description'] = description + data["description"] = description if hostcategory is not None: - data['hostcategory'] = hostcategory + data["hostcategory"] = hostcategory if ipaenabledflag is not None: - data['ipaenabledflag'] = ipaenabledflag + data["ipaenabledflag"] = ipaenabledflag if servicecategory is not None: - data['servicecategory'] = servicecategory + data["servicecategory"] = servicecategory if sourcehostcategory is not None: - data['sourcehostcategory'] = sourcehostcategory + data["sourcehostcategory"] = sourcehostcategory if usercategory is not None: - data['usercategory'] = usercategory + data["usercategory"] = usercategory return data @@ -226,44 +231,46 @@ def get_hbcarule_diff(client, ipa_hbcarule, module_hbcarule): def ensure(module, client): - name = module.params['cn'] - state = module.params['state'] + name = module.params["cn"] + state = module.params["state"] ipa_version = client.get_ipa_version() - if state in ['present', 'enabled']: - if LooseVersion(ipa_version) < LooseVersion('4.9.10'): - ipaenabledflag = 'TRUE' + if state in ["present", "enabled"]: + if LooseVersion(ipa_version) < LooseVersion("4.9.10"): + ipaenabledflag = "TRUE" else: ipaenabledflag = True else: - if LooseVersion(ipa_version) < LooseVersion('4.9.10'): - ipaenabledflag = 'FALSE' + if LooseVersion(ipa_version) < LooseVersion("4.9.10"): + ipaenabledflag = "FALSE" else: ipaenabledflag = False - host = module.params['host'] - hostcategory = module.params['hostcategory'] - hostgroup = module.params['hostgroup'] - service = module.params['service'] - servicecategory = module.params['servicecategory'] - servicegroup = module.params['servicegroup'] - sourcehost = module.params['sourcehost'] - sourcehostcategory = module.params['sourcehostcategory'] - sourcehostgroup = module.params['sourcehostgroup'] - user = module.params['user'] - usercategory = module.params['usercategory'] - usergroup = module.params['usergroup'] - - module_hbacrule = get_hbacrule_dict(description=module.params['description'], - hostcategory=hostcategory, - ipaenabledflag=ipaenabledflag, - servicecategory=servicecategory, - sourcehostcategory=sourcehostcategory, - usercategory=usercategory) + host = module.params["host"] + hostcategory = module.params["hostcategory"] + hostgroup = module.params["hostgroup"] + service = module.params["service"] + servicecategory = module.params["servicecategory"] + servicegroup = 
module.params["servicegroup"] + sourcehost = module.params["sourcehost"] + sourcehostcategory = module.params["sourcehostcategory"] + sourcehostgroup = module.params["sourcehostgroup"] + user = module.params["user"] + usercategory = module.params["usercategory"] + usergroup = module.params["usergroup"] + + module_hbacrule = get_hbacrule_dict( + description=module.params["description"], + hostcategory=hostcategory, + ipaenabledflag=ipaenabledflag, + servicecategory=servicecategory, + sourcehostcategory=sourcehostcategory, + usercategory=usercategory, + ) ipa_hbacrule = client.hbacrule_find(name=name) changed = False - if state in ['present', 'enabled', 'disabled']: + if state in ["present", "enabled", "disabled"]: if not ipa_hbacrule: changed = True if not module.check_mode: @@ -279,45 +286,108 @@ def ensure(module, client): client.hbacrule_mod(name=name, item=data) if host is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_host', []), host, - client.hbacrule_add_host, - client.hbacrule_remove_host, 'host') or changed + changed = ( + client.modify_if_diff( + name, + ipa_hbacrule.get("memberhost_host", []), + host, + client.hbacrule_add_host, + client.hbacrule_remove_host, + "host", + ) + or changed + ) if hostgroup is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup, - client.hbacrule_add_host, - client.hbacrule_remove_host, 'hostgroup') or changed + changed = ( + client.modify_if_diff( + name, + ipa_hbacrule.get("memberhost_hostgroup", []), + hostgroup, + client.hbacrule_add_host, + client.hbacrule_remove_host, + "hostgroup", + ) + or changed + ) if service is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvc', []), service, - client.hbacrule_add_service, - client.hbacrule_remove_service, 'hbacsvc') or changed + changed = ( + client.modify_if_diff( + name, + ipa_hbacrule.get("memberservice_hbacsvc", []), + service, + client.hbacrule_add_service, + client.hbacrule_remove_service, + "hbacsvc", + ) + or changed + ) if servicegroup is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []), - servicegroup, - client.hbacrule_add_service, - client.hbacrule_remove_service, 'hbacsvcgroup') or changed + changed = ( + client.modify_if_diff( + name, + ipa_hbacrule.get("memberservice_hbacsvcgroup", []), + servicegroup, + client.hbacrule_add_service, + client.hbacrule_remove_service, + "hbacsvcgroup", + ) + or changed + ) if sourcehost is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_host', []), sourcehost, - client.hbacrule_add_sourcehost, - client.hbacrule_remove_sourcehost, 'host') or changed + changed = ( + client.modify_if_diff( + name, + ipa_hbacrule.get("sourcehost_host", []), + sourcehost, + client.hbacrule_add_sourcehost, + client.hbacrule_remove_sourcehost, + "host", + ) + or changed + ) if sourcehostgroup is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup, - client.hbacrule_add_sourcehost, - client.hbacrule_remove_sourcehost, 'hostgroup') or changed + changed = ( + client.modify_if_diff( + name, + ipa_hbacrule.get("sourcehost_group", []), + sourcehostgroup, + client.hbacrule_add_sourcehost, + client.hbacrule_remove_sourcehost, + "hostgroup", + ) + or changed + ) if user is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_user', []), user, - client.hbacrule_add_user, - client.hbacrule_remove_user, 
'user') or changed + changed = ( + client.modify_if_diff( + name, + ipa_hbacrule.get("memberuser_user", []), + user, + client.hbacrule_add_user, + client.hbacrule_remove_user, + "user", + ) + or changed + ) if usergroup is not None: - changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_group', []), usergroup, - client.hbacrule_add_user, - client.hbacrule_remove_user, 'group') or changed + changed = ( + client.modify_if_diff( + name, + ipa_hbacrule.get("memberuser_group", []), + usergroup, + client.hbacrule_add_user, + client.hbacrule_remove_user, + "group", + ) + or changed + ) else: if ipa_hbacrule: changed = True @@ -329,39 +399,40 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - host=dict(type='list', elements='str'), - hostcategory=dict(type='str', choices=['all']), - hostgroup=dict(type='list', elements='str'), - service=dict(type='list', elements='str'), - servicecategory=dict(type='str', choices=['all']), - servicegroup=dict(type='list', elements='str'), - sourcehost=dict(type='list', elements='str'), - sourcehostcategory=dict(type='str', choices=['all']), - sourcehostgroup=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - user=dict(type='list', elements='str'), - usercategory=dict(type='str', choices=['all']), - usergroup=dict(type='list', elements='str')) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True - ) - - client = HBACRuleIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) + argument_spec.update( + cn=dict(type="str", required=True, aliases=["name"]), + description=dict(type="str"), + host=dict(type="list", elements="str"), + hostcategory=dict(type="str", choices=["all"]), + hostgroup=dict(type="list", elements="str"), + service=dict(type="list", elements="str"), + servicecategory=dict(type="str", choices=["all"]), + servicegroup=dict(type="list", elements="str"), + sourcehost=dict(type="list", elements="str"), + sourcehostcategory=dict(type="str", choices=["all"]), + sourcehostgroup=dict(type="list", elements="str"), + state=dict(type="str", default="present", choices=["present", "absent", "enabled", "disabled"]), + user=dict(type="list", elements="str"), + usercategory=dict(type="str", choices=["all"]), + usergroup=dict(type="list", elements="str"), + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + client = HBACRuleIPAClient( + module=module, + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], + ) try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, hbacrule = ensure(module, client) module.exit_json(changed=changed, hbacrule=hbacrule) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_host.py b/plugins/modules/ipa_host.py index 8d12178845c..07775df2fa1 100644 --- a/plugins/modules/ipa_host.py +++ b/plugins/modules/ipa_host.py @@ -194,56 +194,67 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def host_show(self, 
name): - return self._post_json(method='host_show', name=name) + return self._post_json(method="host_show", name=name) def host_find(self, name): - return self._post_json(method='host_find', name=None, item={'all': True, 'fqdn': name}) + return self._post_json(method="host_find", name=None, item={"all": True, "fqdn": name}) def host_add(self, name, host): - return self._post_json(method='host_add', name=name, item=host) + return self._post_json(method="host_add", name=name, item=host) def host_mod(self, name, host): - return self._post_json(method='host_mod', name=name, item=host) + return self._post_json(method="host_mod", name=name, item=host) def host_del(self, name, update_dns): - return self._post_json(method='host_del', name=name, item={'updatedns': update_dns}) + return self._post_json(method="host_del", name=name, item={"updatedns": update_dns}) def host_disable(self, name): - return self._post_json(method='host_disable', name=name) - - -def get_host_dict(description=None, userclass=None, force=None, ip_address=None, l=None, ns_host_location=None, ns_hardware_platform=None, - ns_os_version=None, user_certificate=None, mac_address=None, random_password=None): + return self._post_json(method="host_disable", name=name) + + +def get_host_dict( + description=None, + userclass=None, + force=None, + ip_address=None, + l=None, + ns_host_location=None, + ns_hardware_platform=None, + ns_os_version=None, + user_certificate=None, + mac_address=None, + random_password=None, +): data = {} if description is not None: - data['description'] = description + data["description"] = description if userclass is not None: - data['userclass'] = userclass + data["userclass"] = userclass if force is not None: - data['force'] = force + data["force"] = force if ip_address is not None: - data['ip_address'] = ip_address + data["ip_address"] = ip_address if l is not None: - data['l'] = l + data["l"] = l if ns_host_location is not None: - data['nshostlocation'] = ns_host_location + data["nshostlocation"] = ns_host_location if ns_hardware_platform is not None: - data['nshardwareplatform'] = ns_hardware_platform + data["nshardwareplatform"] = ns_hardware_platform if ns_os_version is not None: - data['nsosversion'] = ns_os_version + data["nsosversion"] = ns_os_version if user_certificate is not None: - data['usercertificate'] = [{"__base64__": item} for item in user_certificate] + data["usercertificate"] = [{"__base64__": item} for item in user_certificate] if mac_address is not None: - data['macaddress'] = mac_address + data["macaddress"] = mac_address if random_password is not None: - data['random'] = random_password + data["random"] = random_password return data def get_host_diff(client, ipa_host, module_host): - non_updateable_keys = ['force', 'ip_address'] - if not module_host.get('random'): - non_updateable_keys.append('random') + non_updateable_keys = ["force", "ip_address"] + if not module_host.get("random"): + non_updateable_keys.append("random") for key in non_updateable_keys: if key in module_host: del module_host[key] @@ -252,33 +263,34 @@ def get_host_diff(client, ipa_host, module_host): def ensure(module, client): - name = module.params['fqdn'] - state = module.params['state'] - force_creation = module.params['force_creation'] + name = module.params["fqdn"] + state = module.params["state"] + force_creation = module.params["force_creation"] ipa_host = client.host_find(name=name) - module_host = get_host_dict(description=module.params['description'], - userclass=module.params['userclass'], - 
force=module.params['force'], - ip_address=module.params['ip_address'], - l=module.params['l'], - ns_host_location=module.params['ns_host_location'], - ns_hardware_platform=module.params['ns_hardware_platform'], - ns_os_version=module.params['ns_os_version'], - user_certificate=module.params['user_certificate'], - mac_address=module.params['mac_address'], - random_password=module.params['random_password'], - ) + module_host = get_host_dict( + description=module.params["description"], + userclass=module.params["userclass"], + force=module.params["force"], + ip_address=module.params["ip_address"], + l=module.params["l"], + ns_host_location=module.params["ns_host_location"], + ns_hardware_platform=module.params["ns_hardware_platform"], + ns_os_version=module.params["ns_os_version"], + user_certificate=module.params["user_certificate"], + mac_address=module.params["mac_address"], + random_password=module.params["random_password"], + ) changed = False - if state in ['present', 'enabled', 'disabled']: - if not ipa_host and (force_creation or state == 'present'): + if state in ["present", "enabled", "disabled"]: + if not ipa_host and (force_creation or state == "present"): changed = True if not module.check_mode: # OTP password generated by FreeIPA is visible only for host_add command # so, return directly from here. return changed, client.host_add(name=name, host=module_host) else: - if state in ['disabled', 'enabled']: + if state in ["disabled", "enabled"]: module.fail_json(msg=f"No host with name {ipa_host} found") diff = get_host_diff(client, ipa_host, module_host) @@ -289,17 +301,17 @@ def ensure(module, client): for key in diff: data[key] = module_host.get(key) if "usercertificate" not in data: - data["usercertificate"] = [ - cert['__base64__'] for cert in ipa_host.get("usercertificate", []) - ] + data["usercertificate"] = [cert["__base64__"] for cert in ipa_host.get("usercertificate", [])] ipa_host_show = client.host_show(name=name) - if ipa_host_show.get('has_keytab', True) and (state == 'disabled' or module.params.get('random_password')): + if ipa_host_show.get("has_keytab", True) and ( + state == "disabled" or module.params.get("random_password") + ): client.host_disable(name=name) return changed, client.host_mod(name=name, host=data) - elif state == 'absent': + elif state == "absent": if ipa_host: changed = True - update_dns = module.params.get('update_dns', False) + update_dns = module.params.get("update_dns", False) if not module.check_mode: client.host_del(name=name, update_dns=update_dns) @@ -309,39 +321,39 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() argument_spec.update( - description=dict(type='str'), - fqdn=dict(type='str', required=True, aliases=['name']), - force=dict(type='bool'), - ip_address=dict(type='str'), - l=dict(type='str', aliases=['locality']), - ns_host_location=dict(type='str', aliases=['nshostlocation']), - ns_hardware_platform=dict(type='str', aliases=['nshardwareplatform']), - ns_os_version=dict(type='str', aliases=['nsosversion']), - userclass=dict(type='str'), - user_certificate=dict(type='list', aliases=['usercertificate'], elements='str'), - mac_address=dict(type='list', aliases=['macaddress'], elements='str'), - update_dns=dict(type='bool'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - random_password=dict(type='bool', no_log=False), - force_creation=dict(type='bool', default=True) + description=dict(type="str"), + fqdn=dict(type="str", required=True, 
aliases=["name"]), + force=dict(type="bool"), + ip_address=dict(type="str"), + l=dict(type="str", aliases=["locality"]), + ns_host_location=dict(type="str", aliases=["nshostlocation"]), + ns_hardware_platform=dict(type="str", aliases=["nshardwareplatform"]), + ns_os_version=dict(type="str", aliases=["nsosversion"]), + userclass=dict(type="str"), + user_certificate=dict(type="list", aliases=["usercertificate"], elements="str"), + mac_address=dict(type="list", aliases=["macaddress"], elements="str"), + update_dns=dict(type="bool"), + state=dict(type="str", default="present", choices=["present", "absent", "enabled", "disabled"]), + random_password=dict(type="bool", no_log=False), + force_creation=dict(type="bool", default=True), ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) - client = HostIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) + client = HostIPAClient( + module=module, + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], + ) try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, host = ensure(module, client) module.exit_json(changed=changed, host=host) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_hostgroup.py b/plugins/modules/ipa_hostgroup.py index 41bd489b47f..d957fbcfb34 100644 --- a/plugins/modules/ipa_hostgroup.py +++ b/plugins/modules/ipa_hostgroup.py @@ -107,40 +107,40 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def hostgroup_find(self, name): - return self._post_json(method='hostgroup_find', name=None, item={'all': True, 'cn': name}) + return self._post_json(method="hostgroup_find", name=None, item={"all": True, "cn": name}) def hostgroup_add(self, name, item): - return self._post_json(method='hostgroup_add', name=name, item=item) + return self._post_json(method="hostgroup_add", name=name, item=item) def hostgroup_mod(self, name, item): - return self._post_json(method='hostgroup_mod', name=name, item=item) + return self._post_json(method="hostgroup_mod", name=name, item=item) def hostgroup_del(self, name): - return self._post_json(method='hostgroup_del', name=name) + return self._post_json(method="hostgroup_del", name=name) def hostgroup_add_member(self, name, item): - return self._post_json(method='hostgroup_add_member', name=name, item=item) + return self._post_json(method="hostgroup_add_member", name=name, item=item) def hostgroup_add_host(self, name, item): - return self.hostgroup_add_member(name=name, item={'host': item}) + return self.hostgroup_add_member(name=name, item={"host": item}) def hostgroup_add_hostgroup(self, name, item): - return self.hostgroup_add_member(name=name, item={'hostgroup': item}) + return self.hostgroup_add_member(name=name, item={"hostgroup": item}) def hostgroup_remove_member(self, name, item): - return self._post_json(method='hostgroup_remove_member', name=name, item=item) + return self._post_json(method="hostgroup_remove_member", name=name, item=item) def hostgroup_remove_host(self, name, item): - return self.hostgroup_remove_member(name=name, item={'host': item}) + 
return self.hostgroup_remove_member(name=name, item={"host": item}) def hostgroup_remove_hostgroup(self, name, item): - return self.hostgroup_remove_member(name=name, item={'hostgroup': item}) + return self.hostgroup_remove_member(name=name, item={"hostgroup": item}) def get_hostgroup_dict(description=None): data = {} if description is not None: - data['description'] = description + data["description"] = description return data @@ -149,17 +149,17 @@ def get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup): def ensure(module, client): - name = module.params['cn'] - state = module.params['state'] - host = module.params['host'] - hostgroup = module.params['hostgroup'] - append = module.params['append'] + name = module.params["cn"] + state = module.params["state"] + host = module.params["host"] + hostgroup = module.params["hostgroup"] + append = module.params["append"] ipa_hostgroup = client.hostgroup_find(name=name) - module_hostgroup = get_hostgroup_dict(description=module.params['description']) + module_hostgroup = get_hostgroup_dict(description=module.params["description"]) changed = False - if state in ['present', 'enabled']: + if state in ["present", "enabled"]: if not ipa_hostgroup: changed = True if not module.check_mode: @@ -175,18 +175,30 @@ def ensure(module, client): client.hostgroup_mod(name=name, item=data) if host is not None: - changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []), - [item.lower() for item in host], - client.hostgroup_add_host, - client.hostgroup_remove_host, - append=append) or changed + changed = ( + client.modify_if_diff( + name, + ipa_hostgroup.get("member_host", []), + [item.lower() for item in host], + client.hostgroup_add_host, + client.hostgroup_remove_host, + append=append, + ) + or changed + ) if hostgroup is not None: - changed = client.modify_if_diff(name, ipa_hostgroup.get('member_hostgroup', []), - [item.lower() for item in hostgroup], - client.hostgroup_add_hostgroup, - client.hostgroup_remove_hostgroup, - append=append) or changed + changed = ( + client.modify_if_diff( + name, + ipa_hostgroup.get("member_hostgroup", []), + [item.lower() for item in hostgroup], + client.hostgroup_add_hostgroup, + client.hostgroup_remove_hostgroup, + append=append, + ) + or changed + ) else: if ipa_hostgroup: @@ -199,29 +211,31 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - host=dict(type='list', elements='str'), - hostgroup=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - append=dict(type='bool', default=False)) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = HostGroupIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) + argument_spec.update( + cn=dict(type="str", required=True, aliases=["name"]), + description=dict(type="str"), + host=dict(type="list", elements="str"), + hostgroup=dict(type="list", elements="str"), + state=dict(type="str", default="present", choices=["present", "absent", "enabled", "disabled"]), + append=dict(type="bool", default=False), + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + client = HostGroupIPAClient( + module=module, + host=module.params["ipa_host"], + port=module.params["ipa_port"], + 
protocol=module.params["ipa_prot"], + ) try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, hostgroup = ensure(module, client) module.exit_json(changed=changed, hostgroup=hostgroup) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_otpconfig.py b/plugins/modules/ipa_otpconfig.py index 6956497fb02..05e8b5009cf 100644 --- a/plugins/modules/ipa_otpconfig.py +++ b/plugins/modules/ipa_otpconfig.py @@ -90,24 +90,24 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def otpconfig_show(self): - return self._post_json(method='otpconfig_show', name=None) + return self._post_json(method="otpconfig_show", name=None) def otpconfig_mod(self, name, item): - return self._post_json(method='otpconfig_mod', name=name, item=item) + return self._post_json(method="otpconfig_mod", name=name, item=item) -def get_otpconfig_dict(ipatokentotpauthwindow=None, ipatokentotpsyncwindow=None, - ipatokenhotpauthwindow=None, ipatokenhotpsyncwindow=None): - +def get_otpconfig_dict( + ipatokentotpauthwindow=None, ipatokentotpsyncwindow=None, ipatokenhotpauthwindow=None, ipatokenhotpsyncwindow=None +): config = {} if ipatokentotpauthwindow is not None: - config['ipatokentotpauthwindow'] = str(ipatokentotpauthwindow) + config["ipatokentotpauthwindow"] = str(ipatokentotpauthwindow) if ipatokentotpsyncwindow is not None: - config['ipatokentotpsyncwindow'] = str(ipatokentotpsyncwindow) + config["ipatokentotpsyncwindow"] = str(ipatokentotpsyncwindow) if ipatokenhotpauthwindow is not None: - config['ipatokenhotpauthwindow'] = str(ipatokenhotpauthwindow) + config["ipatokenhotpauthwindow"] = str(ipatokenhotpauthwindow) if ipatokenhotpsyncwindow is not None: - config['ipatokenhotpsyncwindow'] = str(ipatokenhotpsyncwindow) + config["ipatokenhotpsyncwindow"] = str(ipatokenhotpsyncwindow) return config @@ -118,10 +118,10 @@ def get_otpconfig_diff(client, ipa_config, module_config): def ensure(module, client): module_otpconfig = get_otpconfig_dict( - ipatokentotpauthwindow=module.params.get('ipatokentotpauthwindow'), - ipatokentotpsyncwindow=module.params.get('ipatokentotpsyncwindow'), - ipatokenhotpauthwindow=module.params.get('ipatokenhotpauthwindow'), - ipatokenhotpsyncwindow=module.params.get('ipatokenhotpsyncwindow'), + ipatokentotpauthwindow=module.params.get("ipatokentotpauthwindow"), + ipatokentotpsyncwindow=module.params.get("ipatokentotpsyncwindow"), + ipatokenhotpauthwindow=module.params.get("ipatokenhotpauthwindow"), + ipatokenhotpsyncwindow=module.params.get("ipatokenhotpsyncwindow"), ) ipa_otpconfig = client.otpconfig_show() diff = get_otpconfig_diff(client, ipa_otpconfig, module_otpconfig) @@ -142,29 +142,23 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() argument_spec.update( - ipatokentotpauthwindow=dict(type='int', aliases=['totpauthwindow'], no_log=False), - ipatokentotpsyncwindow=dict(type='int', aliases=['totpsyncwindow'], no_log=False), - ipatokenhotpauthwindow=dict(type='int', aliases=['hotpauthwindow'], no_log=False), - ipatokenhotpsyncwindow=dict(type='int', aliases=['hotpsyncwindow'], no_log=False), + ipatokentotpauthwindow=dict(type="int", aliases=["totpauthwindow"], no_log=False), + ipatokentotpsyncwindow=dict(type="int", aliases=["totpsyncwindow"], 
no_log=False), + ipatokenhotpauthwindow=dict(type="int", aliases=["hotpauthwindow"], no_log=False), + ipatokenhotpsyncwindow=dict(type="int", aliases=["hotpsyncwindow"], no_log=False), ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) client = OTPConfigIPAClient( module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot'] + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], ) try: - client.login( - username=module.params['ipa_user'], - password=module.params['ipa_pass'] - ) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, otpconfig = ensure(module, client) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) @@ -172,5 +166,5 @@ def main(): module.exit_json(changed=changed, otpconfig=otpconfig) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_otptoken.py b/plugins/modules/ipa_otptoken.py index ab697176f38..45d12a57f40 100644 --- a/plugins/modules/ipa_otptoken.py +++ b/plugins/modules/ipa_otptoken.py @@ -181,46 +181,63 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def otptoken_find(self, name): - return self._post_json(method='otptoken_find', name=None, item={'all': True, - 'ipatokenuniqueid': name, - 'timelimit': '0', - 'sizelimit': '0'}) + return self._post_json( + method="otptoken_find", + name=None, + item={"all": True, "ipatokenuniqueid": name, "timelimit": "0", "sizelimit": "0"}, + ) def otptoken_add(self, name, item): - return self._post_json(method='otptoken_add', name=name, item=item) + return self._post_json(method="otptoken_add", name=name, item=item) def otptoken_mod(self, name, item): - return self._post_json(method='otptoken_mod', name=name, item=item) + return self._post_json(method="otptoken_mod", name=name, item=item) def otptoken_del(self, name): - return self._post_json(method='otptoken_del', name=name) + return self._post_json(method="otptoken_del", name=name) def base64_to_base32(base64_string): """Converts base64 string to base32 string""" - b32_string = base64.b32encode(base64.b64decode(base64_string)).decode('ascii') + b32_string = base64.b32encode(base64.b64decode(base64_string)).decode("ascii") return b32_string def base32_to_base64(base32_string): """Converts base32 string to base64 string""" - b64_string = base64.b64encode(base64.b32decode(base32_string)).decode('ascii') + b64_string = base64.b64encode(base64.b32decode(base32_string)).decode("ascii") return b64_string -def get_otptoken_dict(ansible_to_ipa, uniqueid=None, newuniqueid=None, otptype=None, secretkey=None, description=None, owner=None, - enabled=None, notbefore=None, notafter=None, vendor=None, - model=None, serial=None, algorithm=None, digits=None, offset=None, - interval=None, counter=None): +def get_otptoken_dict( + ansible_to_ipa, + uniqueid=None, + newuniqueid=None, + otptype=None, + secretkey=None, + description=None, + owner=None, + enabled=None, + notbefore=None, + notafter=None, + vendor=None, + model=None, + serial=None, + algorithm=None, + digits=None, + offset=None, + interval=None, + counter=None, +): """Create the dictionary of settings passed in""" otptoken = {} if uniqueid is not None: - otptoken[ansible_to_ipa['uniqueid']] = uniqueid + otptoken[ansible_to_ipa["uniqueid"]] = 
uniqueid if newuniqueid is not None: - otptoken[ansible_to_ipa['newuniqueid']] = newuniqueid + otptoken[ansible_to_ipa["newuniqueid"]] = newuniqueid if otptype is not None: - otptoken[ansible_to_ipa['otptype']] = otptype.upper() + otptoken[ansible_to_ipa["otptype"]] = otptype.upper() if secretkey is not None: # For some unknown reason, while IPA returns the secret in base64, # it wants the secret passed in as base32. This makes it more difficult @@ -229,42 +246,42 @@ def get_otptoken_dict(ansible_to_ipa, uniqueid=None, newuniqueid=None, otptype=N # in a different way than if it was passed in as a parameter. For # these reasons, have the module standardize on base64 input (as parameter) # and output (from IPA). - otptoken[ansible_to_ipa['secretkey']] = base64_to_base32(secretkey) + otptoken[ansible_to_ipa["secretkey"]] = base64_to_base32(secretkey) if description is not None: - otptoken[ansible_to_ipa['description']] = description + otptoken[ansible_to_ipa["description"]] = description if owner is not None: - otptoken[ansible_to_ipa['owner']] = owner + otptoken[ansible_to_ipa["owner"]] = owner if enabled is not None: - otptoken[ansible_to_ipa['enabled']] = False if enabled else True + otptoken[ansible_to_ipa["enabled"]] = False if enabled else True if notbefore is not None: - otptoken[ansible_to_ipa['notbefore']] = f"{notbefore}Z" + otptoken[ansible_to_ipa["notbefore"]] = f"{notbefore}Z" if notafter is not None: - otptoken[ansible_to_ipa['notafter']] = f"{notafter}Z" + otptoken[ansible_to_ipa["notafter"]] = f"{notafter}Z" if vendor is not None: - otptoken[ansible_to_ipa['vendor']] = vendor + otptoken[ansible_to_ipa["vendor"]] = vendor if model is not None: - otptoken[ansible_to_ipa['model']] = model + otptoken[ansible_to_ipa["model"]] = model if serial is not None: - otptoken[ansible_to_ipa['serial']] = serial + otptoken[ansible_to_ipa["serial"]] = serial if algorithm is not None: - otptoken[ansible_to_ipa['algorithm']] = algorithm + otptoken[ansible_to_ipa["algorithm"]] = algorithm if digits is not None: - otptoken[ansible_to_ipa['digits']] = str(digits) + otptoken[ansible_to_ipa["digits"]] = str(digits) if offset is not None: - otptoken[ansible_to_ipa['offset']] = str(offset) + otptoken[ansible_to_ipa["offset"]] = str(offset) if interval is not None: - otptoken[ansible_to_ipa['interval']] = str(interval) + otptoken[ansible_to_ipa["interval"]] = str(interval) if counter is not None: - otptoken[ansible_to_ipa['counter']] = str(counter) + otptoken[ansible_to_ipa["counter"]] = str(counter) return otptoken def transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible): """Transform the output received by IPA to a format more friendly - before it is returned to the user. IPA returns even simple - strings as a list of strings. It also returns bools and - int as string. This function cleans that up before return. + before it is returned to the user. IPA returns even simple + strings as a list of strings. It also returns bools and + int as string. This function cleans that up before return. 
""" updated_otptoken = ipa_otptoken @@ -284,39 +301,38 @@ def transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible): for ansible_parameter in ansible_to_ipa: if ansible_parameter in updated_otptoken: if isinstance(updated_otptoken[ansible_parameter], list) and len(updated_otptoken[ansible_parameter]) == 1: - if ansible_parameter in ['digits', 'offset', 'interval', 'counter']: + if ansible_parameter in ["digits", "offset", "interval", "counter"]: updated_otptoken[ansible_parameter] = int(updated_otptoken[ansible_parameter][0]) - elif ansible_parameter == 'enabled': + elif ansible_parameter == "enabled": updated_otptoken[ansible_parameter] = bool(updated_otptoken[ansible_parameter][0]) else: updated_otptoken[ansible_parameter] = updated_otptoken[ansible_parameter][0] - if 'secretkey' in updated_otptoken: - if isinstance(updated_otptoken['secretkey'], dict): - if '__base64__' in updated_otptoken['secretkey']: - sanitize_strings.add(updated_otptoken['secretkey']['__base64__']) - b64key = updated_otptoken['secretkey']['__base64__'] - updated_otptoken.pop('secretkey') - updated_otptoken['secretkey'] = b64key + if "secretkey" in updated_otptoken: + if isinstance(updated_otptoken["secretkey"], dict): + if "__base64__" in updated_otptoken["secretkey"]: + sanitize_strings.add(updated_otptoken["secretkey"]["__base64__"]) + b64key = updated_otptoken["secretkey"]["__base64__"] + updated_otptoken.pop("secretkey") + updated_otptoken["secretkey"] = b64key sanitize_strings.add(b64key) - elif '__base32__' in updated_otptoken['secretkey']: - sanitize_strings.add(updated_otptoken['secretkey']['__base32__']) - b32key = updated_otptoken['secretkey']['__base32__'] + elif "__base32__" in updated_otptoken["secretkey"]: + sanitize_strings.add(updated_otptoken["secretkey"]["__base32__"]) + b32key = updated_otptoken["secretkey"]["__base32__"] b64key = base32_to_base64(b32key) - updated_otptoken.pop('secretkey') - updated_otptoken['secretkey'] = b64key + updated_otptoken.pop("secretkey") + updated_otptoken["secretkey"] = b64key sanitize_strings.add(b32key) sanitize_strings.add(b64key) return updated_otptoken, sanitize_strings -def validate_modifications(ansible_to_ipa, module, ipa_otptoken, - module_otptoken, unmodifiable_after_creation): +def validate_modifications(ansible_to_ipa, module, ipa_otptoken, module_otptoken, unmodifiable_after_creation): """Checks to see if the requested modifications are valid. Some elements - cannot be modified after initial creation. However, we still want to - validate arguments that are specified, but are not different than what - is currently set on the server. + cannot be modified after initial creation. However, we still want to + validate arguments that are specified, but are not different than what + is currently set on the server. """ modifications_valid = True @@ -329,12 +345,14 @@ def validate_modifications(ansible_to_ipa, module, ipa_otptoken, # values in a list, even though passing them in a list (even of # length 1) will be rejected. The module values for all elements # other than type (totp or hotp) have this happen. - if parameter == 'otptype': + if parameter == "otptype": ipa_value = ipa_otptoken[ansible_to_ipa[parameter]] else: if len(ipa_otptoken[ansible_to_ipa[parameter]]) != 1: - module.fail_json(msg="Invariant fail: Return value from IPA is not a list of length 1. Please open a bug report for the module.") - if parameter == 'secretkey': + module.fail_json( + msg="Invariant fail: Return value from IPA is not a list of length 1. 
Please open a bug report for the module."
+                )
+            if parameter == "secretkey":
                 # We stored the secret key in base32 since we had assumed that would need to
                 # be the format if we were contacting IPA to create it. However, we are
                 # now comparing it against what is already set in the IPA server, so convert
@@ -343,11 +361,11 @@
 
                 # For the secret key, it is even more specific in that the key is returned
                 # in a dict, in the list, as the __base64__ entry for the IPA response.
-                ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base64__']
-                if '__base64__' in ipa_otptoken[ansible_to_ipa[parameter]][0]:
-                    ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base64__']
-                elif '__base32__' in ipa_otptoken[ansible_to_ipa[parameter]][0]:
-                    b32key = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base32__']
+                ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]["__base64__"]
+                if "__base64__" in ipa_otptoken[ansible_to_ipa[parameter]][0]:
+                    ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]["__base64__"]
+                elif "__base32__" in ipa_otptoken[ansible_to_ipa[parameter]][0]:
+                    b32key = ipa_otptoken[ansible_to_ipa[parameter]][0]["__base32__"]
                     b64key = base32_to_base64(b32key)
                     ipa_value = b64key
                 else:
@@ -357,9 +375,11 @@
 
             if mod_value != ipa_value:
                 modifications_valid = False
-                fail_message = (f"Parameter '{parameter}' cannot be changed once " +
-                                f"the OTP is created and the requested value specified here ({mod_value}) " +
-                                f"differs from what is set in the IPA server ({ipa_value})")
+                fail_message = (
+                    f"Parameter '{parameter}' cannot be changed once "
+                    + f"the OTP is created and the requested value specified here ({mod_value}) "
+                    + f"differs from what is set in the IPA server ({ipa_value})"
+                )
                 module.fail_json(msg=fail_message)
 
     return modifications_valid
 
 
@@ -368,84 +388,92 @@ def ensure(module, client):
     # dict to map from ansible parameter names to attribute names
     # used by IPA (which are not so friendly).
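As a quick illustration of the mapping introduced below (abridged to two entries; editor's sketch, not part of the commit), the inverse dictionary used for translating IPA attribute names back to the module's parameter names is a plain key/value swap:

    # Abridged stand-in for the full ansible_to_ipa dict in the hunk below.
    ansible_to_ipa = {"uniqueid": "ipatokenuniqueid", "owner": "ipatokenowner"}
    ipa_to_ansible = {v: k for k, v in ansible_to_ipa.items()}
    print(ipa_to_ansible["ipatokenowner"])  # -> 'owner'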
- ansible_to_ipa = {'uniqueid': 'ipatokenuniqueid', - 'newuniqueid': 'rename', - 'otptype': 'type', - 'secretkey': 'ipatokenotpkey', - 'description': 'description', - 'owner': 'ipatokenowner', - 'enabled': 'ipatokendisabled', - 'notbefore': 'ipatokennotbefore', - 'notafter': 'ipatokennotafter', - 'vendor': 'ipatokenvendor', - 'model': 'ipatokenmodel', - 'serial': 'ipatokenserial', - 'algorithm': 'ipatokenotpalgorithm', - 'digits': 'ipatokenotpdigits', - 'offset': 'ipatokentotpclockoffset', - 'interval': 'ipatokentotptimestep', - 'counter': 'ipatokenhotpcounter'} + ansible_to_ipa = { + "uniqueid": "ipatokenuniqueid", + "newuniqueid": "rename", + "otptype": "type", + "secretkey": "ipatokenotpkey", + "description": "description", + "owner": "ipatokenowner", + "enabled": "ipatokendisabled", + "notbefore": "ipatokennotbefore", + "notafter": "ipatokennotafter", + "vendor": "ipatokenvendor", + "model": "ipatokenmodel", + "serial": "ipatokenserial", + "algorithm": "ipatokenotpalgorithm", + "digits": "ipatokenotpdigits", + "offset": "ipatokentotpclockoffset", + "interval": "ipatokentotptimestep", + "counter": "ipatokenhotpcounter", + } # Create inverse dictionary for mapping return values ipa_to_ansible = {v: k for k, v in ansible_to_ipa.items()} - unmodifiable_after_creation = ['otptype', 'secretkey', 'algorithm', - 'digits', 'offset', 'interval', 'counter'] - state = module.params['state'] - uniqueid = module.params['uniqueid'] - - module_otptoken = get_otptoken_dict(ansible_to_ipa=ansible_to_ipa, - uniqueid=module.params.get('uniqueid'), - newuniqueid=module.params.get('newuniqueid'), - otptype=module.params.get('otptype'), - secretkey=module.params.get('secretkey'), - description=module.params.get('description'), - owner=module.params.get('owner'), - enabled=module.params.get('enabled'), - notbefore=module.params.get('notbefore'), - notafter=module.params.get('notafter'), - vendor=module.params.get('vendor'), - model=module.params.get('model'), - serial=module.params.get('serial'), - algorithm=module.params.get('algorithm'), - digits=module.params.get('digits'), - offset=module.params.get('offset'), - interval=module.params.get('interval'), - counter=module.params.get('counter')) + unmodifiable_after_creation = ["otptype", "secretkey", "algorithm", "digits", "offset", "interval", "counter"] + state = module.params["state"] + uniqueid = module.params["uniqueid"] + + module_otptoken = get_otptoken_dict( + ansible_to_ipa=ansible_to_ipa, + uniqueid=module.params.get("uniqueid"), + newuniqueid=module.params.get("newuniqueid"), + otptype=module.params.get("otptype"), + secretkey=module.params.get("secretkey"), + description=module.params.get("description"), + owner=module.params.get("owner"), + enabled=module.params.get("enabled"), + notbefore=module.params.get("notbefore"), + notafter=module.params.get("notafter"), + vendor=module.params.get("vendor"), + model=module.params.get("model"), + serial=module.params.get("serial"), + algorithm=module.params.get("algorithm"), + digits=module.params.get("digits"), + offset=module.params.get("offset"), + interval=module.params.get("interval"), + counter=module.params.get("counter"), + ) ipa_otptoken = client.otptoken_find(name=uniqueid) - if ansible_to_ipa['newuniqueid'] in module_otptoken: + if ansible_to_ipa["newuniqueid"] in module_otptoken: # Check to see if the new unique id is already taken in use - ipa_otptoken_new = client.otptoken_find(name=module_otptoken[ansible_to_ipa['newuniqueid']]) + ipa_otptoken_new = 
client.otptoken_find(name=module_otptoken[ansible_to_ipa["newuniqueid"]]) if ipa_otptoken_new: - module.fail_json(msg=(f"Requested rename through newuniqueid to {module_otptoken[ansible_to_ipa['newuniqueid']]} " - "failed because the new unique id is already in use")) + module.fail_json( + msg=( + f"Requested rename through newuniqueid to {module_otptoken[ansible_to_ipa['newuniqueid']]} " + "failed because the new unique id is already in use" + ) + ) changed = False - if state == 'present': + if state == "present": if not ipa_otptoken: changed = True if not module.check_mode: # It would not make sense to have a rename after creation, so if the user # specified a newuniqueid, just replace the uniqueid with the updated one # before creation - if ansible_to_ipa['newuniqueid'] in module_otptoken: - module_otptoken[ansible_to_ipa['uniqueid']] = module_otptoken[ansible_to_ipa['newuniqueid']] - uniqueid = module_otptoken[ansible_to_ipa['newuniqueid']] - module_otptoken.pop(ansible_to_ipa['newuniqueid']) + if ansible_to_ipa["newuniqueid"] in module_otptoken: + module_otptoken[ansible_to_ipa["uniqueid"]] = module_otptoken[ansible_to_ipa["newuniqueid"]] + uniqueid = module_otptoken[ansible_to_ipa["newuniqueid"]] + module_otptoken.pop(ansible_to_ipa["newuniqueid"]) # IPA wants the unique id in the first position and not as a key/value pair. # Get rid of it from the otptoken dict and just specify it in the name field # for otptoken_add. - if ansible_to_ipa['uniqueid'] in module_otptoken: - module_otptoken.pop(ansible_to_ipa['uniqueid']) + if ansible_to_ipa["uniqueid"] in module_otptoken: + module_otptoken.pop(ansible_to_ipa["uniqueid"]) - module_otptoken['all'] = True + module_otptoken["all"] = True ipa_otptoken = client.otptoken_add(name=uniqueid, item=module_otptoken) else: - if not validate_modifications(ansible_to_ipa, module, ipa_otptoken, - module_otptoken, unmodifiable_after_creation): + if not validate_modifications( + ansible_to_ipa, module, ipa_otptoken, module_otptoken, unmodifiable_after_creation + ): module.fail_json(msg="Modifications requested in module are not valid") # IPA will reject 'modifications' that do not actually modify anything @@ -460,14 +488,13 @@ def ensure(module, client): if len(diff) > 0: changed = True if not module.check_mode: - # IPA wants the unique id in the first position and not as a key/value pair. # Get rid of it from the otptoken dict and just specify it in the name field # for otptoken_mod. 
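The recurring "IPA wants the unique id in the first position" comments refer to FreeIPA's JSON-RPC convention: positional arguments travel in a list, separate from the options dict, which is why the uniqueid is popped out of module_otptoken and passed as the name argument instead. A hedged sketch of the payload shape a helper like _post_json would build (the exact envelope fields such as "id" are an assumption, not taken from this patch):

import json

# Positional args (here the token's unique id) go in the first element of
# "params"; all remaining attributes ride along as an options dict.
payload = {
    "method": "otptoken_mod",
    "params": [["token-01"], {"description": "laptop token", "all": True}],
    "id": 0,
}
print(json.dumps(payload))

Keeping the id out of the options dict is exactly what the pop() calls below accomplish before otptoken_mod is invoked.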
- if ansible_to_ipa['uniqueid'] in module_otptoken: - module_otptoken.pop(ansible_to_ipa['uniqueid']) + if ansible_to_ipa["uniqueid"] in module_otptoken: + module_otptoken.pop(ansible_to_ipa["uniqueid"]) - module_otptoken['all'] = True + module_otptoken["all"] = True ipa_otptoken = client.otptoken_mod(name=uniqueid, item=module_otptoken) else: if ipa_otptoken: @@ -485,36 +512,38 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() - argument_spec.update(uniqueid=dict(type='str', aliases=['name'], required=True), - newuniqueid=dict(type='str'), - otptype=dict(type='str', choices=['totp', 'hotp']), - secretkey=dict(type='str', no_log=True), - description=dict(type='str'), - owner=dict(type='str'), - enabled=dict(type='bool', default=True), - notbefore=dict(type='str'), - notafter=dict(type='str'), - vendor=dict(type='str'), - model=dict(type='str'), - serial=dict(type='str'), - state=dict(type='str', choices=['present', 'absent'], default='present'), - algorithm=dict(type='str', choices=['sha1', 'sha256', 'sha384', 'sha512']), - digits=dict(type='int', choices=[6, 8]), - offset=dict(type='int'), - interval=dict(type='int'), - counter=dict(type='int')) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = OTPTokenIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) + argument_spec.update( + uniqueid=dict(type="str", aliases=["name"], required=True), + newuniqueid=dict(type="str"), + otptype=dict(type="str", choices=["totp", "hotp"]), + secretkey=dict(type="str", no_log=True), + description=dict(type="str"), + owner=dict(type="str"), + enabled=dict(type="bool", default=True), + notbefore=dict(type="str"), + notafter=dict(type="str"), + vendor=dict(type="str"), + model=dict(type="str"), + serial=dict(type="str"), + state=dict(type="str", choices=["present", "absent"], default="present"), + algorithm=dict(type="str", choices=["sha1", "sha256", "sha384", "sha512"]), + digits=dict(type="int", choices=[6, 8]), + offset=dict(type="int"), + interval=dict(type="int"), + counter=dict(type="int"), + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + client = OTPTokenIPAClient( + module=module, + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], + ) try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, otptoken = ensure(module, client) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) @@ -522,5 +551,5 @@ def main(): module.exit_json(changed=changed, otptoken=otptoken) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_pwpolicy.py b/plugins/modules/ipa_pwpolicy.py index 796b921dbe0..0be1413240a 100644 --- a/plugins/modules/ipa_pwpolicy.py +++ b/plugins/modules/ipa_pwpolicy.py @@ -162,7 +162,8 @@ class PwPolicyIPAClient(IPAClient): - '''The global policy will be selected when `name` is `None`''' + """The global policy will be selected when `name` is `None`""" + def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) @@ -170,41 +171,54 @@ def pwpolicy_find(self, name): if name is None: # Manually set the cn to the global policy because pwpolicy_find will return a random # different policy if cn is `None` - name = 
'global_policy' - return self._post_json(method='pwpolicy_find', name=None, item={'all': True, 'cn': name}) + name = "global_policy" + return self._post_json(method="pwpolicy_find", name=None, item={"all": True, "cn": name}) def pwpolicy_add(self, name, item): - return self._post_json(method='pwpolicy_add', name=name, item=item) + return self._post_json(method="pwpolicy_add", name=name, item=item) def pwpolicy_mod(self, name, item): - return self._post_json(method='pwpolicy_mod', name=name, item=item) + return self._post_json(method="pwpolicy_mod", name=name, item=item) def pwpolicy_del(self, name): - return self._post_json(method='pwpolicy_del', name=name) - - -def get_pwpolicy_dict(maxpwdlife=None, minpwdlife=None, historylength=None, minclasses=None, - minlength=None, priority=None, maxfailcount=None, failinterval=None, - lockouttime=None, gracelimit=None, maxrepeat=None, maxsequence=None, dictcheck=None, usercheck=None): + return self._post_json(method="pwpolicy_del", name=name) + + +def get_pwpolicy_dict( + maxpwdlife=None, + minpwdlife=None, + historylength=None, + minclasses=None, + minlength=None, + priority=None, + maxfailcount=None, + failinterval=None, + lockouttime=None, + gracelimit=None, + maxrepeat=None, + maxsequence=None, + dictcheck=None, + usercheck=None, +): pwpolicy = {} pwpolicy_options = { - 'krbmaxpwdlife': maxpwdlife, - 'krbminpwdlife': minpwdlife, - 'krbpwdhistorylength': historylength, - 'krbpwdmindiffchars': minclasses, - 'krbpwdminlength': minlength, - 'cospriority': priority, - 'krbpwdmaxfailure': maxfailcount, - 'krbpwdfailurecountinterval': failinterval, - 'krbpwdlockoutduration': lockouttime, - 'passwordgracelimit': gracelimit, - 'ipapwdmaxrepeat': maxrepeat, - 'ipapwdmaxsequence': maxsequence, + "krbmaxpwdlife": maxpwdlife, + "krbminpwdlife": minpwdlife, + "krbpwdhistorylength": historylength, + "krbpwdmindiffchars": minclasses, + "krbpwdminlength": minlength, + "cospriority": priority, + "krbpwdmaxfailure": maxfailcount, + "krbpwdfailurecountinterval": failinterval, + "krbpwdlockoutduration": lockouttime, + "passwordgracelimit": gracelimit, + "ipapwdmaxrepeat": maxrepeat, + "ipapwdmaxsequence": maxsequence, } pwpolicy_boolean_options = { - 'ipapwddictcheck': dictcheck, - 'ipapwdusercheck': usercheck, + "ipapwddictcheck": dictcheck, + "ipapwdusercheck": usercheck, } for option, value in pwpolicy_options.items(): @@ -223,29 +237,30 @@ def get_pwpolicy_diff(client, ipa_pwpolicy, module_pwpolicy): def ensure(module, client): - state = module.params['state'] - name = module.params['group'] - - module_pwpolicy = get_pwpolicy_dict(maxpwdlife=module.params.get('maxpwdlife'), - minpwdlife=module.params.get('minpwdlife'), - historylength=module.params.get('historylength'), - minclasses=module.params.get('minclasses'), - minlength=module.params.get('minlength'), - priority=module.params.get('priority'), - maxfailcount=module.params.get('maxfailcount'), - failinterval=module.params.get('failinterval'), - lockouttime=module.params.get('lockouttime'), - gracelimit=module.params.get('gracelimit'), - maxrepeat=module.params.get('maxrepeat'), - maxsequence=module.params.get('maxsequence'), - dictcheck=module.params.get('dictcheck'), - usercheck=module.params.get('usercheck'), - ) + state = module.params["state"] + name = module.params["group"] + + module_pwpolicy = get_pwpolicy_dict( + maxpwdlife=module.params.get("maxpwdlife"), + minpwdlife=module.params.get("minpwdlife"), + historylength=module.params.get("historylength"), + minclasses=module.params.get("minclasses"), + 
minlength=module.params.get("minlength"), + priority=module.params.get("priority"), + maxfailcount=module.params.get("maxfailcount"), + failinterval=module.params.get("failinterval"), + lockouttime=module.params.get("lockouttime"), + gracelimit=module.params.get("gracelimit"), + maxrepeat=module.params.get("maxrepeat"), + maxsequence=module.params.get("maxsequence"), + dictcheck=module.params.get("dictcheck"), + usercheck=module.params.get("usercheck"), + ) ipa_pwpolicy = client.pwpolicy_find(name=name) changed = False - if state == 'present': + if state == "present": if not ipa_pwpolicy: changed = True if not module.check_mode: @@ -267,35 +282,36 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() - argument_spec.update(group=dict(type='str', aliases=['name']), - state=dict(type='str', default='present', choices=['present', 'absent']), - maxpwdlife=dict(type='str'), - minpwdlife=dict(type='str'), - historylength=dict(type='str'), - minclasses=dict(type='str'), - minlength=dict(type='str'), - priority=dict(type='str'), - maxfailcount=dict(type='str'), - failinterval=dict(type='str'), - lockouttime=dict(type='str'), - gracelimit=dict(type='int'), - maxrepeat=dict(type='int'), - maxsequence=dict(type='int'), - dictcheck=dict(type='bool'), - usercheck=dict(type='bool'), - ) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = PwPolicyIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) + argument_spec.update( + group=dict(type="str", aliases=["name"]), + state=dict(type="str", default="present", choices=["present", "absent"]), + maxpwdlife=dict(type="str"), + minpwdlife=dict(type="str"), + historylength=dict(type="str"), + minclasses=dict(type="str"), + minlength=dict(type="str"), + priority=dict(type="str"), + maxfailcount=dict(type="str"), + failinterval=dict(type="str"), + lockouttime=dict(type="str"), + gracelimit=dict(type="int"), + maxrepeat=dict(type="int"), + maxsequence=dict(type="int"), + dictcheck=dict(type="bool"), + usercheck=dict(type="bool"), + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + client = PwPolicyIPAClient( + module=module, + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], + ) try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, pwpolicy = ensure(module, client) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) @@ -303,5 +319,5 @@ def main(): module.exit_json(changed=changed, pwpolicy=pwpolicy) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_role.py b/plugins/modules/ipa_role.py index 8730e1156e9..9038f89a1b4 100644 --- a/plugins/modules/ipa_role.py +++ b/plugins/modules/ipa_role.py @@ -143,64 +143,64 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def role_find(self, name): - return self._post_json(method='role_find', name=None, item={'all': True, 'cn': name}) + return self._post_json(method="role_find", name=None, item={"all": True, "cn": name}) def role_add(self, name, item): - return self._post_json(method='role_add', name=name, item=item) + return self._post_json(method="role_add", name=name, item=item) def role_mod(self, name, item): - return 
self._post_json(method='role_mod', name=name, item=item) + return self._post_json(method="role_mod", name=name, item=item) def role_del(self, name): - return self._post_json(method='role_del', name=name) + return self._post_json(method="role_del", name=name) def role_add_member(self, name, item): - return self._post_json(method='role_add_member', name=name, item=item) + return self._post_json(method="role_add_member", name=name, item=item) def role_add_group(self, name, item): - return self.role_add_member(name=name, item={'group': item}) + return self.role_add_member(name=name, item={"group": item}) def role_add_host(self, name, item): - return self.role_add_member(name=name, item={'host': item}) + return self.role_add_member(name=name, item={"host": item}) def role_add_hostgroup(self, name, item): - return self.role_add_member(name=name, item={'hostgroup': item}) + return self.role_add_member(name=name, item={"hostgroup": item}) def role_add_service(self, name, item): - return self.role_add_member(name=name, item={'service': item}) + return self.role_add_member(name=name, item={"service": item}) def role_add_user(self, name, item): - return self.role_add_member(name=name, item={'user': item}) + return self.role_add_member(name=name, item={"user": item}) def role_remove_member(self, name, item): - return self._post_json(method='role_remove_member', name=name, item=item) + return self._post_json(method="role_remove_member", name=name, item=item) def role_remove_group(self, name, item): - return self.role_remove_member(name=name, item={'group': item}) + return self.role_remove_member(name=name, item={"group": item}) def role_remove_host(self, name, item): - return self.role_remove_member(name=name, item={'host': item}) + return self.role_remove_member(name=name, item={"host": item}) def role_remove_hostgroup(self, name, item): - return self.role_remove_member(name=name, item={'hostgroup': item}) + return self.role_remove_member(name=name, item={"hostgroup": item}) def role_remove_service(self, name, item): - return self.role_remove_member(name=name, item={'service': item}) + return self.role_remove_member(name=name, item={"service": item}) def role_remove_user(self, name, item): - return self.role_remove_member(name=name, item={'user': item}) + return self.role_remove_member(name=name, item={"user": item}) def role_add_privilege(self, name, item): - return self._post_json(method='role_add_privilege', name=name, item={'privilege': item}) + return self._post_json(method="role_add_privilege", name=name, item={"privilege": item}) def role_remove_privilege(self, name, item): - return self._post_json(method='role_remove_privilege', name=name, item={'privilege': item}) + return self._post_json(method="role_remove_privilege", name=name, item={"privilege": item}) def get_role_dict(description=None): data = {} if description is not None: - data['description'] = description + data["description"] = description return data @@ -209,20 +209,20 @@ def get_role_diff(client, ipa_role, module_role): def ensure(module, client): - state = module.params['state'] - name = module.params['cn'] - group = module.params['group'] - host = module.params['host'] - hostgroup = module.params['hostgroup'] - privilege = module.params['privilege'] - service = module.params['service'] - user = module.params['user'] - - module_role = get_role_dict(description=module.params['description']) + state = module.params["state"] + name = module.params["cn"] + group = module.params["group"] + host = module.params["host"] + hostgroup = 
module.params["hostgroup"] + privilege = module.params["privilege"] + service = module.params["service"] + user = module.params["user"] + + module_role = get_role_dict(description=module.params["description"]) ipa_role = client.role_find(name=name) changed = False - if state == 'present': + if state == "present": if not ipa_role: changed = True if not module.check_mode: @@ -238,31 +238,61 @@ def ensure(module, client): client.role_mod(name=name, item=data) if group is not None: - changed = client.modify_if_diff(name, ipa_role.get('member_group', []), group, - client.role_add_group, - client.role_remove_group) or changed + changed = ( + client.modify_if_diff( + name, ipa_role.get("member_group", []), group, client.role_add_group, client.role_remove_group + ) + or changed + ) if host is not None: - changed = client.modify_if_diff(name, ipa_role.get('member_host', []), host, - client.role_add_host, - client.role_remove_host) or changed + changed = ( + client.modify_if_diff( + name, ipa_role.get("member_host", []), host, client.role_add_host, client.role_remove_host + ) + or changed + ) if hostgroup is not None: - changed = client.modify_if_diff(name, ipa_role.get('member_hostgroup', []), hostgroup, - client.role_add_hostgroup, - client.role_remove_hostgroup) or changed + changed = ( + client.modify_if_diff( + name, + ipa_role.get("member_hostgroup", []), + hostgroup, + client.role_add_hostgroup, + client.role_remove_hostgroup, + ) + or changed + ) if privilege is not None: - changed = client.modify_if_diff(name, ipa_role.get('memberof_privilege', []), privilege, - client.role_add_privilege, - client.role_remove_privilege) or changed + changed = ( + client.modify_if_diff( + name, + ipa_role.get("memberof_privilege", []), + privilege, + client.role_add_privilege, + client.role_remove_privilege, + ) + or changed + ) if service is not None: - changed = client.modify_if_diff(name, ipa_role.get('member_service', []), service, - client.role_add_service, - client.role_remove_service) or changed + changed = ( + client.modify_if_diff( + name, + ipa_role.get("member_service", []), + service, + client.role_add_service, + client.role_remove_service, + ) + or changed + ) if user is not None: - changed = client.modify_if_diff(name, ipa_role.get('member_user', []), user, - client.role_add_user, - client.role_remove_user) or changed + changed = ( + client.modify_if_diff( + name, ipa_role.get("member_user", []), user, client.role_add_user, client.role_remove_user + ) + or changed + ) else: if ipa_role: @@ -275,32 +305,34 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - group=dict(type='list', elements='str'), - host=dict(type='list', elements='str'), - hostgroup=dict(type='list', elements='str'), - privilege=dict(type='list', elements='str'), - service=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent']), - user=dict(type='list', elements='str')) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = RoleIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) + argument_spec.update( + cn=dict(type="str", required=True, aliases=["name"]), + description=dict(type="str"), + group=dict(type="list", elements="str"), + host=dict(type="list", elements="str"), + hostgroup=dict(type="list", elements="str"), + 
privilege=dict(type="list", elements="str"), + service=dict(type="list", elements="str"), + state=dict(type="str", default="present", choices=["present", "absent"]), + user=dict(type="list", elements="str"), + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + client = RoleIPAClient( + module=module, + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], + ) try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, role = ensure(module, client) module.exit_json(changed=changed, role=role) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_service.py b/plugins/modules/ipa_service.py index b721889b286..71ad06d473d 100644 --- a/plugins/modules/ipa_service.py +++ b/plugins/modules/ipa_service.py @@ -102,40 +102,40 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def service_find(self, name): - return self._post_json(method='service_find', name=None, item={'all': True, 'krbcanonicalname': name}) + return self._post_json(method="service_find", name=None, item={"all": True, "krbcanonicalname": name}) def service_add(self, name, service): - return self._post_json(method='service_add', name=name, item=service) + return self._post_json(method="service_add", name=name, item=service) def service_mod(self, name, service): - return self._post_json(method='service_mod', name=name, item=service) + return self._post_json(method="service_mod", name=name, item=service) def service_del(self, name): - return self._post_json(method='service_del', name=name) + return self._post_json(method="service_del", name=name) def service_disable(self, name): - return self._post_json(method='service_disable', name=name) + return self._post_json(method="service_disable", name=name) def service_add_host(self, name, item): - return self._post_json(method='service_add_host', name=name, item={'host': item}) + return self._post_json(method="service_add_host", name=name, item={"host": item}) def service_remove_host(self, name, item): - return self._post_json(method='service_remove_host', name=name, item={'host': item}) + return self._post_json(method="service_remove_host", name=name, item={"host": item}) def get_service_dict(force=None, krbcanonicalname=None, skip_host_check=None): data = {} if force is not None: - data['force'] = force + data["force"] = force if krbcanonicalname is not None: - data['krbcanonicalname'] = krbcanonicalname + data["krbcanonicalname"] = krbcanonicalname if skip_host_check is not None: - data['skip_host_check'] = skip_host_check + data["skip_host_check"] = skip_host_check return data def get_service_diff(client, ipa_host, module_service): - non_updateable_keys = ['force', 'krbcanonicalname', 'skip_host_check'] + non_updateable_keys = ["force", "krbcanonicalname", "skip_host_check"] for key in non_updateable_keys: if key in module_service: del module_service[key] @@ -144,14 +144,14 @@ def get_service_diff(client, ipa_host, module_service): def ensure(module, client): - name = module.params['krbcanonicalname'] - state = module.params['state'] - hosts = module.params['hosts'] + name = module.params["krbcanonicalname"] + state = module.params["state"] + hosts = module.params["hosts"] ipa_service = 
client.service_find(name=name) - module_service = get_service_dict(force=module.params['force'], skip_host_check=module.params['skip_host_check']) + module_service = get_service_dict(force=module.params["force"], skip_host_check=module.params["skip_host_check"]) changed = False - if state in ['present', 'enabled', 'disabled']: + if state in ["present", "enabled", "disabled"]: if not ipa_service: changed = True if not module.check_mode: @@ -166,14 +166,14 @@ def ensure(module, client): data[key] = module_service.get(key) client.service_mod(name=name, service=data) if hosts is not None: - if 'managedby_host' in ipa_service: - for host in ipa_service['managedby_host']: + if "managedby_host" in ipa_service: + for host in ipa_service["managedby_host"]: if host not in hosts: if not module.check_mode: client.service_remove_host(name=name, item=host) changed = True for host in hosts: - if host not in ipa_service['managedby_host']: + if host not in ipa_service["managedby_host"]: if not module.check_mode: client.service_add_host(name=name, item=host) changed = True @@ -195,29 +195,29 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() argument_spec.update( - krbcanonicalname=dict(type='str', required=True, aliases=['name']), - force=dict(type='bool'), - skip_host_check=dict(type='bool', default=False), - hosts=dict(type='list', elements='str'), - state=dict(type='str', default='present', - choices=['present', 'absent'])) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = ServiceIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) + krbcanonicalname=dict(type="str", required=True, aliases=["name"]), + force=dict(type="bool"), + skip_host_check=dict(type="bool", default=False), + hosts=dict(type="list", elements="str"), + state=dict(type="str", default="present", choices=["present", "absent"]), + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + client = ServiceIPAClient( + module=module, + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], + ) try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, host = ensure(module, client) module.exit_json(changed=changed, host=host) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_subca.py b/plugins/modules/ipa_subca.py index f296acb97bc..f45edcf1199 100644 --- a/plugins/modules/ipa_subca.py +++ b/plugins/modules/ipa_subca.py @@ -90,14 +90,14 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def subca_find(self, subca_name): - return self._post_json(method='ca_find', name=subca_name, item=None) + return self._post_json(method="ca_find", name=subca_name, item=None) def subca_add(self, subca_name=None, subject_dn=None, details=None): item = dict(ipacasubjectdn=subject_dn) - subca_desc = details.get('description', None) + subca_desc = details.get("description", None) if subca_desc is not None: item.update(description=subca_desc) - return self._post_json(method='ca_add', name=subca_name, item=item) + return self._post_json(method="ca_add", name=subca_name, item=item) def subca_mod(self, subca_name=None, 
diff=None, details=None): item = get_subca_dict(details) @@ -105,24 +105,24 @@ def subca_mod(self, subca_name=None, diff=None, details=None): update_detail = dict() if item[change] is not None: update_detail.update(setattr=f"{change}={item[change]}") - self._post_json(method='ca_mod', name=subca_name, item=update_detail) + self._post_json(method="ca_mod", name=subca_name, item=update_detail) def subca_del(self, subca_name=None): - return self._post_json(method='ca_del', name=subca_name) + return self._post_json(method="ca_del", name=subca_name) def subca_disable(self, subca_name=None): - return self._post_json(method='ca_disable', name=subca_name) + return self._post_json(method="ca_disable", name=subca_name) def subca_enable(self, subca_name=None): - return self._post_json(method='ca_enable', name=subca_name) + return self._post_json(method="ca_enable", name=subca_name) def get_subca_dict(details=None): module_subca = dict() - if details['description'] is not None: - module_subca['description'] = details['description'] - if details['subca_subject'] is not None: - module_subca['ipacasubjectdn'] = details['subca_subject'] + if details["description"] is not None: + module_subca["description"] = details["description"] + if details["subca_subject"] is not None: + module_subca["ipacasubjectdn"] = details["subca_subject"] return module_subca @@ -132,18 +132,17 @@ def get_subca_diff(client, ipa_subca, module_subca): def ensure(module, client): - subca_name = module.params['subca_name'] - subca_subject_dn = module.params['subca_subject'] - subca_desc = module.params['subca_desc'] + subca_name = module.params["subca_name"] + subca_subject_dn = module.params["subca_subject"] + subca_desc = module.params["subca_desc"] - state = module.params['state'] + state = module.params["state"] ipa_subca = client.subca_find(subca_name) - module_subca = dict(description=subca_desc, - subca_subject=subca_subject_dn) + module_subca = dict(description=subca_desc, subca_subject=subca_subject_dn) changed = False - if state == 'present': + if state == "present": if not ipa_subca: changed = True if not module.check_mode: @@ -152,33 +151,37 @@ def ensure(module, client): diff = get_subca_diff(client, ipa_subca, module_subca) # IPA does not allow to modify Sub CA's subject DN # So skip it for now. - if 'ipacasubjectdn' in diff: - diff.remove('ipacasubjectdn') - del module_subca['subca_subject'] + if "ipacasubjectdn" in diff: + diff.remove("ipacasubjectdn") + del module_subca["subca_subject"] if len(diff) > 0: changed = True if not module.check_mode: client.subca_mod(subca_name=subca_name, diff=diff, details=module_subca) - elif state == 'absent': + elif state == "absent": if ipa_subca: changed = True if not module.check_mode: client.subca_del(subca_name=subca_name) - elif state == 'disable': + elif state == "disabled": ipa_version = client.get_ipa_version() - if LooseVersion(ipa_version) < LooseVersion('4.4.2'): - module.fail_json(msg="Current version of IPA server [%s] does not support 'CA disable' option. Please upgrade to " - "version greater than 4.4.2") + if LooseVersion(ipa_version) < LooseVersion("4.4.2"): + module.fail_json( + msg=f"Current version of IPA server [{ipa_version}] does not support 'CA disable' option. 
Please upgrade to " + "version greater than 4.4.2" + ) if ipa_subca: changed = True if not module.check_mode: client.subca_disable(subca_name=subca_name) - elif state == 'enable': + elif state == "enabled": ipa_version = client.get_ipa_version() - if LooseVersion(ipa_version) < LooseVersion('4.4.2'): - module.fail_json(msg="Current version of IPA server [%s] does not support 'CA enable' option. Please upgrade to " - "version greater than 4.4.2") + if LooseVersion(ipa_version) < LooseVersion("4.4.2"): + module.fail_json( + msg=f"Current version of IPA server [{ipa_version}] does not support 'CA enable' option. Please upgrade to " + "version greater than 4.4.2" + ) if ipa_subca: changed = True if not module.check_mode: @@ -189,28 +192,32 @@ def main(): argument_spec = ipa_argument_spec() - argument_spec.update(subca_name=dict(type='str', required=True, aliases=['name']), - subca_subject=dict(type='str', required=True), - subca_desc=dict(type='str'), - state=dict(type='str', default='present', - choices=['present', 'absent', 'enabled', 'disabled']),) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True,) - - client = SubCAIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) + argument_spec.update( + subca_name=dict(type="str", required=True, aliases=["name"]), + subca_subject=dict(type="str", required=True), + subca_desc=dict(type="str"), + state=dict(type="str", default="present", choices=["present", "absent", "enabled", "disabled"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + client = SubCAIPAClient( + module=module, + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], + ) try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, record = ensure(module, client) module.exit_json(changed=changed, record=record) except Exception as exc: module.fail_json(msg=to_native(exc)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_sudocmd.py b/plugins/modules/ipa_sudocmd.py index ac2607bd7a7..deee41108f9 100644 --- a/plugins/modules/ipa_sudocmd.py +++ b/plugins/modules/ipa_sudocmd.py @@ -75,22 +75,22 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def sudocmd_find(self, name): - return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name}) + return self._post_json(method="sudocmd_find", name=None, item={"all": True, "sudocmd": name}) def sudocmd_add(self, name, item): - return self._post_json(method='sudocmd_add', name=name, item=item) + return self._post_json(method="sudocmd_add", name=name, item=item) def sudocmd_mod(self, name, item): - return self._post_json(method='sudocmd_mod', name=name, item=item) + return self._post_json(method="sudocmd_mod", name=name, item=item) def sudocmd_del(self, name): - return self._post_json(method='sudocmd_del', name=name) + return self._post_json(method="sudocmd_del", name=name) def get_sudocmd_dict(description=None): data = {} if description is not None: - data['description'] = description + data["description"] = description return data @@ -99,14 +99,14 @@ def get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd): def ensure(module, client): - name = module.params['sudocmd'] - 
state = module.params['state'] + name = module.params["sudocmd"] + state = module.params["state"] - module_sudocmd = get_sudocmd_dict(description=module.params['description']) + module_sudocmd = get_sudocmd_dict(description=module.params["description"]) ipa_sudocmd = client.sudocmd_find(name=name) changed = False - if state == 'present': + if state == "present": if not ipa_sudocmd: changed = True if not module.check_mode: @@ -131,25 +131,27 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() - argument_spec.update(description=dict(type='str'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - sudocmd=dict(type='str', required=True, aliases=['name'])) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = SudoCmdIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) + argument_spec.update( + description=dict(type="str"), + state=dict(type="str", default="present", choices=["present", "absent", "enabled", "disabled"]), + sudocmd=dict(type="str", required=True, aliases=["name"]), + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + client = SudoCmdIPAClient( + module=module, + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], + ) try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, sudocmd = ensure(module, client) module.exit_json(changed=changed, sudocmd=sudocmd) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_sudocmdgroup.py b/plugins/modules/ipa_sudocmdgroup.py index 4ceb072f1bb..093c380e310 100644 --- a/plugins/modules/ipa_sudocmdgroup.py +++ b/plugins/modules/ipa_sudocmdgroup.py @@ -84,34 +84,34 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def sudocmdgroup_find(self, name): - return self._post_json(method='sudocmdgroup_find', name=None, item={'all': True, 'cn': name}) + return self._post_json(method="sudocmdgroup_find", name=None, item={"all": True, "cn": name}) def sudocmdgroup_add(self, name, item): - return self._post_json(method='sudocmdgroup_add', name=name, item=item) + return self._post_json(method="sudocmdgroup_add", name=name, item=item) def sudocmdgroup_mod(self, name, item): - return self._post_json(method='sudocmdgroup_mod', name=name, item=item) + return self._post_json(method="sudocmdgroup_mod", name=name, item=item) def sudocmdgroup_del(self, name): - return self._post_json(method='sudocmdgroup_del', name=name) + return self._post_json(method="sudocmdgroup_del", name=name) def sudocmdgroup_add_member(self, name, item): - return self._post_json(method='sudocmdgroup_add_member', name=name, item=item) + return self._post_json(method="sudocmdgroup_add_member", name=name, item=item) def sudocmdgroup_add_member_sudocmd(self, name, item): - return self.sudocmdgroup_add_member(name=name, item={'sudocmd': item}) + return self.sudocmdgroup_add_member(name=name, item={"sudocmd": item}) def sudocmdgroup_remove_member(self, name, item): - return self._post_json(method='sudocmdgroup_remove_member', name=name, item=item) + return 
self._post_json(method="sudocmdgroup_remove_member", name=name, item=item) def sudocmdgroup_remove_member_sudocmd(self, name, item): - return self.sudocmdgroup_remove_member(name=name, item={'sudocmd': item}) + return self.sudocmdgroup_remove_member(name=name, item={"sudocmd": item}) def get_sudocmdgroup_dict(description=None): data = {} if description is not None: - data['description'] = description + data["description"] = description return data @@ -120,15 +120,15 @@ def get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup): def ensure(module, client): - name = module.params['cn'] - state = module.params['state'] - sudocmd = module.params['sudocmd'] + name = module.params["cn"] + state = module.params["state"] + sudocmd = module.params["sudocmd"] - module_sudocmdgroup = get_sudocmdgroup_dict(description=module.params['description']) + module_sudocmdgroup = get_sudocmdgroup_dict(description=module.params["description"]) ipa_sudocmdgroup = client.sudocmdgroup_find(name=name) changed = False - if state == 'present': + if state == "present": if not ipa_sudocmdgroup: changed = True if not module.check_mode: @@ -144,9 +144,13 @@ def ensure(module, client): client.sudocmdgroup_mod(name=name, item=data) if sudocmd is not None: - changed = client.modify_if_diff(name, ipa_sudocmdgroup.get('member_sudocmd', []), sudocmd, - client.sudocmdgroup_add_member_sudocmd, - client.sudocmdgroup_remove_member_sudocmd) + changed = client.modify_if_diff( + name, + ipa_sudocmdgroup.get("member_sudocmd", []), + sudocmd, + client.sudocmdgroup_add_member_sudocmd, + client.sudocmdgroup_remove_member_sudocmd, + ) else: if ipa_sudocmdgroup: changed = True @@ -158,26 +162,28 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - sudocmd=dict(type='list', elements='str')) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = SudoCmdGroupIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) + argument_spec.update( + cn=dict(type="str", required=True, aliases=["name"]), + description=dict(type="str"), + state=dict(type="str", default="present", choices=["present", "absent", "enabled", "disabled"]), + sudocmd=dict(type="list", elements="str"), + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + client = SudoCmdGroupIPAClient( + module=module, + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], + ) try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, sudocmdgroup = ensure(module, client) module.exit_json(changed=changed, sudorule=sudocmdgroup) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_sudorule.py b/plugins/modules/ipa_sudorule.py index 7befb354bc1..990ea147dd1 100644 --- a/plugins/modules/ipa_sudorule.py +++ b/plugins/modules/ipa_sudorule.py @@ -208,159 +208,168 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def sudorule_find(self, name): - 
return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name}) + return self._post_json(method="sudorule_find", name=None, item={"all": True, "cn": name}) def sudorule_add(self, name, item): - return self._post_json(method='sudorule_add', name=name, item=item) + return self._post_json(method="sudorule_add", name=name, item=item) def sudorule_add_runasuser(self, name, item): - return self._post_json(method='sudorule_add_runasuser', name=name, item={'user': item}) + return self._post_json(method="sudorule_add_runasuser", name=name, item={"user": item}) def sudorule_remove_runasuser(self, name, item): - return self._post_json(method='sudorule_remove_runasuser', name=name, item={'user': item}) + return self._post_json(method="sudorule_remove_runasuser", name=name, item={"user": item}) def sudorule_mod(self, name, item): - return self._post_json(method='sudorule_mod', name=name, item=item) + return self._post_json(method="sudorule_mod", name=name, item=item) def sudorule_del(self, name): - return self._post_json(method='sudorule_del', name=name) + return self._post_json(method="sudorule_del", name=name) def sudorule_add_option(self, name, item): - return self._post_json(method='sudorule_add_option', name=name, item=item) + return self._post_json(method="sudorule_add_option", name=name, item=item) def sudorule_add_option_ipasudoopt(self, name, item): - return self.sudorule_add_option(name=name, item={'ipasudoopt': item}) + return self.sudorule_add_option(name=name, item={"ipasudoopt": item}) def sudorule_remove_option(self, name, item): - return self._post_json(method='sudorule_remove_option', name=name, item=item) + return self._post_json(method="sudorule_remove_option", name=name, item=item) def sudorule_remove_option_ipasudoopt(self, name, item): - return self.sudorule_remove_option(name=name, item={'ipasudoopt': item}) + return self.sudorule_remove_option(name=name, item={"ipasudoopt": item}) def sudorule_add_host(self, name, item): - return self._post_json(method='sudorule_add_host', name=name, item=item) + return self._post_json(method="sudorule_add_host", name=name, item=item) def sudorule_add_host_host(self, name, item): - return self.sudorule_add_host(name=name, item={'host': item}) + return self.sudorule_add_host(name=name, item={"host": item}) def sudorule_add_host_hostgroup(self, name, item): - return self.sudorule_add_host(name=name, item={'hostgroup': item}) + return self.sudorule_add_host(name=name, item={"hostgroup": item}) def sudorule_remove_host(self, name, item): - return self._post_json(method='sudorule_remove_host', name=name, item=item) + return self._post_json(method="sudorule_remove_host", name=name, item=item) def sudorule_remove_host_host(self, name, item): - return self.sudorule_remove_host(name=name, item={'host': item}) + return self.sudorule_remove_host(name=name, item={"host": item}) def sudorule_remove_host_hostgroup(self, name, item): - return self.sudorule_remove_host(name=name, item={'hostgroup': item}) + return self.sudorule_remove_host(name=name, item={"hostgroup": item}) def sudorule_add_allow_command(self, name, item): - return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmd': item}) + return self._post_json(method="sudorule_add_allow_command", name=name, item={"sudocmd": item}) def sudorule_add_allow_command_group(self, name, item): - return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmdgroup': item}) + return self._post_json(method="sudorule_add_allow_command", 
name=name, item={"sudocmdgroup": item}) def sudorule_add_deny_command(self, name, item): - return self._post_json(method='sudorule_add_deny_command', name=name, item={'sudocmd': item}) + return self._post_json(method="sudorule_add_deny_command", name=name, item={"sudocmd": item}) def sudorule_add_deny_command_group(self, name, item): - return self._post_json(method='sudorule_add_deny_command', name=name, item={'sudocmdgroup': item}) + return self._post_json(method="sudorule_add_deny_command", name=name, item={"sudocmdgroup": item}) def sudorule_remove_allow_command(self, name, item): - return self._post_json(method='sudorule_remove_allow_command', name=name, item=item) + return self._post_json(method="sudorule_remove_allow_command", name=name, item=item) def sudorule_add_user(self, name, item): - return self._post_json(method='sudorule_add_user', name=name, item=item) + return self._post_json(method="sudorule_add_user", name=name, item=item) def sudorule_add_user_user(self, name, item): - return self.sudorule_add_user(name=name, item={'user': item}) + return self.sudorule_add_user(name=name, item={"user": item}) def sudorule_add_user_group(self, name, item): - return self.sudorule_add_user(name=name, item={'group': item}) + return self.sudorule_add_user(name=name, item={"group": item}) def sudorule_remove_user(self, name, item): - return self._post_json(method='sudorule_remove_user', name=name, item=item) + return self._post_json(method="sudorule_remove_user", name=name, item=item) def sudorule_remove_user_user(self, name, item): - return self.sudorule_remove_user(name=name, item={'user': item}) + return self.sudorule_remove_user(name=name, item={"user": item}) def sudorule_remove_user_group(self, name, item): - return self.sudorule_remove_user(name=name, item={'group': item}) - - -def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None, - runasgroupcategory=None, runasusercategory=None): + return self.sudorule_remove_user(name=name, item={"group": item}) + + +def get_sudorule_dict( + cmdcategory=None, + description=None, + hostcategory=None, + ipaenabledflag=None, + usercategory=None, + runasgroupcategory=None, + runasusercategory=None, +): data = {} if cmdcategory is not None: - data['cmdcategory'] = cmdcategory + data["cmdcategory"] = cmdcategory if description is not None: - data['description'] = description + data["description"] = description if hostcategory is not None: - data['hostcategory'] = hostcategory + data["hostcategory"] = hostcategory if ipaenabledflag is not None: - data['ipaenabledflag'] = ipaenabledflag + data["ipaenabledflag"] = ipaenabledflag if usercategory is not None: - data['usercategory'] = usercategory + data["usercategory"] = usercategory if runasusercategory is not None: - data['ipasudorunasusercategory'] = runasusercategory + data["ipasudorunasusercategory"] = runasusercategory if runasgroupcategory is not None: - data['ipasudorunasgroupcategory'] = runasgroupcategory + data["ipasudorunasgroupcategory"] = runasgroupcategory return data def category_changed(module, client, category_name, ipa_sudorule): - if ipa_sudorule.get(category_name, None) == ['all']: + if ipa_sudorule.get(category_name, None) == ["all"]: if not module.check_mode: # cn is returned as list even with only a single value. 
- client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None}) + client.sudorule_mod(name=ipa_sudorule.get("cn")[0], item={category_name: None}) return True return False def ensure(module, client): - state = module.params['state'] - name = module.params['cn'] - cmd = module.params['cmd'] - cmdgroup = module.params['cmdgroup'] - cmdcategory = module.params['cmdcategory'] - deny_cmd = module.params['deny_cmd'] - deny_cmdgroup = module.params['deny_cmdgroup'] - host = module.params['host'] - hostcategory = module.params['hostcategory'] - hostgroup = module.params['hostgroup'] - runasusercategory = module.params['runasusercategory'] - runasgroupcategory = module.params['runasgroupcategory'] - runasextusers = module.params['runasextusers'] + state = module.params["state"] + name = module.params["cn"] + cmd = module.params["cmd"] + cmdgroup = module.params["cmdgroup"] + cmdcategory = module.params["cmdcategory"] + deny_cmd = module.params["deny_cmd"] + deny_cmdgroup = module.params["deny_cmdgroup"] + host = module.params["host"] + hostcategory = module.params["hostcategory"] + hostgroup = module.params["hostgroup"] + runasusercategory = module.params["runasusercategory"] + runasgroupcategory = module.params["runasgroupcategory"] + runasextusers = module.params["runasextusers"] ipa_version = client.get_ipa_version() - if state in ['present', 'enabled']: - if LooseVersion(ipa_version) < LooseVersion('4.9.10'): - ipaenabledflag = 'TRUE' + if state in ["present", "enabled"]: + if LooseVersion(ipa_version) < LooseVersion("4.9.10"): + ipaenabledflag = "TRUE" else: ipaenabledflag = True else: - if LooseVersion(ipa_version) < LooseVersion('4.9.10'): - ipaenabledflag = 'FALSE' + if LooseVersion(ipa_version) < LooseVersion("4.9.10"): + ipaenabledflag = "FALSE" else: ipaenabledflag = False - sudoopt = module.params['sudoopt'] - user = module.params['user'] - usercategory = module.params['usercategory'] - usergroup = module.params['usergroup'] - - module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory, - description=module.params['description'], - hostcategory=hostcategory, - ipaenabledflag=ipaenabledflag, - usercategory=usercategory, - runasusercategory=runasusercategory, - runasgroupcategory=runasgroupcategory) + sudoopt = module.params["sudoopt"] + user = module.params["user"] + usercategory = module.params["usercategory"] + usergroup = module.params["usergroup"] + + module_sudorule = get_sudorule_dict( + cmdcategory=cmdcategory, + description=module.params["description"], + hostcategory=hostcategory, + ipaenabledflag=ipaenabledflag, + usercategory=usercategory, + runasusercategory=runasusercategory, + runasgroupcategory=runasgroupcategory, + ) ipa_sudorule = client.sudorule_find(name=name) changed = False - if state in ['present', 'disabled', 'enabled']: + if state in ["present", "disabled", "enabled"]: if not ipa_sudorule: changed = True if not module.check_mode: @@ -370,55 +379,70 @@ def ensure(module, client): if len(diff) > 0: changed = True if not module.check_mode: - if 'hostcategory' in diff: - if ipa_sudorule.get('memberhost_host', None) is not None: - client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host')) - if ipa_sudorule.get('memberhost_hostgroup', None) is not None: - client.sudorule_remove_host_hostgroup(name=name, - item=ipa_sudorule.get('memberhost_hostgroup')) + if "hostcategory" in diff: + if ipa_sudorule.get("memberhost_host", None) is not None: + client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get("memberhost_host")) 
+ if ipa_sudorule.get("memberhost_hostgroup", None) is not None: + client.sudorule_remove_host_hostgroup( + name=name, item=ipa_sudorule.get("memberhost_hostgroup") + ) client.sudorule_mod(name=name, item=module_sudorule) if cmd is not None: - changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed + changed = category_changed(module, client, "cmdcategory", ipa_sudorule) or changed if not module.check_mode: client.sudorule_add_allow_command(name=name, item=cmd) if cmdgroup is not None: - changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed + changed = category_changed(module, client, "cmdcategory", ipa_sudorule) or changed if not module.check_mode: client.sudorule_add_allow_command_group(name=name, item=cmdgroup) if deny_cmd is not None: - changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed + changed = category_changed(module, client, "cmdcategory", ipa_sudorule) or changed if not module.check_mode: client.sudorule_add_deny_command(name=name, item=deny_cmd) if deny_cmdgroup is not None: - changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed + changed = category_changed(module, client, "cmdcategory", ipa_sudorule) or changed if not module.check_mode: client.sudorule_add_deny_command_group(name=name, item=deny_cmdgroup) if runasusercategory is not None: - changed = category_changed(module, client, 'iparunasusercategory', ipa_sudorule) or changed + changed = category_changed(module, client, "iparunasusercategory", ipa_sudorule) or changed if runasgroupcategory is not None: - changed = category_changed(module, client, 'iparunasgroupcategory', ipa_sudorule) or changed + changed = category_changed(module, client, "iparunasgroupcategory", ipa_sudorule) or changed if host is not None: - changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed - changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_host', []), host, - client.sudorule_add_host_host, - client.sudorule_remove_host_host) or changed + changed = category_changed(module, client, "hostcategory", ipa_sudorule) or changed + changed = ( + client.modify_if_diff( + name, + ipa_sudorule.get("memberhost_host", []), + host, + client.sudorule_add_host_host, + client.sudorule_remove_host_host, + ) + or changed + ) if hostgroup is not None: - changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed - changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup, - client.sudorule_add_host_hostgroup, - client.sudorule_remove_host_hostgroup) or changed + changed = category_changed(module, client, "hostcategory", ipa_sudorule) or changed + changed = ( + client.modify_if_diff( + name, + ipa_sudorule.get("memberhost_hostgroup", []), + hostgroup, + client.sudorule_add_host_hostgroup, + client.sudorule_remove_host_hostgroup, + ) + or changed + ) if sudoopt is not None: # client.modify_if_diff does not work as each option must be removed/added by its own - ipa_list = ipa_sudorule.get('ipasudoopt', []) + ipa_list = ipa_sudorule.get("ipasudoopt", []) module_list = sudoopt diff = list(set(ipa_list) - set(module_list)) if len(diff) > 0: @@ -434,7 +458,7 @@ def ensure(module, client): client.sudorule_add_option_ipasudoopt(name, item) if runasextusers is not None: - ipa_sudorule_run_as_user = ipa_sudorule.get('ipasudorunasextuser', []) + ipa_sudorule_run_as_user = ipa_sudorule.get("ipasudorunasextuser", []) diff = list(set(ipa_sudorule_run_as_user) - 
set(runasextusers)) if len(diff) > 0: changed = True @@ -449,15 +473,29 @@ def ensure(module, client): client.sudorule_add_runasuser(name=name, item=item) if user is not None: - changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed - changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user, - client.sudorule_add_user_user, - client.sudorule_remove_user_user) or changed + changed = category_changed(module, client, "usercategory", ipa_sudorule) or changed + changed = ( + client.modify_if_diff( + name, + ipa_sudorule.get("memberuser_user", []), + user, + client.sudorule_add_user_user, + client.sudorule_remove_user_user, + ) + or changed + ) if usergroup is not None: - changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed - changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_group', []), usergroup, - client.sudorule_add_user_group, - client.sudorule_remove_user_group) or changed + changed = category_changed(module, client, "usercategory", ipa_sudorule) or changed + changed = ( + client.modify_if_diff( + name, + ipa_sudorule.get("memberuser_group", []), + usergroup, + client.sudorule_add_user_group, + client.sudorule_remove_user_group, + ) + or changed + ) else: if ipa_sudorule: changed = True @@ -469,47 +507,54 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() - argument_spec.update(cmd=dict(type='list', elements='str'), - cmdgroup=dict(type='list', elements='str'), - cmdcategory=dict(type='str', choices=['all']), - cn=dict(type='str', required=True, aliases=['name']), - deny_cmd=dict(type='list', elements='str'), - deny_cmdgroup=dict(type='list', elements='str'), - description=dict(type='str'), - host=dict(type='list', elements='str'), - hostcategory=dict(type='str', choices=['all']), - hostgroup=dict(type='list', elements='str'), - runasusercategory=dict(type='str', choices=['all']), - runasgroupcategory=dict(type='str', choices=['all']), - sudoopt=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - user=dict(type='list', elements='str'), - usercategory=dict(type='str', choices=['all']), - usergroup=dict(type='list', elements='str'), - runasextusers=dict(type='list', elements='str')) - module = AnsibleModule(argument_spec=argument_spec, - mutually_exclusive=[['cmdcategory', 'cmd'], - ['cmdcategory', 'deny_cmd'], - ['cmdcategory', 'cmdgroup'], - ['cmdcategory', 'deny_cmdgroup'], - ['hostcategory', 'host'], - ['hostcategory', 'hostgroup'], - ['usercategory', 'user'], - ['usercategory', 'usergroup']], - supports_check_mode=True) - - client = SudoRuleIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) + argument_spec.update( + cmd=dict(type="list", elements="str"), + cmdgroup=dict(type="list", elements="str"), + cmdcategory=dict(type="str", choices=["all"]), + cn=dict(type="str", required=True, aliases=["name"]), + deny_cmd=dict(type="list", elements="str"), + deny_cmdgroup=dict(type="list", elements="str"), + description=dict(type="str"), + host=dict(type="list", elements="str"), + hostcategory=dict(type="str", choices=["all"]), + hostgroup=dict(type="list", elements="str"), + runasusercategory=dict(type="str", choices=["all"]), + runasgroupcategory=dict(type="str", choices=["all"]), + sudoopt=dict(type="list", elements="str"), + state=dict(type="str", default="present", choices=["present", "absent", 
"enabled", "disabled"]), + user=dict(type="list", elements="str"), + usercategory=dict(type="str", choices=["all"]), + usergroup=dict(type="list", elements="str"), + runasextusers=dict(type="list", elements="str"), + ) + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ["cmdcategory", "cmd"], + ["cmdcategory", "deny_cmd"], + ["cmdcategory", "cmdgroup"], + ["cmdcategory", "deny_cmdgroup"], + ["hostcategory", "host"], + ["hostcategory", "hostgroup"], + ["usercategory", "user"], + ["usercategory", "usergroup"], + ], + supports_check_mode=True, + ) + + client = SudoRuleIPAClient( + module=module, + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], + ) try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, sudorule = ensure(module, client) module.exit_json(changed=changed, sudorule=sudorule) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_user.py b/plugins/modules/ipa_user.py index 0b3a84832df..50dbe6d58b8 100644 --- a/plugins/modules/ipa_user.py +++ b/plugins/modules/ipa_user.py @@ -191,58 +191,71 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def user_find(self, name): - return self._post_json(method='user_find', name=None, item={'all': True, 'uid': name}) + return self._post_json(method="user_find", name=None, item={"all": True, "uid": name}) def user_add(self, name, item): - return self._post_json(method='user_add', name=name, item=item) + return self._post_json(method="user_add", name=name, item=item) def user_mod(self, name, item): - return self._post_json(method='user_mod', name=name, item=item) + return self._post_json(method="user_mod", name=name, item=item) def user_del(self, name): - return self._post_json(method='user_del', name=name) + return self._post_json(method="user_del", name=name) def user_disable(self, name): - return self._post_json(method='user_disable', name=name) + return self._post_json(method="user_disable", name=name) def user_enable(self, name): - return self._post_json(method='user_enable', name=name) - - -def get_user_dict(displayname=None, givenname=None, krbpasswordexpiration=None, loginshell=None, - mail=None, nsaccountlock=False, sn=None, sshpubkey=None, telephonenumber=None, - title=None, userpassword=None, gidnumber=None, uidnumber=None, homedirectory=None, - userauthtype=None): + return self._post_json(method="user_enable", name=name) + + +def get_user_dict( + displayname=None, + givenname=None, + krbpasswordexpiration=None, + loginshell=None, + mail=None, + nsaccountlock=False, + sn=None, + sshpubkey=None, + telephonenumber=None, + title=None, + userpassword=None, + gidnumber=None, + uidnumber=None, + homedirectory=None, + userauthtype=None, +): user = {} if displayname is not None: - user['displayname'] = displayname + user["displayname"] = displayname if krbpasswordexpiration is not None: - user['krbpasswordexpiration'] = f"{krbpasswordexpiration}Z" + user["krbpasswordexpiration"] = f"{krbpasswordexpiration}Z" if givenname is not None: - user['givenname'] = givenname + user["givenname"] = givenname if loginshell is not None: - user['loginshell'] = loginshell + user["loginshell"] = loginshell if mail is not None: - user['mail'] = mail - 
user['nsaccountlock'] = nsaccountlock + user["mail"] = mail + user["nsaccountlock"] = nsaccountlock if sn is not None: - user['sn'] = sn + user["sn"] = sn if sshpubkey is not None: - user['ipasshpubkey'] = sshpubkey + user["ipasshpubkey"] = sshpubkey if telephonenumber is not None: - user['telephonenumber'] = telephonenumber + user["telephonenumber"] = telephonenumber if title is not None: - user['title'] = title + user["title"] = title if userpassword is not None: - user['userpassword'] = userpassword + user["userpassword"] = userpassword if gidnumber is not None: - user['gidnumber'] = gidnumber + user["gidnumber"] = gidnumber if uidnumber is not None: - user['uidnumber'] = uidnumber + user["uidnumber"] = uidnumber if homedirectory is not None: - user['homedirectory'] = homedirectory + user["homedirectory"] = homedirectory if userauthtype is not None: - user['ipauserauthtype'] = userauthtype + user["ipauserauthtype"] = userauthtype return user @@ -262,25 +275,27 @@ def get_user_diff(client, ipa_user, module_user): # sshpubkeyfp is the list of ssh key fingerprints. IPA doesn't return the keys itself but instead the fingerprints. # These are used for comparison. sshpubkey = None - if 'ipasshpubkey' in module_user: - hash_algo = 'md5' - if 'sshpubkeyfp' in ipa_user and ipa_user['sshpubkeyfp'][0][:7].upper() == 'SHA256:': - hash_algo = 'sha256' - module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey, hash_algo) for pubkey in module_user['ipasshpubkey']] + if "ipasshpubkey" in module_user: + hash_algo = "md5" + if "sshpubkeyfp" in ipa_user and ipa_user["sshpubkeyfp"][0][:7].upper() == "SHA256:": + hash_algo = "sha256" + module_user["sshpubkeyfp"] = [ + get_ssh_key_fingerprint(pubkey, hash_algo) for pubkey in module_user["ipasshpubkey"] + ] # Remove the ipasshpubkey element as it is not returned from IPA but save its value to be used later on - sshpubkey = module_user['ipasshpubkey'] - del module_user['ipasshpubkey'] + sshpubkey = module_user["ipasshpubkey"] + del module_user["ipasshpubkey"] result = client.get_diff(ipa_data=ipa_user, module_data=module_user) # If there are public keys, remove the fingerprints and add them back to the dict if sshpubkey is not None: - del module_user['sshpubkeyfp'] - module_user['ipasshpubkey'] = sshpubkey + del module_user["sshpubkeyfp"] + module_user["ipasshpubkey"] = sshpubkey return result -def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'): +def get_ssh_key_fingerprint(ssh_key, hash_algo="sha256"): """ Return the public key fingerprint of a given public SSH key in format "[fp] [comment] (ssh-rsa)" where fp is of the format: @@ -298,14 +313,14 @@ def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'): if len(parts) == 0: return None key_type = parts[0] - key = base64.b64decode(parts[1].encode('ascii')) + key = base64.b64decode(parts[1].encode("ascii")) - if hash_algo == 'md5': + if hash_algo == "md5": fp_plain = hashlib.md5(key).hexdigest() - key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper() - elif hash_algo == 'sha256': - fp_plain = base64.b64encode(hashlib.sha256(key).digest()).decode('ascii').rstrip('=') - key_fp = f'SHA256:{fp_plain}' + key_fp = ":".join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper() + elif hash_algo == "sha256": + fp_plain = base64.b64encode(hashlib.sha256(key).digest()).decode("ascii").rstrip("=") + key_fp = f"SHA256:{fp_plain}" if len(parts) < 3: return f"{key_fp} ({key_type})" else: @@ -314,34 +329,40 @@ def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'): def 
ensure(module, client): - state = module.params['state'] - name = module.params['uid'] - nsaccountlock = state == 'disabled' - - module_user = get_user_dict(displayname=module.params.get('displayname'), - krbpasswordexpiration=module.params.get('krbpasswordexpiration'), - givenname=module.params.get('givenname'), - loginshell=module.params['loginshell'], - mail=module.params['mail'], sn=module.params['sn'], - sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock, - telephonenumber=module.params['telephonenumber'], title=module.params['title'], - userpassword=module.params['password'], - gidnumber=module.params.get('gidnumber'), uidnumber=module.params.get('uidnumber'), - homedirectory=module.params.get('homedirectory'), - userauthtype=module.params.get('userauthtype')) - - update_password = module.params.get('update_password') + state = module.params["state"] + name = module.params["uid"] + nsaccountlock = state == "disabled" + + module_user = get_user_dict( + displayname=module.params.get("displayname"), + krbpasswordexpiration=module.params.get("krbpasswordexpiration"), + givenname=module.params.get("givenname"), + loginshell=module.params["loginshell"], + mail=module.params["mail"], + sn=module.params["sn"], + sshpubkey=module.params["sshpubkey"], + nsaccountlock=nsaccountlock, + telephonenumber=module.params["telephonenumber"], + title=module.params["title"], + userpassword=module.params["password"], + gidnumber=module.params.get("gidnumber"), + uidnumber=module.params.get("uidnumber"), + homedirectory=module.params.get("homedirectory"), + userauthtype=module.params.get("userauthtype"), + ) + + update_password = module.params.get("update_password") ipa_user = client.user_find(name=name) changed = False - if state in ['present', 'enabled', 'disabled']: + if state in ["present", "enabled", "disabled"]: if not ipa_user: changed = True if not module.check_mode: ipa_user = client.user_add(name=name, item=module_user) else: - if update_password == 'on_create': - module_user.pop('userpassword', None) + if update_password == "on_create": + module_user.pop("userpassword", None) diff = get_user_diff(client, ipa_user, module_user) if len(diff) > 0: changed = True @@ -358,51 +379,51 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() - argument_spec.update(displayname=dict(type='str'), - givenname=dict(type='str'), - update_password=dict(type='str', default="always", - choices=['always', 'on_create'], - no_log=False), - krbpasswordexpiration=dict(type='str', no_log=False), - loginshell=dict(type='str'), - mail=dict(type='list', elements='str'), - sn=dict(type='str'), - uid=dict(type='str', required=True, aliases=['name']), - gidnumber=dict(type='str'), - uidnumber=dict(type='str'), - password=dict(type='str', no_log=True), - sshpubkey=dict(type='list', elements='str'), - state=dict(type='str', default='present', - choices=['present', 'absent', 'enabled', 'disabled']), - telephonenumber=dict(type='list', elements='str'), - title=dict(type='str'), - homedirectory=dict(type='str'), - userauthtype=dict(type='list', elements='str', - choices=['password', 'radius', 'otp', 'pkinit', 'hardened', 'idp', 'passkey'])) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = UserIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) + argument_spec.update( + displayname=dict(type="str"), + givenname=dict(type="str"), + update_password=dict(type="str", 
default="always", choices=["always", "on_create"], no_log=False), + krbpasswordexpiration=dict(type="str", no_log=False), + loginshell=dict(type="str"), + mail=dict(type="list", elements="str"), + sn=dict(type="str"), + uid=dict(type="str", required=True, aliases=["name"]), + gidnumber=dict(type="str"), + uidnumber=dict(type="str"), + password=dict(type="str", no_log=True), + sshpubkey=dict(type="list", elements="str"), + state=dict(type="str", default="present", choices=["present", "absent", "enabled", "disabled"]), + telephonenumber=dict(type="list", elements="str"), + title=dict(type="str"), + homedirectory=dict(type="str"), + userauthtype=dict( + type="list", elements="str", choices=["password", "radius", "otp", "pkinit", "hardened", "idp", "passkey"] + ), + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + client = UserIPAClient( + module=module, + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], + ) # If sshpubkey is defined as None than module.params['sshpubkey'] is [None]. IPA itself returns None (not a list). # Therefore a small check here to replace list(None) by None. Otherwise get_user_diff() would return sshpubkey # as different which should be avoided. - if module.params['sshpubkey'] is not None: - if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] == "": - module.params['sshpubkey'] = None + if module.params["sshpubkey"] is not None: + if len(module.params["sshpubkey"]) == 1 and module.params["sshpubkey"][0] == "": + module.params["sshpubkey"] = None try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, user = ensure(module, client) module.exit_json(changed=changed, user=user) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipa_vault.py b/plugins/modules/ipa_vault.py index 2b016872110..1362baaba97 100644 --- a/plugins/modules/ipa_vault.py +++ b/plugins/modules/ipa_vault.py @@ -146,31 +146,31 @@ def __init__(self, module, host, port, protocol): super().__init__(module, host, port, protocol) def vault_find(self, name): - return self._post_json(method='vault_find', name=None, item={'all': True, 'cn': name}) + return self._post_json(method="vault_find", name=None, item={"all": True, "cn": name}) def vault_add_internal(self, name, item): - return self._post_json(method='vault_add_internal', name=name, item=item) + return self._post_json(method="vault_add_internal", name=name, item=item) def vault_mod_internal(self, name, item): - return self._post_json(method='vault_mod_internal', name=name, item=item) + return self._post_json(method="vault_mod_internal", name=name, item=item) def vault_del(self, name): - return self._post_json(method='vault_del', name=name) + return self._post_json(method="vault_del", name=name) def get_vault_dict(description=None, vault_type=None, vault_salt=None, vault_public_key=None, service=None): vault = {} if description is not None: - vault['description'] = description + vault["description"] = description if vault_type is not None: - vault['ipavaulttype'] = vault_type + vault["ipavaulttype"] = vault_type if vault_salt is not None: - vault['ipavaultsalt'] = vault_salt + vault["ipavaultsalt"] = vault_salt if vault_public_key is not None: - vault['ipavaultpublickey'] = 
vault_public_key + vault["ipavaultpublickey"] = vault_public_key if service is not None: - vault['service'] = service + vault["service"] = service return vault @@ -179,19 +179,22 @@ def get_vault_diff(client, ipa_vault, module_vault, module): def ensure(module, client): - state = module.params['state'] - name = module.params['cn'] - user = module.params['username'] - replace = module.params['replace'] - - module_vault = get_vault_dict(description=module.params['description'], vault_type=module.params['ipavaulttype'], - vault_salt=module.params['ipavaultsalt'], - vault_public_key=module.params['ipavaultpublickey'], - service=module.params['service']) + state = module.params["state"] + name = module.params["cn"] + user = module.params["username"] + replace = module.params["replace"] + + module_vault = get_vault_dict( + description=module.params["description"], + vault_type=module.params["ipavaulttype"], + vault_salt=module.params["ipavaultsalt"], + vault_public_key=module.params["ipavaultpublickey"], + service=module.params["service"], + ) ipa_vault = client.vault_find(name=name) changed = False - if state == 'present': + if state == "present": if not ipa_vault: # New vault changed = True @@ -220,33 +223,37 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() - argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), - description=dict(type='str'), - ipavaulttype=dict(type='str', default='symmetric', - choices=['standard', 'symmetric', 'asymmetric'], aliases=['vault_type']), - ipavaultsalt=dict(type='str', aliases=['vault_salt']), - ipavaultpublickey=dict(type='str', aliases=['vault_public_key']), - service=dict(type='str'), - replace=dict(type='bool', default=False, choices=[True, False]), - state=dict(type='str', default='present', choices=['present', 'absent']), - username=dict(type='list', elements='str', aliases=['user'])) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[['username', 'service']]) - - client = VaultIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) + argument_spec.update( + cn=dict(type="str", required=True, aliases=["name"]), + description=dict(type="str"), + ipavaulttype=dict( + type="str", default="symmetric", choices=["standard", "symmetric", "asymmetric"], aliases=["vault_type"] + ), + ipavaultsalt=dict(type="str", aliases=["vault_salt"]), + ipavaultpublickey=dict(type="str", aliases=["vault_public_key"]), + service=dict(type="str"), + replace=dict(type="bool", default=False, choices=[True, False]), + state=dict(type="str", default="present", choices=["present", "absent"]), + username=dict(type="list", elements="str", aliases=["user"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[["username", "service"]] + ) + + client = VaultIPAClient( + module=module, + host=module.params["ipa_host"], + port=module.params["ipa_port"], + protocol=module.params["ipa_prot"], + ) try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) + client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"]) changed, vault = ensure(module, client) module.exit_json(changed=changed, vault=vault) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipbase_info.py 
b/plugins/modules/ipbase_info.py index 1923c793611..f87c6c5cb48 100644 --- a/plugins/modules/ipbase_info.py +++ b/plugins/modules/ipbase_info.py @@ -218,12 +218,11 @@ from urllib.parse import urlencode -USER_AGENT = 'ansible-community.general.ipbase_info/0.1.0' -BASE_URL = 'https://api.ipbase.com/v2/info' +USER_AGENT = "ansible-community.general.ipbase_info/0.1.0" +BASE_URL = "https://api.ipbase.com/v2/info" class IpbaseInfo: - def __init__(self, module): self.module = module @@ -234,43 +233,42 @@ def _get_url_data(self, url): force=True, timeout=10, headers={ - 'Accept': 'application/json', - 'User-Agent': USER_AGENT, - }) + "Accept": "application/json", + "User-Agent": USER_AGENT, + }, + ) - if info['status'] != 200: + if info["status"] != 200: self.module.fail_json(msg=f"The API request to ipbase.com returned an error status code {info['status']}") else: try: content = response.read() - result = self.module.from_json(content.decode('utf8')) + result = self.module.from_json(content.decode("utf8")) except ValueError: - self.module.fail_json( - msg=f'Failed to parse the ipbase.com response: {url} {content}') + self.module.fail_json(msg=f"Failed to parse the ipbase.com response: {url} {content}") else: return result def info(self): - - ip = self.module.params['ip'] - apikey = self.module.params['apikey'] - hostname = self.module.params['hostname'] - language = self.module.params['language'] + ip = self.module.params["ip"] + apikey = self.module.params["apikey"] + hostname = self.module.params["hostname"] + language = self.module.params["language"] url = BASE_URL params = {} if ip: - params['ip'] = ip + params["ip"] = ip if apikey: - params['apikey'] = apikey + params["apikey"] = apikey if hostname: - params['hostname'] = 1 + params["hostname"] = 1 if language: - params['language'] = language + params["language"] = language if params: url += f"?{urlencode(params)}" @@ -280,10 +278,10 @@ def info(self): def main(): module_args = dict( - ip=dict(type='str', no_log=False), - apikey=dict(type='str', no_log=True), - hostname=dict(type='bool', no_log=False, default=False), - language=dict(type='str', no_log=False, default='en'), + ip=dict(type="str", no_log=False), + apikey=dict(type="str", no_log=True), + hostname=dict(type="bool", no_log=False, default=False), + language=dict(type="str", no_log=False, default="en"), ) module = AnsibleModule( @@ -295,5 +293,5 @@ def main(): module.exit_json(**ipbase.info()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipify_facts.py b/plugins/modules/ipify_facts.py index 194bbc6fcc2..f82b6338017 100644 --- a/plugins/modules/ipify_facts.py +++ b/plugins/modules/ipify_facts.py @@ -67,22 +67,21 @@ class IpifyFacts: - def __init__(self): - self.api_url = module.params.get('api_url') - self.timeout = module.params.get('timeout') + self.api_url = module.params.get("api_url") + self.timeout = module.params.get("timeout") def run(self): - result = { - 'ipify_public_ip': None - } + result = {"ipify_public_ip": None} (response, info) = fetch_url(module=module, url=f"{self.api_url}?format=json", force=True, timeout=self.timeout) if not response: - module.fail_json(msg=f"No valid or no response from url {self.api_url} within {self.timeout} seconds (timeout)") + module.fail_json( + msg=f"No valid or no response from url {self.api_url} within {self.timeout} seconds (timeout)" + ) data = json.loads(to_text(response.read())) - result['ipify_public_ip'] = data.get('ip') + result["ipify_public_ip"] = data.get("ip") return result @@ 
-90,9 +89,9 @@ def main(): global module module = AnsibleModule( argument_spec=dict( - api_url=dict(type='str', default='https://api.ipify.org/'), - timeout=dict(type='int', default=10), - validate_certs=dict(type='bool', default=True), + api_url=dict(type="str", default="https://api.ipify.org/"), + timeout=dict(type="int", default=10), + validate_certs=dict(type="bool", default=True), ), supports_check_mode=True, ) @@ -102,5 +101,5 @@ def main(): module.exit_json(**ipify_facts_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipinfoio_facts.py b/plugins/modules/ipinfoio_facts.py index 76c712a558b..f3170a84ced 100644 --- a/plugins/modules/ipinfoio_facts.py +++ b/plugins/modules/ipinfoio_facts.py @@ -84,30 +84,32 @@ from ansible.module_utils.urls import fetch_url -USER_AGENT = 'ansible-ipinfoio-module/0.0.1' +USER_AGENT = "ansible-ipinfoio-module/0.0.1" class IpinfoioFacts: - def __init__(self, module): - self.url = 'https://ipinfo.io/json' - self.timeout = module.params.get('timeout') + self.url = "https://ipinfo.io/json" + self.timeout = module.params.get("timeout") self.module = module def get_geo_data(self): - response, info = fetch_url(self.module, self.url, force=True, # NOQA - timeout=self.timeout) + response, info = fetch_url( + self.module, + self.url, + force=True, # NOQA + timeout=self.timeout, + ) try: - info['status'] == 200 + info["status"] == 200 except AssertionError: - self.module.fail_json(msg=f'Could not get {self.url} page, check for connectivity!') + self.module.fail_json(msg=f"Could not get {self.url} page, check for connectivity!") else: try: content = response.read() - result = self.module.from_json(content.decode('utf8')) + result = self.module.from_json(content.decode("utf8")) except ValueError: - self.module.fail_json( - msg=f'Failed to parse the ipinfo.io response: {self.url} {content}') + self.module.fail_json(msg=f"Failed to parse the ipinfo.io response: {self.url} {content}") else: return result @@ -116,16 +118,15 @@ def main(): module = AnsibleModule( # NOQA argument_spec=dict( http_agent=dict(default=USER_AGENT), - timeout=dict(type='int', default=10), + timeout=dict(type="int", default=10), ), supports_check_mode=True, ) ipinfoio = IpinfoioFacts(module) - ipinfoio_result = dict( - changed=False, ansible_facts=ipinfoio.get_geo_data()) + ipinfoio_result = dict(changed=False, ansible_facts=ipinfoio.get_geo_data()) module.exit_json(**ipinfoio_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipmi_boot.py b/plugins/modules/ipmi_boot.py index 23b412910e0..b8446d522b2 100644 --- a/plugins/modules/ipmi_boot.py +++ b/plugins/modules/ipmi_boot.py @@ -144,37 +144,37 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), - port=dict(default=623, type='int'), + port=dict(default=623, type="int"), user=dict(required=True, no_log=True), password=dict(required=True, no_log=True), - key=dict(type='str', no_log=True), - state=dict(default='present', choices=['present', 'absent']), - bootdev=dict(required=True, choices=['network', 'hd', 'floppy', 'safe', 'optical', 'setup', 'default']), - persistent=dict(default=False, type='bool'), - uefiboot=dict(default=False, type='bool') + key=dict(type="str", no_log=True), + state=dict(default="present", choices=["present", "absent"]), + bootdev=dict(required=True, choices=["network", "hd", "floppy", "safe", "optical", "setup", "default"]), + persistent=dict(default=False, type="bool"), + 
uefiboot=dict(default=False, type="bool"), ), supports_check_mode=True, ) if command is None: - module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR) - - name = module.params['name'] - port = module.params['port'] - user = module.params['user'] - password = module.params['password'] - state = module.params['state'] - bootdev = module.params['bootdev'] - persistent = module.params['persistent'] - uefiboot = module.params['uefiboot'] + module.fail_json(msg=missing_required_lib("pyghmi"), exception=PYGHMI_IMP_ERR) + + name = module.params["name"] + port = module.params["port"] + user = module.params["user"] + password = module.params["password"] + state = module.params["state"] + bootdev = module.params["bootdev"] + persistent = module.params["persistent"] + uefiboot = module.params["uefiboot"] request = dict() - if state == 'absent' and bootdev == 'default': + if state == "absent" and bootdev == "default": module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.") try: - if module.params['key']: - key = binascii.unhexlify(module.params['key']) + if module.params["key"]: + key = binascii.unhexlify(module.params["key"]) else: key = None except Exception as e: @@ -182,37 +182,35 @@ def main(): # --- run command --- try: - ipmi_cmd = command.Command( - bmc=name, userid=user, password=password, port=port, kg=key - ) + ipmi_cmd = command.Command(bmc=name, userid=user, password=password, port=port, kg=key) module.debug(f'ipmi instantiated - name: "{name}"') current = ipmi_cmd.get_bootdev() # uefimode may not supported by BMC, so use desired value as default - current.setdefault('uefimode', uefiboot) - if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot): + current.setdefault("uefimode", uefiboot) + if state == "present" and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot): request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent) - elif state == 'absent' and current['bootdev'] == bootdev: - request = dict(bootdev='default') + elif state == "absent" and current["bootdev"] == bootdev: + request = dict(bootdev="default") else: module.exit_json(changed=False, **current) if module.check_mode: - response = dict(bootdev=request['bootdev']) + response = dict(bootdev=request["bootdev"]) else: response = ipmi_cmd.set_bootdev(**request) - if 'error' in response: - module.fail_json(msg=response['error']) + if "error" in response: + module.fail_json(msg=response["error"]) - if 'persist' in request: - response['persistent'] = request['persist'] - if 'uefiboot' in request: - response['uefimode'] = request['uefiboot'] + if "persist" in request: + response["persistent"] = request["persist"] + if "uefiboot" in request: + response["uefimode"] = request["uefiboot"] module.exit_json(changed=True, **response) except Exception as e: module.fail_json(msg=str(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipmi_power.py b/plugins/modules/ipmi_power.py index 86b220f0bc3..f7fe4e066f1 100644 --- a/plugins/modules/ipmi_power.py +++ b/plugins/modules/ipmi_power.py @@ -170,40 +170,39 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), - port=dict(default=623, type='int'), - state=dict(choices=['on', 'off', 'shutdown', 'reset', 'boot']), + port=dict(default=623, type="int"), + state=dict(choices=["on", "off", "shutdown", "reset", "boot"]), user=dict(required=True, no_log=True), password=dict(required=True, 
no_log=True), - key=dict(type='str', no_log=True), - timeout=dict(default=300, type='int'), + key=dict(type="str", no_log=True), + timeout=dict(default=300, type="int"), machine=dict( - type='list', elements='dict', + type="list", + elements="dict", options=dict( - targetAddress=dict(required=True, type='int'), - state=dict(type='str', choices=['on', 'off', 'shutdown', 'reset', 'boot']), + targetAddress=dict(required=True, type="int"), + state=dict(type="str", choices=["on", "off", "shutdown", "reset", "boot"]), ), ), ), supports_check_mode=True, - required_one_of=( - ['state', 'machine'], - ), + required_one_of=(["state", "machine"],), ) if command is None: - module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR) + module.fail_json(msg=missing_required_lib("pyghmi"), exception=PYGHMI_IMP_ERR) - name = module.params['name'] - port = module.params['port'] - user = module.params['user'] - password = module.params['password'] - state = module.params['state'] - timeout = module.params['timeout'] - machine = module.params['machine'] + name = module.params["name"] + port = module.params["port"] + user = module.params["user"] + password = module.params["password"] + state = module.params["state"] + timeout = module.params["timeout"] + machine = module.params["machine"] try: - if module.params['key']: - key = binascii.unhexlify(module.params['key']) + if module.params["key"]: + key = binascii.unhexlify(module.params["key"]) else: key = None except Exception: @@ -211,29 +210,26 @@ def main(): # --- run command --- try: - ipmi_cmd = command.Command( - bmc=name, userid=user, password=password, port=port, kg=key - ) + ipmi_cmd = command.Command(bmc=name, userid=user, password=password, port=port, kg=key) module.debug(f'ipmi instantiated - name: "{name}"') changed = False if machine is None: current = ipmi_cmd.get_power() - if current['powerstate'] != state: - response = {'powerstate': state} if module.check_mode \ - else ipmi_cmd.set_power(state, wait=timeout) + if current["powerstate"] != state: + response = {"powerstate": state} if module.check_mode else ipmi_cmd.set_power(state, wait=timeout) changed = True else: response = current - if 'error' in response: - module.fail_json(msg=response['error']) + if "error" in response: + module.fail_json(msg=response["error"]) module.exit_json(changed=changed, **response) else: response = [] for entry in machine: - taddr = entry['targetAddress'] + taddr = entry["targetAddress"] if taddr >= INVALID_TARGET_ADDRESS: module.fail_json(msg="targetAddress should be set between 0 to 255.") @@ -241,33 +237,31 @@ def main(): # bridge_request is supported on pyghmi 1.5.30 and later current = ipmi_cmd.get_power(bridge_request={"addr": taddr}) except TypeError: - module.fail_json( - msg="targetAddress isn't supported on the installed pyghmi.") + module.fail_json(msg="targetAddress isn't supported on the installed pyghmi.") - if entry['state']: - tstate = entry['state'] + if entry["state"]: + tstate = entry["state"] elif state: tstate = state else: module.fail_json(msg="Either state or suboption of machine state should be set.") - if current['powerstate'] != tstate: + if current["powerstate"] != tstate: changed = True if not module.check_mode: new = ipmi_cmd.set_power(tstate, wait=timeout, bridge_request={"addr": taddr}) - if 'error' in new: - module.fail_json(msg=new['error']) + if "error" in new: + module.fail_json(msg=new["error"]) - response.append( - {'targetAddress:': taddr, 'powerstate': new['powerstate']}) + response.append({"targetAddress:": 
taddr, "powerstate": new["powerstate"]}) - if current['powerstate'] == tstate or module.check_mode: - response.append({'targetAddress:': taddr, 'powerstate': tstate}) + if current["powerstate"] == tstate or module.check_mode: + response.append({"targetAddress:": taddr, "powerstate": tstate}) module.exit_json(changed=changed, status=response) except Exception as e: module.fail_json(msg=str(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/iptables_state.py b/plugins/modules/iptables_state.py index 9677f01b510..a2046c386b9 100644 --- a/plugins/modules/iptables_state.py +++ b/plugins/modules/iptables_state.py @@ -234,53 +234,51 @@ IPTABLES = dict( - ipv4='iptables', - ipv6='ip6tables', + ipv4="iptables", + ipv6="ip6tables", ) SAVE = dict( - ipv4='iptables-save', - ipv6='ip6tables-save', + ipv4="iptables-save", + ipv6="ip6tables-save", ) RESTORE = dict( - ipv4='iptables-restore', - ipv6='ip6tables-restore', + ipv4="iptables-restore", + ipv6="ip6tables-restore", ) -TABLES = ['filter', 'mangle', 'nat', 'raw', 'security'] +TABLES = ["filter", "mangle", "nat", "raw", "security"] def read_state(b_path): - ''' + """ Read a file and store its content in a variable as a list. - ''' - with open(b_path, 'r') as f: + """ + with open(b_path, "r") as f: text = f.read() - return [t for t in text.splitlines() if t != ''] + return [t for t in text.splitlines() if t != ""] def write_state(b_path, lines, changed): - ''' + """ Write given contents to the given path, and return changed status. - ''' + """ # Populate a temporary file tmpfd, tmpfile = tempfile.mkstemp() - with os.fdopen(tmpfd, 'w') as f: + with os.fdopen(tmpfd, "w") as f: joined_lines = "\n".join(lines) f.write(f"{joined_lines}\n") # Prepare to copy temporary file to the final destination if not os.path.exists(b_path): b_destdir = os.path.dirname(b_path) - destdir = to_native(b_destdir, errors='surrogate_or_strict') + destdir = to_native(b_destdir, errors="surrogate_or_strict") if b_destdir and not os.path.exists(b_destdir) and not module.check_mode: try: os.makedirs(b_destdir) except Exception as err: - module.fail_json( - msg=f'Error creating {destdir}: {err}', - initial_state=lines) + module.fail_json(msg=f"Error creating {destdir}: {err}", initial_state=lines) changed = True elif not filecmp.cmp(tmpfile, b_path): @@ -291,30 +289,28 @@ def write_state(b_path, lines, changed): try: shutil.copyfile(tmpfile, b_path) except Exception as err: - path = to_native(b_path, errors='surrogate_or_strict') - module.fail_json( - msg=f'Error saving state into {path}: {err}', - initial_state=lines) + path = to_native(b_path, errors="surrogate_or_strict") + module.fail_json(msg=f"Error saving state into {path}: {err}", initial_state=lines) return changed def initialize_from_null_state(initializer, initcommand, fallbackcmd, table): - ''' + """ This ensures iptables-state output is suitable for iptables-restore to roll back to it, i.e. iptables-save output is not empty. This also works for the iptables-nft-save alternative. - ''' + """ if table is None: - table = 'filter' + table = "filter" commandline = list(initializer) - commandline += ['-t', table] + commandline += ["-t", table] dummy = module.run_command(commandline, check_rc=True) (rc, out, err) = module.run_command(initcommand, check_rc=True) - if f'*{table}' not in out.splitlines(): + if f"*{table}" not in out.splitlines(): # The last resort. 
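# A minimal sketch (payload values hypothetical; run_command usage as in this module) of what the
# fallback below pipes into iptables-restore on stdin, so that an otherwise missing table shows up
# in iptables-save output:
#
#     payload = "*filter\n:OUTPUT ACCEPT\nCOMMIT\n"
#     rc, out, err = module.run_command(fallbackcmd, data=payload, check_rc=True)
#
# After this, "*filter" appears in the dump and the table's state becomes restorable.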
- iptables_input = f'*{table}\n:OUTPUT ACCEPT\nCOMMIT\n' + iptables_input = f"*{table}\n:OUTPUT ACCEPT\nCOMMIT\n" dummy = module.run_command(fallbackcmd, data=iptables_input, check_rc=True) (rc, out, err) = module.run_command(initcommand, check_rc=True) @@ -322,77 +318,76 @@ def initialize_from_null_state(initializer, initcommand, fallbackcmd, table): def filter_and_format_state(string): - ''' + """ Remove timestamps to ensure idempotence between runs. Also remove counters by default. And return the result as a list. - ''' - string = re.sub(r'((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', r'\1', string) - if not module.params['counters']: - string = re.sub(r'\[[0-9]+:[0-9]+\]', r'[0:0]', string) - lines = [line for line in string.splitlines() if line != ''] + """ + string = re.sub(r"((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*", r"\1", string) + if not module.params["counters"]: + string = re.sub(r"\[[0-9]+:[0-9]+\]", r"[0:0]", string) + lines = [line for line in string.splitlines() if line != ""] return lines def parse_per_table_state(all_states_dump): - ''' + """ Convert raw iptables-save output into usable datastructure, for reliable comparisons between initial and final states. - ''' + """ lines = filter_and_format_state(all_states_dump) tables = dict() - current_table = '' + current_table = "" current_list = list() for line in lines: - if re.match(r'^[*](filter|mangle|nat|raw|security)$', line): + if re.match(r"^[*](filter|mangle|nat|raw|security)$", line): current_table = line[1:] continue - if line == 'COMMIT': + if line == "COMMIT": tables[current_table] = current_list - current_table = '' + current_table = "" current_list = list() continue - if line.startswith('# '): + if line.startswith("# "): continue current_list.append(line) return tables def main(): - global module module = AnsibleModule( argument_spec=dict( - path=dict(type='path', required=True), - state=dict(type='str', choices=['saved', 'restored'], required=True), - table=dict(type='str', choices=['filter', 'nat', 'mangle', 'raw', 'security']), - noflush=dict(type='bool', default=False), - counters=dict(type='bool', default=False), - modprobe=dict(type='path'), - ip_version=dict(type='str', choices=['ipv4', 'ipv6'], default='ipv4'), - wait=dict(type='int'), - _timeout=dict(type='int'), - _back=dict(type='path'), + path=dict(type="path", required=True), + state=dict(type="str", choices=["saved", "restored"], required=True), + table=dict(type="str", choices=["filter", "nat", "mangle", "raw", "security"]), + noflush=dict(type="bool", default=False), + counters=dict(type="bool", default=False), + modprobe=dict(type="path"), + ip_version=dict(type="str", choices=["ipv4", "ipv6"], default="ipv4"), + wait=dict(type="int"), + _timeout=dict(type="int"), + _back=dict(type="path"), ), required_together=[ - ['_timeout', '_back'], + ["_timeout", "_back"], ], supports_check_mode=True, ) # We'll parse iptables-restore stderr - module.run_command_environ_update = dict(LANG='C', LC_MESSAGES='C') - - path = module.params['path'] - state = module.params['state'] - table = module.params['table'] - noflush = module.params['noflush'] - counters = module.params['counters'] - modprobe = module.params['modprobe'] - ip_version = module.params['ip_version'] - wait = module.params['wait'] - _timeout = module.params['_timeout'] - _back = module.params['_back'] + module.run_command_environ_update = dict(LANG="C", LC_MESSAGES="C") + + path = module.params["path"] + state = module.params["state"] + table = module.params["table"] + noflush = 
module.params["noflush"] + counters = module.params["counters"] + modprobe = module.params["modprobe"] + ip_version = module.params["ip_version"] + wait = module.params["wait"] + _timeout = module.params["_timeout"] + _back = module.params["_back"] bin_iptables = module.get_bin_path(IPTABLES[ip_version], True) bin_iptables_save = module.get_bin_path(SAVE[ip_version], True) @@ -402,21 +397,21 @@ def main(): changed = False COMMANDARGS = [] INITCOMMAND = [bin_iptables_save] - INITIALIZER = [bin_iptables, '-L', '-n'] - TESTCOMMAND = [bin_iptables_restore, '--test'] + INITIALIZER = [bin_iptables, "-L", "-n"] + TESTCOMMAND = [bin_iptables_restore, "--test"] FALLBACKCMD = [bin_iptables_restore] if counters: - COMMANDARGS.append('--counters') + COMMANDARGS.append("--counters") if table is not None: - COMMANDARGS.extend(['--table', table]) + COMMANDARGS.extend(["--table", table]) if wait is not None: - TESTCOMMAND.extend(['--wait', f'{wait}']) + TESTCOMMAND.extend(["--wait", f"{wait}"]) if modprobe is not None: - b_modprobe = to_bytes(modprobe, errors='surrogate_or_strict') + b_modprobe = to_bytes(modprobe, errors="surrogate_or_strict") if not os.path.exists(b_modprobe): module.fail_json(msg=f"modprobe {modprobe} not found") if not os.path.isfile(b_modprobe): @@ -425,18 +420,18 @@ def main(): module.fail_json(msg=f"modprobe {modprobe} not readable") if not os.access(b_modprobe, os.X_OK): module.fail_json(msg=f"modprobe {modprobe} not executable") - COMMANDARGS.extend(['--modprobe', modprobe]) - INITIALIZER.extend(['--modprobe', modprobe]) - INITCOMMAND.extend(['--modprobe', modprobe]) - TESTCOMMAND.extend(['--modprobe', modprobe]) - FALLBACKCMD.extend(['--modprobe', modprobe]) + COMMANDARGS.extend(["--modprobe", modprobe]) + INITIALIZER.extend(["--modprobe", modprobe]) + INITCOMMAND.extend(["--modprobe", modprobe]) + TESTCOMMAND.extend(["--modprobe", modprobe]) + FALLBACKCMD.extend(["--modprobe", modprobe]) SAVECOMMAND = list(COMMANDARGS) SAVECOMMAND.insert(0, bin_iptables_save) - b_path = to_bytes(path, errors='surrogate_or_strict') + b_path = to_bytes(path, errors="surrogate_or_strict") - if state == 'restored': + if state == "restored": if not os.path.exists(b_path): module.fail_json(msg=f"Source {path} not found") if not os.path.isfile(b_path): @@ -446,7 +441,7 @@ def main(): state_to_restore = read_state(b_path) cmd = None else: - cmd = ' '.join(SAVECOMMAND) + cmd = " ".join(SAVECOMMAND) (rc, stdout, stderr) = module.run_command(INITCOMMAND, check_rc=True) @@ -460,18 +455,18 @@ def main(): # in the backup ! So we have to ensure tables to be restored have a backup # in case of rollback. 
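# A minimal sketch (dump content hypothetical; counters assumed off) of the "*<table>" markers the
# checks below look for. parse_per_table_state() above turns such a dump into a per-table dict:
#
#     dump = "*filter\n:INPUT ACCEPT [0:0]\n-A INPUT -i lo -j ACCEPT\nCOMMIT\n"
#     parse_per_table_state(dump)
#     # -> {"filter": [":INPUT ACCEPT [0:0]", "-A INPUT -i lo -j ACCEPT"]}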
if table is None: - if state == 'restored': + if state == "restored": for t in TABLES: - if f'*{t}' in state_to_restore: - if len(stdout) == 0 or f'*{t}' not in stdout.splitlines(): + if f"*{t}" in state_to_restore: + if len(stdout) == 0 or f"*{t}" not in stdout.splitlines(): (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, t) elif len(stdout) == 0: - (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, 'filter') + (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, "filter") - elif state == 'restored' and f'*{table}' not in state_to_restore: + elif state == "restored" and f"*{table}" not in state_to_restore: module.fail_json(msg=f"Table {table} to restore not defined in {path}") - elif len(stdout) == 0 or f'*{table}' not in stdout.splitlines(): + elif len(stdout) == 0 or f"*{table}" not in stdout.splitlines(): (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, table) initial_state = filter_and_format_state(stdout) @@ -484,14 +479,11 @@ def main(): tables_before = parse_per_table_state(stdout) initref_state = filter_and_format_state(stdout) - if state == 'saved': + if state == "saved": changed = write_state(b_path, initref_state, changed) module.exit_json( - changed=changed, - cmd=cmd, - tables=tables_before, - initial_state=initial_state, - saved=initref_state) + changed=changed, cmd=cmd, tables=tables_before, initial_state=initial_state, saved=initref_state + ) # # All remaining code is for state=restored @@ -501,36 +493,36 @@ def main(): MAINCOMMAND.insert(0, bin_iptables_restore) if wait is not None: - MAINCOMMAND.extend(['--wait', f'{wait}']) + MAINCOMMAND.extend(["--wait", f"{wait}"]) if _back is not None: - b_back = to_bytes(_back, errors='surrogate_or_strict') + b_back = to_bytes(_back, errors="surrogate_or_strict") dummy = write_state(b_back, initref_state, changed) BACKCOMMAND = list(MAINCOMMAND) BACKCOMMAND.append(_back) if noflush: - MAINCOMMAND.append('--noflush') + MAINCOMMAND.append("--noflush") MAINCOMMAND.append(path) - cmd = ' '.join(MAINCOMMAND) + cmd = " ".join(MAINCOMMAND) TESTCOMMAND = list(MAINCOMMAND) - TESTCOMMAND.insert(1, '--test') + TESTCOMMAND.insert(1, "--test") error_msg = f"Source {path} is not suitable for input to {os.path.basename(bin_iptables_restore)}" # Due to a bug in iptables-nft-restore --test, we have to validate tables # one by one (https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=960003). for t in tables_before: testcommand = list(TESTCOMMAND) - testcommand.extend(['--table', t]) + testcommand.extend(["--table", t]) (rc, stdout, stderr) = module.run_command(testcommand) - if 'Another app is currently holding the xtables lock' in stderr: + if "Another app is currently holding the xtables lock" in stderr: error_msg = stderr if rc != 0: - cmd = ' '.join(testcommand) + cmd = " ".join(testcommand) module.fail_json( msg=error_msg, cmd=cmd, @@ -540,11 +532,12 @@ def main(): tables=tables_before, initial_state=initial_state, restored=state_to_restore, - applied=False) + applied=False, + ) if module.check_mode: tmpfd, tmpfile = tempfile.mkstemp() - with os.fdopen(tmpfd, 'w') as f: + with os.fdopen(tmpfd, "w") as f: joined_initial_state = "\n".join(initial_state) f.write(f"{joined_initial_state}\n") @@ -557,7 +550,7 @@ def main(): # Let time enough to the plugin to retrieve async status of the module # in case of bad option type/value and the like. 
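# A minimal sketch (exact path hypothetical) of the rollback handshake used below: the module
# blocks until the controller touches a "<_back>.starter" marker, removes it, applies the restore,
# and later rolls back from the backup in _back if the controller never deletes that backup in time.
#
#     _back = "/root/.ansible_async/iptables.state"
#     b_starter = to_bytes(f"{_back}.starter", errors="surrogate_or_strict")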
if _back is not None: - b_starter = to_bytes(f'{_back}.starter', errors='surrogate_or_strict') + b_starter = to_bytes(f"{_back}.starter", errors="surrogate_or_strict") while True: if os.path.exists(b_starter): os.remove(b_starter) @@ -565,7 +558,7 @@ def main(): time.sleep(0.01) (rc, stdout, stderr) = module.run_command(MAINCOMMAND) - if 'Another app is currently holding the xtables lock' in stderr: + if "Another app is currently holding the xtables lock" in stderr: module.fail_json( msg=stderr, cmd=cmd, @@ -575,11 +568,12 @@ def main(): tables=tables_before, initial_state=initial_state, restored=state_to_restore, - applied=False) + applied=False, + ) (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True) restored_state = filter_and_format_state(stdout) - tables_after = parse_per_table_state('\n'.join(restored_state)) + tables_after = parse_per_table_state("\n".join(restored_state)) if restored_state not in (initref_state, initial_state): for table_name, table_content in tables_after.items(): if table_name not in tables_before: @@ -598,7 +592,8 @@ def main(): tables=tables_before, initial_state=initial_state, restored=restored_state, - applied=True) + applied=True, + ) # The rollback implementation currently needs: # Here: @@ -623,7 +618,8 @@ def main(): tables=tables_before, initial_state=initial_state, restored=restored_state, - applied=True) + applied=True, + ) # Here we are: for whatever reason, but probably due to the current ruleset, # the action plugin (i.e. on the controller) was unable to remove the backup @@ -634,9 +630,7 @@ def main(): (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True) tables_rollback = parse_per_table_state(stdout) - msg = ( - f"Failed to confirm state restored from {path} after {_timeout}s. Firewall has been rolled back to its initial state." - ) + msg = f"Failed to confirm state restored from {path} after {_timeout}s. Firewall has been rolled back to its initial state." 
module.fail_json( changed=(tables_before != tables_rollback), @@ -645,8 +639,9 @@ def main(): tables=tables_before, initial_state=initial_state, restored=restored_state, - applied=False) + applied=False, + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ipwcli_dns.py b/plugins/modules/ipwcli_dns.py index e015cc54caa..364ccdf2714 100644 --- a/plugins/modules/ipwcli_dns.py +++ b/plugins/modules/ipwcli_dns.py @@ -165,45 +165,48 @@ class ResourceRecord: - def __init__(self, module): self.module = module - self.dnsname = module.params['dnsname'] - self.dnstype = module.params['type'] - self.container = module.params['container'] - self.address = module.params['address'] - self.ttl = module.params['ttl'] - self.state = module.params['state'] - self.priority = module.params['priority'] - self.weight = module.params['weight'] - self.port = module.params['port'] - self.target = module.params['target'] - self.order = module.params['order'] - self.preference = module.params['preference'] - self.flags = module.params['flags'] - self.service = module.params['service'] - self.replacement = module.params['replacement'] - self.user = module.params['username'] - self.password = module.params['password'] + self.dnsname = module.params["dnsname"] + self.dnstype = module.params["type"] + self.container = module.params["container"] + self.address = module.params["address"] + self.ttl = module.params["ttl"] + self.state = module.params["state"] + self.priority = module.params["priority"] + self.weight = module.params["weight"] + self.port = module.params["port"] + self.target = module.params["target"] + self.order = module.params["order"] + self.preference = module.params["preference"] + self.flags = module.params["flags"] + self.service = module.params["service"] + self.replacement = module.params["replacement"] + self.user = module.params["username"] + self.password = module.params["password"] def create_naptrrecord(self): # create NAPTR record with the given params - record = (f'naptrrecord {self.dnsname} -set ttl={self.ttl};container={self.container};order={self.order};' - f'preference={self.preference};flags="{self.flags}";service="{self.service}";replacement="{self.replacement}"') + record = ( + f"naptrrecord {self.dnsname} -set ttl={self.ttl};container={self.container};order={self.order};" + f'preference={self.preference};flags="{self.flags}";service="{self.service}";replacement="{self.replacement}"' + ) return record def create_srvrecord(self): # create SRV record with the given params - record = (f'srvrecord {self.dnsname} -set ttl={self.ttl};container={self.container};priority={self.priority};' - f'weight={self.weight};port={self.port};target={self.target}') + record = ( + f"srvrecord {self.dnsname} -set ttl={self.ttl};container={self.container};priority={self.priority};" + f"weight={self.weight};port={self.port};target={self.target}" + ) return record def create_arecord(self): # create A record with the given params - if self.dnstype == 'AAAA': - record = f'aaaarecord {self.dnsname} {self.address} -set ttl={self.ttl};container={self.container}' + if self.dnstype == "AAAA": + record = f"aaaarecord {self.dnsname} {self.address} -set ttl={self.ttl};container={self.container}" else: - record = f'arecord {self.dnsname} {self.address} -set ttl={self.ttl};container={self.container}' + record = f"arecord {self.dnsname} {self.address} -set ttl={self.ttl};container={self.container}" return record @@ -211,136 +214,133 @@ def list_record(self, record): # check if the 
record exists via list on ipwcli search = f"list {record.replace(';', '&&').replace('set', 'where')}" cmd = [ - self.module.get_bin_path('ipwcli', True), - f'-user={self.user}', - f'-password={self.password}', + self.module.get_bin_path("ipwcli", True), + f"-user={self.user}", + f"-password={self.password}", ] rc, out, err = self.module.run_command(cmd, data=search) - if 'Invalid username or password' in out: - self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') + if "Invalid username or password" in out: + self.module.fail_json(msg="access denied at ipwcli login: Invalid username or password") - if ((f'ARecord {self.dnsname}' in out and rc == 0) or (f'SRVRecord {self.dnsname}' in out and rc == 0) or - (f'NAPTRRecord {self.dnsname}' in out and rc == 0)): + if ( + (f"ARecord {self.dnsname}" in out and rc == 0) + or (f"SRVRecord {self.dnsname}" in out and rc == 0) + or (f"NAPTRRecord {self.dnsname}" in out and rc == 0) + ): return True, rc, out, err return False, rc, out, err def deploy_record(self, record): # check what happens if create fails on ipworks - stdin = f'create {record}' + stdin = f"create {record}" cmd = [ - self.module.get_bin_path('ipwcli', True), - f'-user={self.user}', - f'-password={self.password}', + self.module.get_bin_path("ipwcli", True), + f"-user={self.user}", + f"-password={self.password}", ] rc, out, err = self.module.run_command(cmd, data=stdin) - if 'Invalid username or password' in out: - self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') + if "Invalid username or password" in out: + self.module.fail_json(msg="access denied at ipwcli login: Invalid username or password") - if '1 object(s) created.' in out: + if "1 object(s) created." in out: return rc, out, err else: - self.module.fail_json(msg='record creation failed', stderr=out) + self.module.fail_json(msg="record creation failed", stderr=out) def delete_record(self, record): # check what happens if create fails on ipworks stdin = f"delete {record.replace(';', '&&').replace('set', 'where')}" cmd = [ - self.module.get_bin_path('ipwcli', True), - f'-user={self.user}', - f'-password={self.password}', + self.module.get_bin_path("ipwcli", True), + f"-user={self.user}", + f"-password={self.password}", ] rc, out, err = self.module.run_command(cmd, data=stdin) - if 'Invalid username or password' in out: - self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') + if "Invalid username or password" in out: + self.module.fail_json(msg="access denied at ipwcli login: Invalid username or password") - if '1 object(s) were updated.' in out: + if "1 object(s) were updated." 
in out: return rc, out, err else: - self.module.fail_json(msg='record deletion failed', stderr=out) + self.module.fail_json(msg="record deletion failed", stderr=out) def run_module(): # define available arguments/parameters a user can pass to the module module_args = dict( - dnsname=dict(type='str', required=True), - type=dict(type='str', required=True, choices=['A', 'AAAA', 'SRV', 'NAPTR']), - container=dict(type='str', required=True), - address=dict(type='str'), - ttl=dict(type='int', default=3600), - state=dict(type='str', default='present', choices=['absent', 'present']), - priority=dict(type='int', default=10), - weight=dict(type='int', default=10), - port=dict(type='int'), - target=dict(type='str'), - order=dict(type='int'), - preference=dict(type='int'), - flags=dict(type='str', choices=['S', 'A', 'U', 'P']), - service=dict(type='str'), - replacement=dict(type='str'), - username=dict(type='str', required=True), - password=dict(type='str', required=True, no_log=True) + dnsname=dict(type="str", required=True), + type=dict(type="str", required=True, choices=["A", "AAAA", "SRV", "NAPTR"]), + container=dict(type="str", required=True), + address=dict(type="str"), + ttl=dict(type="int", default=3600), + state=dict(type="str", default="present", choices=["absent", "present"]), + priority=dict(type="int", default=10), + weight=dict(type="int", default=10), + port=dict(type="int"), + target=dict(type="str"), + order=dict(type="int"), + preference=dict(type="int"), + flags=dict(type="str", choices=["S", "A", "U", "P"]), + service=dict(type="str"), + replacement=dict(type="str"), + username=dict(type="str", required=True), + password=dict(type="str", required=True, no_log=True), ) # define result - result = dict( - changed=False, - stdout='', - stderr='', - rc=0, - record='' - ) + result = dict(changed=False, stdout="", stderr="", rc=0, record="") # supports check mode module = AnsibleModule( argument_spec=module_args, required_if=[ - ['type', 'A', ['address']], - ['type', 'AAAA', ['address']], - ['type', 'SRV', ['port', 'target']], - ['type', 'NAPTR', ['preference', 'order', 'service', 'replacement']], + ["type", "A", ["address"]], + ["type", "AAAA", ["address"]], + ["type", "SRV", ["port", "target"]], + ["type", "NAPTR", ["preference", "order", "service", "replacement"]], ], - supports_check_mode=True + supports_check_mode=True, ) user = ResourceRecord(module) - if user.dnstype == 'NAPTR': + if user.dnstype == "NAPTR": record = user.create_naptrrecord() - elif user.dnstype == 'SRV': + elif user.dnstype == "SRV": record = user.create_srvrecord() - elif user.dnstype == 'A' or user.dnstype == 'AAAA': + elif user.dnstype == "A" or user.dnstype == "AAAA": record = user.create_arecord() found, rc, out, err = user.list_record(record) - if found and user.state == 'absent': + if found and user.state == "absent": if module.check_mode: module.exit_json(changed=True) rc, out, err = user.delete_record(record) - result['changed'] = True - result['record'] = record - result['rc'] = rc - result['stdout'] = out - result['stderr'] = err - elif not found and user.state == 'present': + result["changed"] = True + result["record"] = record + result["rc"] = rc + result["stdout"] = out + result["stderr"] = err + elif not found and user.state == "present": if module.check_mode: module.exit_json(changed=True) rc, out, err = user.deploy_record(record) - result['changed'] = True - result['record'] = record - result['rc'] = rc - result['stdout'] = out - result['stderr'] = err + result["changed"] = True + result["record"] 
= record + result["rc"] = rc + result["stdout"] = out + result["stderr"] = err else: - result['changed'] = False - result['record'] = record - result['rc'] = rc - result['stdout'] = out - result['stderr'] = err + result["changed"] = False + result["record"] = record + result["rc"] = rc + result["stdout"] = out + result["stderr"] = err module.exit_json(**result) @@ -349,5 +349,5 @@ def main(): run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/irc.py b/plugins/modules/irc.py index f8080072182..f701ab49db4 100644 --- a/plugins/modules/irc.py +++ b/plugins/modules/irc.py @@ -183,36 +183,50 @@ from ansible.module_utils.basic import AnsibleModule -def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, key=None, topic=None, - nick="ansible", color='none', passwd=False, timeout=30, use_tls=False, validate_certs=True, - part=True, style=None): - '''send message to IRC''' +def send_msg( + msg, + server="localhost", + port="6667", + channel=None, + nick_to=None, + key=None, + topic=None, + nick="ansible", + color="none", + passwd=False, + timeout=30, + use_tls=False, + validate_certs=True, + part=True, + style=None, +): + """send message to IRC""" nick_to = [] if nick_to is None else nick_to colornumbers = { - 'white': "00", - 'black': "01", - 'blue': "02", - 'green': "03", - 'red': "04", - 'brown': "05", - 'purple': "06", - 'orange': "07", - 'yellow': "08", - 'light_green': "09", - 'teal': "10", - 'light_cyan': "11", - 'light_blue': "12", - 'pink': "13", - 'gray': "14", - 'light_gray': "15", + "white": "00", + "black": "01", + "blue": "02", + "green": "03", + "red": "04", + "brown": "05", + "purple": "06", + "orange": "07", + "yellow": "08", + "light_green": "09", + "teal": "10", + "light_cyan": "11", + "light_blue": "12", + "pink": "13", + "gray": "14", + "light_gray": "15", } stylechoices = { - 'bold': "\x02", - 'underline': "\x1F", - 'reverse': "\x16", - 'italic': "\x1D", + "bold": "\x02", + "underline": "\x1f", + "reverse": "\x16", + "italic": "\x1d", } try: @@ -241,56 +255,57 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, k irc.connect((server, int(port))) if passwd: - irc.send(to_bytes(f'PASS {passwd}\r\n')) - irc.send(to_bytes(f'NICK {nick}\r\n')) - irc.send(to_bytes(f'USER {nick} {nick} {nick} :ansible IRC\r\n')) - motd = '' + irc.send(to_bytes(f"PASS {passwd}\r\n")) + irc.send(to_bytes(f"NICK {nick}\r\n")) + irc.send(to_bytes(f"USER {nick} {nick} {nick} :ansible IRC\r\n")) + motd = "" start = time.time() while 1: motd += to_native(irc.recv(1024)) # The server might send back a shorter nick than we specified (due to NICKLEN), # so grab that and use it from now on (assuming we find the 00[1-4] response). 
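# A minimal sketch (server reply hypothetical) of the welcome line the regex below matches; the
# named group captures the nick the server actually assigned:
#
#     motd = ":irc.example.net 001 ansible1 :Welcome to IRC\r\n"
#     m = re.search(r"^:\S+ 00[1-4] (?P<nick>\S+) :", motd, flags=re.M)
#     m.group("nick")  # -> "ansible1"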
- match = re.search(r'^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M) + match = re.search(r"^:\S+ 00[1-4] (?P<nick>\S+) :", motd, flags=re.M) if match: - nick = match.group('nick') + nick = match.group("nick") break elif time.time() - start > timeout: - raise Exception('Timeout waiting for IRC server welcome response') + raise Exception("Timeout waiting for IRC server welcome response") time.sleep(0.5) if channel: if key: - irc.send(to_bytes(f'JOIN {channel} {key}\r\n')) + irc.send(to_bytes(f"JOIN {channel} {key}\r\n")) else: - irc.send(to_bytes(f'JOIN {channel}\r\n')) + irc.send(to_bytes(f"JOIN {channel}\r\n")) - join = '' + join = "" start = time.time() while 1: join += to_native(irc.recv(1024)) - if re.search(rf'^:\S+ 366 {nick} {channel} :', join, flags=re.M | re.I): + if re.search(rf"^:\S+ 366 {nick} {channel} :", join, flags=re.M | re.I): break elif time.time() - start > timeout: - raise Exception('Timeout waiting for IRC JOIN response') + raise Exception("Timeout waiting for IRC JOIN response") time.sleep(0.5) if topic is not None: - irc.send(to_bytes(f'TOPIC {channel} :{topic}\r\n')) + irc.send(to_bytes(f"TOPIC {channel} :{topic}\r\n")) time.sleep(1) if nick_to: for nick in nick_to: - irc.send(to_bytes(f'PRIVMSG {nick} :{message}\r\n')) + irc.send(to_bytes(f"PRIVMSG {nick} :{message}\r\n")) if channel: - irc.send(to_bytes(f'PRIVMSG {channel} :{message}\r\n')) + irc.send(to_bytes(f"PRIVMSG {channel} :{message}\r\n")) time.sleep(1) if part: if channel: - irc.send(to_bytes(f'PART {channel}\r\n')) - irc.send(to_bytes('QUIT\r\n')) + irc.send(to_bytes(f"PART {channel}\r\n")) + irc.send(to_bytes("QUIT\r\n")) time.sleep(1) irc.close() + # =========================================== # Main # @@ -299,29 +314,46 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, k def main(): module = AnsibleModule( argument_spec=dict( - server=dict(default='localhost'), - port=dict(type='int', default=6667), - nick=dict(default='ansible'), - nick_to=dict(type='list', elements='str'), + server=dict(default="localhost"), + port=dict(type="int", default=6667), + nick=dict(default="ansible"), + nick_to=dict(type="list", elements="str"), msg=dict(required=True), - color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue", - "green", "red", "brown", - "purple", "orange", "yellow", - "light_green", "teal", "light_cyan", - "light_blue", "pink", "gray", - "light_gray", "none"]), + color=dict( + default="none", + aliases=["colour"], + choices=[ + "white", + "black", + "blue", + "green", + "red", + "brown", + "purple", + "orange", + "yellow", + "light_green", + "teal", + "light_cyan", + "light_blue", + "pink", + "gray", + "light_gray", + "none", + ], + ), style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]), channel=dict(), key=dict(no_log=True), topic=dict(), passwd=dict(no_log=True), - timeout=dict(type='int', default=30), - part=dict(type='bool', default=True), - use_tls=dict(type='bool', default=True, aliases=['use_ssl']), - validate_certs=dict(type='bool', default=True), + timeout=dict(type="int", default=30), + part=dict(type="bool", default=True), + use_tls=dict(type="bool", default=True, aliases=["use_ssl"]), + validate_certs=dict(type="bool", default=True), ), supports_check_mode=True, - required_one_of=[['channel', 'nick_to']] + required_one_of=[["channel", "nick_to"]], ) server = module.params["server"] @@ -343,13 +375,28 @@ def main(): validate_certs = module.params["validate_certs"] try: - send_msg(msg, server, port, channel,
nick_to, key, topic, nick, color, passwd, timeout, use_tls, validate_certs, part, style) + send_msg( + msg, + server, + port, + channel, + nick_to, + key, + topic, + nick, + color, + passwd, + timeout, + use_tls, + validate_certs, + part, + style, + ) except Exception as e: module.fail_json(msg=f"unable to send to IRC: {e}", exception=traceback.format_exc()) - module.exit_json(changed=False, channel=channel, nick=nick, - msg=msg) + module.exit_json(changed=False, channel=channel, nick=nick, msg=msg) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/iso_create.py b/plugins/modules/iso_create.py index 0d1a8476869..b01c5e449ab 100644 --- a/plugins/modules/iso_create.py +++ b/plugins/modules/iso_create.py @@ -153,6 +153,7 @@ PYCDLIB_IMP_ERR = None try: import pycdlib + HAS_PYCDLIB = True except ImportError: PYCDLIB_IMP_ERR = traceback.format_exc() @@ -168,7 +169,7 @@ def add_file(module, iso_file=None, src_file=None, file_path=None, rock_ridge=No # In standard ISO interchange level 1, file names have a maximum of 8 characters, followed by a required dot, # followed by a maximum 3 character extension, followed by a semicolon and a version file_name = os.path.basename(file_path) - if '.' not in file_name: + if "." not in file_name: file_in_iso_path = f"{file_path.upper()}.;1" else: file_in_iso_path = f"{file_path.upper()};1" @@ -179,7 +180,9 @@ def add_file(module, iso_file=None, src_file=None, file_path=None, rock_ridge=No if use_udf: udf_path = file_path try: - iso_file.add_file(src_file, iso_path=file_in_iso_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path) + iso_file.add_file( + src_file, iso_path=file_in_iso_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path + ) except Exception as err: module.fail_json(msg=f"Failed to add file {src_file} to ISO file due to {err}") @@ -203,31 +206,31 @@ def add_directory(module, iso_file=None, dir_path=None, rock_ridge=None, use_jol def main(): argument_spec = dict( - src_files=dict(type='list', required=True, elements='path'), - dest_iso=dict(type='path', required=True), - interchange_level=dict(type='int', choices=[1, 2, 3, 4], default=1), - vol_ident=dict(type='str'), - rock_ridge=dict(type='str', choices=['1.09', '1.10', '1.12']), - joliet=dict(type='int', choices=[1, 2, 3]), - udf=dict(type='bool', default=False), + src_files=dict(type="list", required=True, elements="path"), + dest_iso=dict(type="path", required=True), + interchange_level=dict(type="int", choices=[1, 2, 3, 4], default=1), + vol_ident=dict(type="str"), + rock_ridge=dict(type="str", choices=["1.09", "1.10", "1.12"]), + joliet=dict(type="int", choices=[1, 2, 3]), + udf=dict(type="bool", default=False), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, ) if not HAS_PYCDLIB: - module.fail_json(missing_required_lib('pycdlib'), exception=PYCDLIB_IMP_ERR) + module.fail_json(missing_required_lib("pycdlib"), exception=PYCDLIB_IMP_ERR) - src_file_list = module.params.get('src_files') + src_file_list = module.params.get("src_files") if src_file_list and len(src_file_list) == 0: - module.fail_json(msg='Please specify source file and/or directory list using src_files parameter.') + module.fail_json(msg="Please specify source file and/or directory list using src_files parameter.") for src_file in src_file_list: if not os.path.exists(src_file): module.fail_json(msg=f"Specified source file/directory path does not exist on local machine, {src_file}") - dest_iso = module.params.get('dest_iso') + 
dest_iso = module.params.get("dest_iso") if dest_iso and len(dest_iso) == 0: - module.fail_json(msg='Please specify the absolute path of the new created ISO file using dest_iso parameter.') + module.fail_json(msg="Please specify the absolute path of the new created ISO file using dest_iso parameter.") dest_iso_dir = os.path.dirname(dest_iso) if dest_iso_dir and not os.path.exists(dest_iso_dir): @@ -235,17 +238,17 @@ def main(): try: os.makedirs(dest_iso_dir) except OSError as err: - module.fail_json(msg=f'Exception caught when creating folder {dest_iso_dir}, with error {err}') + module.fail_json(msg=f"Exception caught when creating folder {dest_iso_dir}, with error {err}") - volume_id = module.params.get('vol_ident') + volume_id = module.params.get("vol_ident") if volume_id is None: - volume_id = '' - inter_level = module.params.get('interchange_level') - rock_ridge = module.params.get('rock_ridge') - use_joliet = module.params.get('joliet') + volume_id = "" + inter_level = module.params.get("interchange_level") + rock_ridge = module.params.get("rock_ridge") + use_joliet = module.params.get("joliet") use_udf = None - if module.params['udf']: - use_udf = '2.60' + if module.params["udf"]: + use_udf = "2.60" result = dict( changed=False, @@ -255,21 +258,29 @@ def main(): vol_ident=volume_id, rock_ridge=rock_ridge, joliet=use_joliet, - udf=use_udf + udf=use_udf, ) if not module.check_mode: iso_file = pycdlib.PyCdlib(always_consistent=True) - iso_file.new(interchange_level=inter_level, vol_ident=volume_id, rock_ridge=rock_ridge, joliet=use_joliet, udf=use_udf) + iso_file.new( + interchange_level=inter_level, vol_ident=volume_id, rock_ridge=rock_ridge, joliet=use_joliet, udf=use_udf + ) for src_file in src_file_list: # if specify a dir then go through the dir to add files and dirs if os.path.isdir(src_file): dir_list = [] file_list = [] - src_file = src_file.rstrip('/') + src_file = src_file.rstrip("/") dir_name = os.path.basename(src_file) - add_directory(module, iso_file=iso_file, dir_path=f"/{dir_name}", rock_ridge=rock_ridge, - use_joliet=use_joliet, use_udf=use_udf) + add_directory( + module, + iso_file=iso_file, + dir_path=f"/{dir_name}", + rock_ridge=rock_ridge, + use_joliet=use_joliet, + use_udf=use_udf, + ) # get dir list and file list for path, dirs, files in os.walk(src_file): @@ -278,23 +289,42 @@ def main(): for dir in dirs: dir_list.append(os.path.join(path, dir)) for new_dir in dir_list: - add_directory(module, iso_file=iso_file, dir_path=new_dir.split(os.path.dirname(src_file))[1], - rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf) + add_directory( + module, + iso_file=iso_file, + dir_path=new_dir.split(os.path.dirname(src_file))[1], + rock_ridge=rock_ridge, + use_joliet=use_joliet, + use_udf=use_udf, + ) for new_file in file_list: - add_file(module, iso_file=iso_file, src_file=new_file, - file_path=new_file.split(os.path.dirname(src_file))[1], rock_ridge=rock_ridge, - use_joliet=use_joliet, use_udf=use_udf) + add_file( + module, + iso_file=iso_file, + src_file=new_file, + file_path=new_file.split(os.path.dirname(src_file))[1], + rock_ridge=rock_ridge, + use_joliet=use_joliet, + use_udf=use_udf, + ) # if specify a file then add this file directly to the '/' path in ISO else: - add_file(module, iso_file=iso_file, src_file=src_file, file_path=f"/{os.path.basename(src_file)}", - rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf) + add_file( + module, + iso_file=iso_file, + src_file=src_file, + file_path=f"/{os.path.basename(src_file)}", + 
rock_ridge=rock_ridge, + use_joliet=use_joliet, + use_udf=use_udf, + ) iso_file.write(dest_iso) iso_file.close() - result['changed'] = True + result["changed"] = True module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/iso_customize.py b/plugins/modules/iso_customize.py index 169e301deed..ae307f319b7 100644 --- a/plugins/modules/iso_customize.py +++ b/plugins/modules/iso_customize.py @@ -179,7 +179,7 @@ def iso_check_file_exists(opened_iso, dest_file): else: parent_dir = f"{parent_dir}/{item}" - if '.' not in file_name: + if "." not in file_name: file_in_iso_path = f"{file_name.upper()}.;1" else: file_in_iso_path = f"{file_name.upper()};1" @@ -200,7 +200,7 @@ def iso_add_file(module, opened_iso, iso_type, src_file, dest_file): file_dir = os.path.dirname(dest_file).strip() file_name = os.path.basename(dest_file) - if '.' not in file_name: + if "." not in file_name: file_in_iso_path = f"{dest_file.upper()}.;1" else: file_in_iso_path = f"{dest_file.upper()};1" @@ -239,7 +239,7 @@ def iso_delete_file(module, opened_iso, iso_type, dest_file): if not iso_check_file_exists(opened_iso, dest_file): module.fail_json(msg=f"The file {dest_file} does not exist.") - if '.' not in file_name: + if "." not in file_name: file_in_iso_path = f"{dest_file.upper()}.;1" else: file_in_iso_path = f"{dest_file.upper()};1" @@ -276,7 +276,7 @@ def iso_rebuild(module, src_iso, dest_iso, delete_files_list, add_files_list): iso_delete_file(module, iso, iso_type, item) for item in add_files_list: - iso_add_file(module, iso, iso_type, item['src_file'], item['dest_file']) + iso_add_file(module, iso, iso_type, item["src_file"], item["dest_file"]) iso.write(dest_iso) except Exception as err: @@ -289,38 +289,42 @@ def iso_rebuild(module, src_iso, dest_iso, delete_files_list, add_files_list): def main(): argument_spec = dict( - src_iso=dict(type='path', required=True), - dest_iso=dict(type='path', required=True), - delete_files=dict(type='list', elements='str', default=[]), + src_iso=dict(type="path", required=True), + dest_iso=dict(type="path", required=True), + delete_files=dict(type="list", elements="str", default=[]), add_files=dict( - type='list', elements='dict', default=[], + type="list", + elements="dict", + default=[], options=dict( - src_file=dict(type='path', required=True), - dest_file=dict(type='str', required=True), + src_file=dict(type="path", required=True), + dest_file=dict(type="str", required=True), ), ), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[('delete_files', 'add_files'), ], + required_one_of=[ + ("delete_files", "add_files"), + ], supports_check_mode=True, ) deps.validate(module) - src_iso = module.params['src_iso'] + src_iso = module.params["src_iso"] if not os.path.exists(src_iso): module.fail_json(msg=f"ISO file {src_iso} does not exist.") - dest_iso = module.params['dest_iso'] + dest_iso = module.params["dest_iso"] dest_iso_dir = os.path.dirname(dest_iso) if dest_iso_dir and not os.path.exists(dest_iso_dir): module.fail_json(msg=f"The dest directory {dest_iso_dir} does not exist") - delete_files_list = [s.strip() for s in module.params['delete_files']] - add_files_list = module.params['add_files'] + delete_files_list = [s.strip() for s in module.params["delete_files"]] + add_files_list = module.params["add_files"] if add_files_list: for item in add_files_list: - if not os.path.exists(item['src_file']): + if not os.path.exists(item["src_file"]): module.fail_json(msg=f"The file {item['src_file']} does 
not exist.") result = dict( @@ -334,9 +338,9 @@ def main(): if not module.check_mode: iso_rebuild(module, src_iso, dest_iso, delete_files_list, add_files_list) - result['changed'] = True + result["changed"] = True module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/iso_extract.py b/plugins/modules/iso_extract.py index 347775c4c6e..2fd807d15cf 100644 --- a/plugins/modules/iso_extract.py +++ b/plugins/modules/iso_extract.py @@ -99,21 +99,21 @@ def main(): module = AnsibleModule( argument_spec=dict( - image=dict(type='path', required=True, aliases=['path', 'src']), - dest=dict(type='path', required=True), - files=dict(type='list', elements='str', required=True), - force=dict(type='bool', default=True), - password=dict(type='str', no_log=True), - executable=dict(type='path'), # No default on purpose + image=dict(type="path", required=True, aliases=["path", "src"]), + dest=dict(type="path", required=True), + files=dict(type="list", elements="str", required=True), + force=dict(type="bool", default=True), + password=dict(type="str", no_log=True), + executable=dict(type="path"), # No default on purpose ), supports_check_mode=True, ) - image = module.params['image'] - dest = module.params['dest'] - files = module.params['files'] - force = module.params['force'] - password = module.params['password'] - executable = module.params['executable'] + image = module.params["image"] + dest = module.params["dest"] + files = module.params["files"] + force = module.params["force"] + password = module.params["password"] + executable = module.params["executable"] result = dict( changed=False, @@ -123,12 +123,12 @@ def main(): # We want to know if the user provided it or not, so we set default here if executable is None: - executable = '7z' + executable = "7z" binary = module.get_bin_path(executable, None) # When executable was provided and binary not found, warn user ! 
- if module.params['executable'] is not None and not binary: + if module.params["executable"] is not None and not binary: module.warn(f"Executable '{executable}' is not found on the system, trying to mount ISO instead.") if not os.path.exists(dest): @@ -137,7 +137,7 @@ def main(): if not os.path.exists(os.path.dirname(image)): module.fail_json(msg=f"ISO image '{image}' does not exist") - result['files'] = [] + result["files"] = [] extract_files = list(files) if not force: @@ -145,11 +145,13 @@ def main(): for f in files: dest_file = os.path.join(dest, os.path.basename(f)) if os.path.exists(dest_file): - result['files'].append(dict( - checksum=None, - dest=dest_file, - src=f, - )) + result["files"].append( + dict( + checksum=None, + dest=dest_file, + src=f, + ) + ) extract_files.remove(f) if not extract_files: @@ -159,27 +161,32 @@ def main(): # Use 7zip when we have a binary, otherwise try to mount if binary: - cmd = [binary, 'x', image, f'-o{tmp_dir}'] + cmd = [binary, "x", image, f"-o{tmp_dir}"] if password: cmd += [f"-p{password}"] cmd += extract_files else: - cmd = [module.get_bin_path('mount'), '-o', 'loop,ro', image, tmp_dir] + cmd = [module.get_bin_path("mount"), "-o", "loop,ro", image, tmp_dir] rc, out, err = module.run_command(cmd) if rc != 0: - result.update(dict( - cmd=cmd, - rc=rc, - stderr=err, - stdout=out, - )) + result.update( + dict( + cmd=cmd, + rc=rc, + stderr=err, + stdout=out, + ) + ) shutil.rmtree(tmp_dir) if binary: module.fail_json(msg=f"Failed to extract from ISO image '{image}' to '{tmp_dir}'", **result) else: - module.fail_json(msg=f"Failed to mount ISO image '{image}' to '{tmp_dir}', and we could not find executable '{executable}'.", **result) + module.fail_json( + msg=f"Failed to mount ISO image '{image}' to '{tmp_dir}', and we could not find executable '{executable}'.", + **result, + ) try: for f in extract_files: @@ -196,25 +203,27 @@ def main(): else: dest_checksum = None - result['files'].append(dict( - checksum=src_checksum, - dest=dest_file, - src=f, - )) + result["files"].append( + dict( + checksum=src_checksum, + dest=dest_file, + src=f, + ) + ) if src_checksum != dest_checksum: if not module.check_mode: shutil.copy(tmp_src, dest_file) - result['changed'] = True + result["changed"] = True finally: if not binary: - module.run_command([module.get_bin_path('umount'), tmp_dir]) + module.run_command([module.get_bin_path("umount"), tmp_dir]) shutil.rmtree(tmp_dir) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/jabber.py b/plugins/modules/jabber.py index fb489c743f2..e9ed772433c 100644 --- a/plugins/modules/jabber.py +++ b/plugins/modules/jabber.py @@ -98,7 +98,6 @@ def main(): - module = AnsibleModule( argument_spec=dict( user=dict(required=True), @@ -106,52 +105,52 @@ def main(): to=dict(required=True), msg=dict(required=True), host=dict(), - port=dict(default=5222, type='int'), + port=dict(default=5222, type="int"), encoding=dict(), ), - supports_check_mode=True + supports_check_mode=True, ) if not HAS_XMPP: - module.fail_json(msg=missing_required_lib('xmpppy'), exception=XMPP_IMP_ERR) + module.fail_json(msg=missing_required_lib("xmpppy"), exception=XMPP_IMP_ERR) - jid = xmpp.JID(module.params['user']) + jid = xmpp.JID(module.params["user"]) user = jid.getNode() server = jid.getDomain() - port = module.params['port'] - password = module.params['password'] + port = module.params["port"] + password = module.params["password"] try: - to, nick = module.params['to'].split('/', 1) + to, nick 
= module.params["to"].split("/", 1) except ValueError: - to, nick = module.params['to'], None + to, nick = module.params["to"], None - if module.params['host']: - host = module.params['host'] + if module.params["host"]: + host = module.params["host"] else: host = server - if module.params['encoding']: - xmpp.simplexml.ENCODING = module.params['encoding'] + if module.params["encoding"]: + xmpp.simplexml.ENCODING = module.params["encoding"] - msg = xmpp.protocol.Message(body=module.params['msg']) + msg = xmpp.protocol.Message(body=module.params["msg"]) try: conn = xmpp.Client(server, debug=[]) if not conn.connect(server=(host, port)): - module.fail_json(rc=1, msg=f'Failed to connect to server: {server}') - if not conn.auth(user, password, 'Ansible'): - module.fail_json(rc=1, msg=f'Failed to authorize {user} on: {server}') + module.fail_json(rc=1, msg=f"Failed to connect to server: {server}") + if not conn.auth(user, password, "Ansible"): + module.fail_json(rc=1, msg=f"Failed to authorize {user} on: {server}") # some old servers require this, also the sleep following send conn.sendInitPresence(requestRoster=0) if nick: # sending to room instead of user, need to join - msg.setType('groupchat') - msg.setTag('x', namespace='http://jabber.org/protocol/muc#user') - join = xmpp.Presence(to=module.params['to']) - join.setTag('x', namespace='http://jabber.org/protocol/muc') + msg.setType("groupchat") + msg.setTag("x", namespace="http://jabber.org/protocol/muc#user") + join = xmpp.Presence(to=module.params["to"]) + join.setTag("x", namespace="http://jabber.org/protocol/muc") conn.send(join) time.sleep(1) else: - msg.setType('chat') + msg.setType("chat") msg.setTo(to) if not module.check_mode: @@ -164,5 +163,5 @@ def main(): module.exit_json(changed=False, to=to, user=user, msg=msg.getBody()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/java_cert.py b/plugins/modules/java_cert.py index 8cc879e874f..f4b26128a5d 100644 --- a/plugins/modules/java_cert.py +++ b/plugins/modules/java_cert.py @@ -213,48 +213,33 @@ def _get_keystore_type_keytool_parameters(keystore_type): - ''' Check that custom keystore is presented in parameters ''' + """Check that custom keystore is presented in parameters""" if keystore_type: return ["-storetype", keystore_type] return [] def _check_cert_present(module, executable, keystore_path, keystore_pass, alias, keystore_type): - ''' Check if certificate with alias is present in keystore - located at keystore_path ''' - test_cmd = [ - executable, - "-list", - "-keystore", - keystore_path, - "-alias", - alias, - "-rfc" - ] + """Check if certificate with alias is present in keystore + located at keystore_path""" + test_cmd = [executable, "-list", "-keystore", keystore_path, "-alias", alias, "-rfc"] test_cmd += _get_keystore_type_keytool_parameters(keystore_type) (check_rc, stdout, dummy) = module.run_command(test_cmd, data=keystore_pass, check_rc=False) if check_rc == 0: return (True, stdout) - return (False, '') + return (False, "") def _get_certificate_from_url(module, executable, url, port, pem_certificate_output): remote_cert_pem_chain = _download_cert_url(module, executable, url, port) - with open(pem_certificate_output, 'w') as f: + with open(pem_certificate_output, "w") as f: f.write(remote_cert_pem_chain) def _get_first_certificate_from_x509_file(module, pem_certificate_file, pem_certificate_output, openssl_bin): - """ Read a X509 certificate chain file and output the first certificate in the list """ - extract_cmd = [ - 
openssl_bin, - "x509", - "-in", - pem_certificate_file, - "-out", - pem_certificate_output - ] + """Read a X509 certificate chain file and output the first certificate in the list""" + extract_cmd = [openssl_bin, "x509", "-in", pem_certificate_file, "-out", pem_certificate_output] (extract_rc, dummy, extract_stderr) = module.run_command(extract_cmd, check_rc=False) if extract_rc != 0: @@ -264,76 +249,69 @@ def _get_first_certificate_from_x509_file(module, pem_certificate_file, pem_cert if extract_rc != 0: # this time it is a real failure - module.fail_json(msg=f"Internal module failure, cannot extract certificate, error: {extract_stderr}", - rc=extract_rc, cmd=extract_cmd) + module.fail_json( + msg=f"Internal module failure, cannot extract certificate, error: {extract_stderr}", + rc=extract_rc, + cmd=extract_cmd, + ) return extract_rc def _get_digest_from_x509_file(module, pem_certificate_file, openssl_bin): - """ Read a X509 certificate file and output sha256 digest using openssl """ + """Read a X509 certificate file and output sha256 digest using openssl""" # cleanup file before to compare (dummy, tmp_certificate) = tempfile.mkstemp() module.add_cleanup_file(tmp_certificate) _get_first_certificate_from_x509_file(module, pem_certificate_file, tmp_certificate, openssl_bin) - dgst_cmd = [ - openssl_bin, - "dgst", - "-r", - "-sha256", - tmp_certificate - ] + dgst_cmd = [openssl_bin, "dgst", "-r", "-sha256", tmp_certificate] (dgst_rc, dgst_stdout, dgst_stderr) = module.run_command(dgst_cmd, check_rc=False) if dgst_rc != 0: - module.fail_json(msg=f"Internal module failure, cannot compute digest for certificate, error: {dgst_stderr}", - rc=dgst_rc, cmd=dgst_cmd) + module.fail_json( + msg=f"Internal module failure, cannot compute digest for certificate, error: {dgst_stderr}", + rc=dgst_rc, + cmd=dgst_cmd, + ) - return dgst_stdout.split(' ')[0] + return dgst_stdout.split(" ")[0] def _export_public_cert_from_pkcs12(module, executable, pkcs_file, alias, password, dest): - """ Runs keytools to extract the public cert from a PKCS12 archive and write it to a file. """ - export_cmd = [ - executable, - "-list", - "-noprompt", - "-keystore", - pkcs_file, - "-storetype", - "pkcs12", - "-rfc" - ] + """Runs keytools to extract the public cert from a PKCS12 archive and write it to a file.""" + export_cmd = [executable, "-list", "-noprompt", "-keystore", pkcs_file, "-storetype", "pkcs12", "-rfc"] # Append optional alias if alias: export_cmd.extend(["-alias", alias]) (export_rc, export_stdout, export_err) = module.run_command(export_cmd, data=password, check_rc=False) if export_rc != 0: - module.fail_json(msg=f"Internal module failure, cannot extract public certificate from PKCS12, message: {export_stdout}", - stderr=export_err, - rc=export_rc) + module.fail_json( + msg=f"Internal module failure, cannot extract public certificate from PKCS12, message: {export_stdout}", + stderr=export_err, + rc=export_rc, + ) - with open(dest, 'w') as f: + with open(dest, "w") as f: f.write(export_stdout) -def get_proxy_settings(scheme='https'): - """ Returns a tuple containing (proxy_host, proxy_port). (False, False) if no proxy is found """ - proxy_url = getproxies().get(scheme, '') +def get_proxy_settings(scheme="https"): + """Returns a tuple containing (proxy_host, proxy_port). 
(False, False) if no proxy is found""" + proxy_url = getproxies().get(scheme, "") if not proxy_url: return (False, False) else: parsed_url = urlparse(proxy_url) if parsed_url.scheme: - (proxy_host, proxy_port) = parsed_url.netloc.split(':') + (proxy_host, proxy_port) = parsed_url.netloc.split(":") else: - (proxy_host, proxy_port) = parsed_url.path.split(':') + (proxy_host, proxy_port) = parsed_url.path.split(":") return (proxy_host, proxy_port) def build_proxy_options(): - """ Returns list of valid proxy options for keytool """ + """Returns list of valid proxy options for keytool""" (proxy_host, proxy_port) = get_proxy_settings() no_proxy = os.getenv("no_proxy") @@ -344,8 +322,8 @@ def build_proxy_options(): if no_proxy is not None: # For Java's nonProxyHosts property, items are separated by '|', # and patterns have to start with "*". - non_proxy_hosts = no_proxy.replace(',', '|') - non_proxy_hosts = re.sub(r'(^|\|)\.', r'\1*.', non_proxy_hosts) + non_proxy_hosts = no_proxy.replace(",", "|") + non_proxy_hosts = re.sub(r"(^|\|)\.", r"\1*.", non_proxy_hosts) # The property name is http.nonProxyHosts, there is no # separate setting for HTTPS. @@ -354,14 +332,14 @@ def build_proxy_options(): def _update_permissions(module, keystore_path): - """ Updates keystore file attributes as necessary """ + """Updates keystore file attributes as necessary""" file_args = module.load_file_common_arguments(module.params, path=keystore_path) return module.set_fs_attributes_if_different(file_args, False) def _download_cert_url(module, executable, url, port): - """ Fetches the certificate from the remote URL using `keytool -printcert...` - The PEM formatted string is returned """ + """Fetches the certificate from the remote URL using `keytool -printcert...` + The PEM formatted string is returned""" proxy_opts = build_proxy_options() fetch_cmd = [executable, "-printcert", "-rfc", "-sslserver"] + proxy_opts + [f"{url}:{port}"] @@ -369,20 +347,27 @@ def _download_cert_url(module, executable, url, port): (fetch_rc, fetch_out, fetch_err) = module.run_command(fetch_cmd, check_rc=False) if fetch_rc != 0: - module.fail_json(msg=f"Internal module failure, cannot download certificate, error: {fetch_err}", - rc=fetch_rc, cmd=fetch_cmd) + module.fail_json( + msg=f"Internal module failure, cannot download certificate, error: {fetch_err}", rc=fetch_rc, cmd=fetch_cmd + ) return fetch_out -def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias, - keystore_path, keystore_pass, keystore_alias, keystore_type): - ''' Import pkcs12 from path into keystore located on - keystore_path as alias ''' - optional_aliases = { - "-destalias": keystore_alias, - "-srcalias": pkcs12_alias - } +def import_pkcs12_path( + module, + executable, + pkcs12_path, + pkcs12_pass, + pkcs12_alias, + keystore_path, + keystore_pass, + keystore_alias, + keystore_type, +): + """Import pkcs12 from path into keystore located on + keystore_path as alias""" + optional_aliases = {"-destalias": keystore_alias, "-srcalias": pkcs12_alias} import_cmd = [ executable, "-importkeystore", @@ -408,83 +393,64 @@ def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alia # Use local certificate from local path and import it to a java keystore (import_rc, import_out, import_err) = module.run_command(import_cmd, data=secret_data, check_rc=False) - diff = {'before': '\n', 'after': f'{keystore_alias}\n'} + diff = {"before": "\n", "after": f"{keystore_alias}\n"} if import_rc != 0 or not os.path.exists(keystore_path): 
module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, error=import_err) - return dict(changed=True, msg=import_out, - rc=import_rc, cmd=import_cmd, stdout=import_out, - error=import_err, diff=diff) + return dict( + changed=True, msg=import_out, rc=import_rc, cmd=import_cmd, stdout=import_out, error=import_err, diff=diff + ) def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert): - ''' Import certificate from path into keystore located on - keystore_path as alias ''' - import_cmd = [ - executable, - "-importcert", - "-noprompt", - "-keystore", - keystore_path, - "-file", - path, - "-alias", - alias - ] + """Import certificate from path into keystore located on + keystore_path as alias""" + import_cmd = [executable, "-importcert", "-noprompt", "-keystore", keystore_path, "-file", path, "-alias", alias] import_cmd += _get_keystore_type_keytool_parameters(keystore_type) if trust_cacert: import_cmd.extend(["-trustcacerts"]) # Use local certificate from local path and import it to a java keystore - (import_rc, import_out, import_err) = module.run_command(import_cmd, - data=f"{keystore_pass}\n{keystore_pass}", - check_rc=False) - diff = {'before': '\n', 'after': f'{alias}\n'} + (import_rc, import_out, import_err) = module.run_command( + import_cmd, data=f"{keystore_pass}\n{keystore_pass}", check_rc=False + ) + diff = {"before": "\n", "after": f"{alias}\n"} if import_rc != 0: module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, error=import_err) - return dict(changed=True, msg=import_out, - rc=import_rc, cmd=import_cmd, stdout=import_out, - error=import_err, diff=diff) + return dict( + changed=True, msg=import_out, rc=import_rc, cmd=import_cmd, stdout=import_out, error=import_err, diff=diff + ) def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type): - ''' Delete certificate identified with alias from keystore on keystore_path ''' - del_cmd = [ - executable, - "-delete", - "-noprompt", - "-keystore", - keystore_path, - "-alias", - alias - ] + """Delete certificate identified with alias from keystore on keystore_path""" + del_cmd = [executable, "-delete", "-noprompt", "-keystore", keystore_path, "-alias", alias] del_cmd += _get_keystore_type_keytool_parameters(keystore_type) # Delete SSL certificate from keystore (del_rc, del_out, del_err) = module.run_command(del_cmd, data=keystore_pass, check_rc=True) - diff = {'before': f'{alias}\n', 'after': None} + diff = {"before": f"{alias}\n", "after": None} if del_rc != 0: module.fail_json(msg=del_out, rc=del_rc, cmd=del_cmd, error=del_err) - return dict(changed=True, msg=del_out, rc=del_rc, cmd=del_cmd, - stdout=del_out, error=del_err, diff=diff) + return dict(changed=True, msg=del_out, rc=del_rc, cmd=del_cmd, stdout=del_out, error=del_err, diff=diff) def test_keytool(module, executable): - ''' Test if keytool is actually executable or not ''' + """Test if keytool is actually executable or not""" module.run_command([executable], check_rc=True) def test_keystore(module, keystore_path): - ''' Check if we can access keystore as file or not ''' + """Check if we can access keystore as file or not""" if keystore_path is None: - keystore_path = '' + keystore_path = "" if not os.path.exists(keystore_path) and not os.path.isfile(keystore_path): # Keystore doesn't exist we want to create it @@ -493,60 +459,59 @@ def test_keystore(module, keystore_path): def main(): argument_spec = dict( - cert_url=dict(type='str'), - cert_path=dict(type='path'), - 
cert_content=dict(type='str'), - pkcs12_path=dict(type='path'), - pkcs12_password=dict(type='str', no_log=True), - pkcs12_alias=dict(type='str'), - cert_alias=dict(type='str'), - cert_port=dict(type='int', default=443), - keystore_path=dict(type='path'), - keystore_pass=dict(type='str', required=True, no_log=True), - trust_cacert=dict(type='bool', default=False), - keystore_create=dict(type='bool', default=False), - keystore_type=dict(type='str'), - executable=dict(type='str', default='keytool'), - state=dict(type='str', default='present', choices=['absent', 'present']), + cert_url=dict(type="str"), + cert_path=dict(type="path"), + cert_content=dict(type="str"), + pkcs12_path=dict(type="path"), + pkcs12_password=dict(type="str", no_log=True), + pkcs12_alias=dict(type="str"), + cert_alias=dict(type="str"), + cert_port=dict(type="int", default=443), + keystore_path=dict(type="path"), + keystore_pass=dict(type="str", required=True, no_log=True), + trust_cacert=dict(type="bool", default=False), + keystore_create=dict(type="bool", default=False), + keystore_type=dict(type="str"), + executable=dict(type="str", default="keytool"), + state=dict(type="str", default="present", choices=["absent", "present"]), ) module = AnsibleModule( argument_spec=argument_spec, - required_if=[['state', 'present', ('cert_path', 'cert_url', 'cert_content', 'pkcs12_path'), True], - ['state', 'absent', ('cert_url', 'cert_alias'), True]], - required_together=[['keystore_path', 'keystore_pass']], - mutually_exclusive=[ - ['cert_url', 'cert_path', 'cert_content', 'pkcs12_path'] + required_if=[ + ["state", "present", ("cert_path", "cert_url", "cert_content", "pkcs12_path"), True], + ["state", "absent", ("cert_url", "cert_alias"), True], ], + required_together=[["keystore_path", "keystore_pass"]], + mutually_exclusive=[["cert_url", "cert_path", "cert_content", "pkcs12_path"]], supports_check_mode=True, add_file_common_args=True, ) - url = module.params.get('cert_url') - path = module.params.get('cert_path') - content = module.params.get('cert_content') - port = module.params.get('cert_port') + url = module.params.get("cert_url") + path = module.params.get("cert_path") + content = module.params.get("cert_content") + port = module.params.get("cert_port") - pkcs12_path = module.params.get('pkcs12_path') - pkcs12_pass = module.params.get('pkcs12_password', '') - pkcs12_alias = module.params.get('pkcs12_alias', '1') + pkcs12_path = module.params.get("pkcs12_path") + pkcs12_pass = module.params.get("pkcs12_password", "") + pkcs12_alias = module.params.get("pkcs12_alias", "1") - cert_alias = module.params.get('cert_alias') or url - trust_cacert = module.params.get('trust_cacert') + cert_alias = module.params.get("cert_alias") or url + trust_cacert = module.params.get("trust_cacert") - keystore_path = module.params.get('keystore_path') - keystore_pass = module.params.get('keystore_pass') - keystore_create = module.params.get('keystore_create') - keystore_type = module.params.get('keystore_type') - executable = module.params.get('executable') - state = module.params.get('state') + keystore_path = module.params.get("keystore_path") + keystore_pass = module.params.get("keystore_pass") + keystore_create = module.params.get("keystore_create") + keystore_type = module.params.get("keystore_type") + executable = module.params.get("executable") + state = module.params.get("state") # openssl dependency resolution - openssl_bin = module.get_bin_path('openssl', True) + openssl_bin = module.get_bin_path("openssl", True) if path and not 
cert_alias: - module.fail_json(changed=False, - msg=f"Using local path import from {keystore_path} requires alias argument.") + module.fail_json(changed=False, msg=f"Using local path import from {keystore_path} requires alias argument.") test_keytool(module, executable) @@ -554,7 +519,8 @@ def main(): test_keystore(module, keystore_path) alias_exists, alias_exists_output = _check_cert_present( - module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) + module, executable, keystore_path, keystore_pass, cert_alias, keystore_type + ) (dummy, new_certificate) = tempfile.mkstemp() (dummy, old_certificate) = tempfile.mkstemp() @@ -563,7 +529,7 @@ def main(): result = dict() - if state == 'absent' and alias_exists: + if state == "absent" and alias_exists: if module.check_mode: module.exit_json(changed=True) @@ -571,7 +537,7 @@ def main(): result = delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) # dump certificate to enroll in the keystore on disk and compute digest - if state == 'present': + if state == "present": # The alias exists in the keystore so we must now compare the SHA256 hash of the # public certificate already in the keystore, and the certificate we are wanting to add if alias_exists: @@ -580,7 +546,7 @@ def main(): keystore_cert_digest = _get_digest_from_x509_file(module, old_certificate, openssl_bin) else: - keystore_cert_digest = '' + keystore_cert_digest = "" if pkcs12_path: # Extracting certificate with openssl @@ -603,7 +569,6 @@ def main(): new_cert_digest = _get_digest_from_x509_file(module, new_certificate, openssl_bin) if keystore_cert_digest != new_cert_digest: - if module.check_mode: module.exit_json(changed=True) @@ -613,15 +578,32 @@ def main(): delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) if pkcs12_path: - result = import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias, - keystore_path, keystore_pass, cert_alias, keystore_type) + result = import_pkcs12_path( + module, + executable, + pkcs12_path, + pkcs12_pass, + pkcs12_alias, + keystore_path, + keystore_pass, + cert_alias, + keystore_type, + ) else: - result = import_cert_path(module, executable, new_certificate, keystore_path, - keystore_pass, cert_alias, keystore_type, trust_cacert) + result = import_cert_path( + module, + executable, + new_certificate, + keystore_path, + keystore_pass, + cert_alias, + keystore_type, + trust_cacert, + ) if os.path.exists(keystore_path): changed_permissions = _update_permissions(module, keystore_path) - result['changed'] = result.get('changed', False) or changed_permissions + result["changed"] = result.get("changed", False) or changed_permissions module.exit_json(**result) diff --git a/plugins/modules/java_keystore.py b/plugins/modules/java_keystore.py index 686a2be6b29..32167ab87e8 100644 --- a/plugins/modules/java_keystore.py +++ b/plugins/modules/java_keystore.py @@ -200,6 +200,7 @@ from cryptography.hazmat.primitives import hashes from cryptography.exceptions import UnsupportedAlgorithm from cryptography.hazmat.backends.openssl import backend + HAS_CRYPTOGRAPHY_PKCS12 = True except ImportError: HAS_CRYPTOGRAPHY_PKCS12 = False @@ -210,72 +211,73 @@ def __init__(self, module): self.module = module self.result = dict() - self.keytool_bin = module.get_bin_path('keytool', True) + self.keytool_bin = module.get_bin_path("keytool", True) - self.certificate = module.params['certificate'] - self.keypass = module.params['private_key_passphrase'] - 
self.keystore_path = module.params['dest'] - self.name = module.params['name'] - self.password = module.params['password'] - self.private_key = module.params['private_key'] - self.ssl_backend = module.params['ssl_backend'] - self.keystore_type = module.params['keystore_type'] + self.certificate = module.params["certificate"] + self.keypass = module.params["private_key_passphrase"] + self.keystore_path = module.params["dest"] + self.name = module.params["name"] + self.password = module.params["password"] + self.private_key = module.params["private_key"] + self.ssl_backend = module.params["ssl_backend"] + self.keystore_type = module.params["keystore_type"] - if self.ssl_backend == 'openssl': - self.openssl_bin = module.get_bin_path('openssl', True) + if self.ssl_backend == "openssl": + self.openssl_bin = module.get_bin_path("openssl", True) else: if not HAS_CRYPTOGRAPHY_PKCS12: - self.module.fail_json(msg=missing_required_lib('cryptography >= 3.0')) + self.module.fail_json(msg=missing_required_lib("cryptography >= 3.0")) - if module.params['certificate_path'] is None: + if module.params["certificate_path"] is None: self.certificate_path = create_file(self.certificate) self.module.add_cleanup_file(self.certificate_path) else: - self.certificate_path = module.params['certificate_path'] + self.certificate_path = module.params["certificate_path"] - if module.params['private_key_path'] is None: + if module.params["private_key_path"] is None: self.private_key_path = create_file(self.private_key) self.module.add_cleanup_file(self.private_key_path) else: - self.private_key_path = module.params['private_key_path'] + self.private_key_path = module.params["private_key_path"] def update_permissions(self): file_args = self.module.load_file_common_arguments(self.module.params, path=self.keystore_path) return self.module.set_fs_attributes_if_different(file_args, False) - def read_certificate_fingerprint(self, cert_format='PEM'): - if self.ssl_backend == 'cryptography': - if cert_format == 'PEM': + def read_certificate_fingerprint(self, cert_format="PEM"): + if self.ssl_backend == "cryptography": + if cert_format == "PEM": cert_loader = load_pem_x509_certificate else: cert_loader = load_der_x509_certificate try: - with open(self.certificate_path, 'rb') as cert_file: - cert = cert_loader( - cert_file.read(), - backend=backend - ) + with open(self.certificate_path, "rb") as cert_file: + cert = cert_loader(cert_file.read(), backend=backend) except (OSError, ValueError) as e: self.module.fail_json(msg=f"Unable to read the provided certificate: {e}") fp = cert.fingerprint(hashes.SHA256()).hex().upper() - fingerprint = ':'.join([fp[i:i + 2] for i in range(0, len(fp), 2)]) + fingerprint = ":".join([fp[i : i + 2] for i in range(0, len(fp), 2)]) else: current_certificate_fingerprint_cmd = [ - self.openssl_bin, "x509", "-noout", "-in", self.certificate_path, "-fingerprint", "-sha256" + self.openssl_bin, + "x509", + "-noout", + "-in", + self.certificate_path, + "-fingerprint", + "-sha256", ] (rc, current_certificate_fingerprint_out, current_certificate_fingerprint_err) = self.module.run_command( - current_certificate_fingerprint_cmd, - environ_update=None, - check_rc=False + current_certificate_fingerprint_cmd, environ_update=None, check_rc=False ) if rc != 0: return self.module.fail_json( msg=current_certificate_fingerprint_out, err=current_certificate_fingerprint_err, cmd=current_certificate_fingerprint_cmd, - rc=rc + rc=rc, ) current_certificate_match = re.search(r"=([\w:]+)", current_certificate_fingerprint_out) 
@@ -283,7 +285,7 @@ def read_certificate_fingerprint(self, cert_format='PEM'): return self.module.fail_json( msg=f"Unable to find the current certificate fingerprint in {current_certificate_fingerprint_out}", cmd=current_certificate_fingerprint_cmd, - rc=rc + rc=rc, ) fingerprint = current_certificate_match.group(1) @@ -291,25 +293,33 @@ def read_certificate_fingerprint(self, cert_format='PEM'): def read_stored_certificate_fingerprint(self): stored_certificate_fingerprint_cmd = [ - self.keytool_bin, "-list", "-alias", self.name, - "-keystore", self.keystore_path, "-v" + self.keytool_bin, + "-list", + "-alias", + self.name, + "-keystore", + self.keystore_path, + "-v", ] (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = self.module.run_command( - stored_certificate_fingerprint_cmd, data=self.password, check_rc=False) + stored_certificate_fingerprint_cmd, data=self.password, check_rc=False + ) if rc != 0: - if f"keytool error: java.lang.Exception: Alias <{self.name}> does not exist" \ - in stored_certificate_fingerprint_out: + if ( + f"keytool error: java.lang.Exception: Alias <{self.name}> does not exist" + in stored_certificate_fingerprint_out + ): return "alias mismatch" if re.match( - r"keytool error: java\.io\.IOException: [Kk]eystore( was tampered with, or)? password was incorrect", - stored_certificate_fingerprint_out + r"keytool error: java\.io\.IOException: [Kk]eystore( was tampered with, or)? password was incorrect", + stored_certificate_fingerprint_out, ): return "password mismatch" return self.module.fail_json( msg=stored_certificate_fingerprint_out, err=stored_certificate_fingerprint_err, cmd=stored_certificate_fingerprint_cmd, - rc=rc + rc=rc, ) if self.keystore_type not in (None, self.current_type()): @@ -320,69 +330,52 @@ def read_stored_certificate_fingerprint(self): return self.module.fail_json( msg=f"Unable to find the stored certificate fingerprint in {stored_certificate_fingerprint_out}", cmd=stored_certificate_fingerprint_cmd, - rc=rc + rc=rc, ) return stored_certificate_match.group(1) def current_type(self): - magic_bytes = b'\xfe\xed\xfe\xed' - with open(self.keystore_path, 'rb') as fd: + magic_bytes = b"\xfe\xed\xfe\xed" + with open(self.keystore_path, "rb") as fd: header = fd.read(4) if header == magic_bytes: - return 'jks' - return 'pkcs12' + return "jks" + return "pkcs12" def cert_changed(self): current_certificate_fingerprint = self.read_certificate_fingerprint() stored_certificate_fingerprint = self.read_stored_certificate_fingerprint() return current_certificate_fingerprint != stored_certificate_fingerprint - def cryptography_create_pkcs12_bundle(self, keystore_p12_path, key_format='PEM', cert_format='PEM'): - if key_format == 'PEM': + def cryptography_create_pkcs12_bundle(self, keystore_p12_path, key_format="PEM", cert_format="PEM"): + if key_format == "PEM": key_loader = load_pem_private_key else: key_loader = load_der_private_key - if cert_format == 'PEM': + if cert_format == "PEM": cert_loader = load_pem_x509_certificate else: cert_loader = load_der_x509_certificate try: - with open(self.private_key_path, 'rb') as key_file: - private_key = key_loader( - key_file.read(), - password=to_bytes(self.keypass), - backend=backend - ) + with open(self.private_key_path, "rb") as key_file: + private_key = key_loader(key_file.read(), password=to_bytes(self.keypass), backend=backend) except TypeError: # Re-attempt with no password to match existing behavior try: - with open(self.private_key_path, 'rb') as key_file: - private_key = key_loader( 
- key_file.read(), - password=None, - backend=backend - ) + with open(self.private_key_path, "rb") as key_file: + private_key = key_loader(key_file.read(), password=None, backend=backend) except (OSError, TypeError, ValueError, UnsupportedAlgorithm) as e: - self.module.fail_json( - msg=f"The following error occurred while loading the provided private_key: {e}" - ) + self.module.fail_json(msg=f"The following error occurred while loading the provided private_key: {e}") except (OSError, ValueError, UnsupportedAlgorithm) as e: - self.module.fail_json( - msg=f"The following error occurred while loading the provided private_key: {e}" - ) + self.module.fail_json(msg=f"The following error occurred while loading the provided private_key: {e}") try: - with open(self.certificate_path, 'rb') as cert_file: - cert = cert_loader( - cert_file.read(), - backend=backend - ) + with open(self.certificate_path, "rb") as cert_file: + cert = cert_loader(cert_file.read(), backend=backend) except (OSError, ValueError, UnsupportedAlgorithm) as e: - self.module.fail_json( - msg=f"The following error occurred while loading the provided certificate: {e}" - ) + self.module.fail_json(msg=f"The following error occurred while loading the provided certificate: {e}") if self.password: encryption = BestAvailableEncryption(to_bytes(self.password)) @@ -390,21 +383,30 @@ def cryptography_create_pkcs12_bundle(self, keystore_p12_path, key_format='PEM', encryption = NoEncryption() pkcs12_bundle = serialize_key_and_certificates( - name=to_bytes(self.name), - key=private_key, - cert=cert, - cas=None, - encryption_algorithm=encryption + name=to_bytes(self.name), key=private_key, cert=cert, cas=None, encryption_algorithm=encryption ) - with open(keystore_p12_path, 'wb') as p12_file: + with open(keystore_p12_path, "wb") as p12_file: p12_file.write(pkcs12_bundle) self.result.update(msg="PKCS#12 bundle created by cryptography backend") def openssl_create_pkcs12_bundle(self, keystore_p12_path): - export_p12_cmd = [self.openssl_bin, "pkcs12", "-export", "-name", self.name, "-in", self.certificate_path, - "-inkey", self.private_key_path, "-out", keystore_p12_path, "-passout", "stdin"] + export_p12_cmd = [ + self.openssl_bin, + "pkcs12", + "-export", + "-name", + self.name, + "-in", + self.certificate_path, + "-inkey", + self.private_key_path, + "-out", + keystore_p12_path, + "-passout", + "stdin", + ] # when keypass is provided, add -passin cmd_stdin = "" @@ -420,42 +422,49 @@ def openssl_create_pkcs12_bundle(self, keystore_p12_path): self.result = dict(msg=export_p12_out, cmd=export_p12_cmd, rc=rc) if rc != 0: - self.result['err'] = export_p12_err + self.result["err"] = export_p12_err self.module.fail_json(**self.result) def create(self): """Create the keystore, or replace it with a rollback in case of - keytool failure. + keytool failure. """ if self.module.check_mode: - self.result['changed'] = True + self.result["changed"] = True return self.result keystore_p12_path = create_path() self.module.add_cleanup_file(keystore_p12_path) - if self.ssl_backend == 'cryptography': + if self.ssl_backend == "cryptography": self.cryptography_create_pkcs12_bundle(keystore_p12_path) else: self.openssl_create_pkcs12_bundle(keystore_p12_path) - if self.keystore_type == 'pkcs12': + if self.keystore_type == "pkcs12": # Preserve properties of the destination file, if any. 
self.module.atomic_move(os.path.abspath(keystore_p12_path), os.path.abspath(self.keystore_path)) self.update_permissions() - self.result['changed'] = True + self.result["changed"] = True return self.result - import_keystore_cmd = [self.keytool_bin, "-importkeystore", - "-destkeystore", self.keystore_path, - "-srckeystore", keystore_p12_path, - "-srcstoretype", "pkcs12", - "-alias", self.name, - "-noprompt"] + import_keystore_cmd = [ + self.keytool_bin, + "-importkeystore", + "-destkeystore", + self.keystore_path, + "-srckeystore", + keystore_p12_path, + "-srcstoretype", + "pkcs12", + "-alias", + self.name, + "-noprompt", + ] - if self.keystore_type == 'jks': - keytool_help = self.module.run_command([self.keytool_bin, '-importkeystore', '-help']) - if '-deststoretype' in keytool_help[1] + keytool_help[2]: + if self.keystore_type == "jks": + keytool_help = self.module.run_command([self.keytool_bin, "-importkeystore", "-help"]) + if "-deststoretype" in keytool_help[1] + keytool_help[2]: import_keystore_cmd.insert(4, "-deststoretype") import_keystore_cmd.insert(5, self.keystore_type) @@ -467,7 +476,7 @@ def create(self): os.remove(self.keystore_path) (rc, import_keystore_out, import_keystore_err) = self.module.run_command( - import_keystore_cmd, data=f'{self.password}\n{self.password}\n{self.password}', check_rc=False + import_keystore_cmd, data=f"{self.password}\n{self.password}\n{self.password}", check_rc=False ) self.result = dict(msg=import_keystore_out, cmd=import_keystore_cmd, rc=rc) @@ -477,13 +486,13 @@ def create(self): if keystore_backup is not None: self.module.preserved_copy(keystore_backup, self.keystore_path) os.remove(keystore_backup) - self.result['err'] = import_keystore_err + self.result["err"] = import_keystore_err return self.module.fail_json(**self.result) self.update_permissions() if keystore_backup is not None: os.remove(keystore_backup) - self.result['changed'] = True + self.result["changed"] = True return self.result def exists(self): @@ -499,49 +508,48 @@ def create_path(): def create_file(content): tmpfd, tmpfile = tempfile.mkstemp() - with os.fdopen(tmpfd, 'w') as f: + with os.fdopen(tmpfd, "w") as f: f.write(content) return tmpfile def main(): - choose_between = (['certificate', 'certificate_path'], - ['private_key', 'private_key_path']) + choose_between = (["certificate", "certificate_path"], ["private_key", "private_key_path"]) module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), - dest=dict(type='path', required=True), - certificate=dict(type='str', no_log=True), - certificate_path=dict(type='path'), - private_key=dict(type='str', no_log=True), - private_key_path=dict(type='path', no_log=False), - private_key_passphrase=dict(type='str', no_log=True), - password=dict(type='str', required=True, no_log=True), - ssl_backend=dict(type='str', default='openssl', choices=['openssl', 'cryptography']), - keystore_type=dict(type='str', choices=['jks', 'pkcs12']), - force=dict(type='bool', default=False), + name=dict(type="str", required=True), + dest=dict(type="path", required=True), + certificate=dict(type="str", no_log=True), + certificate_path=dict(type="path"), + private_key=dict(type="str", no_log=True), + private_key_path=dict(type="path", no_log=False), + private_key_passphrase=dict(type="str", no_log=True), + password=dict(type="str", required=True, no_log=True), + ssl_backend=dict(type="str", default="openssl", choices=["openssl", "cryptography"]), + keystore_type=dict(type="str", choices=["jks", "pkcs12"]), + force=dict(type="bool", 
default=False), ), required_one_of=choose_between, mutually_exclusive=choose_between, supports_check_mode=True, add_file_common_args=True, ) - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C") result = dict() jks = JavaKeystore(module) if jks.exists(): - if module.params['force'] or jks.cert_changed(): + if module.params["force"] or jks.cert_changed(): result = jks.create() else: - result['changed'] = jks.update_permissions() + result["changed"] = jks.update_permissions() else: result = jks.create() module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/jboss.py b/plugins/modules/jboss.py index 7ba770f8f01..6b42858c01b 100644 --- a/plugins/modules/jboss.py +++ b/plugins/modules/jboss.py @@ -82,7 +82,7 @@ from ansible.module_utils.basic import AnsibleModule -DEFAULT_DEPLOY_PATH = '/var/lib/jbossas/standalone/deployments' +DEFAULT_DEPLOY_PATH = "/var/lib/jbossas/standalone/deployments" def is_deployed(deploy_path, deployment): @@ -100,49 +100,49 @@ def is_failed(deploy_path, deployment): def main(): module = AnsibleModule( argument_spec=dict( - src=dict(type='path'), - deployment=dict(type='str', required=True), - deploy_path=dict(type='path', default=DEFAULT_DEPLOY_PATH), - state=dict(type='str', choices=['absent', 'present'], default='present'), + src=dict(type="path"), + deployment=dict(type="str", required=True), + deploy_path=dict(type="path", default=DEFAULT_DEPLOY_PATH), + state=dict(type="str", choices=["absent", "present"], default="present"), ), - required_if=[('state', 'present', ('src',))], - supports_check_mode=True + required_if=[("state", "present", ("src",))], + supports_check_mode=True, ) result = dict(changed=False) - src = module.params['src'] - deployment = module.params['deployment'] - deploy_path = module.params['deploy_path'] - state = module.params['state'] + src = module.params["src"] + deployment = module.params["deployment"] + deploy_path = module.params["deploy_path"] + state = module.params["state"] if not os.path.exists(deploy_path): module.fail_json(msg="deploy_path does not exist.") - if state == 'absent' and src: - module.warn('Parameter src is ignored when state=absent') - elif state == 'present' and not os.path.exists(src): - module.fail_json(msg=f'Source file {src} does not exist.') + if state == "absent" and src: + module.warn("Parameter src is ignored when state=absent") + elif state == "present" and not os.path.exists(src): + module.fail_json(msg=f"Source file {src} does not exist.") deployed = is_deployed(deploy_path, deployment) # === when check_mode === if module.check_mode: - if state == 'present': + if state == "present": if not deployed: - result['changed'] = True + result["changed"] = True elif deployed: if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)): - result['changed'] = True + result["changed"] = True - elif state == 'absent' and deployed: - result['changed'] = True + elif state == "absent" and deployed: + result["changed"] = True module.exit_json(**result) # ======================= - if state == 'present' and not deployed: + if state == "present" and not deployed: if is_failed(deploy_path, deployment): # Clean up old failed deployment os.remove(os.path.join(deploy_path, f"{deployment}.failed")) @@ -151,11 +151,11 @@ def main(): while not deployed: deployed = is_deployed(deploy_path, deployment) if is_failed(deploy_path, deployment): - 
module.fail_json(msg=f'Deploying {deployment} failed.') + module.fail_json(msg=f"Deploying {deployment} failed.") time.sleep(1) - result['changed'] = True + result["changed"] = True - if state == 'present' and deployed: + if state == "present" and deployed: if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)): os.remove(os.path.join(deploy_path, f"{deployment}.deployed")) module.preserved_copy(src, os.path.join(deploy_path, deployment)) @@ -163,21 +163,21 @@ def main(): while not deployed: deployed = is_deployed(deploy_path, deployment) if is_failed(deploy_path, deployment): - module.fail_json(msg=f'Deploying {deployment} failed.') + module.fail_json(msg=f"Deploying {deployment} failed.") time.sleep(1) - result['changed'] = True + result["changed"] = True - if state == 'absent' and deployed: + if state == "absent" and deployed: os.remove(os.path.join(deploy_path, f"{deployment}.deployed")) while deployed: deployed = not is_undeployed(deploy_path, deployment) if is_failed(deploy_path, deployment): - module.fail_json(msg=f'Undeploying {deployment} failed.') + module.fail_json(msg=f"Undeploying {deployment} failed.") time.sleep(1) - result['changed'] = True + result["changed"] = True module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/jenkins_build.py b/plugins/modules/jenkins_build.py index 4b584b608b9..9285a1c3915 100644 --- a/plugins/modules/jenkins_build.py +++ b/plugins/modules/jenkins_build.py @@ -150,6 +150,7 @@ JENKINS_IMP_ERR = None try: import jenkins + python_jenkins_installed = True except ImportError: JENKINS_IMP_ERR = traceback.format_exc() @@ -159,28 +160,27 @@ class JenkinsBuild: - def __init__(self, module): self.module = module - self.name = module.params.get('name') - self.password = module.params.get('password') - self.args = module.params.get('args') - self.state = module.params.get('state') - self.token = module.params.get('token') - self.user = module.params.get('user') - self.jenkins_url = module.params.get('url') - self.build_number = module.params.get('build_number') - self.detach = module.params.get('detach') - self.time_between_checks = module.params.get('time_between_checks') + self.name = module.params.get("name") + self.password = module.params.get("password") + self.args = module.params.get("args") + self.state = module.params.get("state") + self.token = module.params.get("token") + self.user = module.params.get("user") + self.jenkins_url = module.params.get("url") + self.build_number = module.params.get("build_number") + self.detach = module.params.get("detach") + self.time_between_checks = module.params.get("time_between_checks") self.server = self.get_jenkins_connection() self.result = { - 'changed': False, - 'url': self.jenkins_url, - 'name': self.name, - 'user': self.user, - 'state': self.state, + "changed": False, + "url": self.jenkins_url, + "name": self.name, + "user": self.user, + "state": self.state, } self.EXCL_STATE = "excluded state" @@ -196,14 +196,15 @@ def get_jenkins_connection(self): else: return jenkins.Jenkins(self.jenkins_url) except Exception as e: - self.module.fail_json(msg=f'Unable to connect to Jenkins server, {e}') + self.module.fail_json(msg=f"Unable to connect to Jenkins server, {e}") def get_next_build(self): try: - build_number = self.server.get_job_info(self.name)['nextBuildNumber'] + build_number = self.server.get_job_info(self.name)["nextBuildNumber"] except Exception as e: - self.module.fail_json(msg=f'Unable to get job info from Jenkins 
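# --- Illustrative sketch, not from the commit above -------------------------
# The jenkins_build flow as a plain script, assuming the python-jenkins
# package: record the job's nextBuildNumber before triggering, then poll
# get_build_info() until Jenkins reports a result. URL, credentials and the
# job name are placeholders.
import time
import jenkins

server = jenkins.Jenkins("http://localhost:8080", username="admin", password="api-token")
job = "example-job"

build_number = server.get_job_info(job)["nextBuildNumber"]
server.build_job(job)  # the module passes its `args` parameter here when given

while True:
    try:
        info = server.get_build_info(job, build_number)
    except jenkins.NotFoundException:
        info = {"result": None}  # queued but not started yet
    if info["result"] is not None:  # None while the build is still running
        break
    time.sleep(10)  # module default: time_between_checks=10

print(info["result"])  # e.g. SUCCESS or ABORTED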
server, {e}', - exception=traceback.format_exc()) + self.module.fail_json( + msg=f"Unable to get job info from Jenkins server, {e}", exception=traceback.format_exc() + ) return build_number @@ -216,8 +217,7 @@ def get_build_status(self): response["result"] = "ABSENT" return response except Exception as e: - self.module.fail_json(msg=f'Unable to fetch build information, {e}', - exception=traceback.format_exc()) + self.module.fail_json(msg=f"Unable to fetch build information, {e}", exception=traceback.format_exc()) def present_build(self): self.build_number = self.get_next_build() @@ -228,56 +228,59 @@ def present_build(self): else: self.server.build_job(self.name, self.args) except Exception as e: - self.module.fail_json(msg=f'Unable to create build for {self.jenkins_url}: {e}', - exception=traceback.format_exc()) + self.module.fail_json( + msg=f"Unable to create build for {self.jenkins_url}: {e}", exception=traceback.format_exc() + ) def stopped_build(self): build_info = None try: build_info = self.server.get_build_info(self.name, self.build_number) - if build_info['building'] is True: + if build_info["building"] is True: self.server.stop_build(self.name, self.build_number) except Exception as e: - self.module.fail_json(msg=f'Unable to stop build for {self.jenkins_url}: {e}', - exception=traceback.format_exc()) + self.module.fail_json( + msg=f"Unable to stop build for {self.jenkins_url}: {e}", exception=traceback.format_exc() + ) else: - if build_info['building'] is False: + if build_info["building"] is False: self.module.exit_json(**self.result) def absent_build(self): try: self.server.delete_build(self.name, self.build_number) except Exception as e: - self.module.fail_json(msg=f'Unable to delete build for {self.jenkins_url}: {e}', - exception=traceback.format_exc()) + self.module.fail_json( + msg=f"Unable to delete build for {self.jenkins_url}: {e}", exception=traceback.format_exc() + ) def get_result(self): result = self.result build_status = self.get_build_status() - if build_status['result'] is None: + if build_status["result"] is None: # If detached mode is active mark as success, we wouldn't be able to get here if it didn't exist if self.detach: - result['changed'] = True - result['build_info'] = build_status + result["changed"] = True + result["build_info"] = build_status return result sleep(self.time_between_checks) self.get_result() else: - if self.state == "stopped" and build_status['result'] == "ABORTED": - result['changed'] = True - result['build_info'] = build_status - elif self.state == "absent" and build_status['result'] == "ABSENT": - result['changed'] = True - result['build_info'] = build_status - elif self.state != "absent" and build_status['result'] == "SUCCESS": - result['changed'] = True - result['build_info'] = build_status + if self.state == "stopped" and build_status["result"] == "ABORTED": + result["changed"] = True + result["build_info"] = build_status + elif self.state == "absent" and build_status["result"] == "ABSENT": + result["changed"] = True + result["build_info"] = build_status + elif self.state != "absent" and build_status["result"] == "SUCCESS": + result["changed"] = True + result["build_info"] = build_status else: - result['failed'] = True - result['build_info'] = build_status + result["failed"] = True + result["build_info"] = build_status return result @@ -285,35 +288,37 @@ def get_result(self): def test_dependencies(module): if not python_jenkins_installed: module.fail_json( - msg=missing_required_lib("python-jenkins", - 
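# --- Illustrative sketch, not from the commit above -------------------------
# The guarded-import pattern these Jenkins modules share, shown whole: try
# the optional library at import time, remember the traceback, and only fail
# (with an install hint via missing_required_lib) once the module runs.
import traceback

from ansible.module_utils.basic import missing_required_lib

JENKINS_IMP_ERR = None
try:
    import jenkins  # noqa: F401
    HAS_JENKINS = True
except ImportError:
    JENKINS_IMP_ERR = traceback.format_exc()
    HAS_JENKINS = False

def test_dependencies(module):
    if not HAS_JENKINS:
        module.fail_json(
            msg=missing_required_lib(
                "python-jenkins", url="https://python-jenkins.readthedocs.io/en/latest/install.html"
            ),
            exception=JENKINS_IMP_ERR,
        )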
url="https://python-jenkins.readthedocs.io/en/latest/install.html"), - exception=JENKINS_IMP_ERR) + msg=missing_required_lib( + "python-jenkins", url="https://python-jenkins.readthedocs.io/en/latest/install.html" + ), + exception=JENKINS_IMP_ERR, + ) def main(): module = AnsibleModule( argument_spec=dict( - args=dict(type='dict'), - build_number=dict(type='int'), + args=dict(type="dict"), + build_number=dict(type="int"), name=dict(required=True), password=dict(no_log=True), - state=dict(choices=['present', 'absent', 'stopped'], default="present"), + state=dict(choices=["present", "absent", "stopped"], default="present"), token=dict(no_log=True), url=dict(default="http://localhost:8080"), user=dict(), - detach=dict(type='bool', default=False), - time_between_checks=dict(type='int', default=10), + detach=dict(type="bool", default=False), + time_between_checks=dict(type="int", default=10), ), - mutually_exclusive=[['password', 'token']], - required_if=[['state', 'absent', ['build_number'], True], ['state', 'stopped', ['build_number'], True]], + mutually_exclusive=[["password", "token"]], + required_if=[["state", "absent", ["build_number"], True], ["state", "stopped", ["build_number"], True]], ) test_dependencies(module) jenkins_build = JenkinsBuild(module) - if module.params.get('state') == "present": + if module.params.get("state") == "present": jenkins_build.present_build() - elif module.params.get('state') == "stopped": + elif module.params.get("state") == "stopped": jenkins_build.stopped_build() else: jenkins_build.absent_build() @@ -323,5 +328,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/jenkins_build_info.py b/plugins/modules/jenkins_build_info.py index b55299bc161..2c805cc8b85 100644 --- a/plugins/modules/jenkins_build_info.py +++ b/plugins/modules/jenkins_build_info.py @@ -105,6 +105,7 @@ JENKINS_IMP_ERR = None try: import jenkins + python_jenkins_installed = True except ImportError: JENKINS_IMP_ERR = traceback.format_exc() @@ -114,23 +115,22 @@ class JenkinsBuildInfo: - def __init__(self, module): self.module = module - self.name = module.params.get('name') - self.password = module.params.get('password') - self.token = module.params.get('token') - self.user = module.params.get('user') - self.jenkins_url = module.params.get('url') - self.build_number = module.params.get('build_number') + self.name = module.params.get("name") + self.password = module.params.get("password") + self.token = module.params.get("token") + self.user = module.params.get("user") + self.jenkins_url = module.params.get("url") + self.build_number = module.params.get("build_number") self.server = self.get_jenkins_connection() self.result = { - 'changed': False, - 'url': self.jenkins_url, - 'name': self.name, - 'user': self.user, + "changed": False, + "url": self.jenkins_url, + "name": self.name, + "user": self.user, } def get_jenkins_connection(self): @@ -144,13 +144,13 @@ def get_jenkins_connection(self): else: return jenkins.Jenkins(self.jenkins_url) except Exception as e: - self.module.fail_json(msg=f'Unable to connect to Jenkins server, {e}') + self.module.fail_json(msg=f"Unable to connect to Jenkins server, {e}") def get_build_status(self): try: if self.build_number is None: job_info = self.server.get_job_info(self.name) - self.build_number = job_info['lastBuild']['number'] + self.build_number = job_info["lastBuild"]["number"] return self.server.get_build_info(self.name, self.build_number) except jenkins.JenkinsException 
as e: @@ -158,16 +158,15 @@ def get_build_status(self): response["result"] = "ABSENT" return response except Exception as e: - self.module.fail_json(msg=f'Unable to fetch build information, {e}', - exception=traceback.format_exc()) + self.module.fail_json(msg=f"Unable to fetch build information, {e}", exception=traceback.format_exc()) def get_result(self): result = self.result build_status = self.get_build_status() - if build_status['result'] == "ABSENT": - result['failed'] = True - result['build_info'] = build_status + if build_status["result"] == "ABSENT": + result["failed"] = True + result["build_info"] = build_status return result @@ -175,22 +174,24 @@ def get_result(self): def test_dependencies(module): if not python_jenkins_installed: module.fail_json( - msg=missing_required_lib("python-jenkins", - url="https://python-jenkins.readthedocs.io/en/latest/install.html"), - exception=JENKINS_IMP_ERR) + msg=missing_required_lib( + "python-jenkins", url="https://python-jenkins.readthedocs.io/en/latest/install.html" + ), + exception=JENKINS_IMP_ERR, + ) def main(): module = AnsibleModule( argument_spec=dict( - build_number=dict(type='int'), + build_number=dict(type="int"), name=dict(required=True), password=dict(no_log=True), token=dict(no_log=True), url=dict(default="http://localhost:8080"), user=dict(), ), - mutually_exclusive=[['password', 'token']], + mutually_exclusive=[["password", "token"]], supports_check_mode=True, ) @@ -201,5 +202,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/jenkins_credential.py b/plugins/modules/jenkins_credential.py index baeab6963e8..9daed1ec462 100644 --- a/plugins/modules/jenkins_credential.py +++ b/plugins/modules/jenkins_credential.py @@ -332,7 +332,6 @@ # Function to validate file paths exist on disk def validate_file_exist(module, path): - if path and not os.path.exists(path): module.fail_json(msg=f"File not found: {path}") @@ -362,13 +361,9 @@ def get_jenkins_crumb(module, headers): crumb_request_field = json_data["crumbRequestField"] crumb = json_data["crumb"] headers[crumb_request_field] = crumb # Set the crumb in headers - headers["Content-Type"] = ( - "application/x-www-form-urlencoded" # Set Content-Type for form data - ) + headers["Content-Type"] = "application/x-www-form-urlencoded" # Set Content-Type for form data if type == "token": - headers["Cookie"] = ( - session_cookie # Set session cookie for token operations - ) + headers["Cookie"] = session_cookie # Set session cookie for token operations return crumb_request_field, crumb, session_cookie # Return for test purposes except Exception: @@ -395,11 +390,7 @@ def clean_data(data): } # Filter out None values and unwanted keys - cleaned_data = { - key: value - for key, value in data.items() - if value is not None and key not in keys_to_remove - } + cleaned_data = {key: value for key, value in data.items() if value is not None and key not in keys_to_remove} return cleaned_data @@ -430,9 +421,7 @@ def target_exists(module, check_domain=False): elif status == 404: return False else: - module.fail_json( - msg=f"Unexpected status code {status} when checking {name} existence." 
- ) + module.fail_json(msg=f"Unexpected status code {status} when checking {name} existence.") # Function to delete the scope or credential provided @@ -447,7 +436,6 @@ def delete_target(module, headers): body = False try: - if type == "token": delete_url = f"{url}/user/{user}/descriptorByName/jenkins.security.ApiTokenProperty/revoke" body = urlencode({"tokenUuid": id}) @@ -456,9 +444,7 @@ def delete_target(module, headers): delete_url = f"{url}/credentials/store/{location}/domain/{id}/doDelete" else: - delete_url = ( - f"{url}/credentials/store/{location}/domain/{scope}/credential/{id}/doDelete" - ) + delete_url = f"{url}/credentials/store/{location}/domain/{scope}/credential/{id}/doDelete" response, info = fetch_url( module, @@ -470,9 +456,7 @@ def delete_target(module, headers): status = info.get("status", 0) if not status == 200: - module.fail_json( - msg=f"Failed to delete: HTTP {status}, {response}, {headers}" - ) + module.fail_json(msg=f"Failed to delete: HTTP {status}, {response}, {headers}") except Exception as e: module.fail_json(msg=f"Exception during delete: {e}") @@ -493,7 +477,6 @@ def read_privateKey(module): # body (bytes): Encoded multipart data # content_type (str): Content-Type header including boundary def embed_file_into_body(module, file_path, credentials): - filename = os.path.basename(file_path) try: @@ -520,7 +503,6 @@ def embed_file_into_body(module, file_path, credentials): # Main function to run the Ansible module def run_module(): - module = AnsibleModule( argument_spec=dict( id=dict(type="str"), @@ -629,7 +611,6 @@ def run_module(): module.exit_json(**result) if state == "present": - # If updating, we need to delete the existing credential/domain first based on force parameter if force and (does_exist or type == "token"): delete_target(module, headers) @@ -639,13 +620,11 @@ def run_module(): module.exit_json(**result) if type == "token": - post_url = f"{url}/user/{jenkins_user}/descriptorByName/jenkins.security.ApiTokenProperty/generateNewToken" body = f"newTokenName={name}" elif type == "scope": - post_url = f"{url}/credentials/store/{location}/createDomain" specifications = [] @@ -711,13 +690,11 @@ def run_module(): credentials.update({"$class": cred_class[type]}) if type == "file": - # Build multipart body and content-type body, content_type = embed_file_into_body(module, filePath, credentials) headers["Content-Type"] = content_type elif type == "github_app": - private_key = read_privateKey(module) credentials.update( @@ -728,7 +705,6 @@ def run_module(): ) elif type == "ssh_key": - private_key = read_privateKey(module) credentials.update( @@ -741,20 +717,15 @@ def run_module(): ) elif type == "certificate": - name, ext = os.path.splitext(filePath) if ext.lower() in [".p12", ".pfx"]: try: with open(filePath, "rb") as f: file_content = f.read() - uploaded_keystore = base64.b64encode(file_content).decode( - "utf-8" - ) + uploaded_keystore = base64.b64encode(file_content).decode("utf-8") except Exception as e: - module.fail_json( - msg=f"Failed to read or encode keystore file: {e}" - ) + module.fail_json(msg=f"Failed to read or encode keystore file: {e}") credentials.update( { @@ -772,9 +743,7 @@ def run_module(): with open(private_key_path, "r") as f: private_key = f.read() except Exception as e: - module.fail_json( - msg=f"Failed to read PEM files: {e}" - ) + module.fail_json(msg=f"Failed to read PEM files: {e}") credentials.update( { @@ -797,21 +766,16 @@ def run_module(): body = urlencode({"json": json.dumps(payload)}) else: # Delete - 
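# --- Illustrative sketch, not from the commit above -------------------------
# The CSRF dance jenkins_credential performs through fetch_url, shown with
# the standard library instead: GET /crumbIssuer/api/json, echo the crumb
# back under the header name Jenkins announces, and POST form-encoded data.
# The URL and the createCredentials endpoint are illustrative assumptions;
# real calls also need HTTP Basic auth, which is omitted here.
import json
from urllib.parse import urlencode
from urllib.request import Request, urlopen

url = "http://localhost:8080"

with urlopen(f"{url}/crumbIssuer/api/json") as resp:  # needs auth on most instances
    crumb_data = json.load(resp)

headers = {
    crumb_data["crumbRequestField"]: crumb_data["crumb"],
    "Content-Type": "application/x-www-form-urlencoded",
}

body = urlencode({"json": json.dumps({"credentials": {"id": "example-id"}})})
req = Request(
    f"{url}/credentials/store/system/domain/_/createCredentials",
    data=body.encode(),
    headers=headers,
    method="POST",
)
# urlopen(req)  # left commented: requires a reachable, authenticated Jenkins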
delete_target(module, headers) module.exit_json(changed=True, msg=f"{id} deleted successfully.") - if ( - not type == "scope" and not scope == "_" - ): # Check if custom scope exists if adding to a custom scope + if not type == "scope" and not scope == "_": # Check if custom scope exists if adding to a custom scope if not target_exists(module, True): module.fail_json(msg=f"Domain {scope} doesn't exists") try: - response, info = fetch_url( - module, post_url, headers=headers, data=body, method="POST" - ) + response, info = fetch_url(module, post_url, headers=headers, data=body, method="POST") except Exception as e: module.fail_json(msg=f"Request to {post_url} failed: {e}") diff --git a/plugins/modules/jenkins_job.py b/plugins/modules/jenkins_job.py index afa7eed24d2..770ac35233c 100644 --- a/plugins/modules/jenkins_job.py +++ b/plugins/modules/jenkins_job.py @@ -165,6 +165,7 @@ JENKINS_IMP_ERR = None try: import jenkins + python_jenkins_installed = True except ImportError: JENKINS_IMP_ERR = traceback.format_exc() @@ -175,35 +176,31 @@ class JenkinsJob: - def __init__(self, module): self.module = module - self.config = module.params.get('config') - self.name = module.params.get('name') - self.password = module.params.get('password') - self.state = module.params.get('state') - self.enabled = module.params.get('enabled') - self.token = module.params.get('token') - self.user = module.params.get('user') - self.jenkins_url = module.params.get('url') + self.config = module.params.get("config") + self.name = module.params.get("name") + self.password = module.params.get("password") + self.state = module.params.get("state") + self.enabled = module.params.get("enabled") + self.token = module.params.get("token") + self.user = module.params.get("user") + self.jenkins_url = module.params.get("url") self.server = self.get_jenkins_connection() self.result = { - 'changed': False, - 'url': self.jenkins_url, - 'name': self.name, - 'user': self.user, - 'state': self.state, - 'diff': { - 'before': "", - 'after': "" - } + "changed": False, + "url": self.jenkins_url, + "name": self.name, + "user": self.user, + "state": self.state, + "diff": {"before": "", "after": ""}, } self.EXCL_STATE = "excluded state" - if not module.params['validate_certs']: - os.environ['PYTHONHTTPSVERIFY'] = '0' + if not module.params["validate_certs"]: + os.environ["PYTHONHTTPSVERIFY"] = "0" def get_jenkins_connection(self): try: @@ -216,7 +213,7 @@ def get_jenkins_connection(self): else: return jenkins.Jenkins(self.jenkins_url) except Exception as e: - self.module.fail_json(msg=f'Unable to connect to Jenkins server, {e}', exception=traceback.format_exc()) + self.module.fail_json(msg=f"Unable to connect to Jenkins server, {e}", exception=traceback.format_exc()) def get_job_status(self): try: @@ -224,23 +221,24 @@ def get_job_status(self): if "color" not in response: return self.EXCL_STATE else: - return to_native(response['color']) + return to_native(response["color"]) except Exception as e: - self.module.fail_json(msg=f'Unable to fetch job information, {e}', exception=traceback.format_exc()) + self.module.fail_json(msg=f"Unable to fetch job information, {e}", exception=traceback.format_exc()) def job_exists(self): try: return bool(self.server.job_exists(self.name)) except Exception as e: - self.module.fail_json(msg=f'Unable to validate if job exists, {e} for {self.jenkins_url}', - exception=traceback.format_exc()) + self.module.fail_json( + msg=f"Unable to validate if job exists, {e} for {self.jenkins_url}", 
exception=traceback.format_exc() + ) def get_config(self): return job_config_to_string(self.config) def get_current_config(self): - return job_config_to_string(self.server.get_job_config(self.name).encode('utf-8')) + return job_config_to_string(self.server.get_job_config(self.name).encode("utf-8")) def has_config_changed(self): # config is optional, if not provided we keep the current config as is @@ -250,8 +248,8 @@ def has_config_changed(self): config_file = self.get_config() machine_file = self.get_current_config() - self.result['diff']['after'] = config_file - self.result['diff']['before'] = machine_file + self.result["diff"]["after"] = config_file + self.result["diff"]["before"] = machine_file if machine_file != config_file: return True @@ -259,7 +257,7 @@ def has_config_changed(self): def present_job(self): if self.config is None and self.enabled is None: - self.module.fail_json(msg='one of the following params is required on state=present: config,enabled') + self.module.fail_json(msg="one of the following params is required on state=present: config,enabled") if not self.job_exists(): self.create_job() @@ -285,82 +283,87 @@ def update_job(self): # Handle job config if self.has_config_changed(): - self.result['changed'] = True + self.result["changed"] = True if not self.module.check_mode: self.server.reconfig_job(self.name, self.get_config()) # Handle job disable/enable elif status != self.EXCL_STATE and self.has_state_changed(status): - self.result['changed'] = True + self.result["changed"] = True if not self.module.check_mode: self.switch_state() except Exception as e: - self.module.fail_json(msg=f'Unable to reconfigure job, {e} for {self.jenkins_url}', - exception=traceback.format_exc()) + self.module.fail_json( + msg=f"Unable to reconfigure job, {e} for {self.jenkins_url}", exception=traceback.format_exc() + ) def create_job(self): if self.config is None: - self.module.fail_json(msg='missing required param: config') + self.module.fail_json(msg="missing required param: config") - self.result['changed'] = True + self.result["changed"] = True try: config_file = self.get_config() - self.result['diff']['after'] = config_file + self.result["diff"]["after"] = config_file if not self.module.check_mode: self.server.create_job(self.name, config_file) except Exception as e: - self.module.fail_json(msg=f'Unable to create job, {e} for {self.jenkins_url}', - exception=traceback.format_exc()) + self.module.fail_json( + msg=f"Unable to create job, {e} for {self.jenkins_url}", exception=traceback.format_exc() + ) def absent_job(self): if self.job_exists(): - self.result['changed'] = True - self.result['diff']['before'] = self.get_current_config() + self.result["changed"] = True + self.result["diff"]["before"] = self.get_current_config() if not self.module.check_mode: try: self.server.delete_job(self.name) except Exception as e: - self.module.fail_json(msg=f'Unable to delete job, {e} for {self.jenkins_url}', - exception=traceback.format_exc()) + self.module.fail_json( + msg=f"Unable to delete job, {e} for {self.jenkins_url}", exception=traceback.format_exc() + ) def get_result(self): result = self.result if self.job_exists(): - result['enabled'] = self.get_job_status() != "disabled" + result["enabled"] = self.get_job_status() != "disabled" else: - result['enabled'] = None + result["enabled"] = None return result def test_dependencies(module): if not python_jenkins_installed: module.fail_json( - msg=missing_required_lib("python-jenkins", - 
url="https://python-jenkins.readthedocs.io/en/latest/install.html"), - exception=JENKINS_IMP_ERR) + msg=missing_required_lib( + "python-jenkins", url="https://python-jenkins.readthedocs.io/en/latest/install.html" + ), + exception=JENKINS_IMP_ERR, + ) def job_config_to_string(xml_str): - return ET.tostring(ET.fromstring(xml_str)).decode('ascii') + return ET.tostring(ET.fromstring(xml_str)).decode("ascii") def main(): module = AnsibleModule( argument_spec=dict( - config=dict(type='str'), - name=dict(type='str', required=True), - password=dict(type='str', no_log=True), - state=dict(type='str', choices=['present', 'absent'], default="present"), - enabled=dict(type='bool'), - token=dict(type='str', no_log=True), - url=dict(type='str', default="http://localhost:8080"), - user=dict(type='str'), - validate_certs=dict(type='bool', default=True), + config=dict(type="str"), + name=dict(type="str", required=True), + password=dict(type="str", no_log=True), + state=dict(type="str", choices=["present", "absent"], default="present"), + enabled=dict(type="bool"), + token=dict(type="str", no_log=True), + url=dict(type="str", default="http://localhost:8080"), + user=dict(type="str"), + validate_certs=dict(type="bool", default=True), ), mutually_exclusive=[ - ['password', 'token'], - ['config', 'enabled'], + ["password", "token"], + ["config", "enabled"], ], supports_check_mode=True, ) @@ -368,7 +371,7 @@ def main(): test_dependencies(module) jenkins_job = JenkinsJob(module) - if module.params.get('state') == "present": + if module.params.get("state") == "present": jenkins_job.present_job() else: jenkins_job.absent_job() @@ -377,5 +380,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/jenkins_job_info.py b/plugins/modules/jenkins_job_info.py index 271cbc60c5a..08ffdc11ccb 100644 --- a/plugins/modules/jenkins_job_info.py +++ b/plugins/modules/jenkins_job_info.py @@ -144,6 +144,7 @@ JENKINS_IMP_ERR = None try: import jenkins + HAS_JENKINS = True except ImportError: JENKINS_IMP_ERR = traceback.format_exc() @@ -158,12 +159,14 @@ def get_jenkins_connection(module): password = module.params.get("password") token = module.params.get("token") - validate_certs = module.params.get('validate_certs') - if not validate_certs and hasattr(ssl, 'SSLContext'): + validate_certs = module.params.get("validate_certs") + if not validate_certs and hasattr(ssl, "SSLContext"): ssl._create_default_https_context = ssl._create_unverified_context - if validate_certs and not hasattr(ssl, 'SSLContext'): - module.fail_json(msg="Module does not support changing verification mode with python < 2.7.9." - " Either update Python or use validate_certs=false.") + if validate_certs and not hasattr(ssl, "SSLContext"): + module.fail_json( + msg="Module does not support changing verification mode with python < 2.7.9." + " Either update Python or use validate_certs=false." 
+ ) if username and (password or token): return jenkins.Jenkins(url, username, password or token) @@ -176,9 +179,11 @@ def get_jenkins_connection(module): def test_dependencies(module): if not HAS_JENKINS: module.fail_json( - msg=missing_required_lib("python-jenkins", - url="https://python-jenkins.readthedocs.io/en/latest/install.html"), - exception=JENKINS_IMP_ERR) + msg=missing_required_lib( + "python-jenkins", url="https://python-jenkins.readthedocs.io/en/latest/install.html" + ), + exception=JENKINS_IMP_ERR, + ) def get_jobs(module): @@ -190,19 +195,19 @@ def get_jobs(module): except jenkins.NotFoundException: pass else: - jobs.append({ - "name": job_info["name"], - "fullname": job_info["fullName"], - "url": job_info["url"], - "color": job_info["color"] - }) + jobs.append( + { + "name": job_info["name"], + "fullname": job_info["fullName"], + "url": job_info["url"], + "color": job_info["color"], + } + ) else: all_jobs = jenkins_conn.get_all_jobs() if module.params.get("glob"): - jobs.extend( - j for j in all_jobs - if fnmatch.fnmatch(j["fullname"], module.params.get("glob"))) + jobs.extend(j for j in all_jobs if fnmatch.fnmatch(j["fullname"], module.params.get("glob"))) else: jobs = all_jobs # python-jenkins includes the internal Jenkins class used for each job @@ -222,18 +227,18 @@ def get_jobs(module): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='str'), - glob=dict(type='str'), - color=dict(type='str'), - password=dict(type='str', no_log=True), - token=dict(type='str', no_log=True), - url=dict(type='str', default="http://localhost:8080"), - user=dict(type='str'), - validate_certs=dict(type='bool', default=True), + name=dict(type="str"), + glob=dict(type="str"), + color=dict(type="str"), + password=dict(type="str", no_log=True), + token=dict(type="str", no_log=True), + url=dict(type="str", default="http://localhost:8080"), + user=dict(type="str"), + validate_certs=dict(type="bool", default=True), ), mutually_exclusive=[ - ['password', 'token'], - ['name', 'glob'], + ["password", "token"], + ["name", "glob"], ], supports_check_mode=True, ) @@ -244,12 +249,10 @@ def main(): try: jobs = get_jobs(module) except jenkins.JenkinsException as err: - module.fail_json( - msg=f'Unable to connect to Jenkins server, {err}', - exception=traceback.format_exc()) + module.fail_json(msg=f"Unable to connect to Jenkins server, {err}", exception=traceback.format_exc()) module.exit_json(changed=False, jobs=jobs) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/jenkins_node.py b/plugins/modules/jenkins_node.py index a17bc514368..de16d6b0efc 100644 --- a/plugins/modules/jenkins_node.py +++ b/plugins/modules/jenkins_node.py @@ -167,14 +167,14 @@ class JenkinsNode: def __init__(self, module: AnsibleModule) -> None: self.module = module - self.name = module.params['name'] - self.state = module.params['state'] - self.token = module.params['token'] - self.user = module.params['user'] - self.url = module.params['url'] - self.num_executors = module.params['num_executors'] - self.labels = module.params['labels'] - self.offline_message: str | None = module.params['offline_message'] + self.name = module.params["name"] + self.state = module.params["state"] + self.token = module.params["token"] + self.user = module.params["user"] + self.url = module.params["url"] + self.num_executors = module.params["num_executors"] + self.labels = module.params["labels"] + self.offline_message: str | None = module.params["offline_message"] if self.offline_message is 
not None: self.offline_message = self.offline_message.strip() @@ -189,17 +189,17 @@ def __init__(self, module: AnsibleModule) -> None: self.instance = self.get_jenkins_instance() self.result = { - 'changed': False, - 'url': self.url, - 'user': self.user, - 'name': self.name, - 'state': self.state, - 'created': False, - 'deleted': False, - 'disabled': False, - 'enabled': False, - 'configured': False, - 'warnings': [], + "changed": False, + "url": self.url, + "user": self.user, + "name": self.name, + "state": self.state, + "created": False, + "deleted": False, + "disabled": False, + "enabled": False, + "configured": False, + "warnings": [], } def get_jenkins_instance(self): @@ -211,7 +211,7 @@ def get_jenkins_instance(self): else: return jenkins.Jenkins(self.url) except Exception as e: - self.module.fail_json(msg=f'Unable to connect to Jenkins server, {e}') + self.module.fail_json(msg=f"Unable to connect to Jenkins server, {e}") def configure_node(self, present): if not present: @@ -227,17 +227,17 @@ def configure_node(self, present): root = et.fromstring(data) if self.num_executors is not None: - elem = root.find('numExecutors') + elem = root.find("numExecutors") if elem is None: - elem = et.SubElement(root, 'numExecutors') + elem = et.SubElement(root, "numExecutors") if elem.text is None or int(elem.text) != self.num_executors: elem.text = str(self.num_executors) configured = True if self.labels is not None: - elem = root.find('label') + elem = root.find("label") if elem is None: - elem = et.SubElement(root, 'label') + elem = et.SubElement(root, "label") labels = [] if elem.text: labels = elem.text.split() @@ -253,9 +253,9 @@ def configure_node(self, present): self.instance.reconfig_node(self.name, data) - self.result['configured'] = configured + self.result["configured"] = configured if configured: - self.result['changed'] = True + self.result["changed"] = True def present_node(self, configure=True): # type: (bool) -> bool """Assert node present. @@ -266,6 +266,7 @@ def present_node(self, configure=True): # type: (bool) -> bool Returns: True if the node is present, False otherwise (i.e. is check mode). """ + def create_node(): try: self.instance.create_node(self.name, launcher=jenkins.LAUNCHER_SSH) @@ -277,7 +278,7 @@ def create_node(): self.module.fail_json(msg=f"Create node failed: {e}", exception=traceback.format_exc()) # TODO: Remove authorization workaround. - self.result['warnings'].append( + self.result["warnings"].append( "suppressed 401 Not Authorized on redirect after node created: see https://review.opendev.org/c/jjb/python-jenkins/+/931707" ) @@ -293,9 +294,9 @@ def create_node(): if configure: self.configure_node(present) - self.result['created'] = created + self.result["created"] = created if created: - self.result['changed'] = True + self.result["changed"] = True return present # Used to gate downstream queries when in check mode. @@ -311,7 +312,7 @@ def delete_node(): self.module.fail_json(msg=f"Delete node failed: {e}", exception=traceback.format_exc()) # TODO: Remove authorization workaround. 
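# --- Illustrative sketch, not from the commit above -------------------------
# The configure_node() idea in isolation: patch (or create) <numExecutors>
# and <label> in a node's config XML and report whether anything changed, so
# the caller only pushes the XML back via reconfig_node() when needed. Label
# handling is slightly simplified versus the module's token-list comparison.
import xml.etree.ElementTree as ET

def patch_node_config(xml_text, num_executors=None, labels=None):
    root = ET.fromstring(xml_text)
    changed = False
    if num_executors is not None:
        elem = root.find("numExecutors")
        if elem is None:
            elem = ET.SubElement(root, "numExecutors")
        if elem.text is None or int(elem.text) != num_executors:
            elem.text = str(num_executors)
            changed = True
    if labels is not None:
        elem = root.find("label")
        if elem is None:
            elem = ET.SubElement(root, "label")
        new_text = " ".join(labels)
        if (elem.text or "") != new_text:
            elem.text = new_text
            changed = True
    return ET.tostring(root, encoding="unicode"), changed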
- self.result['warnings'].append( + self.result["warnings"].append( "suppressed 401 Not Authorized on redirect after node deleted: see https://review.opendev.org/c/jjb/python-jenkins/+/931707" ) @@ -323,9 +324,9 @@ def delete_node(): deleted = True - self.result['deleted'] = deleted + self.result["deleted"] = deleted if deleted: - self.result['changed'] = True + self.result["changed"] = True def enabled_node(self): def get_offline(): # type: () -> bool @@ -336,6 +337,7 @@ def get_offline(): # type: () -> bool enabled = False if present: + def enable_node(): try: self.instance.enable_node(self.name) @@ -349,7 +351,7 @@ def enable_node(): self.module.fail_json(msg=f"Enable node failed: {e}", exception=traceback.format_exc()) # TODO: Remove authorization workaround. - self.result['warnings'].append( + self.result["warnings"].append( "suppressed 401 Not Authorized on redirect after node enabled: see https://review.opendev.org/c/jjb/python-jenkins/+/931707" ) @@ -367,9 +369,9 @@ def enable_node(): raise Exception("enabled_node present is False outside of check mode") enabled = False - self.result['enabled'] = enabled + self.result["enabled"] = enabled if enabled: - self.result['changed'] = True + self.result["changed"] = True def disabled_node(self): def get_offline_info(): @@ -399,9 +401,7 @@ def get_offline_info(): # Toggling the node online to set the message when toggling offline # again is not an option as during this transient online time jobs # may be scheduled on the node which is not acceptable. - self.result["warnings"].append( - "unable to change offline message when already offline" - ) + self.result["warnings"].append("unable to change offline message when already offline") else: offline_message = self.offline_message changed = True @@ -419,7 +419,7 @@ def disable_node(): self.module.fail_json(msg=f"Disable node failed: {e}", exception=traceback.format_exc()) # TODO: Remove authorization workaround. 
- self.result['warnings'].append( + self.result["warnings"].append( "suppressed 401 Not Authorized on redirect after node disabled: see https://review.opendev.org/c/jjb/python-jenkins/+/931707" ) @@ -439,10 +439,10 @@ def disable_node(): if disabled: changed = True - self.result['disabled'] = disabled + self.result["disabled"] = disabled if changed: - self.result['changed'] = True + self.result["changed"] = True self.configure_node(present) @@ -450,14 +450,14 @@ def disable_node(): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(required=True, type='str'), - url=dict(default='http://localhost:8080'), + name=dict(required=True, type="str"), + url=dict(default="http://localhost:8080"), user=dict(), token=dict(no_log=True), - state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='present'), - num_executors=dict(type='int'), - labels=dict(type='list', elements='str'), - offline_message=dict(type='str'), + state=dict(choices=["enabled", "disabled", "present", "absent"], default="present"), + num_executors=dict(type="int"), + labels=dict(type="list", elements="str"), + offline_message=dict(type="str"), ), supports_check_mode=True, ) @@ -466,12 +466,12 @@ def main(): jenkins_node = JenkinsNode(module) - state = module.params.get('state') - if state == 'enabled': + state = module.params.get("state") + if state == "enabled": jenkins_node.enabled_node() - elif state == 'disabled': + elif state == "disabled": jenkins_node.disabled_node() - elif state == 'present': + elif state == "present": jenkins_node.present_node() else: jenkins_node.absent_node() @@ -479,5 +479,5 @@ def main(): module.exit_json(**jenkins_node.result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/jenkins_plugin.py b/plugins/modules/jenkins_plugin.py index 6c7cd135867..534c40e65cd 100644 --- a/plugins/modules/jenkins_plugin.py +++ b/plugins/modules/jenkins_plugin.py @@ -359,20 +359,22 @@ def __init__(self, module): # Shortcuts for the params self.params = self.module.params - self.url = self.params['url'] - self.timeout = self.params['timeout'] + self.url = self.params["url"] + self.timeout = self.params["timeout"] # Authentication for non-Jenkins calls self.updates_url_credentials = {} - if self.params.get('updates_url_username') and self.params.get('updates_url_password'): - self.updates_url_credentials["Authorization"] = basic_auth_header(self.params['updates_url_username'], self.params['updates_url_password']) + if self.params.get("updates_url_username") and self.params.get("updates_url_password"): + self.updates_url_credentials["Authorization"] = basic_auth_header( + self.params["updates_url_username"], self.params["updates_url_password"] + ) # Crumb self.crumb = {} # Authentication for Jenkins calls - if self.params.get('url_username') and self.params.get('url_password'): - self.crumb["Authorization"] = basic_auth_header(self.params['url_username'], self.params['url_password']) + if self.params.get("url_username") and self.params.get("url_password"): + self.crumb["Authorization"] = basic_auth_header(self.params["url_username"], self.params["url_password"]) # Cookie jar for crumb session self.cookies = None @@ -385,15 +387,12 @@ def __init__(self, module): self._get_installed_plugins() def _csrf_enabled(self): - csrf_data = self._get_json_data( - f"{self.url}/api/json", 'CSRF') + csrf_data = self._get_json_data(f"{self.url}/api/json", "CSRF") - if 'useCrumbs' not in csrf_data: - self.module.fail_json( - msg="Required fields not found in the 
Crumbs response.", - details=csrf_data) + if "useCrumbs" not in csrf_data: + self.module.fail_json(msg="Required fields not found in the Crumbs response.", details=csrf_data) - return csrf_data['useCrumbs'] + return csrf_data["useCrumbs"] def _get_json_data(self, url, what, **kwargs): # Get the JSON data @@ -403,9 +402,7 @@ def _get_json_data(self, url, what, **kwargs): try: json_data = json.loads(to_native(r.read())) except Exception as e: - self.module.fail_json( - msg=f"Cannot parse {what} JSON data.", - details=to_native(e)) + self.module.fail_json(msg=f"Cannot parse {what} JSON data.", details=to_native(e)) return json_data @@ -424,17 +421,21 @@ def _get_urls_data(self, urls, what=None, msg_status=None, msg_exception=None, * self.module.debug(f"fetching url: {url}") is_jenkins_call = url.startswith(self.url) - self.module.params['force_basic_auth'] = is_jenkins_call + self.module.params["force_basic_auth"] = is_jenkins_call response, info = fetch_url( - self.module, url, timeout=self.timeout, cookies=self.cookies, + self.module, + url, + timeout=self.timeout, + cookies=self.cookies, headers=self.crumb if is_jenkins_call else self.updates_url_credentials or self.crumb, - **kwargs) - if info['status'] == 200: + **kwargs, + ) + if info["status"] == 200: return response else: err_msg = f"{msg_status}. fetching url {url} failed. response code: {info['status']}" - if info['status'] > 400: # extend error message + if info["status"] > 400: # extend error message err_msg = f"{err_msg}. response body: {info['body']}" except Exception as e: err_msg = f"{msg_status}. fetching url {url} failed. error msg: {e}" @@ -446,9 +447,7 @@ def _get_urls_data(self, urls, what=None, msg_status=None, msg_exception=None, * # failed on all urls self.module.fail_json(msg=msg_exception, details=errors) - def _get_url_data( - self, url, what=None, msg_status=None, msg_exception=None, - dont_fail=False, **kwargs): + def _get_url_data(self, url, what=None, msg_status=None, msg_exception=None, dont_fail=False, **kwargs): # Compose default messages if msg_status is None: msg_status = f"Cannot get {what}" @@ -459,18 +458,22 @@ def _get_url_data( # Get the URL data try: is_jenkins_call = url.startswith(self.url) - self.module.params['force_basic_auth'] = is_jenkins_call + self.module.params["force_basic_auth"] = is_jenkins_call response, info = fetch_url( - self.module, url, timeout=self.timeout, cookies=self.cookies, + self.module, + url, + timeout=self.timeout, + cookies=self.cookies, headers=self.crumb if is_jenkins_call else self.updates_url_credentials or self.crumb, - **kwargs) + **kwargs, + ) - if info['status'] != 200: + if info["status"] != 200: if dont_fail: - raise FailedInstallingWithPluginManager(info['msg']) + raise FailedInstallingWithPluginManager(info["msg"]) else: - self.module.fail_json(msg=msg_status, details=info['msg']) + self.module.fail_json(msg=msg_status, details=info["msg"]) except Exception as e: if dont_fail: raise FailedInstallingWithPluginManager(e) @@ -480,39 +483,34 @@ def _get_url_data( return response def _get_crumb(self): - crumb_data = self._get_json_data( - f"{self.url}/crumbIssuer/api/json", 'Crumb') + crumb_data = self._get_json_data(f"{self.url}/crumbIssuer/api/json", "Crumb") - if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data: - self.crumb[crumb_data['crumbRequestField']] = crumb_data['crumb'] + if "crumbRequestField" in crumb_data and "crumb" in crumb_data: + self.crumb[crumb_data["crumbRequestField"]] = crumb_data["crumb"] else: - self.module.fail_json( - 
msg="Required fields not found in the Crum response.", - details=crumb_data) + self.module.fail_json(msg="Required fields not found in the Crum response.", details=crumb_data) def _get_installed_plugins(self): - plugins_data = self._get_json_data( - f"{self.url}/pluginManager/api/json?depth=1", - 'list of plugins') + plugins_data = self._get_json_data(f"{self.url}/pluginManager/api/json?depth=1", "list of plugins") # Check if we got valid data - if 'plugins' not in plugins_data: + if "plugins" not in plugins_data: self.module.fail_json(msg="No valid plugin data found.") # Create final list of installed/pined plugins self.is_installed = False self.is_pinned = False self.is_enabled = False - self.installed_plugins = plugins_data['plugins'] + self.installed_plugins = plugins_data["plugins"] - for p in plugins_data['plugins']: - if p['shortName'] == self.params['name']: + for p in plugins_data["plugins"]: + if p["shortName"] == self.params["name"]: self.is_installed = True - if p['pinned']: + if p["pinned"]: self.is_pinned = True - if p['enabled']: + if p["enabled"]: self.is_enabled = True break @@ -522,50 +520,36 @@ def _install_dependencies(self): self.dependencies_states = [] for dep_name, dep_version in dependencies.items(): - if not any(p['shortName'] == dep_name and p['version'] == dep_version for p in self.installed_plugins): + if not any(p["shortName"] == dep_name and p["version"] == dep_version for p in self.installed_plugins): dep_params = self.params.copy() - dep_params['name'] = dep_name - dep_params['version'] = dep_version + dep_params["name"] = dep_name + dep_params["version"] = dep_version dep_module = AnsibleModule( - argument_spec=self.module.argument_spec, - supports_check_mode=self.module.check_mode + argument_spec=self.module.argument_spec, supports_check_mode=self.module.check_mode ) dep_module.params = dep_params dep_plugin = JenkinsPlugin(dep_module) if not dep_plugin.install(): - self.dependencies_states.append( - { - 'name': dep_name, - 'version': dep_version, - 'state': 'absent'}) + self.dependencies_states.append({"name": dep_name, "version": dep_version, "state": "absent"}) else: - self.dependencies_states.append( - { - 'name': dep_name, - 'version': dep_version, - 'state': 'present'}) + self.dependencies_states.append({"name": dep_name, "version": dep_version, "state": "present"}) else: - self.dependencies_states.append( - { - 'name': dep_name, - 'version': dep_version, - 'state': 'present'}) + self.dependencies_states.append({"name": dep_name, "version": dep_version, "state": "present"}) def _install_with_plugin_manager(self): if not self.module.check_mode: # Install the plugin (with dependencies) install_script = ( - f"""d = Jenkins.instance.updateCenter.getPlugin("{self.params['name']}").deploy(); d.get();""") + f"""d = Jenkins.instance.updateCenter.getPlugin("{self.params["name"]}").deploy(); d.get();""" + ) - if self.params['with_dependencies']: + if self.params["with_dependencies"]: install_script = ( 'Jenkins.instance.updateCenter.getPlugin("%s")' - '.getNeededDependencies().each{it.deploy()}; %s' % ( - self.params['name'], install_script)) + ".getNeededDependencies().each{it.deploy()}; %s" % (self.params["name"], install_script) + ) - script_data = { - 'script': install_script - } + script_data = {"script": install_script} data = urlencode(script_data) # Send the installation request @@ -574,7 +558,8 @@ def _install_with_plugin_manager(self): msg_status="Cannot install plugin.", msg_exception="Plugin installation has failed.", data=data, - dont_fail=True) 
+ dont_fail=True, + ) hpi_file = f"{self.params['jenkins_home']}/plugins/{self.params['name']}.hpi" @@ -583,10 +568,9 @@ def _install_with_plugin_manager(self): def install(self): changed = False - plugin_file = ( - f"{self.params['jenkins_home']}/plugins/{self.params['name']}.jpi") + plugin_file = f"{self.params['jenkins_home']}/plugins/{self.params['name']}.jpi" - if not self.is_installed and self.params['version'] in [None, 'latest']: + if not self.is_installed and self.params["version"] in [None, "latest"]: try: self._install_with_plugin_manager() changed = True @@ -595,32 +579,31 @@ def install(self): if not changed: # Check if the plugin directory exists - if not os.path.isdir(self.params['jenkins_home']): - self.module.fail_json( - msg="Jenkins home directory doesn't exist.") + if not os.path.isdir(self.params["jenkins_home"]): + self.module.fail_json(msg="Jenkins home directory doesn't exist.") checksum_old = None if os.path.isfile(plugin_file): # Make the checksum of the currently installed plugin - with open(plugin_file, 'rb') as plugin_fh: + with open(plugin_file, "rb") as plugin_fh: plugin_content = plugin_fh.read() checksum_old = hashlib.sha1(plugin_content).hexdigest() # Install dependencies - if self.params['with_dependencies']: + if self.params["with_dependencies"]: self._install_dependencies() - if self.params['version'] in [None, 'latest']: + if self.params["version"] in [None, "latest"]: # Take latest version plugin_urls = self._get_latest_plugin_urls() else: # Take specific version plugin_urls = self._get_versioned_plugin_urls() if ( - self.params['updates_expiration'] == 0 or - self.params['version'] not in [None, 'latest'] or - checksum_old is None): - + self.params["updates_expiration"] == 0 + or self.params["version"] not in [None, "latest"] + or checksum_old is None + ): # Download the plugin file directly r = self._download_plugin(plugin_urls) @@ -645,12 +628,12 @@ def install(self): self._write_file(plugin_file, data) changed = True - elif self.params['version'] == 'latest': + elif self.params["version"] == "latest": # Check for update from the updates JSON file plugin_data = self._download_updates() # If the latest version changed, download it - if checksum_old != to_bytes(plugin_data['sha1']): + if checksum_old != to_bytes(plugin_data["sha1"]): if not self.module.check_mode: r = self._download_plugin(plugin_urls) self._write_file(plugin_file, r) @@ -659,16 +642,13 @@ def install(self): # Change file attributes if needed if os.path.isfile(plugin_file): - params = { - 'dest': plugin_file - } + params = {"dest": plugin_file} params.update(self.params) file_args = self.module.load_file_common_arguments(params) if not self.module.check_mode: # Not sure how to run this in the check mode - changed = self.module.set_fs_attributes_if_different( - file_args, changed) + changed = self.module.set_fs_attributes_if_different(file_args, changed) else: # See the comment above changed = True @@ -677,22 +657,22 @@ def install(self): def _get_latest_plugin_urls(self): urls = [] - for base_url in self.params['updates_url']: - for update_segment in self.params['latest_plugins_url_segments']: + for base_url in self.params["updates_url"]: + for update_segment in self.params["latest_plugins_url_segments"]: urls.append(f"{base_url}/{update_segment}/{self.params['name']}.hpi") return urls def _get_latest_compatible_plugin_version(self, plugin_name=None): - if not hasattr(self, 'jenkins_version'): - self.module.params['force_basic_auth'] = True + if not hasattr(self, "jenkins_version"): 
+ self.module.params["force_basic_auth"] = True resp, info = fetch_url(self.module, self.url) raw_version = info.get("x-jenkins") self.jenkins_version = self.parse_version(raw_version) - name = plugin_name or self.params['name'] + name = plugin_name or self.params["name"] cache_path = f"{self.params['jenkins_home']}/ansible_jenkins_plugin_cache.json" plugin_version_urls = [] - for base_url in self.params['updates_url']: - for update_json in self.params['plugin_versions_url_segment']: + for base_url in self.params["updates_url"]: + for update_json in self.params["plugin_versions_url_segment"]: plugin_version_urls.append(f"{base_url}/{update_json}") try: # Check if file is saved localy @@ -727,29 +707,30 @@ def _get_latest_compatible_plugin_version(self, plugin_name=None): for idx, (version_title, version_info) in enumerate(sorted_versions): required_core = version_info.get("requiredCore", "0.0") if self.parse_version(required_core) <= self.jenkins_version: - return 'latest' if idx == 0 else version_title + return "latest" if idx == 0 else version_title - self.module.warn( - f"No compatible version found for plugin '{name}'. Installing latest version.") - return 'latest' + self.module.warn(f"No compatible version found for plugin '{name}'. Installing latest version.") + return "latest" def _get_versioned_plugin_urls(self): urls = [] - for base_url in self.params['updates_url']: - for versioned_segment in self.params['versioned_plugins_url_segments']: - urls.append(f"{base_url}/{versioned_segment}/{self.params['name']}/{self.params['version']}/{self.params['name']}.hpi") + for base_url in self.params["updates_url"]: + for versioned_segment in self.params["versioned_plugins_url_segments"]: + urls.append( + f"{base_url}/{versioned_segment}/{self.params['name']}/{self.params['version']}/{self.params['name']}.hpi" + ) return urls def _get_update_center_urls(self): urls = [] - for base_url in self.params['updates_url']: - for update_json in self.params['update_json_url_segment']: + for base_url in self.params["updates_url"]: + for update_json in self.params["update_json_url_segment"]: urls.append(f"{base_url}/{update_json}") return urls def _get_versioned_dependencies(self): # Get dependencies for the specified plugin version - plugin_data = self._download_updates()['dependencies'] + plugin_data = self._download_updates()["dependencies"] dependencies_info = { dep["name"]: self._get_latest_compatible_plugin_version(dep["name"]) @@ -761,11 +742,9 @@ def _get_versioned_dependencies(self): def _download_updates(self): try: - updates_file, download_updates = download_updates_file(self.params['updates_expiration']) + updates_file, download_updates = download_updates_file(self.params["updates_expiration"]) except OSError as e: - self.module.fail_json( - msg="Cannot create temporal directory.", - details=to_native(e)) + self.module.fail_json(msg="Cannot create temporal directory.", details=to_native(e)) # Download the updates file if needed if download_updates: @@ -773,9 +752,8 @@ def _download_updates(self): # Get the data r = self._get_urls_data( - urls, - msg_status="Remote updates not found.", - msg_exception="Updates download failed.") + urls, msg_status="Remote updates not found.", msg_exception="Updates download failed." 
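# --- Illustrative sketch, not from the commit above -------------------------
# The compatibility rule behind _get_latest_compatible_plugin_version():
# versions compare as integer tuples, and the newest version whose
# requiredCore is <= the running Jenkins core wins; if that is also the
# newest overall it is reported as "latest". The version data below is made
# up for illustration.
def parse_version(version_str):
    return tuple(int(x) for x in version_str.split("."))

def pick_compatible(versions, jenkins_version):
    # versions: {"<plugin version>": {"requiredCore": "<core version>"}}
    ordered = sorted(versions.items(), key=lambda kv: parse_version(kv[0]), reverse=True)
    for idx, (title, info) in enumerate(ordered):
        if parse_version(info.get("requiredCore", "0.0")) <= jenkins_version:
            return "latest" if idx == 0 else title
    return "latest"  # the module warns, then falls back to latest

print(pick_compatible(
    {"5.0.0": {"requiredCore": "2.440.1"}, "4.9.1": {"requiredCore": "2.401.3"}},
    parse_version("2.414.2"),
))  # -> "4.9.1"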
+ ) # Write the updates file tmp_update_fd, tmp_updates_file = tempfile.mkstemp() @@ -785,14 +763,14 @@ def _download_updates(self): os.close(tmp_update_fd) except IOError as e: self.module.fail_json( - msg=f"Cannot close the tmp updates file {tmp_updates_file}.", - details=to_native(e)) + msg=f"Cannot close the tmp updates file {tmp_updates_file}.", details=to_native(e) + ) else: tmp_updates_file = updates_file # Open the updates file try: - f = io.open(tmp_updates_file, encoding='utf-8') + f = io.open(tmp_updates_file, encoding="utf-8") # Read only the second line dummy = f.readline() @@ -800,29 +778,28 @@ def _download_updates(self): except IOError as e: self.module.fail_json( msg=f"Cannot open{' temporary' if tmp_updates_file != updates_file else ''} updates file.", - details=to_native(e)) + details=to_native(e), + ) except Exception as e: self.module.fail_json( msg=f"Cannot load JSON data from the{' temporary' if tmp_updates_file != updates_file else ''} updates file.", - details=to_native(e)) + details=to_native(e), + ) # Move the updates file to the right place if we could read it if tmp_updates_file != updates_file: self.module.atomic_move(os.path.abspath(tmp_updates_file), os.path.abspath(updates_file)) # Check if we have the plugin data available - if not data.get('plugins', {}).get(self.params['name']): + if not data.get("plugins", {}).get(self.params["name"]): self.module.fail_json(msg="Cannot find plugin data in the updates file.") - return data['plugins'][self.params['name']] + return data["plugins"][self.params["name"]] def _download_plugin(self, plugin_urls): # Download the plugin - return self._get_urls_data( - plugin_urls, - msg_status="Plugin not found.", - msg_exception="Plugin download failed.") + return self._get_urls_data(plugin_urls, msg_status="Plugin not found.", msg_exception="Plugin download failed.") def _write_file(self, f, data): # Store the plugin into a temp file and then move it @@ -836,9 +813,7 @@ def _write_file(self, f, data): try: os.close(tmp_f_fd) except IOError as e: - self.module.fail_json( - msg=f'Cannot close the temporal plugin file {tmp_f}.', - details=to_native(e)) + self.module.fail_json(msg=f"Cannot close the temporal plugin file {tmp_f}.", details=to_native(e)) # Move the file onto the right place self.module.atomic_move(os.path.abspath(tmp_f), os.path.abspath(f)) @@ -849,26 +824,23 @@ def uninstall(self): # Perform the action if self.is_installed: if not self.module.check_mode: - self._pm_query('doUninstall', 'Uninstallation') + self._pm_query("doUninstall", "Uninstallation") changed = True return changed def pin(self): - return self._pinning('pin') + return self._pinning("pin") def unpin(self): - return self._pinning('unpin') + return self._pinning("unpin") def _pinning(self, action): changed = False # Check if the plugin is pinned/unpinned - if ( - action == 'pin' and not self.is_pinned or - action == 'unpin' and self.is_pinned): - + if action == "pin" and not self.is_pinned or action == "unpin" and self.is_pinned: # Perform the action if not self.module.check_mode: self._pm_query(action, f"{action.capitalize()}ning") @@ -878,24 +850,19 @@ def _pinning(self, action): return changed def enable(self): - return self._enabling('enable') + return self._enabling("enable") def disable(self): - return self._enabling('disable') + return self._enabling("disable") def _enabling(self, action): changed = False # Check if the plugin is pinned/unpinned - if ( - action == 'enable' and not self.is_enabled or - action == 'disable' and self.is_enabled): - 
+ if action == "enable" and not self.is_enabled or action == "disable" and self.is_enabled: # Perform the action if not self.module.check_mode: - self._pm_query( - f"make{action.capitalize()}d", - f"{action[:-1].capitalize()}ing") + self._pm_query(f"make{action.capitalize()}d", f"{action[:-1].capitalize()}ing") changed = True @@ -906,51 +873,45 @@ def _pm_query(self, action, msg): # Send the request self._get_url_data( - url, - msg_status=f"Plugin not found. {url}", - msg_exception=f"{msg} has failed.", - method="POST") + url, msg_status=f"Plugin not found. {url}", msg_exception=f"{msg} has failed.", method="POST" + ) @staticmethod def parse_version(version_str): - return tuple(int(x) for x in version_str.split('.')) + return tuple(int(x) for x in version_str.split(".")) def main(): # Module arguments argument_spec = url_argument_spec() argument_spec.update( - group=dict(type='str', default='jenkins'), - jenkins_home=dict(type='path', default='/var/lib/jenkins'), - mode=dict(default='0644', type='raw'), - name=dict(type='str', required=True), - owner=dict(type='str', default='jenkins'), + group=dict(type="str", default="jenkins"), + jenkins_home=dict(type="path", default="/var/lib/jenkins"), + mode=dict(default="0644", type="raw"), + name=dict(type="str", required=True), + owner=dict(type="str", default="jenkins"), state=dict( - choices=[ - 'present', - 'absent', - 'pinned', - 'unpinned', - 'enabled', - 'disabled', - 'latest'], - default='present'), + choices=["present", "absent", "pinned", "unpinned", "enabled", "disabled", "latest"], default="present" + ), timeout=dict(default=30, type="int"), updates_expiration=dict(default=86400, type="int"), - updates_url=dict(type="list", elements="str", default=['https://updates.jenkins.io', - 'http://mirrors.jenkins.io']), + updates_url=dict( + type="list", elements="str", default=["https://updates.jenkins.io", "http://mirrors.jenkins.io"] + ), updates_url_username=dict(type="str"), updates_url_password=dict(type="str", no_log=True), - update_json_url_segment=dict(type="list", elements="str", default=['update-center.json', - 'updates/update-center.json']), - plugin_versions_url_segment=dict(type="list", elements="str", default=['plugin-versions.json', - 'current/plugin-versions.json']), - latest_plugins_url_segments=dict(type="list", elements="str", default=['latest']), - versioned_plugins_url_segments=dict(type="list", elements="str", default=['download/plugins', 'plugins']), - url=dict(default='http://localhost:8080'), + update_json_url_segment=dict( + type="list", elements="str", default=["update-center.json", "updates/update-center.json"] + ), + plugin_versions_url_segment=dict( + type="list", elements="str", default=["plugin-versions.json", "current/plugin-versions.json"] + ), + latest_plugins_url_segments=dict(type="list", elements="str", default=["latest"]), + versioned_plugins_url_segments=dict(type="list", elements="str", default=["download/plugins", "plugins"]), + url=dict(default="http://localhost:8080"), url_password=dict(no_log=True), version=dict(), - with_dependencies=dict(default=True, type='bool'), + with_dependencies=dict(default=True, type="bool"), ) # Module settings module = AnsibleModule( @@ -961,47 +922,50 @@ def main(): # Convert timeout to float try: - module.params['timeout'] = float(module.params['timeout']) + module.params["timeout"] = float(module.params["timeout"]) except ValueError as e: - module.fail_json( - msg=f"Cannot convert {module.params['timeout']} to float.", - details=to_native(e)) + 
module.fail_json(msg=f"Cannot convert {module.params['timeout']} to float.", details=to_native(e)) # Instantiate the JenkinsPlugin object jp = JenkinsPlugin(module) # Set version to latest if state is latest - if module.params['state'] == 'latest': - module.params['state'] = 'present' - module.params['version'] = jp._get_latest_compatible_plugin_version() + if module.params["state"] == "latest": + module.params["state"] = "present" + module.params["version"] = jp._get_latest_compatible_plugin_version() # Set version to latest compatible version if version is latest - if module.params['version'] == 'latest': - module.params['version'] = jp._get_latest_compatible_plugin_version() + if module.params["version"] == "latest": + module.params["version"] = jp._get_latest_compatible_plugin_version() # Create some shortcuts - name = module.params['name'] - state = module.params['state'] + name = module.params["name"] + state = module.params["state"] # Initial change state of the task changed = False # Perform action depending on the requested state - if state == 'present': + if state == "present": changed = jp.install() - elif state == 'absent': + elif state == "absent": changed = jp.uninstall() - elif state == 'pinned': + elif state == "pinned": changed = jp.pin() - elif state == 'unpinned': + elif state == "unpinned": changed = jp.unpin() - elif state == 'enabled': + elif state == "enabled": changed = jp.enable() - elif state == 'disabled': + elif state == "disabled": changed = jp.disable() # Print status of the change - module.exit_json(changed=changed, plugin=name, state=state, dependencies=jp.dependencies_states if hasattr(jp, 'dependencies_states') else None) + module.exit_json( + changed=changed, + plugin=name, + state=state, + dependencies=jp.dependencies_states if hasattr(jp, "dependencies_states") else None, + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/jenkins_script.py b/plugins/modules/jenkins_script.py index 291e5b61cbf..b13d7de66e2 100644 --- a/plugins/modules/jenkins_script.py +++ b/plugins/modules/jenkins_script.py @@ -111,32 +111,30 @@ def is_csrf_protection_enabled(module): - resp, info = fetch_url(module, - f"{module.params['url']}/api/json", - timeout=module.params['timeout'], - method='GET') + resp, info = fetch_url(module, f"{module.params['url']}/api/json", timeout=module.params["timeout"], method="GET") if info["status"] != 200: - module.fail_json(msg=f"HTTP error {info['status']} {info['msg']}", output='') + module.fail_json(msg=f"HTTP error {info['status']} {info['msg']}", output="") content = to_native(resp.read()) - return json.loads(content).get('useCrumbs', False) + return json.loads(content).get("useCrumbs", False) def get_crumb(module, cookies): - resp, info = fetch_url(module, - f"{module.params['url']}/crumbIssuer/api/json", - method='GET', - timeout=module.params['timeout'], - cookies=cookies) + resp, info = fetch_url( + module, + f"{module.params['url']}/crumbIssuer/api/json", + method="GET", + timeout=module.params["timeout"], + cookies=cookies, + ) if info["status"] != 200: - module.fail_json(msg=f"HTTP error {info['status']} {info['msg']}", output='') + module.fail_json(msg=f"HTTP error {info['status']} {info['msg']}", output="") content = to_native(resp.read()) return json.loads(content) def main(): - module = AnsibleModule( argument_spec=dict( script=dict(required=True, type="str"), @@ -145,53 +143,56 @@ def main(): user=dict(type="str"), password=dict(no_log=True, type="str"), timeout=dict(type="int", 
default=10), - args=dict(type="dict") + args=dict(type="dict"), ) ) - if module.params['user'] is not None: - if module.params['password'] is None: - module.fail_json(msg="password required when user provided", output='') - module.params['url_username'] = module.params['user'] - module.params['url_password'] = module.params['password'] - module.params['force_basic_auth'] = True + if module.params["user"] is not None: + if module.params["password"] is None: + module.fail_json(msg="password required when user provided", output="") + module.params["url_username"] = module.params["user"] + module.params["url_password"] = module.params["password"] + module.params["force_basic_auth"] = True - if module.params['args'] is not None: + if module.params["args"] is not None: from string import Template + try: - script_contents = Template(module.params['script']).substitute(module.params['args']) + script_contents = Template(module.params["script"]).substitute(module.params["args"]) except KeyError as err: - module.fail_json(msg=f"Error with templating variable: {err}", output='') + module.fail_json(msg=f"Error with templating variable: {err}", output="") else: - script_contents = module.params['script'] + script_contents = module.params["script"] headers = {} cookies = None if is_csrf_protection_enabled(module): cookies = cookiejar.LWPCookieJar() crumb = get_crumb(module, cookies) - headers = {crumb['crumbRequestField']: crumb['crumb']} - - resp, info = fetch_url(module, - f"{module.params['url']}/scriptText", - data=urlencode({'script': script_contents}), - headers=headers, - method="POST", - timeout=module.params['timeout'], - cookies=cookies) + headers = {crumb["crumbRequestField"]: crumb["crumb"]} + + resp, info = fetch_url( + module, + f"{module.params['url']}/scriptText", + data=urlencode({"script": script_contents}), + headers=headers, + method="POST", + timeout=module.params["timeout"], + cookies=cookies, + ) if info["status"] != 200: - module.fail_json(msg=f"HTTP error {info['status']} {info['msg']}", output='') + module.fail_json(msg=f"HTTP error {info['status']} {info['msg']}", output="") result = to_native(resp.read()) - if 'Exception:' in result and 'at java.lang.Thread' in result: - module.fail_json(msg=f"script failed with stacktrace:\n {result}", output='') + if "Exception:" in result and "at java.lang.Thread" in result: + module.fail_json(msg=f"script failed with stacktrace:\n {result}", output="") module.exit_json( output=result, ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/jira.py b/plugins/modules/jira.py index 50897b30f69..e39f76120de 100644 --- a/plugins/modules/jira.py +++ b/plugins/modules/jira.py @@ -492,109 +492,145 @@ class JIRA(StateModuleHelper): module = dict( argument_spec=dict( - attachment=dict(type='dict', options=dict( - content=dict(type='str'), - filename=dict(type='path', required=True), - mimetype=dict(type='str') - )), - uri=dict(type='str', required=True), + attachment=dict( + type="dict", + options=dict( + content=dict(type="str"), filename=dict(type="path", required=True), mimetype=dict(type="str") + ), + ), + uri=dict(type="str", required=True), operation=dict( - type='str', - choices=['attach', 'create', 'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search', 'worklog'], - aliases=['command'], required=True + type="str", + choices=[ + "attach", + "create", + "comment", + "edit", + "update", + "fetch", + "transition", + "link", + "search", + "worklog", + ], + aliases=["command"], + required=True, 
+ ), + username=dict(type="str"), + password=dict(type="str", no_log=True), + token=dict(type="str", no_log=True), + client_cert=dict(type="path"), + client_key=dict(type="path"), + project=dict( + type="str", + ), + summary=dict( + type="str", + ), + description=dict( + type="str", + ), + issuetype=dict( + type="str", + ), + issue=dict(type="str", aliases=["ticket"]), + comment=dict( + type="str", + ), + comment_visibility=dict( + type="dict", + options=dict( + type=dict(type="str", choices=["group", "role"], required=True), + value=dict(type="str", required=True), + ), + ), + status=dict( + type="str", + ), + status_id=dict( + type="str", + ), + assignee=dict( + type="str", + ), + fields=dict(default={}, type="dict"), + linktype=dict( + type="str", + ), + inwardissue=dict( + type="str", ), - username=dict(type='str'), - password=dict(type='str', no_log=True), - token=dict(type='str', no_log=True), - client_cert=dict(type='path'), - client_key=dict(type='path'), - project=dict(type='str', ), - summary=dict(type='str', ), - description=dict(type='str', ), - issuetype=dict(type='str', ), - issue=dict(type='str', aliases=['ticket']), - comment=dict(type='str', ), - comment_visibility=dict(type='dict', options=dict( - type=dict(type='str', choices=['group', 'role'], required=True), - value=dict(type='str', required=True) - )), - status=dict(type='str', ), - status_id=dict(type='str', ), - assignee=dict(type='str', ), - fields=dict(default={}, type='dict'), - linktype=dict(type='str', ), - inwardissue=dict(type='str', ), - outwardissue=dict(type='str', ), - jql=dict(type='str', ), - maxresults=dict(type='int'), - timeout=dict(type='float', default=10), - validate_certs=dict(default=True, type='bool'), - account_id=dict(type='str'), + outwardissue=dict( + type="str", + ), + jql=dict( + type="str", + ), + maxresults=dict(type="int"), + timeout=dict(type="float", default=10), + validate_certs=dict(default=True, type="bool"), + account_id=dict(type="str"), ), mutually_exclusive=[ - ['username', 'token'], - ['password', 'token'], - ['assignee', 'account_id'], - ['status', 'status_id'] - ], - required_together=[ - ['username', 'password'], - ['client_cert', 'client_key'] + ["username", "token"], + ["password", "token"], + ["assignee", "account_id"], + ["status", "status_id"], ], + required_together=[["username", "password"], ["client_cert", "client_key"]], required_one_of=[ - ['username', 'token'], + ["username", "token"], ], required_if=( - ('operation', 'attach', ['issue', 'attachment']), - ('operation', 'create', ['project', 'issuetype', 'summary']), - ('operation', 'comment', ['issue', 'comment']), - ('operation', 'workflow', ['issue', 'comment']), - ('operation', 'fetch', ['issue']), - ('operation', 'transition', ['issue']), - ('operation', 'transition', ['status', 'status_id'], True), - ('operation', 'link', ['linktype', 'inwardissue', 'outwardissue']), - ('operation', 'search', ['jql']), + ("operation", "attach", ["issue", "attachment"]), + ("operation", "create", ["project", "issuetype", "summary"]), + ("operation", "comment", ["issue", "comment"]), + ("operation", "workflow", ["issue", "comment"]), + ("operation", "fetch", ["issue"]), + ("operation", "transition", ["issue"]), + ("operation", "transition", ["status", "status_id"], True), + ("operation", "link", ["linktype", "inwardissue", "outwardissue"]), + ("operation", "search", ["jql"]), ), - supports_check_mode=False + supports_check_mode=False, ) - state_param = 'operation' + state_param = "operation" def __init_module__(self): if 
self.vars.fields is None: self.vars.fields = {} if self.vars.assignee: - self.vars.fields['assignee'] = {'name': self.vars.assignee} + self.vars.fields["assignee"] = {"name": self.vars.assignee} if self.vars.account_id: - self.vars.fields['assignee'] = {'accountId': self.vars.account_id} - self.vars.uri = self.vars.uri.strip('/') - self.vars.set('restbase', f"{self.vars.uri}/rest/api/2") + self.vars.fields["assignee"] = {"accountId": self.vars.account_id} + self.vars.uri = self.vars.uri.strip("/") + self.vars.set("restbase", f"{self.vars.uri}/rest/api/2") @cause_changes(when="success") def operation_create(self): createfields = { - 'project': {'key': self.vars.project}, - 'summary': self.vars.summary, - 'issuetype': {'name': self.vars.issuetype}} + "project": {"key": self.vars.project}, + "summary": self.vars.summary, + "issuetype": {"name": self.vars.issuetype}, + } if self.vars.description: - createfields['description'] = self.vars.description + createfields["description"] = self.vars.description # Merge in any additional or overridden fields if self.vars.fields: createfields.update(self.vars.fields) - data = {'fields': createfields} + data = {"fields": createfields} url = f"{self.vars.restbase}/issue/" self.vars.meta = self.post(url, data) @cause_changes(when="success") def operation_comment(self): - data = { - 'body': self.vars.comment - } + data = {"body": self.vars.comment} # if comment_visibility is specified restrict visibility if self.vars.comment_visibility is not None: - data['visibility'] = self.vars.comment_visibility + data["visibility"] = self.vars.comment_visibility # Use 'fields' to merge in any additional data if self.vars.fields: @@ -605,12 +641,10 @@ def operation_comment(self): @cause_changes(when="success") def operation_worklog(self): - data = { - 'comment': self.vars.comment - } + data = {"comment": self.vars.comment} # if comment_visibility is specified restrict visibility if self.vars.comment_visibility is not None: - data['visibility'] = self.vars.comment_visibility + data["visibility"] = self.vars.comment_visibility # Use 'fields' to merge in any additional data if self.vars.fields: @@ -621,9 +655,7 @@ def operation_worklog(self): @cause_changes(when="success") def operation_edit(self): - data = { - 'fields': self.vars.fields - } + data = {"fields": self.vars.fields} url = f"{self.vars.restbase}/issue/{self.vars.issue}" self.vars.meta = self.put(url, data) @@ -663,13 +695,13 @@ def operation_transition(self): elif self.vars.status_id is not None: tid = self.vars.status_id.strip() - for t in tmeta['transitions']: + for t in tmeta["transitions"]: if target is not None: - if t['name'] == target: - tid = t['id'] + if t["name"] == target: + tid = t["id"] break else: - if tid == t['id']: + if tid == t["id"]: break else: if target is not None: @@ -679,28 +711,29 @@ def operation_transition(self): fields = dict(self.vars.fields) if self.vars.summary is not None: - fields.update({'summary': self.vars.summary}) + fields.update({"summary": self.vars.summary}) if self.vars.description is not None: - fields.update({'description': self.vars.description}) + fields.update({"description": self.vars.description}) # Perform it - data = {'transition': {"id": tid}, - 'fields': fields} + data = {"transition": {"id": tid}, "fields": fields} if self.vars.comment is not None: - data.update({"update": { - "comment": [{ - "add": {"body": self.vars.comment} - }], - }}) + data.update( + { + "update": { + "comment": [{"add": {"body": self.vars.comment}}], + } + } + ) url = 
f"{self.vars.restbase}/issue/{self.vars.issue}/transitions" self.vars.meta = self.post(url, data) @cause_changes(when="success") def operation_link(self): data = { - 'type': {'name': self.vars.linktype}, - 'inwardIssue': {'key': self.vars.inwardissue}, - 'outwardIssue': {'key': self.vars.outwardissue}, + "type": {"name": self.vars.linktype}, + "inwardIssue": {"key": self.vars.inwardissue}, + "outwardIssue": {"key": self.vars.outwardissue}, } url = f"{self.vars.restbase}/issueLink/" self.vars.meta = self.post(url, data) @@ -708,15 +741,15 @@ def operation_link(self): @cause_changes(when="success") def operation_attach(self): v = self.vars - filename = v.attachment.get('filename') - content = v.attachment.get('content') + filename = v.attachment.get("filename") + content = v.attachment.get("content") if not any((filename, content)): - raise ValueError('at least one of filename or content must be provided') - mime = v.attachment.get('mimetype') + raise ValueError("at least one of filename or content must be provided") + mime = v.attachment.get("mimetype") if not os.path.isfile(filename): - raise ValueError(f'The provided filename does not exist: {filename}') + raise ValueError(f"The provided filename does not exist: {filename}") content_type, data = self._prepare_attachment(filename, content, mime) @@ -747,13 +780,13 @@ def escape_quotes(s): if not mime_type: try: - mime_type = mimetypes.guess_type(filename or '', strict=False)[0] or 'application/octet-stream' + mime_type = mimetypes.guess_type(filename or "", strict=False)[0] or "application/octet-stream" except Exception: - mime_type = 'application/octet-stream' - main_type, sep, sub_type = mime_type.partition('/') + mime_type = "application/octet-stream" + main_type, sep, sub_type = mime_type.partition("/") if not content and filename: - with open(to_bytes(filename, errors='surrogate_or_strict'), 'rb') as f: + with open(to_bytes(filename, errors="surrogate_or_strict"), "rb") as f: content = f.read() else: try: @@ -765,26 +798,16 @@ def escape_quotes(s): f"--{boundary}", f'Content-Disposition: form-data; name="file"; filename={escape_quotes(name)}', f"Content-Type: {main_type}/{sub_type}", - '', + "", to_text(content), f"--{boundary}--", - "" + "", ] - return ( - f"multipart/form-data; boundary={boundary}", - "\r\n".join(lines) - ) + return (f"multipart/form-data; boundary={boundary}", "\r\n".join(lines)) - def request( - self, - url, - data=None, - method=None, - content_type='application/json', - additional_headers=None - ): - if data and content_type == 'application/json': + def request(self, url, data=None, method=None, content_type="application/json", additional_headers=None): + if data and content_type == "application/json": data = json.dumps(data) headers = {} @@ -800,52 +823,58 @@ def request( # the requests as authorized for this user. 
if self.vars.token is not None: - headers.update({ - "Content-Type": content_type, - "Authorization": f"Bearer {self.vars.token}", - }) + headers.update( + { + "Content-Type": content_type, + "Authorization": f"Bearer {self.vars.token}", + } + ) else: - auth = to_text(base64.b64encode(to_bytes(f'{self.vars.username}:{self.vars.password}', - errors='surrogate_or_strict'))) - headers.update({ - "Content-Type": content_type, - "Authorization": f"Basic {auth}", - }) + auth = to_text( + base64.b64encode(to_bytes(f"{self.vars.username}:{self.vars.password}", errors="surrogate_or_strict")) + ) + headers.update( + { + "Content-Type": content_type, + "Authorization": f"Basic {auth}", + } + ) response, info = fetch_url( self.module, url, data=data, method=method, timeout=self.vars.timeout, headers=headers ) - if info['status'] not in (200, 201, 204): + if info["status"] not in (200, 201, 204): error = None try: - error = json.loads(info['body']) + error = json.loads(info["body"]) except Exception: msg = f'The request "{method} {url}" returned the unexpected status code {info["status"]} {info["msg"]}\n{info.get("body")}' self.module.fail_json(msg=to_native(msg), exception=traceback.format_exc()) if error: msg = [] - for key in ('errorMessages', 'errors'): + for key in ("errorMessages", "errors"): if error.get(key): msg.append(to_native(error[key])) if msg: - self.module.fail_json(msg=', '.join(msg)) + self.module.fail_json(msg=", ".join(msg)) self.module.fail_json(msg=to_native(error)) # Fallback print body, if it can't be decoded - self.module.fail_json(msg=to_native(info['body'])) + self.module.fail_json(msg=to_native(info["body"])) body = response.read() if body: - return json.loads(to_text(body, errors='surrogate_or_strict')) + return json.loads(to_text(body, errors="surrogate_or_strict")) return {} - def post(self, url, data, content_type='application/json', additional_headers=None): - return self.request(url, data=data, method='POST', content_type=content_type, - additional_headers=additional_headers) + def post(self, url, data, content_type="application/json", additional_headers=None): + return self.request( + url, data=data, method="POST", content_type=content_type, additional_headers=additional_headers + ) def put(self, url, data): - return self.request(url, data=data, method='PUT') + return self.request(url, data=data, method="PUT") def get(self, url): return self.request(url) @@ -856,5 +885,5 @@ def main(): jira.run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/kdeconfig.py b/plugins/modules/kdeconfig.py index b6b4c4a6bf4..b31c14b164f 100644 --- a/plugins/modules/kdeconfig.py +++ b/plugins/modules/kdeconfig.py @@ -130,70 +130,70 @@ def __del__(self): def run_kwriteconfig(module, cmd, path, groups, key, value): """Invoke kwriteconfig with arguments""" - args = [cmd, '--file', path, '--key', key] + args = [cmd, "--file", path, "--key", key] for group in groups: - args.extend(['--group', group]) + args.extend(["--group", group]) if isinstance(value, bool): - args.extend(['--type', 'bool']) + args.extend(["--type", "bool"]) if value: - args.append('true') + args.append("true") else: - args.append('false') + args.append("false") else: - args.extend(['--', value]) + args.extend(["--", value]) module.run_command(args, check_rc=True) def run_module(module, tmpdir, kwriteconfig): - result = dict(changed=False, msg='OK', path=module.params['path']) - b_path = to_bytes(module.params['path']) - tmpfile = os.path.join(tmpdir, 'file') + result = 
dict(changed=False, msg="OK", path=module.params["path"])
+    b_path = to_bytes(module.params["path"])
+    tmpfile = os.path.join(tmpdir, "file")
     b_tmpfile = to_bytes(tmpfile)
     diff = dict(
-        before='',
-        after='',
-        before_header=result['path'],
-        after_header=result['path'],
+        before="",
+        after="",
+        before_header=result["path"],
+        after_header=result["path"],
     )
     try:
-        with open(b_tmpfile, 'wb') as dst:
+        with open(b_tmpfile, "wb") as dst:
             try:
-                with open(b_path, 'rb') as src:
+                with open(b_path, "rb") as src:
                     b_data = src.read()
             except IOError:
-                result['changed'] = True
+                result["changed"] = True
             else:
                 dst.write(b_data)
                 try:
-                    diff['before'] = to_text(b_data)
+                    diff["before"] = to_text(b_data)
                 except UnicodeError:
-                    diff['before'] = repr(b_data)
+                    diff["before"] = repr(b_data)
     except IOError:
-        module.fail_json(msg='Unable to create temporary file', traceback=traceback.format_exc())
+        module.fail_json(msg="Unable to create temporary file", traceback=traceback.format_exc())

-    for row in module.params['values']:
-        groups = row['groups']
+    for row in module.params["values"]:
+        groups = row["groups"]
         if groups is None:
-            groups = [row['group']]
-        key = row['key']
-        value = row['bool_value']
+            groups = [row["group"]]
+        key = row["key"]
+        value = row["bool_value"]
         if value is None:
-            value = row['value']
+            value = row["value"]
         run_kwriteconfig(module, kwriteconfig, tmpfile, groups, key, value)

-    with open(b_tmpfile, 'rb') as tmpf:
+    with open(b_tmpfile, "rb") as tmpf:
         b_data = tmpf.read()
     try:
-        diff['after'] = to_text(b_data)
+        diff["after"] = to_text(b_data)
     except UnicodeError:
-        diff['after'] = repr(b_data)
+        diff["after"] = repr(b_data)

-    result['changed'] = result['changed'] or diff['after'] != diff['before']
+    result["changed"] = result["changed"] or diff["after"] != diff["before"]
     file_args = module.load_file_common_arguments(module.params)
     if module.check_mode:
-        if not result['changed']:
+        if not result["changed"]:
             shutil.copystat(b_path, b_tmpfile)
             uid, gid = module.user_and_group(b_path)
             os.chown(b_tmpfile, uid, gid)
@@ -201,49 +201,56 @@ def run_module(module, tmpdir, kwriteconfig):
             diff = {}
         else:
             diff = None
-        result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff)
+        result["changed"] = module.set_fs_attributes_if_different(file_args, result["changed"], diff=diff)
         if module._diff:
-            result['diff'] = diff
+            result["diff"] = diff
         module.exit_json(**result)

-    if result['changed']:
-        if module.params['backup'] and os.path.exists(b_path):
-            result['backup_file'] = module.backup_local(result['path'])
+    if result["changed"]:
+        if module.params["backup"] and os.path.exists(b_path):
+            result["backup_file"] = module.backup_local(result["path"])
         try:
             module.atomic_move(b_tmpfile, os.path.abspath(b_path))
         except IOError:
-            module.ansible.fail_json(msg=f"Unable to move temporary file {tmpfile} to {result['path']}, IOError", traceback=traceback.format_exc())
+            module.fail_json(
+                msg=f"Unable to move temporary file {tmpfile} to {result['path']}, IOError",
+                traceback=traceback.format_exc(),
+            )

-        if result['changed']:
-            module.set_fs_attributes_if_different(file_args, result['changed'])
+        if result["changed"]:
+            module.set_fs_attributes_if_different(file_args, result["changed"])
     else:
         if module._diff:
             diff = {}
         else:
             diff = None
-        result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff)
+        result["changed"] = module.set_fs_attributes_if_different(file_args, result["changed"], diff=diff)
         if module._diff:
- result['diff'] = diff + result["diff"] = diff module.exit_json(**result) def main(): - single_value_arg = dict(group=dict(type='str'), - groups=dict(type='list', elements='str'), - key=dict(type='str', required=True, no_log=False), - value=dict(type='str'), - bool_value=dict(type='bool')) - required_alternatives = [('group', 'groups'), ('value', 'bool_value')] + single_value_arg = dict( + group=dict(type="str"), + groups=dict(type="list", elements="str"), + key=dict(type="str", required=True, no_log=False), + value=dict(type="str"), + bool_value=dict(type="bool"), + ) + required_alternatives = [("group", "groups"), ("value", "bool_value")] module_args = dict( - values=dict(type='list', - elements='dict', - options=single_value_arg, - mutually_exclusive=required_alternatives, - required_one_of=required_alternatives, - required=True), - path=dict(type='path', required=True), - kwriteconfig_path=dict(type='path'), - backup=dict(type='bool', default=False), + values=dict( + type="list", + elements="dict", + options=single_value_arg, + mutually_exclusive=required_alternatives, + required_one_of=required_alternatives, + required=True, + ), + path=dict(type="path", required=True), + kwriteconfig_path=dict(type="path"), + backup=dict(type="bool", default=False), ) module = AnsibleModule( @@ -253,21 +260,21 @@ def main(): ) kwriteconfig = None - if module.params['kwriteconfig_path'] is not None: - kwriteconfig = module.get_bin_path(module.params['kwriteconfig_path'], required=True) + if module.params["kwriteconfig_path"] is not None: + kwriteconfig = module.get_bin_path(module.params["kwriteconfig_path"], required=True) else: - for progname in ('kwriteconfig6', 'kwriteconfig5', 'kwriteconfig', 'kwriteconfig4'): + for progname in ("kwriteconfig6", "kwriteconfig5", "kwriteconfig", "kwriteconfig4"): kwriteconfig = module.get_bin_path(progname) if kwriteconfig is not None: break if kwriteconfig is None: - module.fail_json(msg='kwriteconfig is not installed') - for v in module.params['values']: - if not v['key']: + module.fail_json(msg="kwriteconfig is not installed") + for v in module.params["values"]: + if not v["key"]: module.fail_json(msg="'key' cannot be empty") with TemporaryDirectory(dir=module.tmpdir) as tmpdir: run_module(module, tmpdir, kwriteconfig) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/kernel_blacklist.py b/plugins/modules/kernel_blacklist.py index 82395a8b26a..2943380b373 100644 --- a/plugins/modules/kernel_blacklist.py +++ b/plugins/modules/kernel_blacklist.py @@ -54,34 +54,34 @@ class Blacklist(StateModuleHelper): - output_params = ('name', 'state') + output_params = ("name", "state") module = dict( argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), - blacklist_file=dict(type='str', default='/etc/modprobe.d/blacklist-ansible.conf'), + name=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["absent", "present"]), + blacklist_file=dict(type="str", default="/etc/modprobe.d/blacklist-ansible.conf"), ), supports_check_mode=True, ) def __init_module__(self): - self.pattern = re.compile(rf'^blacklist\s+{re.escape(self.vars.name)}$') + self.pattern = re.compile(rf"^blacklist\s+{re.escape(self.vars.name)}$") self.vars.filename = self.vars.blacklist_file - self.vars.set('file_exists', os.path.exists(self.vars.filename), output=False, change=True) + self.vars.set("file_exists", os.path.exists(self.vars.filename), output=False, 
change=True) if not self.vars.file_exists: - with open(self.vars.filename, 'a'): + with open(self.vars.filename, "a"): pass self.vars.file_exists = True - self.vars.set('lines', [], change=True, diff=True) + self.vars.set("lines", [], change=True, diff=True) else: with open(self.vars.filename) as fd: - self.vars.set('lines', [x.rstrip() for x in fd.readlines()], change=True, diff=True) - self.vars.set('is_blacklisted', self._is_module_blocked(), change=True) + self.vars.set("lines", [x.rstrip() for x in fd.readlines()], change=True, diff=True) + self.vars.set("is_blacklisted", self._is_module_blocked(), change=True) def _is_module_blocked(self): for line in self.vars.lines: stripped = line.strip() - if stripped.startswith('#'): + if stripped.startswith("#"): continue if self.pattern.match(stripped): return True @@ -97,7 +97,7 @@ def state_present(self): if self.vars.is_blacklisted: return self.vars.is_blacklisted = True - self.vars.lines = self.vars.lines + [f'blacklist {self.vars.name}'] + self.vars.lines = self.vars.lines + [f"blacklist {self.vars.name}"] def __quit_module__(self): if self.has_changed() and not self.module.check_mode: @@ -111,5 +111,5 @@ def main(): Blacklist.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_authentication.py b/plugins/modules/keycloak_authentication.py index c9bf896055c..72225d61825 100644 --- a/plugins/modules/keycloak_authentication.py +++ b/plugins/modules/keycloak_authentication.py @@ -226,8 +226,13 @@ } """ -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak \ - import KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError, is_struct_included +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, + keycloak_argument_spec, + get_token, + KeycloakError, + is_struct_included, +) from ansible.module_utils.basic import AnsibleModule @@ -239,15 +244,19 @@ def find_exec_in_executions(searched_exec, executions): :return: Index of the execution, -1 if not found.. """ for i, existing_exec in enumerate(executions, start=0): - if ("providerId" in existing_exec and "providerId" in searched_exec and - existing_exec["providerId"] == searched_exec["providerId"] or - "displayName" in existing_exec and "displayName" in searched_exec and - existing_exec["displayName"] == searched_exec["displayName"]): + if ( + "providerId" in existing_exec + and "providerId" in searched_exec + and existing_exec["providerId"] == searched_exec["providerId"] + or "displayName" in existing_exec + and "displayName" in searched_exec + and existing_exec["displayName"] == searched_exec["displayName"] + ): return i return -1 -def create_or_update_executions(kc, config, realm='master'): +def create_or_update_executions(kc, config, realm="master"): """ Create or update executions for an authentication flow. :param kc: Keycloak API access. 
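The reflowed condition in find_exec_in_executions above keeps its original meaning only because "and" binds more tightly than "or" in Python: the added parentheses change the layout, not the grouping. A minimal sketch of that precedence rule, using hypothetical booleans in place of the actual providerId/displayName membership and equality tests:

# Hypothetical stand-ins for the six sub-tests in find_exec_in_executions;
# only the operator grouping is being illustrated, not the real comparisons.
a = b = c = True   # e.g. "providerId" present in both dicts and the values match
d = e = f = False  # e.g. all three "displayName" tests fail

# "and" binds tighter than "or", so
#     a and b and c or d and e and f
# is evaluated as
#     (a and b and c) or (d and e and f)
assert (a and b and c or d and e and f) == ((a and b and c) or (d and e and f))

# A full providerId match alone is therefore enough for the function to
# return the index, regardless of how the displayName comparison turns out.
assert bool(a and b and c or d and e and f)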
@@ -284,9 +293,12 @@ def create_or_update_executions(kc, config, realm='master'): if new_exec[key] is None: exclude_key.append(key) # Compare the executions to see if it need changes - if not is_struct_included(new_exec, existing_executions[exec_index], exclude_key) or exec_index != new_exec_index: + if ( + not is_struct_included(new_exec, existing_executions[exec_index], exclude_key) + or exec_index != new_exec_index + ): exec_found = True - if new_exec['index'] is None: + if new_exec["index"] is None: new_exec_index = exec_index before += f"{existing_executions[exec_index]}\n" execution = existing_executions[exec_index].copy() @@ -299,7 +311,9 @@ def create_or_update_executions(kc, config, realm='master'): exec_index = new_exec_index after += f"{new_exec}\n" elif new_exec["displayName"] is not None: - kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm, flowType=new_exec["subFlowType"]) + kc.create_subflow( + new_exec["displayName"], flow_alias_parent, realm=realm, flowType=new_exec["subFlowType"] + ) execution = kc.get_executions_representation(config, realm=realm)[exec_index] exec_found = True exec_index = new_exec_index @@ -308,14 +322,14 @@ def create_or_update_executions(kc, config, realm='master'): changed = True if exec_index != -1: # Update the existing execution - updated_exec = { - "id": execution["id"] - } + updated_exec = {"id": execution["id"]} # add the execution configuration if new_exec["authenticationConfig"] is not None: if "authenticationConfig" in execution and "id" in execution["authenticationConfig"]: kc.delete_authentication_config(execution["authenticationConfig"]["id"], realm=realm) - kc.add_authenticationConfig_to_execution(updated_exec["id"], new_exec["authenticationConfig"], realm=realm) + kc.add_authenticationConfig_to_execution( + updated_exec["id"], new_exec["authenticationConfig"], realm=realm + ) for key in new_exec: # remove unwanted key for the next API call if key not in ("flowAlias", "authenticationConfig", "subFlowType"): @@ -329,7 +343,9 @@ def create_or_update_executions(kc, config, realm='master'): after += f"{kc.get_executions_representation(config, realm=realm)[new_exec_index]}\n" return changed, dict(before=before, after=after) except Exception as e: - kc.module.fail_json(msg=f"Could not create or update executions for authentication flow {config['alias']} in realm {realm}: {e}") + kc.module.fail_json( + msg=f"Could not create or update executions for authentication flow {config['alias']} in realm {realm}: {e}" + ) def main(): @@ -341,35 +357,41 @@ def main(): argument_spec = keycloak_argument_spec() meta_args = dict( - realm=dict(type='str', required=True), - alias=dict(type='str', required=True), - providerId=dict(type='str', choices=["basic-flow", "client-flow"]), - description=dict(type='str'), - copyFrom=dict(type='str'), - authenticationExecutions=dict(type='list', elements='dict', - options=dict( - providerId=dict(type='str'), - displayName=dict(type='str'), - requirement=dict(choices=["REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL"], type='str'), - flowAlias=dict(type='str'), - authenticationConfig=dict(type='dict'), - index=dict(type='int'), - subFlowType=dict(choices=["basic-flow", "form-flow"], default='basic-flow', type='str'), - )), - state=dict(choices=["absent", "present"], default='present'), - force=dict(type='bool', default=False), + realm=dict(type="str", required=True), + alias=dict(type="str", required=True), + providerId=dict(type="str", choices=["basic-flow", "client-flow"]), + 
description=dict(type="str"), + copyFrom=dict(type="str"), + authenticationExecutions=dict( + type="list", + elements="dict", + options=dict( + providerId=dict(type="str"), + displayName=dict(type="str"), + requirement=dict(choices=["REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL"], type="str"), + flowAlias=dict(type="str"), + authenticationConfig=dict(type="dict"), + index=dict(type="int"), + subFlowType=dict(choices=["basic-flow", "form-flow"], default="basic-flow", type="str"), + ), + ), + state=dict(choices=["absent", "present"], default="present"), + force=dict(type="bool", default=False), ) argument_spec.update(meta_args) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 'auth_realm'}, - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"]] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, + ) - result = dict(changed=False, msg='', flow={}) + result = dict(changed=False, msg="", flow={}) # Obtain access token, initialize API try: @@ -379,9 +401,9 @@ def main(): kc = KeycloakAPI(module, connection_header) - realm = module.params.get('realm') - state = module.params.get('state') - force = module.params.get('force') + realm = module.params.get("realm") + state = module.params.get("state") + force = module.params.get("force") new_auth_repr = { "alias": module.params.get("alias"), @@ -397,21 +419,21 @@ def main(): # Cater for when it doesn't exist (an empty dict) if not auth_repr: - if state == 'absent': + if state == "absent": # Do nothing and exit if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = f"{new_auth_repr['alias']} absent" + result["diff"] = dict(before="", after="") + result["changed"] = False + result["end_state"] = {} + result["msg"] = f"{new_auth_repr['alias']} absent" module.exit_json(**result) - elif state == 'present': + elif state == "present": # Process a creation - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = dict(before='', after=new_auth_repr) + result["diff"] = dict(before="", after=new_auth_repr) if module.check_mode: module.exit_json(**result) @@ -424,7 +446,7 @@ def main(): # If the authentication still not exist on the server, raise an exception. 
if auth_repr is None: - result['msg'] = f"Authentication just created not found: {new_auth_repr}" + result["msg"] = f"Authentication just created not found: {new_auth_repr}" module.fail_json(**result) # Configure the executions for the flow @@ -434,17 +456,17 @@ def main(): exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) if exec_repr is not None: auth_repr["authenticationExecutions"] = exec_repr - result['end_state'] = auth_repr + result["end_state"] = auth_repr else: - if state == 'present': + if state == "present": # Process an update if force: # If force option is true # Delete the actual authentication flow - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = dict(before=auth_repr, after=new_auth_repr) + result["diff"] = dict(before=auth_repr, after=new_auth_repr) if module.check_mode: module.exit_json(**result) kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm) @@ -455,30 +477,30 @@ def main(): auth_repr = kc.create_empty_auth_flow(config=new_auth_repr, realm=realm) # If the authentication still not exist on the server, raise an exception. if auth_repr is None: - result['msg'] = f"Authentication just created not found: {new_auth_repr}" + result["msg"] = f"Authentication just created not found: {new_auth_repr}" module.fail_json(**result) # Configure the executions for the flow if module.check_mode: module.exit_json(**result) changed, diff = create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm) - result['changed'] |= changed + result["changed"] |= changed if module._diff: - result['diff'] = diff + result["diff"] = diff # Get executions created exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) if exec_repr is not None: auth_repr["authenticationExecutions"] = exec_repr - result['end_state'] = auth_repr + result["end_state"] = auth_repr else: # Process a deletion (because state was not 'present') - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = dict(before=auth_repr, after='') + result["diff"] = dict(before=auth_repr, after="") if module.check_mode: module.exit_json(**result) @@ -486,10 +508,10 @@ def main(): # delete it kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm) - result['msg'] = f"Authentication flow: {new_auth_repr['alias']} id: {auth_repr['id']} is deleted" + result["msg"] = f"Authentication flow: {new_auth_repr['alias']} id: {auth_repr['id']} is deleted" module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_authentication_required_actions.py b/plugins/modules/keycloak_authentication_required_actions.py index eda653f74d1..8375d9e26e1 100644 --- a/plugins/modules/keycloak_authentication_required_actions.py +++ b/plugins/modules/keycloak_authentication_required_actions.py @@ -172,22 +172,26 @@ type: str """ -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ - keycloak_argument_spec, get_token, KeycloakError +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, + keycloak_argument_spec, + get_token, + KeycloakError, +) from ansible.module_utils.basic import AnsibleModule def sanitize_required_actions(objects): for obj in objects: - alias = obj['alias'] - name = obj['name'] - provider_id = obj['providerId'] + alias = obj["alias"] + name = obj["name"] + provider_id = obj["providerId"] if not name: - 
obj['name'] = alias + obj["name"] = alias if provider_id != alias: - obj['providerId'] = alias + obj["providerId"] = alias return objects @@ -213,21 +217,21 @@ def main(): argument_spec = keycloak_argument_spec() meta_args = dict( - realm=dict(type='str', required=True), + realm=dict(type="str", required=True), required_actions=dict( - type='list', - elements='dict', + type="list", + elements="dict", options=dict( - alias=dict(type='str', required=True), - config=dict(type='dict'), - defaultAction=dict(type='bool'), - enabled=dict(type='bool'), - name=dict(type='str'), - priority=dict(type='int'), - providerId=dict(type='str') - ) + alias=dict(type="str", required=True), + config=dict(type="dict"), + defaultAction=dict(type="bool"), + enabled=dict(type="bool"), + name=dict(type="str"), + priority=dict(type="int"), + providerId=dict(type="str"), + ), ), - state=dict(type='str', choices=['present', 'absent'], required=True) + state=dict(type="str", choices=["present", "absent"], required=True), ) argument_spec.update(meta_args) @@ -235,12 +239,14 @@ def main(): module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 'auth_realm'}, + required_one_of=( + [["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"]] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, ) - result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + result = dict(changed=False, msg="", end_state={}, diff=dict(before={}, after={})) # Obtain access token, initialize API try: @@ -251,9 +257,9 @@ def main(): kc = KeycloakAPI(module, connection_header) # Convenience variables - realm = module.params.get('realm') - desired_required_actions = module.params.get('required_actions') - state = module.params.get('state') + realm = module.params.get("realm") + desired_required_actions = module.params.get("required_actions") + state = module.params.get("state") # Sanitize required actions desired_required_actions = sanitize_required_actions(desired_required_actions) @@ -264,7 +270,7 @@ def main(): # Get required actions before_required_actions = kc.get_required_actions(realm=realm) - if state == 'present': + if state == "present": # Initialize empty lists to hold the required actions that need to be # registered, updated, and original ones of the updated one register_required_actions = [] @@ -277,7 +283,7 @@ def main(): # Loop through the before required actions and check if the aliases match for before_required_action in before_required_actions: - if desired_required_action['alias'] == before_required_action['alias']: + if desired_required_action["alias"] == before_required_action["alias"]: update_required = False # Fill in the parameters @@ -288,7 +294,10 @@ def main(): # Loop through the keys of the desired and before required actions # and check if there are any differences between them for key in desired_required_action.keys(): - if key in before_required_action and desired_required_action[key] != before_required_action[key]: + if ( + key in before_required_action + and desired_required_action[key] != before_required_action[key] + ): update_required = True break @@ -303,13 +312,13 @@ def main(): # add it to the list of required actions to register if not found: # Check 
if name is provided - if 'name' not in desired_required_action or desired_required_action['name'] is None: + if "name" not in desired_required_action or desired_required_action["name"] is None: module.fail_json( msg=f"Unable to register required action {desired_required_action['alias']} in realm {realm}: name not included" ) # Check if provider ID is provided - if 'providerId' not in desired_required_action or desired_required_action['providerId'] is None: + if "providerId" not in desired_required_action or desired_required_action["providerId"] is None: module.fail_json( msg=f"Unable to register required action {desired_required_action['alias']} in realm {realm}: providerId not included" ) @@ -321,23 +330,20 @@ def main(): diff_required_actions = updated_required_actions.copy() diff_required_actions.extend(register_required_actions) - result['diff'] = dict( - before=before_updated_required_actions, - after=diff_required_actions - ) + result["diff"] = dict(before=before_updated_required_actions, after=diff_required_actions) # Handle changed if register_required_actions or updated_required_actions: - result['changed'] = True + result["changed"] = True # Handle check mode if module.check_mode: if register_required_actions or updated_required_actions: - result['change'] = True - result['msg'] = 'Required actions would be registered/updated' + result["change"] = True + result["msg"] = "Required actions would be registered/updated" else: - result['change'] = False - result['msg'] = 'Required actions would not be registered/updated' + result["change"] = False + result["msg"] = "Required actions would not be registered/updated" module.exit_json(**result) @@ -345,12 +351,16 @@ def main(): if register_required_actions: for register_required_action in register_required_actions: kc.register_required_action(realm=realm, rep=register_required_action) - kc.update_required_action(alias=register_required_action['alias'], realm=realm, rep=register_required_action) + kc.update_required_action( + alias=register_required_action["alias"], realm=realm, rep=register_required_action + ) # Update required actions if updated_required_actions: for updated_required_action in updated_required_actions: - kc.update_required_action(alias=updated_required_action['alias'], realm=realm, rep=updated_required_action) + kc.update_required_action( + alias=updated_required_action["alias"], realm=realm, rep=updated_required_action + ) # Initialize the final list of required actions final_required_actions = [] @@ -361,7 +371,7 @@ def main(): updated_required_action_found = False for updated_required_action in updated_required_actions: - if updated_required_action['alias'] == before_required_action['alias']: + if updated_required_action["alias"] == before_required_action["alias"]: # Merge the two dictionaries, favoring the values from updated_required_action merged_dict = {} for key in before_required_action.keys(): @@ -389,15 +399,15 @@ def main(): # Append any remaining updated_required_actions that were not merged for updated_required_action in updated_required_actions: - if not any(updated_required_action['alias'] == action['alias'] for action in final_required_actions): + if not any(updated_required_action["alias"] == action["alias"] for action in final_required_actions): final_required_actions.append(updated_required_action) # Append newly registered required actions final_required_actions.extend(register_required_actions) # Handle message and end state - result['msg'] = 'Required actions registered/updated' - 
result['end_state'] = final_required_actions + result["msg"] = "Required actions registered/updated" + result["end_state"] = final_required_actions else: # Filter out the deleted required actions final_required_actions = [] @@ -407,7 +417,7 @@ def main(): delete_action = False for desired_required_action in desired_required_actions: - if before_required_action['alias'] == desired_required_action['alias']: + if before_required_action["alias"] == desired_required_action["alias"]: delete_action = True break @@ -418,37 +428,34 @@ def main(): # Handle diff if module._diff: - result['diff'] = dict( - before=before_required_actions, - after=final_required_actions - ) + result["diff"] = dict(before=before_required_actions, after=final_required_actions) # Handle changed if delete_required_actions: - result['changed'] = True + result["changed"] = True # Handle check mode if module.check_mode: if final_required_actions: - result['change'] = True - result['msg'] = 'Required actions would be deleted' + result["change"] = True + result["msg"] = "Required actions would be deleted" else: - result['change'] = False - result['msg'] = 'Required actions would not be deleted' + result["change"] = False + result["msg"] = "Required actions would not be deleted" module.exit_json(**result) # Delete required actions if delete_required_actions: for delete_required_action in delete_required_actions: - kc.delete_required_action(alias=delete_required_action['alias'], realm=realm) + kc.delete_required_action(alias=delete_required_action["alias"], realm=realm) # Handle message and end state - result['msg'] = 'Required actions deleted' - result['end_state'] = final_required_actions + result["msg"] = "Required actions deleted" + result["end_state"] = final_required_actions module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_authz_authorization_scope.py b/plugins/modules/keycloak_authz_authorization_scope.py index 56ea251c070..26513a40e99 100644 --- a/plugins/modules/keycloak_authz_authorization_scope.py +++ b/plugins/modules/keycloak_authz_authorization_scope.py @@ -123,8 +123,12 @@ sample: http://localhost/icon.png """ -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ - keycloak_argument_spec, get_token, KeycloakError +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, + keycloak_argument_spec, + get_token, + KeycloakError, +) from ansible.module_utils.basic import AnsibleModule @@ -137,26 +141,27 @@ def main(): argument_spec = keycloak_argument_spec() meta_args = dict( - state=dict(type='str', default='present', - choices=['present', 'absent']), - name=dict(type='str', required=True), - display_name=dict(type='str'), - icon_uri=dict(type='str'), - client_id=dict(type='str', required=True), - realm=dict(type='str', required=True) + state=dict(type="str", default="present", choices=["present", "absent"]), + name=dict(type="str", required=True), + display_name=dict(type="str"), + icon_uri=dict(type="str"), + client_id=dict(type="str", required=True), + realm=dict(type="str", required=True), ) argument_spec.update(meta_args) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=( - [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 
'auth_realm'}, - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"]] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, + ) - result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + result = dict(changed=False, msg="", end_state={}, diff=dict(before={}, after={})) # Obtain access token, initialize API try: @@ -167,49 +172,48 @@ def main(): kc = KeycloakAPI(module, connection_header) # Convenience variables - state = module.params.get('state') - name = module.params.get('name') - display_name = module.params.get('display_name') - icon_uri = module.params.get('icon_uri') - client_id = module.params.get('client_id') - realm = module.params.get('realm') + state = module.params.get("state") + name = module.params.get("name") + display_name = module.params.get("display_name") + icon_uri = module.params.get("icon_uri") + client_id = module.params.get("client_id") + realm = module.params.get("realm") # Get the "id" of the client based on the usually more human-readable # "clientId" cid = kc.get_client_id(client_id, realm=realm) if not cid: - module.fail_json(msg=f'Invalid client {client_id} for realm {realm}') + module.fail_json(msg=f"Invalid client {client_id} for realm {realm}") # Get current state of the Authorization Scope using its name as the search # filter. This returns False if it is not found. - before_authz_scope = kc.get_authz_authorization_scope_by_name( - name=name, client_id=cid, realm=realm) + before_authz_scope = kc.get_authz_authorization_scope_by_name(name=name, client_id=cid, realm=realm) # Generate a JSON payload for Keycloak Admin API. This is needed for # "create" and "update" operations. desired_authz_scope = {} - desired_authz_scope['name'] = name - desired_authz_scope['displayName'] = display_name - desired_authz_scope['iconUri'] = icon_uri + desired_authz_scope["name"] = name + desired_authz_scope["displayName"] = display_name + desired_authz_scope["iconUri"] = icon_uri # Add "id" to payload for modify operations if before_authz_scope: - desired_authz_scope['id'] = before_authz_scope['id'] + desired_authz_scope["id"] = before_authz_scope["id"] # Ensure that undefined (null) optional parameters are presented as empty # strings in the desired state. This makes comparisons with current state # much easier. 
for k, v in desired_authz_scope.items(): if not v: - desired_authz_scope[k] = '' + desired_authz_scope[k] = "" # Do the above for the current state if before_authz_scope: - for k in ['displayName', 'iconUri']: + for k in ["displayName", "iconUri"]: if k not in before_authz_scope: - before_authz_scope[k] = '' + before_authz_scope[k] = "" - if before_authz_scope and state == 'present': + if before_authz_scope and state == "present": changes = False for k, v in desired_authz_scope.items(): if before_authz_scope[k] != v: @@ -220,56 +224,57 @@ def main(): if changes: if module._diff: - result['diff'] = dict(before=before_authz_scope, after=desired_authz_scope) + result["diff"] = dict(before=before_authz_scope, after=desired_authz_scope) if module.check_mode: - result['changed'] = True - result['msg'] = 'Authorization scope would be updated' + result["changed"] = True + result["msg"] = "Authorization scope would be updated" module.exit_json(**result) else: kc.update_authz_authorization_scope( - payload=desired_authz_scope, id=before_authz_scope['id'], client_id=cid, realm=realm) - result['changed'] = True - result['msg'] = 'Authorization scope updated' + payload=desired_authz_scope, id=before_authz_scope["id"], client_id=cid, realm=realm + ) + result["changed"] = True + result["msg"] = "Authorization scope updated" else: - result['changed'] = False - result['msg'] = 'Authorization scope not updated' + result["changed"] = False + result["msg"] = "Authorization scope not updated" - result['end_state'] = desired_authz_scope - elif not before_authz_scope and state == 'present': + result["end_state"] = desired_authz_scope + elif not before_authz_scope and state == "present": if module._diff: - result['diff'] = dict(before={}, after=desired_authz_scope) + result["diff"] = dict(before={}, after=desired_authz_scope) if module.check_mode: - result['changed'] = True - result['msg'] = 'Authorization scope would be created' + result["changed"] = True + result["msg"] = "Authorization scope would be created" module.exit_json(**result) else: - kc.create_authz_authorization_scope( - payload=desired_authz_scope, client_id=cid, realm=realm) - result['changed'] = True - result['msg'] = 'Authorization scope created' - result['end_state'] = desired_authz_scope - elif before_authz_scope and state == 'absent': + kc.create_authz_authorization_scope(payload=desired_authz_scope, client_id=cid, realm=realm) + result["changed"] = True + result["msg"] = "Authorization scope created" + result["end_state"] = desired_authz_scope + elif before_authz_scope and state == "absent": if module._diff: - result['diff'] = dict(before=before_authz_scope, after={}) + result["diff"] = dict(before=before_authz_scope, after={}) if module.check_mode: - result['changed'] = True - result['msg'] = 'Authorization scope would be removed' + result["changed"] = True + result["msg"] = "Authorization scope would be removed" module.exit_json(**result) else: - kc.remove_authz_authorization_scope( - id=before_authz_scope['id'], client_id=cid, realm=realm) - result['changed'] = True - result['msg'] = 'Authorization scope removed' - elif not before_authz_scope and state == 'absent': - result['changed'] = False + kc.remove_authz_authorization_scope(id=before_authz_scope["id"], client_id=cid, realm=realm) + result["changed"] = True + result["msg"] = "Authorization scope removed" + elif not before_authz_scope and state == "absent": + result["changed"] = False else: - module.fail_json(msg=f'Unable to determine what to do with authorization scope {name} of 
client {client_id} in realm {realm}') + module.fail_json( + msg=f"Unable to determine what to do with authorization scope {name} of client {client_id} in realm {realm}" + ) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_authz_custom_policy.py b/plugins/modules/keycloak_authz_custom_policy.py index 8ba58e7dd60..022f0a1cc06 100644 --- a/plugins/modules/keycloak_authz_custom_policy.py +++ b/plugins/modules/keycloak_authz_custom_policy.py @@ -110,8 +110,12 @@ sample: File delete """ -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ - keycloak_argument_spec, get_token, KeycloakError +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, + keycloak_argument_spec, + get_token, + KeycloakError, +) from ansible.module_utils.basic import AnsibleModule @@ -124,25 +128,26 @@ def main(): argument_spec = keycloak_argument_spec() meta_args = dict( - state=dict(type='str', default='present', - choices=['present', 'absent']), - name=dict(type='str', required=True), - policy_type=dict(type='str', required=True), - client_id=dict(type='str', required=True), - realm=dict(type='str', required=True) + state=dict(type="str", default="present", choices=["present", "absent"]), + name=dict(type="str", required=True), + policy_type=dict(type="str", required=True), + client_id=dict(type="str", required=True), + realm=dict(type="str", required=True), ) argument_spec.update(meta_args) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=( - [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 'auth_realm'}, - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"]] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, + ) - result = dict(changed=False, msg='', end_state={}) + result = dict(changed=False, msg="", end_state={}) # Obtain access token, initialize API try: @@ -153,55 +158,54 @@ def main(): kc = KeycloakAPI(module, connection_header) # Convenience variables - state = module.params.get('state') - name = module.params.get('name') - policy_type = module.params.get('policy_type') - client_id = module.params.get('client_id') - realm = module.params.get('realm') + state = module.params.get("state") + name = module.params.get("name") + policy_type = module.params.get("policy_type") + client_id = module.params.get("client_id") + realm = module.params.get("realm") cid = kc.get_client_id(client_id, realm=realm) if not cid: - module.fail_json(msg=f'Invalid client {client_id} for realm {realm}') + module.fail_json(msg=f"Invalid client {client_id} for realm {realm}") - before_authz_custom_policy = kc.get_authz_policy_by_name( - name=name, client_id=cid, realm=realm) + before_authz_custom_policy = kc.get_authz_policy_by_name(name=name, client_id=cid, realm=realm) desired_authz_custom_policy = {} - desired_authz_custom_policy['name'] = name - desired_authz_custom_policy['type'] = policy_type + desired_authz_custom_policy["name"] = name + desired_authz_custom_policy["type"] = policy_type # Modifying existing custom policies is not 
possible - if before_authz_custom_policy and state == 'present': - result['msg'] = f"Custom policy {name} already exists" - result['changed'] = False - result['end_state'] = desired_authz_custom_policy - elif not before_authz_custom_policy and state == 'present': + if before_authz_custom_policy and state == "present": + result["msg"] = f"Custom policy {name} already exists" + result["changed"] = False + result["end_state"] = desired_authz_custom_policy + elif not before_authz_custom_policy and state == "present": if module.check_mode: - result['msg'] = f"Would create custom policy {name}" + result["msg"] = f"Would create custom policy {name}" else: kc.create_authz_custom_policy( - payload=desired_authz_custom_policy, policy_type=policy_type, client_id=cid, realm=realm) - result['msg'] = f"Custom policy {name} created" + payload=desired_authz_custom_policy, policy_type=policy_type, client_id=cid, realm=realm + ) + result["msg"] = f"Custom policy {name} created" - result['changed'] = True - result['end_state'] = desired_authz_custom_policy - elif before_authz_custom_policy and state == 'absent': + result["changed"] = True + result["end_state"] = desired_authz_custom_policy + elif before_authz_custom_policy and state == "absent": if module.check_mode: - result['msg'] = f"Would remove custom policy {name}" + result["msg"] = f"Would remove custom policy {name}" else: - kc.remove_authz_custom_policy( - policy_id=before_authz_custom_policy['id'], client_id=cid, realm=realm) - result['msg'] = f"Custom policy {name} removed" + kc.remove_authz_custom_policy(policy_id=before_authz_custom_policy["id"], client_id=cid, realm=realm) + result["msg"] = f"Custom policy {name} removed" - result['changed'] = True - result['end_state'] = {} - elif not before_authz_custom_policy and state == 'absent': - result['msg'] = f"Custom policy {name} does not exist" - result['changed'] = False - result['end_state'] = {} + result["changed"] = True + result["end_state"] = {} + elif not before_authz_custom_policy and state == "absent": + result["msg"] = f"Custom policy {name} does not exist" + result["changed"] = False + result["end_state"] = {} module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_authz_permission.py b/plugins/modules/keycloak_authz_permission.py index 6d3babcda14..ccf444271f6 100644 --- a/plugins/modules/keycloak_authz_permission.py +++ b/plugins/modules/keycloak_authz_permission.py @@ -218,8 +218,12 @@ - 9da05cd2-b273-4354-bbd8-0c133918a454 """ -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ - keycloak_argument_spec, get_token, KeycloakError +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, + keycloak_argument_spec, + get_token, + KeycloakError, +) from ansible.module_utils.basic import AnsibleModule @@ -232,56 +236,56 @@ def main(): argument_spec = keycloak_argument_spec() meta_args = dict( - state=dict(type='str', default='present', - choices=['present', 'absent']), - name=dict(type='str', required=True), - description=dict(type='str'), - permission_type=dict(type='str', choices=['scope', 'resource'], required=True), - decision_strategy=dict(type='str', default='UNANIMOUS', - choices=['UNANIMOUS', 'AFFIRMATIVE', 'CONSENSUS']), - resources=dict(type='list', elements='str', default=[]), - scopes=dict(type='list', elements='str', default=[]), - policies=dict(type='list', elements='str', default=[]), - 
client_id=dict(type='str', required=True),
-        realm=dict(type='str', required=True)
+        state=dict(type="str", default="present", choices=["present", "absent"]),
+        name=dict(type="str", required=True),
+        description=dict(type="str"),
+        permission_type=dict(type="str", choices=["scope", "resource"], required=True),
+        decision_strategy=dict(type="str", default="UNANIMOUS", choices=["UNANIMOUS", "AFFIRMATIVE", "CONSENSUS"]),
+        resources=dict(type="list", elements="str", default=[]),
+        scopes=dict(type="list", elements="str", default=[]),
+        policies=dict(type="list", elements="str", default=[]),
+        client_id=dict(type="str", required=True),
+        realm=dict(type="str", required=True),
     )
 
     argument_spec.update(meta_args)
 
-    module = AnsibleModule(argument_spec=argument_spec,
-                           supports_check_mode=True,
-                           required_one_of=(
-                               [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
-                           required_together=([['auth_username', 'auth_password']]),
-                           required_by={'refresh_token': 'auth_realm'},
-                           )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_one_of=(
+            [["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"]]
+        ),
+        required_together=([["auth_username", "auth_password"]]),
+        required_by={"refresh_token": "auth_realm"},
+    )
 
     # Convenience variables
-    state = module.params.get('state')
-    name = module.params.get('name')
-    description = module.params.get('description')
-    permission_type = module.params.get('permission_type')
-    decision_strategy = module.params.get('decision_strategy')
-    realm = module.params.get('realm')
-    client_id = module.params.get('client_id')
-    realm = module.params.get('realm')
-    resources = module.params.get('resources')
-    scopes = module.params.get('scopes')
-    policies = module.params.get('policies')
-
-    if permission_type == 'scope' and state == 'present':
+    state = module.params.get("state")
+    name = module.params.get("name")
+    description = module.params.get("description")
+    permission_type = module.params.get("permission_type")
+    decision_strategy = module.params.get("decision_strategy")
+    realm = module.params.get("realm")
+    client_id = module.params.get("client_id")
+    resources = module.params.get("resources")
+    scopes = module.params.get("scopes")
+    policies = module.params.get("policies")
+
+    if permission_type == "scope" and state == "present":
         if scopes == []:
-            module.fail_json(msg='Scopes need to defined when permission type is set to scope!')
+            module.fail_json(msg="Scopes need to be defined when permission type is set to scope!")
 
         if len(resources) > 1:
-            module.fail_json(msg='Only one resource can be defined for a scope permission!')
+            module.fail_json(msg="Only one resource can be defined for a scope permission!")
 
-    if permission_type == 'resource' and state == 'present':
+    if permission_type == "resource" and state == "present":
         if resources == []:
-            module.fail_json(msg='A resource need to defined when permission type is set to resource!')
+            module.fail_json(msg="A resource needs to be defined when permission type is set to resource!")
 
         if scopes != []:
-            module.fail_json(msg='Scopes cannot be defined when permission type is set to resource!')
+            module.fail_json(msg="Scopes cannot be defined when permission type is set to resource!")
 
-    result = dict(changed=False, msg='', end_state={})
+    result = dict(changed=False, msg="", end_state={})
 
     # Obtain access token, initialize API
     try:
@@ -294,26 +298,25 @@ def 
main(): # Get id of the client based on client_id cid = kc.get_client_id(client_id, realm=realm) if not cid: - module.fail_json(msg=f'Invalid client {client_id} for realm {realm}') + module.fail_json(msg=f"Invalid client {client_id} for realm {realm}") # Get current state of the permission using its name as the search # filter. This returns False if it is not found. - permission = kc.get_authz_permission_by_name( - name=name, client_id=cid, realm=realm) + permission = kc.get_authz_permission_by_name(name=name, client_id=cid, realm=realm) # Generate a JSON payload for Keycloak Admin API. This is needed for # "create" and "update" operations. payload = {} - payload['name'] = name - payload['description'] = description - payload['type'] = permission_type - payload['decisionStrategy'] = decision_strategy - payload['logic'] = 'POSITIVE' - payload['scopes'] = [] - payload['resources'] = [] - payload['policies'] = [] - - if permission_type == 'scope': + payload["name"] = name + payload["description"] = description + payload["type"] = permission_type + payload["decisionStrategy"] = decision_strategy + payload["logic"] = "POSITIVE" + payload["scopes"] = [] + payload["resources"] = [] + payload["policies"] = [] + + if permission_type == "scope": # Add the resource id, if any, to the payload. While the data type is a # list, it is only possible to have one entry in it based on what Keycloak # Admin Console does. @@ -323,30 +326,36 @@ def main(): if resources: r = kc.get_authz_resource_by_name(resources[0], cid, realm) if not r: - module.fail_json(msg=f'Unable to find authorization resource with name {resources[0]} for client {cid} in realm {realm}') + module.fail_json( + msg=f"Unable to find authorization resource with name {resources[0]} for client {cid} in realm {realm}" + ) else: - payload['resources'].append(r['_id']) + payload["resources"].append(r["_id"]) - for rs in r['scopes']: - resource_scopes.append(rs['id']) + for rs in r["scopes"]: + resource_scopes.append(rs["id"]) # Generate a list of scope ids based on scope names. Fail if the # defined resource does not include all those scopes. for scope in scopes: s = kc.get_authz_authorization_scope_by_name(scope, cid, realm) - if r and not s['id'] in resource_scopes: - module.fail_json(msg=f'Resource {resources[0]} does not include scope {scope} for client {client_id} in realm {realm}') + if r and not s["id"] in resource_scopes: + module.fail_json( + msg=f"Resource {resources[0]} does not include scope {scope} for client {client_id} in realm {realm}" + ) else: - payload['scopes'].append(s['id']) + payload["scopes"].append(s["id"]) - elif permission_type == 'resource': + elif permission_type == "resource": if resources: for resource in resources: r = kc.get_authz_resource_by_name(resource, cid, realm) if not r: - module.fail_json(msg=f'Unable to find authorization resource with name {resource} for client {cid} in realm {realm}') + module.fail_json( + msg=f"Unable to find authorization resource with name {resource} for client {cid} in realm {realm}" + ) else: - payload['resources'].append(r['_id']) + payload["resources"].append(r["_id"]) # Add policy ids, if any, to the payload. 
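+    # Policies are referenced by id in the Admin API, so each configured policy
+    # name is resolved first; an unknown name fails the module.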
if policies:
@@ -354,20 +363,26 @@ def main():
             p = kc.get_authz_policy_by_name(policy, cid, realm)
             if p:
-                payload['policies'].append(p['id'])
+                payload["policies"].append(p["id"])
             else:
-                module.fail_json(msg=f'Unable to find authorization policy with name {policy} for client {client_id} in realm {realm}')
+                module.fail_json(
+                    msg=f"Unable to find authorization policy with name {policy} for client {client_id} in realm {realm}"
+                )
 
     # Add "id" to payload for update operations
     if permission:
-        payload['id'] = permission['id']
+        payload["id"] = permission["id"]
 
         # Handle the special case where the user attempts to change an already
         # existing permission's type - something that can't be done without a
         # full delete -> (re)create cycle.
-        if permission['type'] != payload['type']:
-            module.fail_json(msg=(f"Modifying the type of permission (scope/resource) is not supported: "
-                                  f"permission {permission['id']} of client {cid} in realm {realm} unchanged"))
+        if permission["type"] != payload["type"]:
+            module.fail_json(
+                msg=(
+                    f"Modifying the type of permission (scope/resource) is not supported: "
+                    f"permission {permission['id']} of client {cid} in realm {realm} unchanged"
+                )
+            )
 
     # Updating an authorization permission is tricky for several reasons.
     # Firstly, the current permission is retrieved using a _policy_ endpoint,
@@ -385,43 +400,47 @@ def main():
     #
     # The approach taken here is a).
     #
-    if permission and state == 'present':
+    if permission and state == "present":
         if module.check_mode:
-            result['msg'] = 'Notice: unable to check current resources, scopes and policies for permission. \
-                Would apply desired state without checking the current state.'
+            result["msg"] = (
+                "Notice: unable to check current resources, scopes and policies for permission. "
+                "Would apply desired state without checking the current state."
+            )
         else:
-            kc.update_authz_permission(payload=payload, permission_type=permission_type, id=permission['id'], client_id=cid, realm=realm)
-            result['msg'] = 'Notice: unable to check current resources, scopes and policies for permission. \
-                Applying desired state without checking the current state.'
+            kc.update_authz_permission(
+                payload=payload, permission_type=permission_type, id=permission["id"], client_id=cid, realm=realm
+            )
+            result["msg"] = (
+                "Notice: unable to check current resources, scopes and policies for permission. "
+                "Applying desired state without checking the current state."
+            )
 
         # Assume that something changed, although we don't know if that is the case.
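+        # The policy endpoint used to fetch the permission does not return its
+        # resources, scopes or policies, so idempotence cannot be verified here.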
- result['changed'] = True - result['end_state'] = payload - elif not permission and state == 'present': + result["changed"] = True + result["end_state"] = payload + elif not permission and state == "present": if module.check_mode: - result['msg'] = 'Would create permission' + result["msg"] = "Would create permission" else: kc.create_authz_permission(payload=payload, permission_type=permission_type, client_id=cid, realm=realm) - result['msg'] = 'Permission created' + result["msg"] = "Permission created" - result['changed'] = True - result['end_state'] = payload - elif permission and state == 'absent': + result["changed"] = True + result["end_state"] = payload + elif permission and state == "absent": if module.check_mode: - result['msg'] = 'Would remove permission' + result["msg"] = "Would remove permission" else: - kc.remove_authz_permission(id=permission['id'], client_id=cid, realm=realm) - result['msg'] = 'Permission removed' + kc.remove_authz_permission(id=permission["id"], client_id=cid, realm=realm) + result["msg"] = "Permission removed" - result['changed'] = True + result["changed"] = True - elif not permission and state == 'absent': - result['changed'] = False + elif not permission and state == "absent": + result["changed"] = False else: - module.fail_json(msg=f'Unable to determine what to do with permission {name} of client {client_id} in realm {realm}') + module.fail_json( + msg=f"Unable to determine what to do with permission {name} of client {client_id} in realm {realm}" + ) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_authz_permission_info.py b/plugins/modules/keycloak_authz_permission_info.py index 92c9205fa88..ee2ef442ba9 100644 --- a/plugins/modules/keycloak_authz_permission_info.py +++ b/plugins/modules/keycloak_authz_permission_info.py @@ -108,8 +108,12 @@ sample: {} """ -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ - keycloak_argument_spec, get_token, KeycloakError +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, + keycloak_argument_spec, + get_token, + KeycloakError, +) from ansible.module_utils.basic import AnsibleModule @@ -122,27 +126,29 @@ def main(): argument_spec = keycloak_argument_spec() meta_args = dict( - name=dict(type='str', required=True), - client_id=dict(type='str', required=True), - realm=dict(type='str', required=True) + name=dict(type="str", required=True), + client_id=dict(type="str", required=True), + realm=dict(type="str", required=True), ) argument_spec.update(meta_args) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=( - [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 'auth_realm'}, - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"]] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, + ) # Convenience variables - name = module.params.get('name') - client_id = module.params.get('client_id') - realm = module.params.get('realm') + name = module.params.get("name") + client_id = module.params.get("client_id") + realm = 
module.params.get("realm") - result = dict(changed=False, msg='', queried_state={}) + result = dict(changed=False, msg="", queried_state={}) # Obtain access token, initialize API try: @@ -155,17 +161,16 @@ def main(): # Get id of the client based on client_id cid = kc.get_client_id(client_id, realm=realm) if not cid: - module.fail_json(msg=f'Invalid client {client_id} for realm {realm}') + module.fail_json(msg=f"Invalid client {client_id} for realm {realm}") # Get current state of the permission using its name as the search # filter. This returns False if it is not found. - permission = kc.get_authz_permission_by_name( - name=name, client_id=cid, realm=realm) + permission = kc.get_authz_permission_by_name(name=name, client_id=cid, realm=realm) - result['queried_state'] = permission + result["queried_state"] = permission module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py index 59e34200f16..bfc03fd3948 100644 --- a/plugins/modules/keycloak_client.py +++ b/plugins/modules/keycloak_client.py @@ -744,16 +744,21 @@ } """ -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, + camel, + keycloak_argument_spec, + get_token, + KeycloakError, +) from ansible.module_utils.basic import AnsibleModule import copy -PROTOCOL_OPENID_CONNECT = 'openid-connect' -PROTOCOL_SAML = 'saml' -PROTOCOL_DOCKER_V2 = 'docker-v2' -CLIENT_META_DATA = ['authorizationServicesEnabled'] +PROTOCOL_OPENID_CONNECT = "openid-connect" +PROTOCOL_SAML = "saml" +PROTOCOL_DOCKER_V2 = "docker-v2" +CLIENT_META_DATA = ["authorizationServicesEnabled"] def normalise_scopes_for_behavior(desired_client, before_client, clientScopesBehavior): @@ -785,16 +790,16 @@ def normalise_scopes_for_behavior(desired_client, before_client, clientScopesBeh """ desired_client = copy.deepcopy(desired_client) before_client = copy.deepcopy(before_client) - if clientScopesBehavior == 'ignore': - desired_client['defaultClientScopes'] = copy.deepcopy(before_client['defaultClientScopes']) - desired_client['optionalClientScopes'] = copy.deepcopy(before_client['optionalClientScopes']) - elif clientScopesBehavior == 'patch': - for scope in before_client['defaultClientScopes']: - if scope not in desired_client['defaultClientScopes']: - desired_client['defaultClientScopes'].append(scope) - for scope in before_client['optionalClientScopes']: - if scope not in desired_client['optionalClientScopes']: - desired_client['optionalClientScopes'].append(scope) + if clientScopesBehavior == "ignore": + desired_client["defaultClientScopes"] = copy.deepcopy(before_client["defaultClientScopes"]) + desired_client["optionalClientScopes"] = copy.deepcopy(before_client["optionalClientScopes"]) + elif clientScopesBehavior == "patch": + for scope in before_client["defaultClientScopes"]: + if scope not in desired_client["defaultClientScopes"]: + desired_client["defaultClientScopes"].append(scope) + for scope in before_client["optionalClientScopes"]: + if scope not in desired_client["optionalClientScopes"]: + desired_client["optionalClientScopes"].append(scope) return desired_client, before_client @@ -823,15 +828,15 @@ def check_optional_scopes_not_default(desired_client, clientScopesBehavior, modu type: None description: Returns None. 
Fails the module if a scope is both default and optional.
     """
-    if clientScopesBehavior == 'ignore':
+    if clientScopesBehavior == "ignore":
         return
-    for scope in desired_client['optionalClientScopes']:
-        if scope in desired_client['defaultClientScopes']:
-            module.fail_json(msg=f'Client scope {scope} cannot be both default and optional')
+    for scope in desired_client["optionalClientScopes"]:
+        if scope in desired_client["defaultClientScopes"]:
+            module.fail_json(msg=f"Client scope {scope} cannot be both default and optional")
 
 
 def normalise_cr(clientrep, remove_ids=False):
-    """ Re-sorts any properties where the order so that diff's is minimised, and adds default values where appropriate so that the
+    """Re-sorts any properties where the order matters so that diffs are minimised, and adds default values where appropriate so that
     the change detection is more effective.
 
     :param clientrep: the clientrep dict to be sanitized
@@ -843,57 +848,59 @@ def normalise_cr(clientrep, remove_ids=False):
     clientrep = copy.deepcopy(clientrep)
 
     if remove_ids:
-        clientrep.pop('id', None)
+        clientrep.pop("id", None)
 
-    if 'defaultClientScopes' in clientrep:
-        clientrep['defaultClientScopes'] = list(sorted(clientrep['defaultClientScopes']))
+    if "defaultClientScopes" in clientrep:
+        clientrep["defaultClientScopes"] = list(sorted(clientrep["defaultClientScopes"]))
     else:
-        clientrep['defaultClientScopes'] = []
+        clientrep["defaultClientScopes"] = []
 
-    if 'optionalClientScopes' in clientrep:
-        clientrep['optionalClientScopes'] = list(sorted(clientrep['optionalClientScopes']))
+    if "optionalClientScopes" in clientrep:
+        clientrep["optionalClientScopes"] = list(sorted(clientrep["optionalClientScopes"]))
     else:
-        clientrep['optionalClientScopes'] = []
+        clientrep["optionalClientScopes"] = []
 
-    if 'redirectUris' in clientrep:
-        clientrep['redirectUris'] = list(sorted(clientrep['redirectUris']))
+    if "redirectUris" in clientrep:
+        clientrep["redirectUris"] = list(sorted(clientrep["redirectUris"]))
     else:
-        clientrep['redirectUris'] = []
+        clientrep["redirectUris"] = []
 
-    if 'protocolMappers' in clientrep:
-        clientrep['protocolMappers'] = sorted(clientrep['protocolMappers'], key=lambda x: (x.get('name'), x.get('protocol'), x.get('protocolMapper')))
-        for mapper in clientrep['protocolMappers']:
+    if "protocolMappers" in clientrep:
+        clientrep["protocolMappers"] = sorted(
+            clientrep["protocolMappers"], key=lambda x: (x.get("name"), x.get("protocol"), x.get("protocolMapper"))
+        )
+        for mapper in clientrep["protocolMappers"]:
             if remove_ids:
-                mapper.pop('id', None)
+                mapper.pop("id", None)
 
             # Convert bool to string
-            if 'config' in mapper:
-                for key, value in mapper['config'].items():
+            if "config" in mapper:
+                for key, value in mapper["config"].items():
                     if isinstance(value, bool):
-                        mapper['config'][key] = str(value).lower()
+                        mapper["config"][key] = str(value).lower()
 
             # Set to a default value.
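+            # consentRequired may be missing from one side of the comparison;
+            # defaulting it to False keeps the normalised representations aligned.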
-            mapper['consentRequired'] = mapper.get('consentRequired', False)
+            mapper["consentRequired"] = mapper.get("consentRequired", False)
     else:
-        clientrep['protocolMappers'] = []
+        clientrep["protocolMappers"] = []
 
-    if 'attributes' in clientrep:
-        for key, value in clientrep['attributes'].items():
+    if "attributes" in clientrep:
+        for key, value in clientrep["attributes"].items():
             if isinstance(value, bool):
-                clientrep['attributes'][key] = str(value).lower()
-        clientrep['attributes'].pop('client.secret.creation.time', None)
+                clientrep["attributes"][key] = str(value).lower()
+        clientrep["attributes"].pop("client.secret.creation.time", None)
     else:
-        clientrep['attributes'] = []
+        clientrep["attributes"] = []
 
-    if 'webOrigins' in clientrep:
-        clientrep['webOrigins'] = sorted(clientrep['webOrigins'])
+    if "webOrigins" in clientrep:
+        clientrep["webOrigins"] = sorted(clientrep["webOrigins"])
     else:
-        clientrep['webOrigins'] = []
+        clientrep["webOrigins"] = []
 
-    if 'redirectUris' in clientrep:
-        clientrep['redirectUris'] = sorted(clientrep['redirectUris'])
+    if "redirectUris" in clientrep:
+        clientrep["redirectUris"] = sorted(clientrep["redirectUris"])
     else:
-        clientrep['redirectUris'] = []
+        clientrep["redirectUris"] = []
 
     return clientrep
 
@@ -901,31 +908,31 @@ def normalise_cr(clientrep, remove_ids=False):
 def normalize_kc_resp(clientrep):
     # kc drops the variable 'authorizationServicesEnabled' if set to false
     # to minimize diff/changes we set it to false if not set by kc
-    if clientrep and 'authorizationServicesEnabled' not in clientrep:
-        clientrep['authorizationServicesEnabled'] = False
+    if clientrep and "authorizationServicesEnabled" not in clientrep:
+        clientrep["authorizationServicesEnabled"] = False
 
 
 def sanitize_cr(clientrep):
-    """ Removes probably sensitive details from a client representation.
+    """Removes potentially sensitive details from a client representation.
 
     :param clientrep: the clientrep dict to be sanitized
     :return: sanitized clientrep dict
     """
     result = copy.deepcopy(clientrep)
-    if 'secret' in result:
-        result['secret'] = 'no_log'
-    if 'attributes' in result:
-        attributes = result['attributes']
+    if "secret" in result:
+        result["secret"] = "no_log"
+    if "attributes" in result:
+        attributes = result["attributes"]
         if isinstance(attributes, dict):
-            if 'saml.signing.private.key' in attributes:
-                attributes['saml.signing.private.key'] = 'no_log'
-            if 'saml.encryption.private.key' in attributes:
-                attributes['saml.encryption.private.key'] = 'no_log'
+            if "saml.signing.private.key" in attributes:
+                attributes["saml.signing.private.key"] = "no_log"
+            if "saml.encryption.private.key" in attributes:
+                attributes["saml.encryption.private.key"] = "no_log"
     return normalise_cr(result)
 
 
 def get_authentication_flow_id(flow_name, realm, kc):
-    """ Get the authentication flow ID based on the flow name, realm, and Keycloak client.
+    """Get the authentication flow ID based on the flow name, realm, and Keycloak client.
 
     Args:
         flow_name (str): The name of the authentication flow.
@@ -941,11 +948,11 @@ def get_authentication_flow_id(flow_name, realm, kc):
     flow = kc.get_authentication_flow_by_alias(flow_name, realm)
     if flow:
         return flow["id"]
-    kc.module.fail_json(msg=f'Authentification flow {flow_name} not found in realm {realm}')
+    kc.module.fail_json(msg=f"Authentication flow {flow_name} not found in realm {realm}")
 
 
 def flow_binding_from_dict_to_model(newClientFlowBinding, realm, kc):
-    """ Convert a dictionary representing client flow bindings to a model representation. 
+ """Convert a dictionary representing client flow bindings to a model representation. Args: newClientFlowBinding (dict): A dictionary containing client flow bindings. @@ -962,10 +969,7 @@ def flow_binding_from_dict_to_model(newClientFlowBinding, realm, kc): """ - modelFlow = { - "browser": None, - "direct_grant": None - } + modelFlow = {"browser": None, "direct_grant": None} for k, v in newClientFlowBinding.items(): if not v: @@ -1008,11 +1012,7 @@ def find_match(iterable, attribute, name): """ name_lower = str(name).lower() return next( - ( - value - for value in iterable - if attribute in value and str(value[attribute]).lower() == name_lower - ), + (value for value in iterable if attribute in value and str(value[attribute]).lower() == name_lower), None, ) @@ -1046,14 +1046,14 @@ def add_default_client_scopes(desired_client, before_client, realm, kc): None """ desired_default_scope = desired_client["defaultClientScopes"] - missing_scopes = [item for item in desired_default_scope if item not in before_client['defaultClientScopes']] + missing_scopes = [item for item in desired_default_scope if item not in before_client["defaultClientScopes"]] if not missing_scopes: return client_scopes = kc.get_clientscopes(realm) for name in missing_scopes: scope = find_match(client_scopes, "name", name) if scope: - kc.add_default_clientscope(scope['id'], realm, desired_client['clientId']) + kc.add_default_clientscope(scope["id"], realm, desired_client["clientId"]) def add_optional_client_scopes(desired_client, before_client, realm, kc): @@ -1085,14 +1085,14 @@ def add_optional_client_scopes(desired_client, before_client, realm, kc): None """ desired_optional_scope = desired_client["optionalClientScopes"] - missing_scopes = [item for item in desired_optional_scope if item not in before_client['optionalClientScopes']] + missing_scopes = [item for item in desired_optional_scope if item not in before_client["optionalClientScopes"]] if not missing_scopes: return client_scopes = kc.get_clientscopes(realm) for name in missing_scopes: scope = find_match(client_scopes, "name", name) if scope: - kc.add_optional_clientscope(scope['id'], realm, desired_client['clientId']) + kc.add_optional_clientscope(scope["id"], realm, desired_client["clientId"]) def remove_default_client_scopes(desired_client, before_client, realm, kc): @@ -1124,14 +1124,14 @@ def remove_default_client_scopes(desired_client, before_client, realm, kc): None """ before_default_scope = before_client["defaultClientScopes"] - missing_scopes = [item for item in before_default_scope if item not in desired_client['defaultClientScopes']] + missing_scopes = [item for item in before_default_scope if item not in desired_client["defaultClientScopes"]] if not missing_scopes: return - client_scopes = kc.get_default_clientscopes(realm, desired_client['clientId']) + client_scopes = kc.get_default_clientscopes(realm, desired_client["clientId"]) for name in missing_scopes: scope = find_match(client_scopes, "name", name) if scope: - kc.delete_default_clientscope(scope['id'], realm, desired_client['clientId']) + kc.delete_default_clientscope(scope["id"], realm, desired_client["clientId"]) def remove_optional_client_scopes(desired_client, before_client, realm, kc): @@ -1163,14 +1163,14 @@ def remove_optional_client_scopes(desired_client, before_client, realm, kc): None """ before_optional_scope = before_client["optionalClientScopes"] - missing_scopes = [item for item in before_optional_scope if item not in desired_client['optionalClientScopes']] + missing_scopes = 
[item for item in before_optional_scope if item not in desired_client["optionalClientScopes"]] if not missing_scopes: return - client_scopes = kc.get_optional_clientscopes(realm, desired_client['clientId']) + client_scopes = kc.get_optional_clientscopes(realm, desired_client["clientId"]) for name in missing_scopes: scope = find_match(client_scopes, "name", name) if scope: - kc.delete_optional_clientscope(scope['id'], realm, desired_client['clientId']) + kc.delete_optional_clientscope(scope["id"], realm, desired_client["clientId"]) def main(): @@ -1182,86 +1182,94 @@ def main(): argument_spec = keycloak_argument_spec() protmapper_spec = dict( - consentRequired=dict(type='bool'), - consentText=dict(type='str'), - id=dict(type='str'), - name=dict(type='str'), - protocol=dict(type='str', choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML, PROTOCOL_DOCKER_V2]), - protocolMapper=dict(type='str'), - config=dict(type='dict'), + consentRequired=dict(type="bool"), + consentText=dict(type="str"), + id=dict(type="str"), + name=dict(type="str"), + protocol=dict(type="str", choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML, PROTOCOL_DOCKER_V2]), + protocolMapper=dict(type="str"), + config=dict(type="dict"), ) authentication_flow_spec = dict( - browser=dict(type='str'), - browser_name=dict(type='str', aliases=['browserName']), - direct_grant=dict(type='str', aliases=['directGrant']), - direct_grant_name=dict(type='str', aliases=['directGrantName']), + browser=dict(type="str"), + browser_name=dict(type="str", aliases=["browserName"]), + direct_grant=dict(type="str", aliases=["directGrant"]), + direct_grant_name=dict(type="str", aliases=["directGrantName"]), ) meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - realm=dict(type='str', default='master'), - - id=dict(type='str'), - client_id=dict(type='str', aliases=['clientId']), - name=dict(type='str'), - description=dict(type='str'), - root_url=dict(type='str', aliases=['rootUrl']), - admin_url=dict(type='str', aliases=['adminUrl']), - base_url=dict(type='str', aliases=['baseUrl']), - surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']), - enabled=dict(type='bool'), - client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt', 'client-x509'], aliases=['clientAuthenticatorType']), - secret=dict(type='str', no_log=True), - registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True), - default_roles=dict(type='list', elements='str', aliases=['defaultRoles']), - redirect_uris=dict(type='list', elements='str', aliases=['redirectUris']), - web_origins=dict(type='list', elements='str', aliases=['webOrigins']), - not_before=dict(type='int', aliases=['notBefore']), - bearer_only=dict(type='bool', aliases=['bearerOnly']), - consent_required=dict(type='bool', aliases=['consentRequired']), - standard_flow_enabled=dict(type='bool', aliases=['standardFlowEnabled']), - implicit_flow_enabled=dict(type='bool', aliases=['implicitFlowEnabled']), - direct_access_grants_enabled=dict(type='bool', aliases=['directAccessGrantsEnabled']), - service_accounts_enabled=dict(type='bool', aliases=['serviceAccountsEnabled']), - authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']), - public_client=dict(type='bool', aliases=['publicClient']), - frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']), - protocol=dict(type='str', choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML, PROTOCOL_DOCKER_V2]), - attributes=dict(type='dict'), - 
full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']), - node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']), - registered_nodes=dict(type='dict', aliases=['registeredNodes']), - client_template=dict(type='str', aliases=['clientTemplate']), - use_template_config=dict(type='bool', aliases=['useTemplateConfig']), - use_template_scope=dict(type='bool', aliases=['useTemplateScope']), - use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']), - always_display_in_console=dict(type='bool', aliases=['alwaysDisplayInConsole']), + state=dict(default="present", choices=["present", "absent"]), + realm=dict(type="str", default="master"), + id=dict(type="str"), + client_id=dict(type="str", aliases=["clientId"]), + name=dict(type="str"), + description=dict(type="str"), + root_url=dict(type="str", aliases=["rootUrl"]), + admin_url=dict(type="str", aliases=["adminUrl"]), + base_url=dict(type="str", aliases=["baseUrl"]), + surrogate_auth_required=dict(type="bool", aliases=["surrogateAuthRequired"]), + enabled=dict(type="bool"), + client_authenticator_type=dict( + type="str", choices=["client-secret", "client-jwt", "client-x509"], aliases=["clientAuthenticatorType"] + ), + secret=dict(type="str", no_log=True), + registration_access_token=dict(type="str", aliases=["registrationAccessToken"], no_log=True), + default_roles=dict(type="list", elements="str", aliases=["defaultRoles"]), + redirect_uris=dict(type="list", elements="str", aliases=["redirectUris"]), + web_origins=dict(type="list", elements="str", aliases=["webOrigins"]), + not_before=dict(type="int", aliases=["notBefore"]), + bearer_only=dict(type="bool", aliases=["bearerOnly"]), + consent_required=dict(type="bool", aliases=["consentRequired"]), + standard_flow_enabled=dict(type="bool", aliases=["standardFlowEnabled"]), + implicit_flow_enabled=dict(type="bool", aliases=["implicitFlowEnabled"]), + direct_access_grants_enabled=dict(type="bool", aliases=["directAccessGrantsEnabled"]), + service_accounts_enabled=dict(type="bool", aliases=["serviceAccountsEnabled"]), + authorization_services_enabled=dict(type="bool", aliases=["authorizationServicesEnabled"]), + public_client=dict(type="bool", aliases=["publicClient"]), + frontchannel_logout=dict(type="bool", aliases=["frontchannelLogout"]), + protocol=dict(type="str", choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML, PROTOCOL_DOCKER_V2]), + attributes=dict(type="dict"), + full_scope_allowed=dict(type="bool", aliases=["fullScopeAllowed"]), + node_re_registration_timeout=dict(type="int", aliases=["nodeReRegistrationTimeout"]), + registered_nodes=dict(type="dict", aliases=["registeredNodes"]), + client_template=dict(type="str", aliases=["clientTemplate"]), + use_template_config=dict(type="bool", aliases=["useTemplateConfig"]), + use_template_scope=dict(type="bool", aliases=["useTemplateScope"]), + use_template_mappers=dict(type="bool", aliases=["useTemplateMappers"]), + always_display_in_console=dict(type="bool", aliases=["alwaysDisplayInConsole"]), authentication_flow_binding_overrides=dict( - type='dict', - aliases=['authenticationFlowBindingOverrides'], + type="dict", + aliases=["authenticationFlowBindingOverrides"], options=authentication_flow_spec, - required_one_of=[['browser', 'direct_grant', 'browser_name', 'direct_grant_name']], - mutually_exclusive=[['browser', 'browser_name'], ['direct_grant', 'direct_grant_name']], + required_one_of=[["browser", "direct_grant", "browser_name", "direct_grant_name"]], + mutually_exclusive=[["browser", 
"browser_name"], ["direct_grant", "direct_grant_name"]], ), - protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), - authorization_settings=dict(type='dict', aliases=['authorizationSettings']), - client_scopes_behavior=dict(type='str', aliases=['clientScopesBehavior'], choices=['ignore', 'patch', 'idempotent'], default='ignore'), - default_client_scopes=dict(type='list', elements='str', aliases=['defaultClientScopes']), - optional_client_scopes=dict(type='list', elements='str', aliases=['optionalClientScopes']), + protocol_mappers=dict(type="list", elements="dict", options=protmapper_spec, aliases=["protocolMappers"]), + authorization_settings=dict(type="dict", aliases=["authorizationSettings"]), + client_scopes_behavior=dict( + type="str", aliases=["clientScopesBehavior"], choices=["ignore", "patch", "idempotent"], default="ignore" + ), + default_client_scopes=dict(type="list", elements="str", aliases=["defaultClientScopes"]), + optional_client_scopes=dict(type="list", elements="str", aliases=["optionalClientScopes"]), ) argument_spec.update(meta_args) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['client_id', 'id'], - ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 'auth_realm'}, - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [ + ["client_id", "id"], + ["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"], + ] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, + ) - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + result = dict(changed=False, msg="", diff={}, proposed={}, existing={}, end_state={}) # Obtain access token, initialize API try: @@ -1271,21 +1279,23 @@ def main(): kc = KeycloakAPI(module, connection_header) - realm = module.params.get('realm') - cid = module.params.get('id') - clientScopesBehavior = module.params.get('client_scopes_behavior') - state = module.params.get('state') + realm = module.params.get("realm") + cid = module.params.get("id") + clientScopesBehavior = module.params.get("client_scopes_behavior") + state = module.params.get("state") # Filter and map the parameters names that apply to the client - client_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and - module.params.get(x) is not None] + client_params = [ + x + for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ["state", "realm"] and module.params.get(x) is not None + ] # See if it already exists in Keycloak if cid is None: - before_client = kc.get_client_by_clientid(module.params.get('client_id'), realm=realm) + before_client = kc.get_client_by_clientid(module.params.get("client_id"), realm=realm) if before_client is not None: - cid = before_client['id'] + cid = before_client["id"] else: before_client = kc.get_client_by_id(cid, realm=realm) @@ -1302,15 +1312,15 @@ def main(): # Unfortunately, the ansible argument spec checker introduces variables with null values when # they are not specified - if client_param == 'protocol_mappers': + if client_param == "protocol_mappers": new_param_value = [{k: v for k, v in x.items() if v is not None} for x in new_param_value] - 
elif client_param == 'authentication_flow_binding_overrides': + elif client_param == "authentication_flow_binding_overrides": new_param_value = flow_binding_from_dict_to_model(new_param_value, realm, kc) - elif client_param == 'attributes' and 'attributes' in before_client: - attributes_copy = copy.deepcopy(before_client['attributes']) + elif client_param == "attributes" and "attributes" in before_client: + attributes_copy = copy.deepcopy(before_client["attributes"]) attributes_copy.update(new_param_value) new_param_value = attributes_copy - elif client_param in ['clientScopesBehavior', 'client_scopes_behavior']: + elif client_param in ["clientScopesBehavior", "client_scopes_behavior"]: continue changeset[camel(client_param)] = new_param_value @@ -1319,65 +1329,66 @@ def main(): desired_client = copy.deepcopy(before_client) desired_client.update(changeset) - result['proposed'] = sanitize_cr(changeset) - result['existing'] = sanitize_cr(before_client) + result["proposed"] = sanitize_cr(changeset) + result["existing"] = sanitize_cr(before_client) # Cater for when it doesn't exist (an empty dict) if not before_client: - if state == 'absent': + if state == "absent": # Do nothing and exit if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = 'Client does not exist; doing nothing.' + result["diff"] = dict(before="", after="") + result["changed"] = False + result["end_state"] = {} + result["msg"] = "Client does not exist; doing nothing." module.exit_json(**result) # Process a creation - result['changed'] = True + result["changed"] = True - if 'clientId' not in desired_client: - module.fail_json(msg='client_id needs to be specified when creating a new client') - if 'protocol' not in desired_client: - desired_client['protocol'] = PROTOCOL_OPENID_CONNECT + if "clientId" not in desired_client: + module.fail_json(msg="client_id needs to be specified when creating a new client") + if "protocol" not in desired_client: + desired_client["protocol"] = PROTOCOL_OPENID_CONNECT if module._diff: - result['diff'] = dict(before='', after=sanitize_cr(desired_client)) + result["diff"] = dict(before="", after=sanitize_cr(desired_client)) if module.check_mode: module.exit_json(**result) # create it kc.create_client(desired_client, realm=realm) - after_client = kc.get_client_by_clientid(desired_client['clientId'], realm=realm) + after_client = kc.get_client_by_clientid(desired_client["clientId"], realm=realm) - result['end_state'] = sanitize_cr(after_client) + result["end_state"] = sanitize_cr(after_client) - result['msg'] = f"Client {desired_client['clientId']} has been created." + result["msg"] = f"Client {desired_client['clientId']} has been created." 
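+        # end_state above is read back from Keycloak after creation, so any
+        # server-side defaults are reflected in the module output.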
module.exit_json(**result) else: - if state == 'present': + if state == "present": # We can only compare the current client with the proposed updates we have - desired_client_with_scopes, before_client_with_scopes = normalise_scopes_for_behavior(desired_client, before_client, clientScopesBehavior) + desired_client_with_scopes, before_client_with_scopes = normalise_scopes_for_behavior( + desired_client, before_client, clientScopesBehavior + ) check_optional_scopes_not_default(desired_client, clientScopesBehavior, module) before_norm = normalise_cr(before_client_with_scopes, remove_ids=True) desired_norm = normalise_cr(desired_client_with_scopes, remove_ids=True) # no changes if before_norm == desired_norm: - result['changed'] = False - result['end_state'] = sanitize_cr(before_client) - result['msg'] = f"No changes required for Client {desired_client['clientId']}." + result["changed"] = False + result["end_state"] = sanitize_cr(before_client) + result["msg"] = f"No changes required for Client {desired_client['clientId']}." module.exit_json(**result) # Process an update - result['changed'] = True + result["changed"] = True if module.check_mode: - result['end_state'] = sanitize_cr(desired_client_with_scopes) + result["end_state"] = sanitize_cr(desired_client_with_scopes) if module._diff: - result['diff'] = dict(before=sanitize_cr(before_client), - after=sanitize_cr(desired_client)) + result["diff"] = dict(before=sanitize_cr(before_client), after=sanitize_cr(desired_client)) module.exit_json(**result) # do the update @@ -1392,34 +1403,33 @@ def main(): normalize_kc_resp(after_client) if module._diff: - result['diff'] = dict(before=sanitize_cr(before_client), - after=sanitize_cr(after_client)) + result["diff"] = dict(before=sanitize_cr(before_client), after=sanitize_cr(after_client)) - result['end_state'] = sanitize_cr(after_client) + result["end_state"] = sanitize_cr(after_client) - result['msg'] = f"Client {desired_client['clientId']} has been updated." + result["msg"] = f"Client {desired_client['clientId']} has been updated." module.exit_json(**result) else: # Process a deletion (because state was not 'present') - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = dict(before=sanitize_cr(before_client), after='') + result["diff"] = dict(before=sanitize_cr(before_client), after="") if module.check_mode: module.exit_json(**result) # delete it kc.delete_client(cid, realm=realm) - result['proposed'] = {} + result["proposed"] = {} - result['end_state'] = {} + result["end_state"] = {} - result['msg'] = f"Client {before_client['clientId']} has been deleted." + result["msg"] = f"Client {before_client['clientId']} has been deleted." 
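+        # Deletion leaves proposed/end_state empty above; only the diff
+        # retains the pre-deletion representation.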
     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/keycloak_client_rolemapping.py b/plugins/modules/keycloak_client_rolemapping.py
index 250b9e8c485..cc5fc681697 100644
--- a/plugins/modules/keycloak_client_rolemapping.py
+++ b/plugins/modules/keycloak_client_rolemapping.py
@@ -239,7 +239,10 @@
 """

 from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
-    KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError,
+    KeycloakAPI,
+    keycloak_argument_spec,
+    get_token,
+    KeycloakError,
 )

 from ansible.module_utils.basic import AnsibleModule
@@ -253,37 +256,38 @@ def main():
     argument_spec = keycloak_argument_spec()

     roles_spec = dict(
-        name=dict(type='str'),
-        id=dict(type='str'),
+        name=dict(type="str"),
+        id=dict(type="str"),
     )

     meta_args = dict(
-        state=dict(default='present', choices=['present', 'absent']),
-        realm=dict(default='master'),
-        gid=dict(type='str'),
-        group_name=dict(type='str'),
+        state=dict(default="present", choices=["present", "absent"]),
+        realm=dict(default="master"),
+        gid=dict(type="str"),
+        group_name=dict(type="str"),
         parents=dict(
-            type='list', elements='dict',
-            options=dict(
-                id=dict(type='str'),
-                name=dict(type='str')
-            ),
+            type="list",
+            elements="dict",
+            options=dict(id=dict(type="str"), name=dict(type="str")),
         ),
-        cid=dict(type='str'),
-        client_id=dict(type='str'),
-        roles=dict(type='list', elements='dict', options=roles_spec),
+        cid=dict(type="str"),
+        client_id=dict(type="str"),
+        roles=dict(type="list", elements="dict", options=roles_spec),
     )

     argument_spec.update(meta_args)

-    module = AnsibleModule(argument_spec=argument_spec,
-                           supports_check_mode=True,
-                           required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
-                           required_together=([['auth_username', 'auth_password']]),
-                           required_by={'refresh_token': 'auth_realm'},
-                           )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_one_of=(
+            [["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"]]
+        ),
+        required_together=([["auth_username", "auth_password"]]),
+        required_by={"refresh_token": "auth_realm"},
+    )

-    result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+    result = dict(changed=False, msg="", diff={}, proposed={}, existing={}, end_state={})

     # Obtain access token, initialize API
     try:
@@ -293,111 +297,117 @@ def main():

     kc = KeycloakAPI(module, connection_header)

-    realm = module.params.get('realm')
-    state = module.params.get('state')
-    cid = module.params.get('cid')
-    client_id = module.params.get('client_id')
-    gid = module.params.get('gid')
-    group_name = module.params.get('group_name')
-    roles = module.params.get('roles')
-    parents = module.params.get('parents')
+    realm = module.params.get("realm")
+    state = module.params.get("state")
+    cid = module.params.get("cid")
+    client_id = module.params.get("client_id")
+    gid = module.params.get("gid")
+    group_name = module.params.get("group_name")
+    roles = module.params.get("roles")
+    parents = module.params.get("parents")

     # Check the parameters
     if cid is None and client_id is None:
-        module.fail_json(msg='Either the `client_id` or `cid` has to be specified.')
+        module.fail_json(msg="Either the `client_id` or `cid` has to be specified.")
     if gid is None and group_name is None:
-        module.fail_json(msg='Either the `group_name` or `gid` has to be specified.')
+        module.fail_json(msg="Either the `group_name` or `gid` has to be specified.")

     # Get the potential missing parameters
     if gid is None:
         group_rep = kc.get_group_by_name(group_name, realm=realm, parents=parents)
         if group_rep is not None:
-            gid = group_rep['id']
+            gid = group_rep["id"]
         else:
-            module.fail_json(msg=f'Could not fetch group {group_name}:')
+            module.fail_json(msg=f"Could not fetch group {group_name}:")
     if cid is None:
         cid = kc.get_client_id(client_id, realm=realm)
         if cid is None:
-            module.fail_json(msg=f'Could not fetch client {client_id}:')
+            module.fail_json(msg=f"Could not fetch client {client_id}:")

     if roles is None:
         module.exit_json(msg="Nothing to do (no roles specified).")
     else:
         for role_index, role in enumerate(roles, start=0):
-            if role['name'] is None and role['id'] is None:
-                module.fail_json(msg='Either the `name` or `id` has to be specified on each role.')
+            if role["name"] is None and role["id"] is None:
+                module.fail_json(msg="Either the `name` or `id` has to be specified on each role.")
             # Fetch missing role_id
-            if role['id'] is None:
-                role_id = kc.get_client_role_id_by_name(cid, role['name'], realm=realm)
+            if role["id"] is None:
+                role_id = kc.get_client_role_id_by_name(cid, role["name"], realm=realm)
                 if role_id is not None:
-                    role['id'] = role_id
+                    role["id"] = role_id
                 else:
                     module.fail_json(msg=f"Could not fetch role {role['name']}:")
             # Fetch missing role_name
             else:
-                role['name'] = kc.get_client_group_rolemapping_by_id(gid, cid, role['id'], realm=realm)['name']
-                if role['name'] is None:
+                role["name"] = kc.get_client_group_rolemapping_by_id(gid, cid, role["id"], realm=realm)["name"]
+                if role["name"] is None:
                     module.fail_json(msg=f"Could not fetch role {role['id']}")

     # Get effective client-level role mappings
     available_roles_before = kc.get_client_group_available_rolemappings(gid, cid, realm=realm)
     assigned_roles_before = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm)

-    result['existing'] = assigned_roles_before
-    result['proposed'] = list(assigned_roles_before) if assigned_roles_before else []
+    result["existing"] = assigned_roles_before
+    result["proposed"] = list(assigned_roles_before) if assigned_roles_before else []

     update_roles = []
     for role_index, role in enumerate(roles, start=0):
         # Fetch roles to assign if state present
-        if state == 'present':
+        if state == "present":
             for available_role in available_roles_before:
-                if role['name'] == available_role['name']:
-                    update_roles.append({
-                        'id': role['id'],
-                        'name': role['name'],
-                    })
-                    result['proposed'].append(available_role)
+                if role["name"] == available_role["name"]:
+                    update_roles.append(
+                        {
+                            "id": role["id"],
+                            "name": role["name"],
+                        }
+                    )
+                    result["proposed"].append(available_role)
         # Fetch roles to remove if state absent
         else:
             for assigned_role in assigned_roles_before:
-                if role['name'] == assigned_role['name']:
-                    update_roles.append({
-                        'id': role['id'],
-                        'name': role['name'],
-                    })
-                    if assigned_role in result['proposed']:  # Handle double removal
-                        result['proposed'].remove(assigned_role)
+                if role["name"] == assigned_role["name"]:
+                    update_roles.append(
+                        {
+                            "id": role["id"],
+                            "name": role["name"],
+                        }
+                    )
+                    if assigned_role in result["proposed"]:  # Handle double removal
+                        result["proposed"].remove(assigned_role)

     if len(update_roles):
-        if state == 'present':
+        if state == "present":
             # Assign roles
-            result['changed'] = True
+            result["changed"] = True
             if module._diff:
-                result['diff'] = dict(before=assigned_roles_before, after=result['proposed'])
+                result["diff"] = dict(before=assigned_roles_before, after=result["proposed"])
             if module.check_mode:
                 module.exit_json(**result)
             kc.add_group_rolemapping(gid, cid, update_roles, realm=realm)
-            result['msg'] = f'Roles {update_roles} assigned to group {group_name}.'
+            result["msg"] = f"Roles {update_roles} assigned to group {group_name}."
             assigned_roles_after = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm)
-            result['end_state'] = assigned_roles_after
+            result["end_state"] = assigned_roles_after
             module.exit_json(**result)
         else:
             # Remove mapping of role
-            result['changed'] = True
+            result["changed"] = True
             if module._diff:
-                result['diff'] = dict(before=assigned_roles_before, after=result['proposed'])
+                result["diff"] = dict(before=assigned_roles_before, after=result["proposed"])
             if module.check_mode:
                 module.exit_json(**result)
             kc.delete_group_rolemapping(gid, cid, update_roles, realm=realm)
-            result['msg'] = f'Roles {update_roles} removed from group {group_name}.'
+            result["msg"] = f"Roles {update_roles} removed from group {group_name}."
             assigned_roles_after = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm)
-            result['end_state'] = assigned_roles_after
+            result["end_state"] = assigned_roles_after
             module.exit_json(**result)
     # Do nothing
     else:
-        result['changed'] = False
-        result['msg'] = f"Nothing to do, roles {roles} are {'mapped' if state == 'present' else 'not mapped'} with group {group_name}."
+        result["changed"] = False
+        result["msg"] = (
+            f"Nothing to do, roles {roles} are {'mapped' if state == 'present' else 'not mapped'} with group {group_name}."
+        )

     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
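Taken in isolation, the reconciliation step in the rolemapping module above is plain set logic: for state=present the desired roles are intersected with what is still available to assign, for state=absent with what is currently assigned. A minimal standalone sketch of that selection (the helper name and sample data below are illustrative, not part of the module):

# Sketch of the role-selection logic used by keycloak_client_rolemapping above.
# Role dicts mirror Keycloak's shape ({"id": ..., "name": ...}); the data is made up.
def select_update_roles(desired, available, assigned, state):
    reference = available if state == "present" else assigned
    names = {r["name"] for r in reference}
    return [{"id": r["id"], "name": r["name"]} for r in desired if r["name"] in names]

desired = [{"id": "1", "name": "viewer"}, {"id": "2", "name": "editor"}]
available = [{"id": "1", "name": "viewer"}]  # assignable, not yet assigned
assigned = [{"id": "2", "name": "editor"}]   # already assigned
print(select_update_roles(desired, available, assigned, "present"))  # -> viewer only
print(select_update_roles(desired, available, assigned, "absent"))   # -> editor only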
diff --git a/plugins/modules/keycloak_client_rolescope.py b/plugins/modules/keycloak_client_rolescope.py
index 15a8a2d2709..5a684516a74 100644
--- a/plugins/modules/keycloak_client_rolescope.py
+++ b/plugins/modules/keycloak_client_rolescope.py
@@ -147,8 +147,12 @@
 ]
 """

-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
-    keycloak_argument_spec, get_token, KeycloakError
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
+    KeycloakAPI,
+    keycloak_argument_spec,
+    get_token,
+    KeycloakError,
+)

 from ansible.module_utils.basic import AnsibleModule

@@ -161,19 +165,18 @@ def main():
     argument_spec = keycloak_argument_spec()

     meta_args = dict(
-        client_id=dict(type='str', required=True),
-        client_scope_id=dict(type='str'),
-        realm=dict(type='str', default='master'),
-        role_names=dict(type='list', elements='str', required=True),
-        state=dict(type='str', default='present', choices=['present', 'absent']),
+        client_id=dict(type="str", required=True),
+        client_scope_id=dict(type="str"),
+        realm=dict(type="str", default="master"),
+        role_names=dict(type="list", elements="str", required=True),
+        state=dict(type="str", default="present", choices=["present", "absent"]),
     )

     argument_spec.update(meta_args)

-    module = AnsibleModule(argument_spec=argument_spec,
-                           supports_check_mode=True)
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

-    result = dict(changed=False, msg='', diff={}, end_state={})
+    result = dict(changed=False, msg="", diff={}, end_state={})

     # Obtain access token, initialize API
     try:
@@ -183,11 +186,11 @@ def main():

     kc = KeycloakAPI(module, connection_header)

-    realm = module.params.get('realm')
-    clientid = module.params.get('client_id')
-    client_scope_id = module.params.get('client_scope_id')
-    role_names = module.params.get('role_names')
-    state = module.params.get('state')
+    realm = module.params.get("realm")
+    clientid = module.params.get("client_id")
+    client_scope_id = module.params.get("client_scope_id")
+    role_names = module.params.get("role_names")
+    state = module.params.get("state")

     objRealm = kc.get_realm_by_id(realm)
     if not objRealm:
@@ -237,38 +240,46 @@ def main():
             role_mapping_to_manipulate.append(role_mapping_by_name[role_name])
             del role_mapping_by_name[role_name]

-    before_role_mapping = sorted(before_role_mapping, key=lambda d: d['name'])
-    desired_role_mapping = sorted(role_mapping_by_name.values(), key=lambda d: d['name'])
+    before_role_mapping = sorted(before_role_mapping, key=lambda d: d["name"])
+    desired_role_mapping = sorted(role_mapping_by_name.values(), key=lambda d: d["name"])

-    result['changed'] = len(role_mapping_to_manipulate) > 0
+    result["changed"] = len(role_mapping_to_manipulate) > 0

-    if result['changed']:
-        result['diff'] = dict(before=before_role_mapping, after=desired_role_mapping)
+    if result["changed"]:
+        result["diff"] = dict(before=before_role_mapping, after=desired_role_mapping)

-    if not result['changed']:
+    if not result["changed"]:
         # no changes
-        result['end_state'] = before_role_mapping
-        result['msg'] = f"No changes required for client role scope {clientid}."
+        result["end_state"] = before_role_mapping
+        result["msg"] = f"No changes required for client role scope {clientid}."
     elif state == "present":
         # doing update
         if module.check_mode:
-            result['end_state'] = desired_role_mapping
+            result["end_state"] = desired_role_mapping
         elif client_scope_id:
-            result['end_state'] = kc.update_client_role_scope_from_client(role_mapping_to_manipulate, objClient["id"], objClientScope["id"], realm)
+            result["end_state"] = kc.update_client_role_scope_from_client(
+                role_mapping_to_manipulate, objClient["id"], objClientScope["id"], realm
+            )
         else:
-            result['end_state'] = kc.update_client_role_scope_from_realm(role_mapping_to_manipulate, objClient["id"], realm)
-        result['msg'] = f"Client role scope for {clientid} has been updated"
+            result["end_state"] = kc.update_client_role_scope_from_realm(
+                role_mapping_to_manipulate, objClient["id"], realm
+            )
+        result["msg"] = f"Client role scope for {clientid} has been updated"
     else:
         # doing delete
         if module.check_mode:
-            result['end_state'] = desired_role_mapping
+            result["end_state"] = desired_role_mapping
         elif client_scope_id:
-            result['end_state'] = kc.delete_client_role_scope_from_client(role_mapping_to_manipulate, objClient["id"], objClientScope["id"], realm)
+            result["end_state"] = kc.delete_client_role_scope_from_client(
+                role_mapping_to_manipulate, objClient["id"], objClientScope["id"], realm
+            )
         else:
-            result['end_state'] = kc.delete_client_role_scope_from_realm(role_mapping_to_manipulate, objClient["id"], realm)
-        result['msg'] = f"Client role scope for {clientid} has been deleted"
+            result["end_state"] = kc.delete_client_role_scope_from_realm(
+                role_mapping_to_manipulate, objClient["id"], realm
+            )
+        result["msg"] = f"Client role scope for {clientid} has been deleted"

     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/keycloak_clientscope.py b/plugins/modules/keycloak_clientscope.py
index c434658713c..61763886a97 100644
--- a/plugins/modules/keycloak_clientscope.py
+++ b/plugins/modules/keycloak_clientscope.py
@@ -288,13 +288,19 @@
 }
 """

-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
-    keycloak_argument_spec, get_token, KeycloakError, is_struct_included
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
+    KeycloakAPI,
+    camel,
+    keycloak_argument_spec,
+    get_token,
+    KeycloakError,
+    is_struct_included,
+)

 from ansible.module_utils.basic import AnsibleModule


 def normalise_cr(clientscoperep, remove_ids=False):
-    """ Re-sorts any properties where the order so that diff's is minimised, and adds default values where appropriate so that the
+    """Re-sorts any properties where the order does not matter so that diffs are minimised, and adds default values where appropriate so that
     the change detection is more effective.

     :param clientscoperep: the clientscoperep dict to be sanitized
@@ -305,30 +311,32 @@ def normalise_cr(clientscoperep, remove_ids=False):
     # Avoid the dict passed in to be modified
     clientscoperep = clientscoperep.copy()

-    if 'protocolMappers' in clientscoperep:
-        clientscoperep['protocolMappers'] = sorted(clientscoperep['protocolMappers'], key=lambda x: (x.get('name'), x.get('protocol'), x.get('protocolMapper')))
-        for mapper in clientscoperep['protocolMappers']:
+    if "protocolMappers" in clientscoperep:
+        clientscoperep["protocolMappers"] = sorted(
+            clientscoperep["protocolMappers"], key=lambda x: (x.get("name"), x.get("protocol"), x.get("protocolMapper"))
+        )
+        for mapper in clientscoperep["protocolMappers"]:
             if remove_ids:
-                mapper.pop('id', None)
+                mapper.pop("id", None)

             # Set to a default value.
-            mapper['consentRequired'] = mapper.get('consentRequired', False)
+            mapper["consentRequired"] = mapper.get("consentRequired", False)

     return clientscoperep


 def sanitize_cr(clientscoperep):
-    """ Removes probably sensitive details from a clientscoperep representation.
+    """Removes potentially sensitive details from a clientscoperep representation.

     :param clientscoperep: the clientscoperep dict to be sanitized
     :return: sanitized clientrep dict
     """
     result = clientscoperep.copy()
-    if 'secret' in result:
-        result['secret'] = 'no_log'
-    if 'attributes' in result:
-        if 'saml.signing.private.key' in result['attributes']:
-            result['attributes']['saml.signing.private.key'] = 'no_log'
+    if "secret" in result:
+        result["secret"] = "no_log"
+    if "attributes" in result:
+        if "saml.signing.private.key" in result["attributes"]:
+            result["attributes"]["saml.signing.private.key"] = "no_log"
     return normalise_cr(result)

@@ -341,35 +349,40 @@ def main():
     argument_spec = keycloak_argument_spec()

     protmapper_spec = dict(
-        id=dict(type='str'),
-        name=dict(type='str'),
-        protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed', 'docker-v2']),
-        protocolMapper=dict(type='str'),
-        config=dict(type='dict'),
+        id=dict(type="str"),
+        name=dict(type="str"),
+        protocol=dict(type="str", choices=["openid-connect", "saml", "wsfed", "docker-v2"]),
+        protocolMapper=dict(type="str"),
+        config=dict(type="dict"),
     )

     meta_args = dict(
-        state=dict(default='present', choices=['present', 'absent']),
-        realm=dict(default='master'),
-        id=dict(type='str'),
-        name=dict(type='str'),
-        description=dict(type='str'),
-        protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed', 'docker-v2']),
-        attributes=dict(type='dict'),
-        protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']),
+        state=dict(default="present", choices=["present", "absent"]),
+        realm=dict(default="master"),
+        id=dict(type="str"),
+        name=dict(type="str"),
+        description=dict(type="str"),
+        protocol=dict(type="str", choices=["openid-connect", "saml", "wsfed", "docker-v2"]),
+        attributes=dict(type="dict"),
+        protocol_mappers=dict(type="list", elements="dict", options=protmapper_spec, aliases=["protocolMappers"]),
     )

     argument_spec.update(meta_args)

-    module = AnsibleModule(argument_spec=argument_spec,
-                           supports_check_mode=True,
-                           required_one_of=([['id', 'name'],
-                                             ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
-                           required_together=([['auth_username', 'auth_password']]),
-                           required_by={'refresh_token': 'auth_realm'},
-                           )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_one_of=(
+            [
+                ["id", "name"],
+                ["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"],
+            ]
+        ),
+        required_together=([["auth_username", "auth_password"]]),
+        required_by={"refresh_token": "auth_realm"},
+    )

-    result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+    result = dict(changed=False, msg="", diff={}, proposed={}, existing={}, end_state={})

     # Obtain access token, initialize API
     try:
@@ -379,16 +392,18 @@ def main():

     kc = KeycloakAPI(module, connection_header)

-    realm = module.params.get('realm')
-    state = module.params.get('state')
-    cid = module.params.get('id')
-    name = module.params.get('name')
-    protocol_mappers = module.params.get('protocol_mappers')
+    realm = module.params.get("realm")
+    state = module.params.get("state")
+    cid = module.params.get("id")
+    name = module.params.get("name")
+    protocol_mappers = module.params.get("protocol_mappers")

     # Filter and map the parameter names that apply to the client scope
-    clientscope_params = [x for x in module.params
-                          if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
-                          module.params.get(x) is not None]
+    clientscope_params = [
+        x
+        for x in module.params
+        if x not in list(keycloak_argument_spec().keys()) + ["state", "realm"] and module.params.get(x) is not None
+    ]

     # See if it already exists in Keycloak
     if cid is None:
@@ -407,7 +422,7 @@ def main():

         # Unfortunately, the ansible argument spec checker introduces variables with null values when
         # they are not specified
-        if clientscope_param == 'protocol_mappers':
+        if clientscope_param == "protocol_mappers":
             new_param_value = [{k: v for k, v in x.items() if v is not None} for x in new_param_value]
         changeset[camel(clientscope_param)] = new_param_value

@@ -417,23 +432,23 @@ def main():

     # Cater for when it doesn't exist (an empty dict)
     if not before_clientscope:
-        if state == 'absent':
+        if state == "absent":
             # Do nothing and exit
             if module._diff:
-                result['diff'] = dict(before='', after='')
-            result['changed'] = False
-            result['end_state'] = {}
-            result['msg'] = 'Clientscope does not exist; doing nothing.'
+                result["diff"] = dict(before="", after="")
+            result["changed"] = False
+            result["end_state"] = {}
+            result["msg"] = "Clientscope does not exist; doing nothing."
             module.exit_json(**result)

         # Process a creation
-        result['changed'] = True
+        result["changed"] = True

         if name is None:
-            module.fail_json(msg='name must be specified when creating a new clientscope')
+            module.fail_json(msg="name must be specified when creating a new clientscope")

         if module._diff:
-            result['diff'] = dict(before='', after=sanitize_cr(desired_clientscope))
+            result["diff"] = dict(before="", after=sanitize_cr(desired_clientscope))

         if module.check_mode:
             module.exit_json(**result)
@@ -442,37 +457,36 @@ def main():
         kc.create_clientscope(desired_clientscope, realm=realm)
         after_clientscope = kc.get_clientscope_by_name(name, realm)

-        result['end_state'] = sanitize_cr(after_clientscope)
+        result["end_state"] = sanitize_cr(after_clientscope)

-        result['msg'] = f"Clientscope {after_clientscope['name']} has been created with ID {after_clientscope['id']}"
+        result["msg"] = f"Clientscope {after_clientscope['name']} has been created with ID {after_clientscope['id']}"

     else:
-        if state == 'present':
+        if state == "present":
             # Process an update

             # no changes
             # remove ids for compare, problematic if desired has no ids set (not required),
             # normalize for consentRequired in protocolMappers
             if normalise_cr(desired_clientscope, remove_ids=True) == normalise_cr(before_clientscope, remove_ids=True):
-                result['changed'] = False
-                result['end_state'] = sanitize_cr(desired_clientscope)
-                result['msg'] = f"No changes required to clientscope {before_clientscope['name']}."
+                result["changed"] = False
+                result["end_state"] = sanitize_cr(desired_clientscope)
+                result["msg"] = f"No changes required to clientscope {before_clientscope['name']}."
                 module.exit_json(**result)

             # doing an update
-            result['changed'] = True
+            result["changed"] = True

             if module._diff:
-                result['diff'] = dict(before=sanitize_cr(before_clientscope), after=sanitize_cr(desired_clientscope))
+                result["diff"] = dict(before=sanitize_cr(before_clientscope), after=sanitize_cr(desired_clientscope))

             if module.check_mode:
                 # We can only compare the current clientscope with the proposed updates we have
                 before_norm = normalise_cr(before_clientscope, remove_ids=True)
                 desired_norm = normalise_cr(desired_clientscope, remove_ids=True)
                 if module._diff:
-                    result['diff'] = dict(before=sanitize_cr(before_norm),
-                                          after=sanitize_cr(desired_norm))
-                result['changed'] = not is_struct_included(desired_norm, before_norm)
+                    result["diff"] = dict(before=sanitize_cr(before_norm), after=sanitize_cr(desired_norm))
+                result["changed"] = not is_struct_included(desired_norm, before_norm)

                 module.exit_json(**result)

             # do the update
@@ -482,41 +496,43 @@ def main():
             if protocol_mappers is not None:
                 for protocol_mapper in protocol_mappers:
                     # update if protocolmapper exist
-                    current_protocolmapper = kc.get_clientscope_protocolmapper_by_name(desired_clientscope['id'], protocol_mapper['name'], realm=realm)
+                    current_protocolmapper = kc.get_clientscope_protocolmapper_by_name(
+                        desired_clientscope["id"], protocol_mapper["name"], realm=realm
+                    )
                     if current_protocolmapper is not None:
-                        protocol_mapper['id'] = current_protocolmapper['id']
-                        kc.update_clientscope_protocolmappers(desired_clientscope['id'], protocol_mapper, realm=realm)
+                        protocol_mapper["id"] = current_protocolmapper["id"]
+                        kc.update_clientscope_protocolmappers(desired_clientscope["id"], protocol_mapper, realm=realm)
                     # create otherwise
                     else:
-                        kc.create_clientscope_protocolmapper(desired_clientscope['id'], protocol_mapper, realm=realm)
+                        kc.create_clientscope_protocolmapper(desired_clientscope["id"], protocol_mapper, realm=realm)

-            after_clientscope = kc.get_clientscope_by_clientscopeid(desired_clientscope['id'], realm=realm)
+            after_clientscope = kc.get_clientscope_by_clientscopeid(desired_clientscope["id"], realm=realm)

-            result['end_state'] = after_clientscope
+            result["end_state"] = after_clientscope

-            result['msg'] = f"Clientscope {after_clientscope['id']} has been updated"
+            result["msg"] = f"Clientscope {after_clientscope['id']} has been updated"

             module.exit_json(**result)
         else:
             # Process a deletion (because state was not 'present')
-            result['changed'] = True
+            result["changed"] = True

             if module._diff:
-                result['diff'] = dict(before=sanitize_cr(before_clientscope), after='')
+                result["diff"] = dict(before=sanitize_cr(before_clientscope), after="")

             if module.check_mode:
                 module.exit_json(**result)

             # delete it
-            cid = before_clientscope['id']
+            cid = before_clientscope["id"]
             kc.delete_clientscope(cid=cid, realm=realm)

-            result['end_state'] = {}
+            result["end_state"] = {}

-            result['msg'] = f"Clientscope {before_clientscope['name']} has been deleted"
+            result["msg"] = f"Clientscope {before_clientscope['name']} has been deleted"

     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
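For reference, the normalisation trick normalise_cr relies on can be reproduced in isolation: sort protocolMappers on a stable key, drop server-assigned ids, and backfill defaults, so that two representations compare equal exactly when they are semantically identical. A self-contained sketch (the sample data is hypothetical):

# Standalone sketch of the normalise_cr comparison idea from the diff above.
def normalise(rep, remove_ids=True):
    rep = dict(rep)
    mappers = [dict(m) for m in rep.get("protocolMappers", [])]
    for m in mappers:
        if remove_ids:
            m.pop("id", None)               # ids are server-assigned, not semantic
        m.setdefault("consentRequired", False)  # backfill the server default
    rep["protocolMappers"] = sorted(
        mappers, key=lambda x: (x.get("name"), x.get("protocol"), x.get("protocolMapper"))
    )
    return rep

server = {"protocolMappers": [{"id": "abc", "name": "email", "protocol": "openid-connect", "consentRequired": False}]}
desired = {"protocolMappers": [{"name": "email", "protocol": "openid-connect"}]}
print(normalise(server) == normalise(desired))  # True -> no update needed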
diff --git a/plugins/modules/keycloak_clientscope_type.py b/plugins/modules/keycloak_clientscope_type.py
index e979d123ab6..2032217d5c4 100644
--- a/plugins/modules/keycloak_clientscope_type.py
+++ b/plugins/modules/keycloak_clientscope_type.py
@@ -139,10 +139,12 @@

 from ansible.module_utils.basic import AnsibleModule
 from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
-    KeycloakAPI, KeycloakError, get_token)
+    KeycloakAPI,
+    KeycloakError,
+    get_token,
+)

-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import \
-    keycloak_argument_spec
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import keycloak_argument_spec


 def keycloak_clientscope_type_module():
@@ -154,10 +156,10 @@ def keycloak_clientscope_type_module():
     argument_spec = keycloak_argument_spec()

     meta_args = dict(
-        realm=dict(default='master'),
-        client_id=dict(type='str', aliases=['clientId']),
-        default_clientscopes=dict(type='list', elements='str'),
-        optional_clientscopes=dict(type='list', elements='str'),
+        realm=dict(default="master"),
+        client_id=dict(type="str", aliases=["clientId"]),
+        default_clientscopes=dict(type="list", elements="str"),
+        optional_clientscopes=dict(type="list", elements="str"),
     )

     argument_spec.update(meta_args)
@@ -165,17 +167,15 @@ def keycloak_clientscope_type_module():
     module = AnsibleModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
-        required_one_of=([
-            ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret'],
-            ['default_clientscopes', 'optional_clientscopes']
-        ]),
-        required_together=([['auth_username', 'auth_password']]),
-        required_by={'refresh_token': 'auth_realm'},
-        mutually_exclusive=[
-            ['token', 'auth_realm'],
-            ['token', 'auth_username'],
-            ['token', 'auth_password']
-        ],
+        required_one_of=(
+            [
+                ["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"],
+                ["default_clientscopes", "optional_clientscopes"],
+            ]
+        ),
+        required_together=([["auth_username", "auth_password"]]),
+        required_by={"refresh_token": "auth_realm"},
+        mutually_exclusive=[["token", "auth_realm"], ["token", "auth_username"], ["token", "auth_password"]],
     )

     return module
@@ -183,32 +183,32 @@ def keycloak_clientscope_type_module():

 def clientscopes_to_add(existing, proposed):
     to_add = []
-    existing_clientscope_ids = extract_field(existing, 'id')
+    existing_clientscope_ids = extract_field(existing, "id")
     for clientscope in proposed:
-        if not clientscope['id'] in existing_clientscope_ids:
+        if not clientscope["id"] in existing_clientscope_ids:
            to_add.append(clientscope)
     return to_add


 def clientscopes_to_delete(existing, proposed):
     to_delete = []
-    proposed_clientscope_ids = extract_field(proposed, 'id')
+    proposed_clientscope_ids = extract_field(proposed, "id")
     for clientscope in existing:
-        if not clientscope['id'] in proposed_clientscope_ids:
+        if not clientscope["id"] in proposed_clientscope_ids:
             to_delete.append(clientscope)
     return to_delete


-def extract_field(dictionary, field='name'):
+def extract_field(dictionary, field="name"):
     return [cs[field] for cs in dictionary]


 def normalize_scopes(scopes):
     scopes_copy = scopes.copy()
-    if isinstance(scopes_copy.get('default_clientscopes'), list):
-        scopes_copy['default_clientscopes'] = sorted(scopes_copy['default_clientscopes'])
-    if isinstance(scopes_copy.get('optional_clientscopes'), list):
-        scopes_copy['optional_clientscopes'] = sorted(scopes_copy['optional_clientscopes'])
+    if isinstance(scopes_copy.get("default_clientscopes"), list):
+        scopes_copy["default_clientscopes"] = sorted(scopes_copy["default_clientscopes"])
+    if isinstance(scopes_copy.get("optional_clientscopes"), list):
+        scopes_copy["optional_clientscopes"] = sorted(scopes_copy["optional_clientscopes"])
     return scopes_copy


@@ -229,12 +229,12 @@ def main():

     kc = KeycloakAPI(module, connection_header)

-    realm = module.params.get('realm')
-    client_id = module.params.get('client_id')
-    default_clientscopes = module.params.get('default_clientscopes')
-    optional_clientscopes = module.params.get('optional_clientscopes')
+    realm = module.params.get("realm")
+    client_id = module.params.get("client_id")
+    default_clientscopes = module.params.get("default_clientscopes")
+    optional_clientscopes = module.params.get("optional_clientscopes")

-    result = dict(changed=False, msg='', proposed={}, existing={}, end_state={})
+    result = dict(changed=False, msg="", proposed={}, existing={}, end_state={})

     all_clientscopes = kc.get_clientscopes(realm)
     default_clientscopes_real = []
@@ -247,26 +247,30 @@ def main():
             optional_clientscopes_real.append(client_scope)

     if default_clientscopes is not None and len(default_clientscopes_real) != len(default_clientscopes):
-        module.fail_json(msg='At least one of the default_clientscopes does not exist!')
+        module.fail_json(msg="At least one of the default_clientscopes does not exist!")

     if optional_clientscopes is not None and len(optional_clientscopes_real) != len(optional_clientscopes):
-        module.fail_json(msg='At least one of the optional_clientscopes does not exist!')
+        module.fail_json(msg="At least one of the optional_clientscopes does not exist!")

-    result['proposed'].update({
-        'default_clientscopes': 'no-change' if default_clientscopes is None else default_clientscopes,
-        'optional_clientscopes': 'no-change' if optional_clientscopes is None else optional_clientscopes
-    })
+    result["proposed"].update(
+        {
+            "default_clientscopes": "no-change" if default_clientscopes is None else default_clientscopes,
+            "optional_clientscopes": "no-change" if optional_clientscopes is None else optional_clientscopes,
+        }
+    )

     default_clientscopes_existing = kc.get_default_clientscopes(realm, client_id)
     optional_clientscopes_existing = kc.get_optional_clientscopes(realm, client_id)

-    result['existing'].update({
-        'default_clientscopes': extract_field(default_clientscopes_existing),
-        'optional_clientscopes': extract_field(optional_clientscopes_existing)
-    })
+    result["existing"].update(
+        {
+            "default_clientscopes": extract_field(default_clientscopes_existing),
+            "optional_clientscopes": extract_field(optional_clientscopes_existing),
+        }
+    )

     if module._diff:
-        result['diff'] = dict(before=normalize_scopes(result['existing']), after=normalize_scopes(result['proposed']))
+        result["diff"] = dict(before=normalize_scopes(result["existing"]), after=normalize_scopes(result["proposed"]))

     default_clientscopes_add = clientscopes_to_add(default_clientscopes_existing, default_clientscopes_real)
     optional_clientscopes_add = clientscopes_to_add(optional_clientscopes_existing, optional_clientscopes_real)
@@ -274,31 +278,39 @@ def main():
     default_clientscopes_delete = clientscopes_to_delete(default_clientscopes_existing, default_clientscopes_real)
     optional_clientscopes_delete = clientscopes_to_delete(optional_clientscopes_existing, optional_clientscopes_real)

-    result["changed"] = any(len(x) > 0 for x in [
-        default_clientscopes_add, optional_clientscopes_add, default_clientscopes_delete, optional_clientscopes_delete
-    ])
+    result["changed"] = any(
+        len(x) > 0
+        for x in [
+            default_clientscopes_add,
+            optional_clientscopes_add,
+            default_clientscopes_delete,
+            optional_clientscopes_delete,
+        ]
+    )

     if module.check_mode:
         module.exit_json(**result)

     # first delete so clientscopes can change type
     for clientscope in default_clientscopes_delete:
-        kc.delete_default_clientscope(clientscope['id'], realm, client_id)
+        kc.delete_default_clientscope(clientscope["id"], realm, client_id)
     for clientscope in optional_clientscopes_delete:
-        kc.delete_optional_clientscope(clientscope['id'], realm, client_id)
+        kc.delete_optional_clientscope(clientscope["id"], realm, client_id)

     for clientscope in default_clientscopes_add:
-        kc.add_default_clientscope(clientscope['id'], realm, client_id)
+        kc.add_default_clientscope(clientscope["id"], realm, client_id)
     for clientscope in optional_clientscopes_add:
-        kc.add_optional_clientscope(clientscope['id'], realm, client_id)
+        kc.add_optional_clientscope(clientscope["id"], realm, client_id)

-    result['end_state'].update({
-        'default_clientscopes': extract_field(kc.get_default_clientscopes(realm, client_id)),
-        'optional_clientscopes': extract_field(kc.get_optional_clientscopes(realm, client_id))
-    })
+    result["end_state"].update(
+        {
+            "default_clientscopes": extract_field(kc.get_default_clientscopes(realm, client_id)),
+            "optional_clientscopes": extract_field(kc.get_optional_clientscopes(realm, client_id)),
+        }
+    )

     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/keycloak_clientsecret_info.py b/plugins/modules/keycloak_clientsecret_info.py
index 91416b30be2..427ce66427f 100644
--- a/plugins/modules/keycloak_clientsecret_info.py
+++ b/plugins/modules/keycloak_clientsecret_info.py
@@ -128,9 +128,14 @@
 """

 from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
-    KeycloakAPI, KeycloakError, get_token)
+    KeycloakAPI,
+    KeycloakError,
+    get_token,
+)
 from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak_clientsecret import (
-    keycloak_clientsecret_module, keycloak_clientsecret_module_resolve_params)
+    keycloak_clientsecret_module,
+    keycloak_clientsecret_module_resolve_params,
+)


 def main():
@@ -154,13 +159,10 @@ def main():

     clientsecret = kc.get_clientsecret(id=id, realm=realm)

-    result = {
-        'clientsecret_info': clientsecret,
-        'msg': f'Get client secret successful for ID {id}'
-    }
+    result = {"clientsecret_info": clientsecret, "msg": f"Get client secret successful for ID {id}"}

     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/keycloak_clientsecret_regenerate.py b/plugins/modules/keycloak_clientsecret_regenerate.py
index 38c5e60dbc9..c9d180efb55 100644
--- a/plugins/modules/keycloak_clientsecret_regenerate.py
+++ b/plugins/modules/keycloak_clientsecret_regenerate.py
@@ -131,9 +131,14 @@
 """

 from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
-    KeycloakAPI, KeycloakError, get_token)
+    KeycloakAPI,
+    KeycloakError,
+    get_token,
+)
 from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak_clientsecret import (
-    keycloak_clientsecret_module, keycloak_clientsecret_module_resolve_params)
+    keycloak_clientsecret_module,
+    keycloak_clientsecret_module_resolve_params,
+)


 def main():
@@ -157,20 +162,17 @@ def main():

     if module.check_mode:
         dummy_result = {
-            "msg": 'No action taken while in check mode',
-            "end_state": {'type': 'secret', 'value': 'X' * 32}
+            "msg": "No action taken while in check mode",
+            "end_state": {"type": "secret", "value": "X" * 32},
         }
         module.exit_json(**dummy_result)

     # Create new secret
     clientsecret = kc.create_clientsecret(id=id, realm=realm)

-    result = {
-        "msg": f'New client secret has been generated for ID {id}',
-        "end_state": clientsecret
-    }
+    result = {"msg": f"New client secret has been generated for ID {id}", "end_state": clientsecret}

     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
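The check-mode branch above shows a pattern worth noting for secret-bearing modules: never call the mutating API in check mode, and return a clearly fake placeholder with the right shape instead. A hedged sketch of that flow (api_create_secret is a hypothetical stand-in for kc.create_clientsecret):

# Sketch of the check-mode guard used by keycloak_clientsecret_regenerate above.
def regenerate_secret(api_create_secret, check_mode):
    if check_mode:
        # Same shape as a real ClientSecretRepresentation, obviously fake value.
        return {"msg": "No action taken while in check mode",
                "end_state": {"type": "secret", "value": "X" * 32}}
    secret = api_create_secret()  # only reached outside check mode
    return {"msg": "New client secret has been generated", "end_state": secret}

print(regenerate_secret(lambda: {"type": "secret", "value": "real-value"}, check_mode=True))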
diff --git a/plugins/modules/keycloak_clienttemplate.py b/plugins/modules/keycloak_clienttemplate.py
index 138ac8a9b4a..64074e16ff8 100644
--- a/plugins/modules/keycloak_clienttemplate.py
+++ b/plugins/modules/keycloak_clienttemplate.py
@@ -265,8 +265,13 @@
 }
 """

-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
-    keycloak_argument_spec, get_token, KeycloakError
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
+    KeycloakAPI,
+    camel,
+    keycloak_argument_spec,
+    get_token,
+    KeycloakError,
+)

 from ansible.module_utils.basic import AnsibleModule

@@ -279,39 +284,43 @@ def main():
     argument_spec = keycloak_argument_spec()

     protmapper_spec = dict(
-        consentRequired=dict(type='bool'),
-        consentText=dict(type='str'),
-        id=dict(type='str'),
-        name=dict(type='str'),
-        protocol=dict(type='str', choices=['openid-connect', 'saml', 'docker-v2']),
-        protocolMapper=dict(type='str'),
-        config=dict(type='dict'),
+        consentRequired=dict(type="bool"),
+        consentText=dict(type="str"),
+        id=dict(type="str"),
+        name=dict(type="str"),
+        protocol=dict(type="str", choices=["openid-connect", "saml", "docker-v2"]),
+        protocolMapper=dict(type="str"),
+        config=dict(type="dict"),
     )

     meta_args = dict(
-        realm=dict(type='str', default='master'),
-        state=dict(default='present', choices=['present', 'absent']),
-
-        id=dict(type='str'),
-        name=dict(type='str'),
-        description=dict(type='str'),
-        protocol=dict(type='str', choices=['openid-connect', 'saml', 'docker-v2']),
-        attributes=dict(type='dict'),
-        full_scope_allowed=dict(type='bool'),
-        protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec),
+        realm=dict(type="str", default="master"),
+        state=dict(default="present", choices=["present", "absent"]),
+        id=dict(type="str"),
+        name=dict(type="str"),
+        description=dict(type="str"),
+        protocol=dict(type="str", choices=["openid-connect", "saml", "docker-v2"]),
+        attributes=dict(type="dict"),
+        full_scope_allowed=dict(type="bool"),
+        protocol_mappers=dict(type="list", elements="dict", options=protmapper_spec),
     )

     argument_spec.update(meta_args)

-    module = AnsibleModule(argument_spec=argument_spec,
-                           supports_check_mode=True,
-                           required_one_of=([['id', 'name'],
-                                             ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
-                           required_together=([['auth_username', 'auth_password']]),
-                           required_by={'refresh_token': 'auth_realm'},
-                           )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_one_of=(
+            [
+                ["id", "name"],
+                ["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"],
+            ]
+        ),
+        required_together=([["auth_username", "auth_password"]]),
+        required_by={"refresh_token": "auth_realm"},
+    )

-    result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+    result = dict(changed=False, msg="", diff={}, proposed={}, existing={}, end_state={})

     # Obtain access token, initialize API
     try:
@@ -321,28 +330,41 @@ def main():

     kc = KeycloakAPI(module, connection_header)

-    realm = module.params.get('realm')
-    state = module.params.get('state')
-    cid = module.params.get('id')
+    realm = module.params.get("realm")
+    state = module.params.get("state")
+    cid = module.params.get("id")

     # Filter and map the parameter names that apply to the client template
-    clientt_params = [x for x in module.params
-                      if x not in ['state', 'auth_keycloak_url', 'auth_client_id', 'auth_realm',
-                                   'auth_client_secret', 'auth_username', 'auth_password',
-                                   'validate_certs', 'realm'] and module.params.get(x) is not None]
+    clientt_params = [
+        x
+        for x in module.params
+        if x
+        not in [
+            "state",
+            "auth_keycloak_url",
+            "auth_client_id",
+            "auth_realm",
+            "auth_client_secret",
+            "auth_username",
+            "auth_password",
+            "validate_certs",
+            "realm",
+        ]
+        and module.params.get(x) is not None
+    ]

     # See if it already exists in Keycloak
     if cid is None:
-        before_clientt = kc.get_client_template_by_name(module.params.get('name'), realm=realm)
+        before_clientt = kc.get_client_template_by_name(module.params.get("name"), realm=realm)
         if before_clientt is not None:
-            cid = before_clientt['id']
+            cid = before_clientt["id"]
     else:
         before_clientt = kc.get_client_template_by_id(cid, realm=realm)

     if before_clientt is None:
         before_clientt = {}

-    result['existing'] = before_clientt
+    result["existing"] = before_clientt

     # Build a proposed changeset from parameters given to this module
     changeset = {}
@@ -361,50 +383,49 @@ def main():
     desired_clientt = before_clientt.copy()
     desired_clientt.update(changeset)

-    result['proposed'] = changeset
+    result["proposed"] = changeset

     # Cater for when it doesn't exist (an empty dict)
     if not before_clientt:
-        if state == 'absent':
+        if state == "absent":
             # Do nothing and exit
             if module._diff:
-                result['diff'] = dict(before='', after='')
-            result['changed'] = False
-            result['end_state'] = {}
-            result['msg'] = 'Client template does not exist, doing nothing.'
+                result["diff"] = dict(before="", after="")
+            result["changed"] = False
+            result["end_state"] = {}
+            result["msg"] = "Client template does not exist, doing nothing."
             module.exit_json(**result)

         # Process a creation
-        result['changed'] = True
+        result["changed"] = True

-        if 'name' not in desired_clientt:
-            module.fail_json(msg='name needs to be specified when creating a new client')
+        if "name" not in desired_clientt:
+            module.fail_json(msg="name needs to be specified when creating a new client template")

         if module._diff:
-            result['diff'] = dict(before='', after=desired_clientt)
+            result["diff"] = dict(before="", after=desired_clientt)

         if module.check_mode:
             module.exit_json(**result)

         # create it
         kc.create_client_template(desired_clientt, realm=realm)
-        after_clientt = kc.get_client_template_by_name(desired_clientt['name'], realm=realm)
+        after_clientt = kc.get_client_template_by_name(desired_clientt["name"], realm=realm)

-        result['end_state'] = after_clientt
+        result["end_state"] = after_clientt

-        result['msg'] = f"Client template {desired_clientt['name']} has been created."
+        result["msg"] = f"Client template {desired_clientt['name']} has been created."

         module.exit_json(**result)

     else:
-        if state == 'present':
+        if state == "present":
             # Process an update
-            result['changed'] = True
+            result["changed"] = True

             if module.check_mode:
                 # We can only compare the current client template with the proposed updates we have
                 if module._diff:
-                    result['diff'] = dict(before=before_clientt,
-                                          after=desired_clientt)
+                    result["diff"] = dict(before=before_clientt, after=desired_clientt)

                 module.exit_json(**result)

@@ -413,36 +434,36 @@ def main():
             after_clientt = kc.get_client_template_by_id(cid, realm=realm)

             if before_clientt == after_clientt:
-                result['changed'] = False
+                result["changed"] = False

-            result['end_state'] = after_clientt
+            result["end_state"] = after_clientt

             if module._diff:
-                result['diff'] = dict(before=before_clientt, after=after_clientt)
+                result["diff"] = dict(before=before_clientt, after=after_clientt)

-            result['msg'] = f"Client template {desired_clientt['name']} has been updated."
+            result["msg"] = f"Client template {desired_clientt['name']} has been updated."

             module.exit_json(**result)
         else:
             # Process a deletion (because state was not 'present')
-            result['changed'] = True
+            result["changed"] = True

             if module._diff:
-                result['diff'] = dict(before=before_clientt, after='')
+                result["diff"] = dict(before=before_clientt, after="")

             if module.check_mode:
                 module.exit_json(**result)

             # delete it
             kc.delete_client_template(cid, realm=realm)

-            result['proposed'] = {}
+            result["proposed"] = {}

-            result['end_state'] = {}
+            result["end_state"] = {}

-            result['msg'] = f"Client template {before_clientt['name']} has been deleted."
+            result["msg"] = f"Client template {before_clientt['name']} has been deleted."

     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
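Several of these modules build their API changeset the same way: filter out Ansible-only parameters, then camel-case the survivors with the camel helper imported above. A minimal equivalent, where camel() is re-implemented here purely for illustration and may differ in detail from the module_utils version:

# Sketch of the param-filter + camelCase changeset step used by these modules.
def camel(name):
    parts = name.split("_")
    return parts[0] + "".join(p.capitalize() for p in parts[1:])

IGNORED = {"state", "realm", "auth_username", "auth_password"}  # abbreviated list

def build_changeset(params):
    # Drop control/auth parameters and unset (None) values, camel-case the rest.
    return {camel(k): v for k, v in params.items() if k not in IGNORED and v is not None}

print(build_changeset({"full_scope_allowed": True, "state": "present", "description": None}))
# -> {'fullScopeAllowed': True}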
diff --git a/plugins/modules/keycloak_component.py b/plugins/modules/keycloak_component.py
index 37ba1c8dd70..68ca826271a 100644
--- a/plugins/modules/keycloak_component.py
+++ b/plugins/modules/keycloak_component.py
@@ -128,8 +128,13 @@
     type: dict
 """

-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
-    keycloak_argument_spec, get_token, KeycloakError
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
+    KeycloakAPI,
+    camel,
+    keycloak_argument_spec,
+    get_token,
+    KeycloakError,
+)
 from ansible.module_utils.basic import AnsibleModule
 from urllib.parse import urlencode
 from copy import deepcopy
@@ -139,31 +144,34 @@ def main():
     argument_spec = keycloak_argument_spec()

     meta_args = dict(
-        state=dict(type='str', default='present', choices=['present', 'absent']),
-        name=dict(type='str', required=True),
-        parent_id=dict(type='str', required=True),
-        provider_id=dict(type='str', required=True),
-        provider_type=dict(type='str', required=True),
+        state=dict(type="str", default="present", choices=["present", "absent"]),
+        name=dict(type="str", required=True),
+        parent_id=dict(type="str", required=True),
+        provider_id=dict(type="str", required=True),
+        provider_type=dict(type="str", required=True),
         config=dict(
-            type='dict',
-        )
+            type="dict",
+        ),
     )

     argument_spec.update(meta_args)

-    module = AnsibleModule(argument_spec=argument_spec,
-                           supports_check_mode=True,
-                           required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
-                           required_together=([['auth_username', 'auth_password']]),
-                           required_by={'refresh_token': 'auth_realm'},
-                           )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_one_of=(
+            [["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"]]
+        ),
+        required_together=([["auth_username", "auth_password"]]),
+        required_by={"refresh_token": "auth_realm"},
+    )

-    result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={}))
+    result = dict(changed=False, msg="", end_state={}, diff=dict(before={}, after={}))

     # This will include the current state of the component if it is already
     # present. This is only used for diff-mode.
     before_component = {}
-    before_component['config'] = {}
+    before_component["config"] = {}

     # Obtain access token, initialize API
     try:
@@ -176,15 +184,13 @@ def main():
     params_to_ignore = list(keycloak_argument_spec().keys()) + ["state", "parent_id"]

     # Filter and map the parameter names that apply to the component
-    component_params = [x for x in module.params
-                        if x not in params_to_ignore and
-                        module.params.get(x) is not None]
+    component_params = [x for x in module.params if x not in params_to_ignore and module.params.get(x) is not None]

     provider_type = module.params.get("provider_type")

     # Build a proposed changeset from parameters given to this module
     changeset = {}
-    changeset['config'] = {}
+    changeset["config"] = {}

     # Generate a JSON payload for Keycloak Admin API from the module
     # parameters.  Parameters that do not belong to the JSON payload (e.g.
@@ -200,16 +206,16 @@ def main():
     # by Keycloak.
     #
     for component_param in component_params:
-        if component_param == 'config':
-            for config_param in module.params.get('config'):
-                changeset['config'][camel(config_param)] = []
-                raw_value = module.params.get('config')[config_param]
+        if component_param == "config":
+            for config_param in module.params.get("config"):
+                changeset["config"][camel(config_param)] = []
+                raw_value = module.params.get("config")[config_param]
                 if isinstance(raw_value, bool):
                     value = str(raw_value).lower()
                 else:
                     value = str(raw_value)
-                changeset['config'][camel(config_param)].append(value)
+                changeset["config"][camel(config_param)].append(value)
         else:
             # No need for camelcase in here as these are one word parameters
             new_param_value = module.params.get(component_param)
@@ -220,13 +226,13 @@ def main():
     changeset_copy = deepcopy(changeset)

     # Make it easier to refer to current module parameters
-    name = module.params.get('name')
-    force = module.params.get('force')
-    state = module.params.get('state')
-    enabled = module.params.get('enabled')
-    provider_id = module.params.get('provider_id')
-    provider_type = module.params.get('provider_type')
-    parent_id = module.params.get('parent_id')
+    name = module.params.get("name")
+    force = module.params.get("force")
+    state = module.params.get("state")
+    enabled = module.params.get("enabled")
+    provider_id = module.params.get("provider_id")
+    provider_type = module.params.get("provider_type")
+    parent_id = module.params.get("parent_id")

     # Get a list of all Keycloak components that are of keyprovider type.
     current_components = kc.get_components(urlencode(dict(type=provider_type)), parent_id)
@@ -239,84 +245,84 @@ def main():
     changes = ""

     # This tells Ansible whether the key was changed (added, removed, modified)
-    result['changed'] = False
+    result["changed"] = False

     # Loop through the list of components. If we encounter a component whose
     # name matches the value of the name parameter then assume the key is
     # already present.
     for component in current_components:
-        if component['name'] == name:
-            component_id = component['id']
-            changeset['id'] = component_id
-            changeset_copy['id'] = component_id
+        if component["name"] == name:
+            component_id = component["id"]
+            changeset["id"] = component_id
+            changeset_copy["id"] = component_id

             # Compare top-level parameters
             for param, value in changeset.items():
                 before_component[param] = component[param]

-                if changeset_copy[param] != component[param] and param != 'config':
+                if changeset_copy[param] != component[param] and param != "config":
                     changes += f"{param}: {component[param]} -> {changeset_copy[param]}, "
-                    result['changed'] = True
+                    result["changed"] = True

             # Compare parameters under the "config" key
-            for p, v in changeset_copy['config'].items():
+            for p, v in changeset_copy["config"].items():
                 try:
-                    before_component['config'][p] = component['config'][p] or []
+                    before_component["config"][p] = component["config"][p] or []
                 except KeyError:
-                    before_component['config'][p] = []
-                if changeset_copy['config'][p] != component['config'][p]:
+                    before_component["config"][p] = []
+                if changeset_copy["config"][p] != component["config"][p]:
                     changes += f"config.{p}: {component['config'][p]} -> {changeset_copy['config'][p]}, "
-                    result['changed'] = True
+                    result["changed"] = True

     # Check all the possible states of the resource and do what is needed to
     # converge current state with desired state (create, update or delete
     # the key).
-    if component_id and state == 'present':
-        if result['changed']:
+    if component_id and state == "present":
+        if result["changed"]:
             if module._diff:
-                result['diff'] = dict(before=before_component, after=changeset_copy)
+                result["diff"] = dict(before=before_component, after=changeset_copy)

             if module.check_mode:
-                result['msg'] = f"Component {name} would be changed: {changes.strip(', ')}"
+                result["msg"] = f"Component {name} would be changed: {changes.strip(', ')}"
             else:
                 kc.update_component(changeset, parent_id)
-                result['msg'] = f"Component {name} changed: {changes.strip(', ')}"
+                result["msg"] = f"Component {name} changed: {changes.strip(', ')}"
         else:
-            result['msg'] = f"Component {name} was in sync"
+            result["msg"] = f"Component {name} was in sync"

-        result['end_state'] = changeset_copy
-    elif component_id and state == 'absent':
+        result["end_state"] = changeset_copy
+    elif component_id and state == "absent":
         if module._diff:
-            result['diff'] = dict(before=before_component, after={})
+            result["diff"] = dict(before=before_component, after={})

         if module.check_mode:
-            result['changed'] = True
-            result['msg'] = f"Component {name} would be deleted"
+            result["changed"] = True
+            result["msg"] = f"Component {name} would be deleted"
         else:
             kc.delete_component(component_id, parent_id)
-            result['changed'] = True
-            result['msg'] = f"Component {name} deleted"
+            result["changed"] = True
+            result["msg"] = f"Component {name} deleted"

-        result['end_state'] = {}
-    elif not component_id and state == 'present':
+        result["end_state"] = {}
+    elif not component_id and state == "present":
         if module._diff:
-            result['diff'] = dict(before={}, after=changeset_copy)
+            result["diff"] = dict(before={}, after=changeset_copy)

         if module.check_mode:
-            result['changed'] = True
-            result['msg'] = f"Component {name} would be created"
+            result["changed"] = True
+            result["msg"] = f"Component {name} would be created"
         else:
             kc.create_component(changeset, parent_id)
-            result['changed'] = True
-            result['msg'] = f"Component {name} created"
+            result["changed"] = True
+            result["msg"] = f"Component {name} created"

-        result['end_state'] = changeset_copy
-    elif not component_id and state == 'absent':
-        result['changed'] = False
-        result['msg'] = f"Component {name} not present"
-        result['end_state'] = {}
+        result["end_state"] = changeset_copy
+    elif not component_id and state == "absent":
+        result["changed"] = False
+        result["msg"] = f"Component {name} not present"
+        result["end_state"] = {}

     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/keycloak_component_info.py b/plugins/modules/keycloak_component_info.py
index a0791f39092..43512eae3c5 100644
--- a/plugins/modules/keycloak_component_info.py
+++ b/plugins/modules/keycloak_component_info.py
@@ -99,8 +99,12 @@
     elements: dict
 """

-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
-    keycloak_argument_spec, get_token, KeycloakError
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
+    KeycloakAPI,
+    keycloak_argument_spec,
+    get_token,
+    KeycloakError,
+)
 from ansible.module_utils.basic import AnsibleModule
 from urllib.parse import quote

@@ -114,16 +118,15 @@ def main():
     argument_spec = keycloak_argument_spec()

     meta_args = dict(
-        name=dict(type='str'),
-        realm=dict(type='str', required=True),
-        parent_id=dict(type='str'),
-        provider_type=dict(type='str'),
+        name=dict(type="str"),
+        realm=dict(type="str", required=True),
+        parent_id=dict(type="str"),
+        provider_type=dict(type="str"),
     )

     argument_spec.update(meta_args)

-    module = AnsibleModule(argument_spec=argument_spec,
-                           supports_check_mode=True)
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

     result = dict(changed=False, components=[])

@@ -135,10 +138,10 @@ def main():

     kc = KeycloakAPI(module, connection_header)

-    realm = module.params.get('realm')
-    parentId = module.params.get('parent_id')
-    name = module.params.get('name')
-    providerType = module.params.get('provider_type')
+    realm = module.params.get("realm")
+    parentId = module.params.get("parent_id")
+    name = module.params.get("name")
+    providerType = module.params.get("provider_type")

     objRealm = kc.get_realm_by_id(realm)
     if not objRealm:
@@ -156,10 +159,10 @@ def main():
     if providerType:
         filters.append(f"type={quote(providerType, safe='')}")

-    result['components'] = kc.get_components(filter="&".join(filters), realm=realm)
+    result["components"] = kc.get_components(filter="&".join(filters), realm=realm)

     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
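keycloak_component_info assembles its query string by URL-encoding each filter value with quote(..., safe='') before joining, which keeps provider types and names with spaces or reserved characters safe in the URL. A standalone sketch of that step; only the type= key is confirmed by the diff above, the other query keys here are assumptions for illustration:

# Sketch of the filter-building step used by keycloak_component_info above.
from urllib.parse import quote

def build_filter(name=None, provider_type=None):
    filters = []
    if name:
        filters.append(f"name={quote(name, safe='')}")  # key assumed for illustration
    if provider_type:
        filters.append(f"type={quote(provider_type, safe='')}")
    return "&".join(filters)

print(build_filter(name="my ldap", provider_type="org.keycloak.storage.UserStorageProvider"))
# -> name=my%20ldap&type=org.keycloak.storage.UserStorageProvider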
diff --git a/plugins/modules/keycloak_group.py b/plugins/modules/keycloak_group.py
index 017c29b4a9d..583c6606876 100644
--- a/plugins/modules/keycloak_group.py
+++ b/plugins/modules/keycloak_group.py
@@ -297,8 +297,13 @@
         view: true
 """

-from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
-    keycloak_argument_spec, get_token, KeycloakError
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
+    KeycloakAPI,
+    camel,
+    keycloak_argument_spec,
+    get_token,
+    KeycloakError,
+)

 from ansible.module_utils.basic import AnsibleModule

@@ -311,31 +316,34 @@ def main():
     argument_spec = keycloak_argument_spec()

     meta_args = dict(
-        state=dict(default='present', choices=['present', 'absent']),
-        realm=dict(default='master'),
-        id=dict(type='str'),
-        name=dict(type='str'),
-        attributes=dict(type='dict'),
+        state=dict(default="present", choices=["present", "absent"]),
+        realm=dict(default="master"),
+        id=dict(type="str"),
+        name=dict(type="str"),
+        attributes=dict(type="dict"),
         parents=dict(
-            type='list', elements='dict',
-            options=dict(
-                id=dict(type='str'),
-                name=dict(type='str')
-            ),
+            type="list",
+            elements="dict",
+            options=dict(id=dict(type="str"), name=dict(type="str")),
         ),
     )

     argument_spec.update(meta_args)

-    module = AnsibleModule(argument_spec=argument_spec,
-                           supports_check_mode=True,
-                           required_one_of=([['id', 'name'],
-                                             ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
-                           required_together=([['auth_username', 'auth_password']]),
-                           required_by={'refresh_token': 'auth_realm'},
-                           )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_one_of=(
+            [
+                ["id", "name"],
+                ["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"],
+            ]
+        ),
+        required_together=([["auth_username", "auth_password"]]),
+        required_by={"refresh_token": "auth_realm"},
+    )

-    result = dict(changed=False, msg='', diff={}, group='')
+    result = dict(changed=False, msg="", diff={}, group="")

     # Obtain access token, initialize API
     try:
@@ -345,25 +353,28 @@ def main():

     kc = KeycloakAPI(module, connection_header)

-    realm = module.params.get('realm')
-    state = module.params.get('state')
-    gid = module.params.get('id')
-    name = module.params.get('name')
-    attributes = module.params.get('attributes')
+    realm = module.params.get("realm")
+    state = module.params.get("state")
+    gid = module.params.get("id")
+    name = module.params.get("name")
+    attributes = module.params.get("attributes")

-    parents = module.params.get('parents')
+    parents = module.params.get("parents")

     # attributes in Keycloak have their values returned as lists
     # using the API. attributes is a dict, so we'll transparently convert
     # the values to lists.
     if attributes is not None:
-        for key, val in module.params['attributes'].items():
-            module.params['attributes'][key] = [val] if not isinstance(val, list) else val
+        for key, val in module.params["attributes"].items():
+            module.params["attributes"][key] = [val] if not isinstance(val, list) else val

     # Filter and map the parameter names that apply to the group
-    group_params = [x for x in module.params
-                    if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'parents'] and
-                    module.params.get(x) is not None]
+    group_params = [
+        x
+        for x in module.params
+        if x not in list(keycloak_argument_spec().keys()) + ["state", "realm", "parents"]
+        and module.params.get(x) is not None
+    ]

     # See if it already exists in Keycloak
     if gid is None:
@@ -389,23 +400,23 @@ def main():

     # Cater for when it doesn't exist (an empty dict)
     if not before_group:
-        if state == 'absent':
+        if state == "absent":
             # Do nothing and exit
             if module._diff:
-                result['diff'] = dict(before='', after='')
-            result['changed'] = False
-            result['end_state'] = {}
-            result['msg'] = 'Group does not exist; doing nothing.'
+                result["diff"] = dict(before="", after="")
+            result["changed"] = False
+            result["end_state"] = {}
+            result["msg"] = "Group does not exist; doing nothing."
             module.exit_json(**result)

         # Process a creation
-        result['changed'] = True
+        result["changed"] = True

         if name is None:
-            module.fail_json(msg='name must be specified when creating a new group')
+            module.fail_json(msg="name must be specified when creating a new group")

         if module._diff:
-            result['diff'] = dict(before='', after=desired_group)
+            result["diff"] = dict(before="", after=desired_group)

         if module.check_mode:
             module.exit_json(**result)
@@ -420,27 +431,27 @@ def main():

         after_group = kc.get_group_by_name(name, realm, parents=parents)

-        result['end_state'] = after_group
+        result["end_state"] = after_group

-        result['msg'] = f"Group {after_group['name']} has been created with ID {after_group['id']}"
+        result["msg"] = f"Group {after_group['name']} has been created with ID {after_group['id']}"

         module.exit_json(**result)

     else:
-        if state == 'present':
+        if state == "present":
             # Process an update

             # no changes
             if desired_group == before_group:
-                result['changed'] = False
-                result['end_state'] = desired_group
-                result['msg'] = f"No changes required to group {before_group['name']}."
+                result["changed"] = False
+                result["end_state"] = desired_group
+                result["msg"] = f"No changes required to group {before_group['name']}."
                 module.exit_json(**result)

             # doing an update
-            result['changed'] = True
+            result["changed"] = True

             if module._diff:
-                result['diff'] = dict(before=before_group, after=desired_group)
+                result["diff"] = dict(before=before_group, after=desired_group)

             if module.check_mode:
                 module.exit_json(**result)

@@ -448,33 +459,33 @@ def main():
             # do the update
             kc.update_group(desired_group, realm=realm)

-            after_group = kc.get_group_by_groupid(desired_group['id'], realm=realm)
+            after_group = kc.get_group_by_groupid(desired_group["id"], realm=realm)

-            result['end_state'] = after_group
+            result["end_state"] = after_group

-            result['msg'] = f"Group {after_group['id']} has been updated"
+            result["msg"] = f"Group {after_group['id']} has been updated"

             module.exit_json(**result)
         else:
             # Process a deletion (because state was not 'present')
-            result['changed'] = True
+            result["changed"] = True

             if module._diff:
-                result['diff'] = dict(before=before_group, after='')
+                result["diff"] = dict(before=before_group, after="")

             if module.check_mode:
                 module.exit_json(**result)

             # delete it
-            gid = before_group['id']
+            gid = before_group["id"]
             kc.delete_group(groupid=gid, realm=realm)

-            result['end_state'] = {}
+            result["end_state"] = {}

-            result['msg'] = f"Group {before_group['name']} has been deleted"
+            result["msg"] = f"Group {before_group['name']} has been deleted"

     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
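The attribute handling in keycloak_group above reflects how the Keycloak Admin API returns group attributes: every value comes back as a list, so scalars supplied by the user are wrapped before comparison to avoid spurious diffs. In isolation (sample data hypothetical):

# Sketch of the attribute normalisation used by keycloak_group above.
def normalise_attributes(attributes):
    # Keycloak returns every attribute value as a list; wrap scalars to match.
    return {k: v if isinstance(v, list) else [v] for k, v in attributes.items()}

print(normalise_attributes({"team": "platform", "regions": ["eu", "us"]}))
# -> {'team': ['platform'], 'regions': ['eu', 'us']}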
if "config" in idpcopy: + if "clientSecret" in idpcopy["config"]: + idpcopy["config"]["clientSecret"] = "**********" return idpcopy def get_identity_provider_with_mappers(kc, alias, realm): idp = kc.get_identity_provider(alias, realm) if idp is not None: - idp['mappers'] = sorted(kc.get_identity_provider_mappers(alias, realm), key=lambda x: x.get('name')) + idp["mappers"] = sorted(kc.get_identity_provider_mappers(alias, realm), key=lambda x: x.get("name")) # clientSecret returned by API when using `get_identity_provider(alias, realm)` is always ********** # to detect changes to the secret, we get the actual cleartext secret from the full realm info - if 'config' in idp: - if 'clientSecret' in idp['config']: - for idp_from_realm in kc.get_realm_by_id(realm).get('identityProviders', []): - if idp_from_realm['internalId'] == idp['internalId']: - cleartext_secret = idp_from_realm.get('config', {}).get('clientSecret') + if "config" in idp: + if "clientSecret" in idp["config"]: + for idp_from_realm in kc.get_realm_by_id(realm).get("identityProviders", []): + if idp_from_realm["internalId"] == idp["internalId"]: + cleartext_secret = idp_from_realm.get("config", {}).get("clientSecret") if cleartext_secret: - idp['config']['clientSecret'] = cleartext_secret + idp["config"]["clientSecret"] = cleartext_secret if idp is None: idp = {} return idp @@ -494,19 +499,21 @@ def fetch_identity_provider_wellknown_config(kc, config): :param config: Dictionary containing identity provider configuration, must include 'fromUrl' key to trigger fetch. :return: None. The config dict is updated in-place. """ - if config and 'fromUrl' in config : - if 'providerId' in config and config['providerId'] != 'oidc': + if config and "fromUrl" in config: + if "providerId" in config and config["providerId"] != "oidc": kc.module.fail_json(msg="Only 'oidc' provider_id is supported when using 'fromUrl'.") - endpoints = ['userInfoUrl', 'authorizationUrl', 'tokenUrl', 'logoutUrl', 'issuer', 'jwksUrl'] + endpoints = ["userInfoUrl", "authorizationUrl", "tokenUrl", "logoutUrl", "issuer", "jwksUrl"] if any(k in config for k in endpoints): - kc.module.fail_json(msg="Cannot specify both 'fromUrl' and 'userInfoUrl', 'authorizationUrl', 'tokenUrl', 'logoutUrl', 'issuer' or 'jwksUrl'.") + kc.module.fail_json( + msg="Cannot specify both 'fromUrl' and 'userInfoUrl', 'authorizationUrl', 'tokenUrl', 'logoutUrl', 'issuer' or 'jwksUrl'." 
+ ) openIdConfig = kc.fetch_idp_endpoints_import_config_url( - fromUrl=config['fromUrl'], - realm=kc.module.params.get('realm', 'master')) + fromUrl=config["fromUrl"], realm=kc.module.params.get("realm", "master") + ) for k in endpoints: if k in openIdConfig: config[k] = openIdConfig[k] - del config['fromUrl'] + del config["fromUrl"] def main(): @@ -518,41 +525,44 @@ def main(): argument_spec = keycloak_argument_spec() mapper_spec = dict( - id=dict(type='str'), - name=dict(type='str'), - identityProviderAlias=dict(type='str'), - identityProviderMapper=dict(type='str'), - config=dict(type='dict'), + id=dict(type="str"), + name=dict(type="str"), + identityProviderAlias=dict(type="str"), + identityProviderMapper=dict(type="str"), + config=dict(type="dict"), ) meta_args = dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - realm=dict(type='str', default='master'), - alias=dict(type='str', required=True), - add_read_token_role_on_create=dict(type='bool', aliases=['addReadTokenRoleOnCreate']), - authenticate_by_default=dict(type='bool', aliases=['authenticateByDefault']), - config=dict(type='dict'), - display_name=dict(type='str', aliases=['displayName']), - enabled=dict(type='bool'), - first_broker_login_flow_alias=dict(type='str', aliases=['firstBrokerLoginFlowAlias']), - link_only=dict(type='bool', aliases=['linkOnly']), - post_broker_login_flow_alias=dict(type='str', aliases=['postBrokerLoginFlowAlias']), - provider_id=dict(type='str', aliases=['providerId']), - store_token=dict(type='bool', aliases=['storeToken']), - trust_email=dict(type='bool', aliases=['trustEmail']), - mappers=dict(type='list', elements='dict', options=mapper_spec), + state=dict(type="str", default="present", choices=["present", "absent"]), + realm=dict(type="str", default="master"), + alias=dict(type="str", required=True), + add_read_token_role_on_create=dict(type="bool", aliases=["addReadTokenRoleOnCreate"]), + authenticate_by_default=dict(type="bool", aliases=["authenticateByDefault"]), + config=dict(type="dict"), + display_name=dict(type="str", aliases=["displayName"]), + enabled=dict(type="bool"), + first_broker_login_flow_alias=dict(type="str", aliases=["firstBrokerLoginFlowAlias"]), + link_only=dict(type="bool", aliases=["linkOnly"]), + post_broker_login_flow_alias=dict(type="str", aliases=["postBrokerLoginFlowAlias"]), + provider_id=dict(type="str", aliases=["providerId"]), + store_token=dict(type="bool", aliases=["storeToken"]), + trust_email=dict(type="bool", aliases=["trustEmail"]), + mappers=dict(type="list", elements="dict", options=mapper_spec), ) argument_spec.update(meta_args) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 'auth_realm'}, - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"]] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, + ) - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + result = dict(changed=False, msg="", diff={}, proposed={}, existing={}, end_state={}) # Obtain access token, initialize API try: @@ -562,17 +572,20 @@ def main(): kc = 
KeycloakAPI(module, connection_header) - realm = module.params.get('realm') - alias = module.params.get('alias') - state = module.params.get('state') - config = module.params.get('config') + realm = module.params.get("realm") + alias = module.params.get("alias") + state = module.params.get("state") + config = module.params.get("config") fetch_identity_provider_wellknown_config(kc, config) # Filter and map the parameter names that apply to the identity provider. - idp_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and - module.params.get(x) is not None] + idp_params = [ + x + for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ["state", "realm", "mappers"] + and module.params.get(x) is not None + ] # See if it already exists in Keycloak before_idp = get_identity_provider_with_mappers(kc, alias, realm) @@ -587,19 +600,19 @@ def main(): changeset[camel(param)] = new_param_value # special handling of mappers list to allow change detection - if module.params.get('mappers') is not None: - for change in module.params['mappers']: + if module.params.get("mappers") is not None: + for change in module.params["mappers"]: change = {k: v for k, v in change.items() if v is not None} - if change.get('id') is None and change.get('name') is None: - module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.') + if change.get("id") is None and change.get("name") is None: + module.fail_json(msg="Either `name` or `id` has to be specified on each mapper.") if before_idp == dict(): old_mapper = dict() - elif change.get('id') is not None: - old_mapper = kc.get_identity_provider_mapper(change['id'], alias, realm) + elif change.get("id") is not None: + old_mapper = kc.get_identity_provider_mapper(change["id"], alias, realm) if old_mapper is None: old_mapper = dict() else: - found = [x for x in kc.get_identity_provider_mappers(alias, realm) if x['name'] == change['name']] + found = [x for x in kc.get_identity_provider_mappers(alias, realm) if x["name"] == change["name"]] if len(found) == 1: old_mapper = found[0] else: @@ -607,111 +620,114 @@ def main(): new_mapper = old_mapper.copy() new_mapper.update(change) - if changeset.get('mappers') is None: - changeset['mappers'] = list() + if changeset.get("mappers") is None: + changeset["mappers"] = list() # eventually this holds all desired mappers, unchanged, modified and newly added - changeset['mappers'].append(new_mapper) + changeset["mappers"].append(new_mapper) # ensure idempotency in case module.params.mappers is not sorted by name - changeset['mappers'] = sorted(changeset['mappers'], key=lambda x: x.get('id') if x.get('name') is None else x['name']) + changeset["mappers"] = sorted( + changeset["mappers"], key=lambda x: x.get("id") if x.get("name") is None else x["name"] + ) # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis) desired_idp = before_idp.copy() desired_idp.update(changeset) - result['proposed'] = sanitize(changeset) - result['existing'] = sanitize(before_idp) + result["proposed"] = sanitize(changeset) + result["existing"] = sanitize(before_idp) # Cater for when it doesn't exist (an empty dict) if not before_idp: - if state == 'absent': # Do nothing and exit if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = 'Identity provider does not exist; doing nothing.'
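The mapper handling above merges each desired mapper onto whatever already exists and then sorts the combined list, so the order of `mappers` in the playbook cannot cause spurious change detection. A minimal standalone sketch of that merge, with existing mappers looked up in memory by name instead of through the KeycloakAPI calls used by the module:

def merge_mappers(existing_mappers, desired_changes):
    # Overlay each desired mapper on the current one with the same name,
    # keeping any fields the caller did not specify.
    by_name = {m.get("name"): m for m in existing_mappers}
    merged = []
    for change in desired_changes:
        change = {k: v for k, v in change.items() if v is not None}
        new_mapper = dict(by_name.get(change.get("name"), {}))
        new_mapper.update(change)
        merged.append(new_mapper)
    # Sort by name (falling back to id) so input order does not matter.
    return sorted(merged, key=lambda m: m.get("id") if m.get("name") is None else m["name"])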
+ result["diff"] = dict(before="", after="") + result["changed"] = False + result["end_state"] = {} + result["msg"] = "Identity provider does not exist; doing nothing." module.exit_json(**result) # Process a creation - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = dict(before='', after=sanitize(desired_idp)) + result["diff"] = dict(before="", after=sanitize(desired_idp)) if module.check_mode: module.exit_json(**result) # create it desired_idp = desired_idp.copy() - mappers = desired_idp.pop('mappers', []) + mappers = desired_idp.pop("mappers", []) kc.create_identity_provider(desired_idp, realm) for mapper in mappers: - if mapper.get('identityProviderAlias') is None: - mapper['identityProviderAlias'] = alias + if mapper.get("identityProviderAlias") is None: + mapper["identityProviderAlias"] = alias kc.create_identity_provider_mapper(mapper, alias, realm) after_idp = get_identity_provider_with_mappers(kc, alias, realm) - result['end_state'] = sanitize(after_idp) + result["end_state"] = sanitize(after_idp) - result['msg'] = f'Identity provider {alias} has been created' + result["msg"] = f"Identity provider {alias} has been created" module.exit_json(**result) else: - if state == 'present': + if state == "present": # Process an update # no changes if desired_idp == before_idp: - result['changed'] = False - result['end_state'] = sanitize(desired_idp) - result['msg'] = f"No changes required to identity provider {alias}." + result["changed"] = False + result["end_state"] = sanitize(desired_idp) + result["msg"] = f"No changes required to identity provider {alias}." module.exit_json(**result) # doing an update - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = dict(before=sanitize(before_idp), after=sanitize(desired_idp)) + result["diff"] = dict(before=sanitize(before_idp), after=sanitize(desired_idp)) if module.check_mode: module.exit_json(**result) # do the update desired_idp = desired_idp.copy() - updated_mappers = desired_idp.pop('mappers', []) - original_mappers = list(before_idp.get('mappers', [])) + updated_mappers = desired_idp.pop("mappers", []) + original_mappers = list(before_idp.get("mappers", [])) kc.update_identity_provider(desired_idp, realm) for mapper in updated_mappers: - if mapper.get('id') is not None: + if mapper.get("id") is not None: # only update existing if there is a change for i, orig in enumerate(original_mappers): - if mapper['id'] == orig['id']: + if mapper["id"] == orig["id"]: del original_mappers[i] if mapper != orig: kc.update_identity_provider_mapper(mapper, alias, realm) else: - if mapper.get('identityProviderAlias') is None: - mapper['identityProviderAlias'] = alias + if mapper.get("identityProviderAlias") is None: + mapper["identityProviderAlias"] = alias kc.create_identity_provider_mapper(mapper, alias, realm) - for mapper in [x for x in before_idp['mappers'] - if [y for y in updated_mappers if y["name"] == x['name']] == []]: - kc.delete_identity_provider_mapper(mapper['id'], alias, realm) + for mapper in [ + x for x in before_idp["mappers"] if [y for y in updated_mappers if y["name"] == x["name"]] == [] + ]: + kc.delete_identity_provider_mapper(mapper["id"], alias, realm) after_idp = get_identity_provider_with_mappers(kc, alias, realm) - result['end_state'] = sanitize(after_idp) + result["end_state"] = sanitize(after_idp) - result['msg'] = f"Identity provider {alias} has been updated" + result["msg"] = f"Identity provider {alias} has been updated" module.exit_json(**result) - elif 
state == 'absent': + elif state == "absent": # Process a deletion - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = dict(before=sanitize(before_idp), after='') + result["diff"] = dict(before=sanitize(before_idp), after="") if module.check_mode: module.exit_json(**result) @@ -719,12 +735,12 @@ def main(): # delete it kc.delete_identity_provider(alias, realm) - result['end_state'] = {} + result["end_state"] = {} - result['msg'] = f"Identity provider {alias} has been deleted" + result["msg"] = f"Identity provider {alias} has been deleted" module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_realm.py b/plugins/modules/keycloak_realm.py index 53546a4f928..48916ea8d44 100644 --- a/plugins/modules/keycloak_realm.py +++ b/plugins/modules/keycloak_realm.py @@ -795,13 +795,18 @@ } """ -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, + camel, + keycloak_argument_spec, + get_token, + KeycloakError, +) from ansible.module_utils.basic import AnsibleModule def normalise_cr(realmrep): - """ Re-sorts any properties where the order is important so that diff's is minimised and the change detection is more effective. + """Re-sorts any properties where the order is important so that diffs are minimised and the change detection is more effective. :param realmrep: the realmrep dict to be normalised :return: normalised realmrep dict @@ -809,31 +814,31 @@ def normalise_cr(realmrep): # Avoid modifying the dict that was passed in realmrep = realmrep.copy() - if 'enabledEventTypes' in realmrep: - realmrep['enabledEventTypes'] = list(sorted(realmrep['enabledEventTypes'])) + if "enabledEventTypes" in realmrep: + realmrep["enabledEventTypes"] = list(sorted(realmrep["enabledEventTypes"])) - if 'otpSupportedApplications' in realmrep: - realmrep['otpSupportedApplications'] = list(sorted(realmrep['otpSupportedApplications'])) + if "otpSupportedApplications" in realmrep: + realmrep["otpSupportedApplications"] = list(sorted(realmrep["otpSupportedApplications"])) - if 'supportedLocales' in realmrep: - realmrep['supportedLocales'] = list(sorted(realmrep['supportedLocales'])) + if "supportedLocales" in realmrep: + realmrep["supportedLocales"] = list(sorted(realmrep["supportedLocales"])) return realmrep def sanitize_cr(realmrep): - """ Removes probably sensitive details from a realm representation. + """Removes probably sensitive details from a realm representation.
:param realmrep: the realmrep dict to be sanitized :return: sanitized realmrep dict """ result = realmrep.copy() - if 'secret' in result: - result['secret'] = '********' - if 'attributes' in result: - if 'saml.signing.private.key' in result['attributes']: - result['attributes'] = result['attributes'].copy() - result['attributes']['saml.signing.private.key'] = '********' + if "secret" in result: + result["secret"] = "********" + if "attributes" in result: + if "saml.signing.private.key" in result["attributes"]: + result["attributes"] = result["attributes"].copy() + result["attributes"]["saml.signing.private.key"] = "********" return normalise_cr(result) @@ -846,142 +851,170 @@ def main(): argument_spec = keycloak_argument_spec() meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - - id=dict(type='str'), - realm=dict(type='str'), - access_code_lifespan=dict(type='int', aliases=['accessCodeLifespan']), - access_code_lifespan_login=dict(type='int', aliases=['accessCodeLifespanLogin']), - access_code_lifespan_user_action=dict(type='int', aliases=['accessCodeLifespanUserAction']), - access_token_lifespan=dict(type='int', aliases=['accessTokenLifespan'], no_log=False), - access_token_lifespan_for_implicit_flow=dict(type='int', aliases=['accessTokenLifespanForImplicitFlow'], no_log=False), - account_theme=dict(type='str', aliases=['accountTheme']), - action_token_generated_by_admin_lifespan=dict(type='int', aliases=['actionTokenGeneratedByAdminLifespan'], no_log=False), - action_token_generated_by_user_lifespan=dict(type='int', aliases=['actionTokenGeneratedByUserLifespan'], no_log=False), - admin_events_details_enabled=dict(type='bool', aliases=['adminEventsDetailsEnabled']), - admin_events_enabled=dict(type='bool', aliases=['adminEventsEnabled']), - admin_theme=dict(type='str', aliases=['adminTheme']), - attributes=dict(type='dict'), - browser_flow=dict(type='str', aliases=['browserFlow']), - browser_security_headers=dict(type='dict', aliases=['browserSecurityHeaders']), - brute_force_protected=dict(type='bool', aliases=['bruteForceProtected']), - brute_force_strategy=dict(type='str', choices=['LINEAR', 'MULTIPLE'], aliases=['bruteForceStrategy']), - client_authentication_flow=dict(type='str', aliases=['clientAuthenticationFlow']), - client_scope_mappings=dict(type='dict', aliases=['clientScopeMappings']), - default_default_client_scopes=dict(type='list', elements='str', aliases=['defaultDefaultClientScopes']), - default_groups=dict(type='list', elements='str', aliases=['defaultGroups']), - default_locale=dict(type='str', aliases=['defaultLocale']), - default_optional_client_scopes=dict(type='list', elements='str', aliases=['defaultOptionalClientScopes']), - default_roles=dict(type='list', elements='str', aliases=['defaultRoles']), - default_signature_algorithm=dict(type='str', aliases=['defaultSignatureAlgorithm']), - direct_grant_flow=dict(type='str', aliases=['directGrantFlow']), - display_name=dict(type='str', aliases=['displayName']), - display_name_html=dict(type='str', aliases=['displayNameHtml']), - docker_authentication_flow=dict(type='str', aliases=['dockerAuthenticationFlow']), - duplicate_emails_allowed=dict(type='bool', aliases=['duplicateEmailsAllowed']), - edit_username_allowed=dict(type='bool', aliases=['editUsernameAllowed']), - email_theme=dict(type='str', aliases=['emailTheme']), - enabled=dict(type='bool'), - enabled_event_types=dict(type='list', elements='str', aliases=['enabledEventTypes']), - events_enabled=dict(type='bool', 
aliases=['eventsEnabled']), - events_expiration=dict(type='int', aliases=['eventsExpiration']), - events_listeners=dict(type='list', elements='str', aliases=['eventsListeners']), - failure_factor=dict(type='int', aliases=['failureFactor']), - internationalization_enabled=dict(type='bool', aliases=['internationalizationEnabled']), - login_theme=dict(type='str', aliases=['loginTheme']), - login_with_email_allowed=dict(type='bool', aliases=['loginWithEmailAllowed']), - max_delta_time_seconds=dict(type='int', aliases=['maxDeltaTimeSeconds']), - max_failure_wait_seconds=dict(type='int', aliases=['maxFailureWaitSeconds']), - max_temporary_lockouts=dict(type='int', aliases=['maxTemporaryLockouts']), - minimum_quick_login_wait_seconds=dict(type='int', aliases=['minimumQuickLoginWaitSeconds']), - not_before=dict(type='int', aliases=['notBefore']), - offline_session_idle_timeout=dict(type='int', aliases=['offlineSessionIdleTimeout']), - offline_session_max_lifespan=dict(type='int', aliases=['offlineSessionMaxLifespan']), - offline_session_max_lifespan_enabled=dict(type='bool', aliases=['offlineSessionMaxLifespanEnabled']), - otp_policy_algorithm=dict(type='str', aliases=['otpPolicyAlgorithm']), - otp_policy_digits=dict(type='int', aliases=['otpPolicyDigits']), - otp_policy_initial_counter=dict(type='int', aliases=['otpPolicyInitialCounter']), - otp_policy_look_ahead_window=dict(type='int', aliases=['otpPolicyLookAheadWindow']), - otp_policy_period=dict(type='int', aliases=['otpPolicyPeriod']), - otp_policy_type=dict(type='str', aliases=['otpPolicyType']), - otp_supported_applications=dict(type='list', elements='str', aliases=['otpSupportedApplications']), - password_policy=dict(type='str', aliases=['passwordPolicy'], no_log=False), - organizations_enabled=dict(type='bool', aliases=['organizationsEnabled']), - permanent_lockout=dict(type='bool', aliases=['permanentLockout']), - quick_login_check_milli_seconds=dict(type='int', aliases=['quickLoginCheckMilliSeconds']), - refresh_token_max_reuse=dict(type='int', aliases=['refreshTokenMaxReuse'], no_log=False), - registration_allowed=dict(type='bool', aliases=['registrationAllowed']), - registration_email_as_username=dict(type='bool', aliases=['registrationEmailAsUsername']), - registration_flow=dict(type='str', aliases=['registrationFlow']), - remember_me=dict(type='bool', aliases=['rememberMe']), - reset_credentials_flow=dict(type='str', aliases=['resetCredentialsFlow']), - reset_password_allowed=dict(type='bool', aliases=['resetPasswordAllowed'], no_log=False), - revoke_refresh_token=dict(type='bool', aliases=['revokeRefreshToken']), - smtp_server=dict(type='dict', aliases=['smtpServer']), - ssl_required=dict(choices=["external", "all", "none"], aliases=['sslRequired']), - sso_session_idle_timeout=dict(type='int', aliases=['ssoSessionIdleTimeout']), - sso_session_idle_timeout_remember_me=dict(type='int', aliases=['ssoSessionIdleTimeoutRememberMe']), - sso_session_max_lifespan=dict(type='int', aliases=['ssoSessionMaxLifespan']), - sso_session_max_lifespan_remember_me=dict(type='int', aliases=['ssoSessionMaxLifespanRememberMe']), - supported_locales=dict(type='list', elements='str', aliases=['supportedLocales']), - user_managed_access_allowed=dict(type='bool', aliases=['userManagedAccessAllowed']), - verify_email=dict(type='bool', aliases=['verifyEmail']), - wait_increment_seconds=dict(type='int', aliases=['waitIncrementSeconds']), - client_session_idle_timeout=dict(type='int', aliases=['clientSessionIdleTimeout']), - 
client_session_max_lifespan=dict(type='int', aliases=['clientSessionMaxLifespan']), - client_offline_session_idle_timeout=dict(type='int', aliases=['clientOfflineSessionIdleTimeout']), - client_offline_session_max_lifespan=dict(type='int', aliases=['clientOfflineSessionMaxLifespan']), - oauth2_device_code_lifespan=dict(type='int', aliases=['oauth2DeviceCodeLifespan']), - oauth2_device_polling_interval=dict(type='int', aliases=['oauth2DevicePollingInterval']), - web_authn_policy_rp_entity_name=dict(type='str', aliases=['webAuthnPolicyRpEntityName']), - web_authn_policy_signature_algorithms=dict(type='list', elements='str', aliases=['webAuthnPolicySignatureAlgorithms']), - web_authn_policy_rp_id=dict(type='str', aliases=['webAuthnPolicyRpId']), - web_authn_policy_attestation_conveyance_preference=dict(type='str', aliases=['webAuthnPolicyAttestationConveyancePreference']), - web_authn_policy_authenticator_attachment=dict(type='str', aliases=['webAuthnPolicyAuthenticatorAttachment']), - web_authn_policy_require_resident_key=dict(type='str', aliases=['webAuthnPolicyRequireResidentKey'], no_log=False), - web_authn_policy_user_verification_requirement=dict(type='str', aliases=['webAuthnPolicyUserVerificationRequirement']), - web_authn_policy_create_timeout=dict(type='int', aliases=['webAuthnPolicyCreateTimeout']), - web_authn_policy_avoid_same_authenticator_register=dict(type='bool', aliases=['webAuthnPolicyAvoidSameAuthenticatorRegister']), - web_authn_policy_acceptable_aaguids=dict(type='list', elements='str', aliases=['webAuthnPolicyAcceptableAaguids']), - web_authn_policy_extra_origins=dict(type='list', elements='str', aliases=['webAuthnPolicyExtraOrigins']), - web_authn_policy_passwordless_rp_entity_name=dict(type='str', aliases=['webAuthnPolicyPasswordlessRpEntityName']), + state=dict(default="present", choices=["present", "absent"]), + id=dict(type="str"), + realm=dict(type="str"), + access_code_lifespan=dict(type="int", aliases=["accessCodeLifespan"]), + access_code_lifespan_login=dict(type="int", aliases=["accessCodeLifespanLogin"]), + access_code_lifespan_user_action=dict(type="int", aliases=["accessCodeLifespanUserAction"]), + access_token_lifespan=dict(type="int", aliases=["accessTokenLifespan"], no_log=False), + access_token_lifespan_for_implicit_flow=dict( + type="int", aliases=["accessTokenLifespanForImplicitFlow"], no_log=False + ), + account_theme=dict(type="str", aliases=["accountTheme"]), + action_token_generated_by_admin_lifespan=dict( + type="int", aliases=["actionTokenGeneratedByAdminLifespan"], no_log=False + ), + action_token_generated_by_user_lifespan=dict( + type="int", aliases=["actionTokenGeneratedByUserLifespan"], no_log=False + ), + admin_events_details_enabled=dict(type="bool", aliases=["adminEventsDetailsEnabled"]), + admin_events_enabled=dict(type="bool", aliases=["adminEventsEnabled"]), + admin_theme=dict(type="str", aliases=["adminTheme"]), + attributes=dict(type="dict"), + browser_flow=dict(type="str", aliases=["browserFlow"]), + browser_security_headers=dict(type="dict", aliases=["browserSecurityHeaders"]), + brute_force_protected=dict(type="bool", aliases=["bruteForceProtected"]), + brute_force_strategy=dict(type="str", choices=["LINEAR", "MULTIPLE"], aliases=["bruteForceStrategy"]), + client_authentication_flow=dict(type="str", aliases=["clientAuthenticationFlow"]), + client_scope_mappings=dict(type="dict", aliases=["clientScopeMappings"]), + default_default_client_scopes=dict(type="list", elements="str", aliases=["defaultDefaultClientScopes"]), + 
default_groups=dict(type="list", elements="str", aliases=["defaultGroups"]), + default_locale=dict(type="str", aliases=["defaultLocale"]), + default_optional_client_scopes=dict(type="list", elements="str", aliases=["defaultOptionalClientScopes"]), + default_roles=dict(type="list", elements="str", aliases=["defaultRoles"]), + default_signature_algorithm=dict(type="str", aliases=["defaultSignatureAlgorithm"]), + direct_grant_flow=dict(type="str", aliases=["directGrantFlow"]), + display_name=dict(type="str", aliases=["displayName"]), + display_name_html=dict(type="str", aliases=["displayNameHtml"]), + docker_authentication_flow=dict(type="str", aliases=["dockerAuthenticationFlow"]), + duplicate_emails_allowed=dict(type="bool", aliases=["duplicateEmailsAllowed"]), + edit_username_allowed=dict(type="bool", aliases=["editUsernameAllowed"]), + email_theme=dict(type="str", aliases=["emailTheme"]), + enabled=dict(type="bool"), + enabled_event_types=dict(type="list", elements="str", aliases=["enabledEventTypes"]), + events_enabled=dict(type="bool", aliases=["eventsEnabled"]), + events_expiration=dict(type="int", aliases=["eventsExpiration"]), + events_listeners=dict(type="list", elements="str", aliases=["eventsListeners"]), + failure_factor=dict(type="int", aliases=["failureFactor"]), + internationalization_enabled=dict(type="bool", aliases=["internationalizationEnabled"]), + login_theme=dict(type="str", aliases=["loginTheme"]), + login_with_email_allowed=dict(type="bool", aliases=["loginWithEmailAllowed"]), + max_delta_time_seconds=dict(type="int", aliases=["maxDeltaTimeSeconds"]), + max_failure_wait_seconds=dict(type="int", aliases=["maxFailureWaitSeconds"]), + max_temporary_lockouts=dict(type="int", aliases=["maxTemporaryLockouts"]), + minimum_quick_login_wait_seconds=dict(type="int", aliases=["minimumQuickLoginWaitSeconds"]), + not_before=dict(type="int", aliases=["notBefore"]), + offline_session_idle_timeout=dict(type="int", aliases=["offlineSessionIdleTimeout"]), + offline_session_max_lifespan=dict(type="int", aliases=["offlineSessionMaxLifespan"]), + offline_session_max_lifespan_enabled=dict(type="bool", aliases=["offlineSessionMaxLifespanEnabled"]), + otp_policy_algorithm=dict(type="str", aliases=["otpPolicyAlgorithm"]), + otp_policy_digits=dict(type="int", aliases=["otpPolicyDigits"]), + otp_policy_initial_counter=dict(type="int", aliases=["otpPolicyInitialCounter"]), + otp_policy_look_ahead_window=dict(type="int", aliases=["otpPolicyLookAheadWindow"]), + otp_policy_period=dict(type="int", aliases=["otpPolicyPeriod"]), + otp_policy_type=dict(type="str", aliases=["otpPolicyType"]), + otp_supported_applications=dict(type="list", elements="str", aliases=["otpSupportedApplications"]), + password_policy=dict(type="str", aliases=["passwordPolicy"], no_log=False), + organizations_enabled=dict(type="bool", aliases=["organizationsEnabled"]), + permanent_lockout=dict(type="bool", aliases=["permanentLockout"]), + quick_login_check_milli_seconds=dict(type="int", aliases=["quickLoginCheckMilliSeconds"]), + refresh_token_max_reuse=dict(type="int", aliases=["refreshTokenMaxReuse"], no_log=False), + registration_allowed=dict(type="bool", aliases=["registrationAllowed"]), + registration_email_as_username=dict(type="bool", aliases=["registrationEmailAsUsername"]), + registration_flow=dict(type="str", aliases=["registrationFlow"]), + remember_me=dict(type="bool", aliases=["rememberMe"]), + reset_credentials_flow=dict(type="str", aliases=["resetCredentialsFlow"]), + reset_password_allowed=dict(type="bool", 
aliases=["resetPasswordAllowed"], no_log=False), + revoke_refresh_token=dict(type="bool", aliases=["revokeRefreshToken"]), + smtp_server=dict(type="dict", aliases=["smtpServer"]), + ssl_required=dict(choices=["external", "all", "none"], aliases=["sslRequired"]), + sso_session_idle_timeout=dict(type="int", aliases=["ssoSessionIdleTimeout"]), + sso_session_idle_timeout_remember_me=dict(type="int", aliases=["ssoSessionIdleTimeoutRememberMe"]), + sso_session_max_lifespan=dict(type="int", aliases=["ssoSessionMaxLifespan"]), + sso_session_max_lifespan_remember_me=dict(type="int", aliases=["ssoSessionMaxLifespanRememberMe"]), + supported_locales=dict(type="list", elements="str", aliases=["supportedLocales"]), + user_managed_access_allowed=dict(type="bool", aliases=["userManagedAccessAllowed"]), + verify_email=dict(type="bool", aliases=["verifyEmail"]), + wait_increment_seconds=dict(type="int", aliases=["waitIncrementSeconds"]), + client_session_idle_timeout=dict(type="int", aliases=["clientSessionIdleTimeout"]), + client_session_max_lifespan=dict(type="int", aliases=["clientSessionMaxLifespan"]), + client_offline_session_idle_timeout=dict(type="int", aliases=["clientOfflineSessionIdleTimeout"]), + client_offline_session_max_lifespan=dict(type="int", aliases=["clientOfflineSessionMaxLifespan"]), + oauth2_device_code_lifespan=dict(type="int", aliases=["oauth2DeviceCodeLifespan"]), + oauth2_device_polling_interval=dict(type="int", aliases=["oauth2DevicePollingInterval"]), + web_authn_policy_rp_entity_name=dict(type="str", aliases=["webAuthnPolicyRpEntityName"]), + web_authn_policy_signature_algorithms=dict( + type="list", elements="str", aliases=["webAuthnPolicySignatureAlgorithms"] + ), + web_authn_policy_rp_id=dict(type="str", aliases=["webAuthnPolicyRpId"]), + web_authn_policy_attestation_conveyance_preference=dict( + type="str", aliases=["webAuthnPolicyAttestationConveyancePreference"] + ), + web_authn_policy_authenticator_attachment=dict(type="str", aliases=["webAuthnPolicyAuthenticatorAttachment"]), + web_authn_policy_require_resident_key=dict( + type="str", aliases=["webAuthnPolicyRequireResidentKey"], no_log=False + ), + web_authn_policy_user_verification_requirement=dict( + type="str", aliases=["webAuthnPolicyUserVerificationRequirement"] + ), + web_authn_policy_create_timeout=dict(type="int", aliases=["webAuthnPolicyCreateTimeout"]), + web_authn_policy_avoid_same_authenticator_register=dict( + type="bool", aliases=["webAuthnPolicyAvoidSameAuthenticatorRegister"] + ), + web_authn_policy_acceptable_aaguids=dict( + type="list", elements="str", aliases=["webAuthnPolicyAcceptableAaguids"] + ), + web_authn_policy_extra_origins=dict(type="list", elements="str", aliases=["webAuthnPolicyExtraOrigins"]), + web_authn_policy_passwordless_rp_entity_name=dict( + type="str", aliases=["webAuthnPolicyPasswordlessRpEntityName"] + ), web_authn_policy_passwordless_signature_algorithms=dict( - type='list', elements='str', aliases=['webAuthnPolicyPasswordlessSignatureAlgorithms'], no_log=False + type="list", elements="str", aliases=["webAuthnPolicyPasswordlessSignatureAlgorithms"], no_log=False ), - web_authn_policy_passwordless_rp_id=dict(type='str', aliases=['webAuthnPolicyPasswordlessRpId']), + web_authn_policy_passwordless_rp_id=dict(type="str", aliases=["webAuthnPolicyPasswordlessRpId"]), web_authn_policy_passwordless_attestation_conveyance_preference=dict( - type='str', aliases=['webAuthnPolicyPasswordlessAttestationConveyancePreference'], no_log=False + type="str", 
aliases=["webAuthnPolicyPasswordlessAttestationConveyancePreference"], no_log=False ), web_authn_policy_passwordless_authenticator_attachment=dict( - type='str', aliases=['webAuthnPolicyPasswordlessAuthenticatorAttachment'], no_log=False + type="str", aliases=["webAuthnPolicyPasswordlessAuthenticatorAttachment"], no_log=False ), web_authn_policy_passwordless_require_resident_key=dict( - type='str', aliases=['webAuthnPolicyPasswordlessRequireResidentKey'], no_log=False + type="str", aliases=["webAuthnPolicyPasswordlessRequireResidentKey"], no_log=False ), web_authn_policy_passwordless_user_verification_requirement=dict( - type='str', aliases=['webAuthnPolicyPasswordlessUserVerificationRequirement'], no_log=False + type="str", aliases=["webAuthnPolicyPasswordlessUserVerificationRequirement"], no_log=False + ), + web_authn_policy_passwordless_create_timeout=dict( + type="int", aliases=["webAuthnPolicyPasswordlessCreateTimeout"] + ), + web_authn_policy_passwordless_avoid_same_authenticator_register=dict( + type="bool", aliases=["webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister"] ), - web_authn_policy_passwordless_create_timeout=dict(type='int', aliases=['webAuthnPolicyPasswordlessCreateTimeout']), - web_authn_policy_passwordless_avoid_same_authenticator_register=dict(type='bool', aliases=['webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister']), web_authn_policy_passwordless_acceptable_aaguids=dict( - type='list', elements='str', aliases=['webAuthnPolicyPasswordlessAcceptableAaguids'], no_log=False + type="list", elements="str", aliases=["webAuthnPolicyPasswordlessAcceptableAaguids"], no_log=False ), web_authn_policy_passwordless_extra_origins=dict( - type='list', elements='str', aliases=['webAuthnPolicyPasswordlessExtraOrigins'], no_log=False + type="list", elements="str", aliases=["webAuthnPolicyPasswordlessExtraOrigins"], no_log=False ), ) argument_spec.update(meta_args) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['id', 'realm', 'enabled'], - ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 'auth_realm'}, - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [ + ["id", "realm", "enabled"], + ["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"], + ] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, + ) - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + result = dict(changed=False, msg="", diff={}, proposed={}, existing={}, end_state={}) # Obtain access token, initialize API try: @@ -991,16 +1024,14 @@ def main(): kc = KeycloakAPI(module, connection_header) - realm = module.params.get('realm') - state = module.params.get('state') + realm = module.params.get("realm") + state = module.params.get("state") # convert module parameters to realm representation parameters (if they belong in there) - params_to_ignore = list(keycloak_argument_spec().keys()) + ['state'] + params_to_ignore = list(keycloak_argument_spec().keys()) + ["state"] # Filter and map the parameters names that apply to the role - realm_params = [x for x in module.params - if x not in params_to_ignore and - module.params.get(x) is not None] + realm_params = [x for x in module.params if x not in params_to_ignore and 
module.params.get(x) is not None] # See whether the realm already exists in Keycloak before_realm = kc.get_realm_by_id(realm=realm) @@ -1019,53 +1050,52 @@ def main(): desired_realm = before_realm.copy() desired_realm.update(changeset) - result['proposed'] = sanitize_cr(changeset) + result["proposed"] = sanitize_cr(changeset) before_realm_sanitized = sanitize_cr(before_realm) - result['existing'] = before_realm_sanitized + result["existing"] = before_realm_sanitized # Cater for when it doesn't exist (an empty dict) if not before_realm: - if state == 'absent': + if state == "absent": # Do nothing and exit if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = 'Realm does not exist, doing nothing.' + result["diff"] = dict(before="", after="") + result["changed"] = False + result["end_state"] = {} + result["msg"] = "Realm does not exist, doing nothing." module.exit_json(**result) # Process a creation - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = dict(before='', after=sanitize_cr(desired_realm)) + result["diff"] = dict(before="", after=sanitize_cr(desired_realm)) if module.check_mode: module.exit_json(**result) # create it kc.create_realm(desired_realm) - after_realm = kc.get_realm_by_id(desired_realm['realm']) + after_realm = kc.get_realm_by_id(desired_realm["realm"]) - result['end_state'] = sanitize_cr(after_realm) + result["end_state"] = sanitize_cr(after_realm) - result['msg'] = f"Realm {desired_realm['realm']} has been created." + result["msg"] = f"Realm {desired_realm['realm']} has been created." module.exit_json(**result) else: - if state == 'present': + if state == "present": # Process an update # doing an update - result['changed'] = True + result["changed"] = True if module.check_mode: # We can only compare the current realm with the proposed updates we have before_norm = normalise_cr(before_realm) desired_norm = normalise_cr(desired_realm) if module._diff: - result['diff'] = dict(before=sanitize_cr(before_norm), - after=sanitize_cr(desired_norm)) - result['changed'] = (before_norm != desired_norm) + result["diff"] = dict(before=sanitize_cr(before_norm), after=sanitize_cr(desired_norm)) + result["changed"] = before_norm != desired_norm module.exit_json(**result) @@ -1075,23 +1105,22 @@ def main(): after_realm = kc.get_realm_by_id(realm=realm) if before_realm == after_realm: - result['changed'] = False + result["changed"] = False - result['end_state'] = sanitize_cr(after_realm) + result["end_state"] = sanitize_cr(after_realm) if module._diff: - result['diff'] = dict(before=before_realm_sanitized, - after=sanitize_cr(after_realm)) + result["diff"] = dict(before=before_realm_sanitized, after=sanitize_cr(after_realm)) - result['msg'] = f"Realm {desired_realm['realm']} has been updated." + result["msg"] = f"Realm {desired_realm['realm']} has been updated." module.exit_json(**result) else: # Process a deletion (because state was not 'present') - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = dict(before=before_realm_sanitized, after='') + result["diff"] = dict(before=before_realm_sanitized, after="") if module.check_mode: module.exit_json(**result) @@ -1099,13 +1128,13 @@ def main(): # delete it kc.delete_realm(realm=realm) - result['proposed'] = {} - result['end_state'] = {} + result["proposed"] = {} + result["end_state"] = {} - result['msg'] = f"Realm {before_realm['realm']} has been deleted." 
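The check-mode branch above only reports a change when the normalised representations differ. A short sketch of why that normalisation matters, assuming the same three list-valued properties that normalise_cr() sorts:

SORTED_PROPS = ("enabledEventTypes", "otpSupportedApplications", "supportedLocales")

def normalise(realmrep):
    # Sort list-valued properties whose order Keycloak does not guarantee,
    # so the comparison reflects content rather than ordering.
    realmrep = realmrep.copy()
    for prop in SORTED_PROPS:
        if prop in realmrep:
            realmrep[prop] = sorted(realmrep[prop])
    return realmrep

before = {"supportedLocales": ["de", "en"]}
desired = {"supportedLocales": ["en", "de"]}
assert normalise(before) == normalise(desired)  # no change is reported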
+ result["msg"] = f"Realm {before_realm['realm']} has been deleted." module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_realm_info.py b/plugins/modules/keycloak_realm_info.py index 0710534632c..cb67c09609d 100644 --- a/plugins/modules/keycloak_realm_info.py +++ b/plugins/modules/keycloak_realm_info.py @@ -106,27 +106,25 @@ def main(): :return: """ argument_spec = dict( - auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False), - validate_certs=dict(type='bool', default=True), - - realm=dict(default='master'), + auth_keycloak_url=dict(type="str", aliases=["url"], required=True, no_log=False), + validate_certs=dict(type="bool", default=True), + realm=dict(default="master"), ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) - result = dict(changed=False, msg='', realm_info='') + result = dict(changed=False, msg="", realm_info="") kc = KeycloakAPI(module, {}) - realm = module.params.get('realm') + realm = module.params.get("realm") realm_info = kc.get_realm_info_by_id(realm=realm) - result['realm_info'] = realm_info - result['msg'] = f'Get realm public info successful for ID {realm}' + result["realm_info"] = realm_info + result["msg"] = f"Get realm public info successful for ID {realm}" module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_realm_key.py b/plugins/modules/keycloak_realm_key.py index ec7318bc92d..4e7e2eb6f40 100644 --- a/plugins/modules/keycloak_realm_key.py +++ b/plugins/modules/keycloak_realm_key.py @@ -221,8 +221,13 @@ } """ -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, + camel, + keycloak_argument_spec, + get_token, + KeycloakError, +) from ansible.module_utils.basic import AnsibleModule from urllib.parse import urlencode from copy import deepcopy @@ -237,17 +242,17 @@ def main(): argument_spec = keycloak_argument_spec() meta_args = dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - name=dict(type='str', required=True), - force=dict(type='bool', default=False), - parent_id=dict(type='str', required=True), - provider_id=dict(type='str', default='rsa', choices=['rsa', 'rsa-enc']), + state=dict(type="str", default="present", choices=["present", "absent"]), + name=dict(type="str", required=True), + force=dict(type="bool", default=False), + parent_id=dict(type="str", required=True), + provider_id=dict(type="str", default="rsa", choices=["rsa", "rsa-enc"]), config=dict( - type='dict', + type="dict", options=dict( - active=dict(type='bool', default=True), - enabled=dict(type='bool', default=True), - priority=dict(type='int', required=True), + active=dict(type="bool", default=True), + enabled=dict(type="bool", default=True), + priority=dict(type="int", required=True), algorithm=dict( type="str", default="RS256", @@ -263,29 +268,32 @@ def main(): "RSA-OAEP-256", ], ), - private_key=dict(type='str', required=True, no_log=True), - certificate=dict(type='str', required=True) - ) - ) + private_key=dict(type="str", required=True, no_log=True), + certificate=dict(type="str", required=True), + ), + ), ) argument_spec.update(meta_args) - 
module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 'auth_realm'}, - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"]] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, + ) # Initialize the result object. Only "changed" seems to have special # meaning for Ansible. - result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + result = dict(changed=False, msg="", end_state={}, diff=dict(before={}, after={})) # This will include the current state of the realm key if it is already # present. This is only used for diff-mode. before_realm_key = {} - before_realm_key['config'] = {} + before_realm_key["config"] = {} # Obtain access token, initialize API try: @@ -298,16 +306,14 @@ def main(): params_to_ignore = list(keycloak_argument_spec().keys()) + ["state", "force", "parent_id"] # Filter and map the parameter names that apply to the realm key - component_params = [x for x in module.params - if x not in params_to_ignore and - module.params.get(x) is not None] + component_params = [x for x in module.params if x not in params_to_ignore and module.params.get(x) is not None] # We only support one component provider type in this module - provider_type = 'org.keycloak.keys.KeyProvider' + provider_type = "org.keycloak.keys.KeyProvider" # Build a proposed changeset from parameters given to this module changeset = {} - changeset['config'] = {} + changeset["config"] = {} # Generate a JSON payload for Keycloak Admin API from the module # parameters. Parameters that do not belong to the JSON payload (e.g. @@ -323,16 +329,16 @@ def main(): # by Keycloak. # for component_param in component_params: - if component_param == 'config': - for config_param in module.params.get('config'): - changeset['config'][camel(config_param)] = [] - raw_value = module.params.get('config')[config_param] + if component_param == "config": + for config_param in module.params.get("config"): + changeset["config"][camel(config_param)] = [] + raw_value = module.params.get("config")[config_param] if isinstance(raw_value, bool): value = str(raw_value).lower() else: value = str(raw_value) - changeset['config'][camel(config_param)].append(value) + changeset["config"][camel(config_param)].append(value) else: # No need for camelcase in here as these are one word parameters new_param_value = module.params.get(component_param) @@ -340,7 +346,7 @@ def main(): # As provider_type is not a module parameter we have to add it to the # changeset explicitly. - changeset['providerType'] = provider_type + changeset["providerType"] = provider_type # Make a deep copy of the changeset. This is used when determining # changes to the current state. @@ -354,16 +360,16 @@ def main(): # parameter needs to be present in the JSON payload, any changes done to any # other parameters (e.g. config.priority) will trigger update of the keys # as a side-effect.
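A hedged sketch of the payload-building loop above: the component API expects camelCase keys and every config value wrapped in a single-element list of strings, with booleans lowercased. The camel() helper here is a simplified stand-in for the one imported from module_utils:

def camel(snake_case):
    # "private_key" -> "privateKey" (stand-in for the module_utils helper)
    first, *rest = snake_case.split("_")
    return first + "".join(part.capitalize() for part in rest)

def build_component_config(config_params):
    config = {}
    for name, raw_value in config_params.items():
        value = str(raw_value).lower() if isinstance(raw_value, bool) else str(raw_value)
        config[camel(name)] = [value]
    return config

# build_component_config({"active": True, "priority": 150})
# -> {"active": ["true"], "priority": ["150"]}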
- del changeset_copy['config']['privateKey'] - del changeset_copy['config']['certificate'] + del changeset_copy["config"]["privateKey"] + del changeset_copy["config"]["certificate"] # Make it easier to refer to current module parameters - name = module.params.get('name') - force = module.params.get('force') - state = module.params.get('state') - enabled = module.params.get('enabled') - provider_id = module.params.get('provider_id') - parent_id = module.params.get('parent_id') + name = module.params.get("name") + force = module.params.get("force") + state = module.params.get("state") + enabled = module.params.get("enabled") + provider_id = module.params.get("provider_id") + parent_id = module.params.get("parent_id") # Get a list of all Keycloak components that are of keyprovider type. realm_keys = kc.get_components(urlencode(dict(type=provider_type)), parent_id) @@ -376,95 +382,95 @@ def main(): changes = "" # This tells Ansible whether the key was changed (added, removed, modified) - result['changed'] = False + result["changed"] = False # Loop through the list of components. If we encounter a component whose # name matches the value of the name parameter then assume the key is # already present. for key in realm_keys: - if key['name'] == name: - key_id = key['id'] - changeset['id'] = key_id - changeset_copy['id'] = key_id + if key["name"] == name: + key_id = key["id"] + changeset["id"] = key_id + changeset_copy["id"] = key_id # Compare top-level parameters for param, value in changeset.items(): before_realm_key[param] = key[param] - if changeset_copy[param] != key[param] and param != 'config': + if changeset_copy[param] != key[param] and param != "config": changes += f"{param}: {key[param]} -> {changeset_copy[param]}, " - result['changed'] = True + result["changed"] = True # Compare parameters under the "config" key - for p, v in changeset_copy['config'].items(): - before_realm_key['config'][p] = key['config'][p] - if changeset_copy['config'][p] != key['config'][p]: + for p, v in changeset_copy["config"].items(): + before_realm_key["config"][p] = key["config"][p] + if changeset_copy["config"][p] != key["config"][p]: changes += f"config.{p}: {key['config'][p]} -> {changeset_copy['config'][p]}, " - result['changed'] = True + result["changed"] = True # Sanitize linefeeds for the privateKey. Without this the JSON payload # will be invalid. - changeset['config']['privateKey'][0] = changeset['config']['privateKey'][0].replace('\\n', '\n') - changeset['config']['certificate'][0] = changeset['config']['certificate'][0].replace('\\n', '\n') + changeset["config"]["privateKey"][0] = changeset["config"]["privateKey"][0].replace("\\n", "\n") + changeset["config"]["certificate"][0] = changeset["config"]["certificate"][0].replace("\\n", "\n") # Check all the possible states of the resource and do what is needed to # converge current state with desired state (create, update or delete # the key). 
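The convergence that follows reduces to a four-way decision on whether the key exists and which state is requested; a compact sketch, with the kc_* callables standing in for the real KeycloakAPI methods:

def converge(key_id, state, changed, force, kc_create, kc_update, kc_delete):
    # Mirrors the branch structure below: update only when something changed
    # (or force is set); otherwise leave the key untouched.
    if key_id and state == "present":
        if changed or force:
            kc_update()
            return "updated"
        return "in sync"
    if key_id and state == "absent":
        kc_delete()
        return "deleted"
    if not key_id and state == "present":
        kc_create()
        return "created"
    return "not present"  # no key and state == "absent"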
- if key_id and state == 'present': - if result['changed']: + if key_id and state == "present": + if result["changed"]: if module._diff: - del before_realm_key['config']['privateKey'] - del before_realm_key['config']['certificate'] - result['diff'] = dict(before=before_realm_key, after=changeset_copy) + del before_realm_key["config"]["privateKey"] + del before_realm_key["config"]["certificate"] + result["diff"] = dict(before=before_realm_key, after=changeset_copy) if module.check_mode: - result['msg'] = f"Realm key {name} would be changed: {changes.strip(', ')}" + result["msg"] = f"Realm key {name} would be changed: {changes.strip(', ')}" else: kc.update_component(changeset, parent_id) - result['msg'] = f"Realm key {name} changed: {changes.strip(', ')}" - elif not result['changed'] and force: + result["msg"] = f"Realm key {name} changed: {changes.strip(', ')}" + elif not result["changed"] and force: kc.update_component(changeset, parent_id) - result['changed'] = True - result['msg'] = f"Realm key {name} was forcibly updated" + result["changed"] = True + result["msg"] = f"Realm key {name} was forcibly updated" else: - result['msg'] = f"Realm key {name} was in sync" + result["msg"] = f"Realm key {name} was in sync" - result['end_state'] = changeset_copy - elif key_id and state == 'absent': + result["end_state"] = changeset_copy + elif key_id and state == "absent": if module._diff: - del before_realm_key['config']['privateKey'] - del before_realm_key['config']['certificate'] - result['diff'] = dict(before=before_realm_key, after={}) + del before_realm_key["config"]["privateKey"] + del before_realm_key["config"]["certificate"] + result["diff"] = dict(before=before_realm_key, after={}) if module.check_mode: - result['changed'] = True - result['msg'] = f"Realm key {name} would be deleted" + result["changed"] = True + result["msg"] = f"Realm key {name} would be deleted" else: kc.delete_component(key_id, parent_id) - result['changed'] = True - result['msg'] = f"Realm key {name} deleted" + result["changed"] = True + result["msg"] = f"Realm key {name} deleted" - result['end_state'] = {} - elif not key_id and state == 'present': + result["end_state"] = {} + elif not key_id and state == "present": if module._diff: - result['diff'] = dict(before={}, after=changeset_copy) + result["diff"] = dict(before={}, after=changeset_copy) if module.check_mode: - result['changed'] = True - result['msg'] = f"Realm key {name} would be created" + result["changed"] = True + result["msg"] = f"Realm key {name} would be created" else: kc.create_component(changeset, parent_id) - result['changed'] = True - result['msg'] = f"Realm key {name} created" + result["changed"] = True + result["msg"] = f"Realm key {name} created" - result['end_state'] = changeset_copy - elif not key_id and state == 'absent': - result['changed'] = False - result['msg'] = f"Realm key {name} not present" - result['end_state'] = {} + result["end_state"] = changeset_copy + elif not key_id and state == "absent": + result["changed"] = False + result["msg"] = f"Realm key {name} not present" + result["end_state"] = {} module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_realm_keys_metadata_info.py b/plugins/modules/keycloak_realm_keys_metadata_info.py index 8840a3d7337..e5e9e670fe8 100644 --- a/plugins/modules/keycloak_realm_keys_metadata_info.py +++ b/plugins/modules/keycloak_realm_keys_metadata_info.py @@ -88,7 +88,11 @@ from ansible.module_utils.basic import AnsibleModule from 
ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( - KeycloakAPI, KeycloakError, get_token, keycloak_argument_spec) + KeycloakAPI, + KeycloakError, + get_token, + keycloak_argument_spec, +) def main(): @@ -102,9 +106,11 @@ def main(): module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 'auth_realm'}, + required_one_of=( + [["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"]] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, ) result = dict(changed=False, msg="", keys_metadata="") diff --git a/plugins/modules/keycloak_realm_rolemapping.py b/plugins/modules/keycloak_realm_rolemapping.py index 0990366890b..c0c577e20f9 100644 --- a/plugins/modules/keycloak_realm_rolemapping.py +++ b/plugins/modules/keycloak_realm_rolemapping.py @@ -224,7 +224,10 @@ """ from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( - KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError, + KeycloakAPI, + keycloak_argument_spec, + get_token, + KeycloakError, ) from ansible.module_utils.basic import AnsibleModule @@ -238,35 +241,36 @@ def main(): argument_spec = keycloak_argument_spec() roles_spec = dict( - name=dict(type='str'), - id=dict(type='str'), + name=dict(type="str"), + id=dict(type="str"), ) meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - realm=dict(default='master'), - gid=dict(type='str'), - group_name=dict(type='str'), + state=dict(default="present", choices=["present", "absent"]), + realm=dict(default="master"), + gid=dict(type="str"), + group_name=dict(type="str"), parents=dict( - type='list', elements='dict', - options=dict( - id=dict(type='str'), - name=dict(type='str') - ), + type="list", + elements="dict", + options=dict(id=dict(type="str"), name=dict(type="str")), ), - roles=dict(type='list', elements='dict', options=roles_spec), + roles=dict(type="list", elements="dict", options=roles_spec), ) argument_spec.update(meta_args) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 'auth_realm'}, - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"]] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, + ) - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + result = dict(changed=False, msg="", diff={}, proposed={}, existing={}, end_state={}) # Obtain access token, initialize API try: @@ -276,24 +280,24 @@ def main(): kc = KeycloakAPI(module, connection_header) - realm = module.params.get('realm') - state = module.params.get('state') - gid = module.params.get('gid') - group_name = module.params.get('group_name') - roles = module.params.get('roles') - parents = module.params.get('parents') + realm = module.params.get("realm") + state = 
module.params.get("state") + gid = module.params.get("gid") + group_name = module.params.get("group_name") + roles = module.params.get("roles") + parents = module.params.get("parents") # Check the parameters if gid is None and group_name is None: - module.fail_json(msg='Either the `group_name` or `gid` has to be specified.') + module.fail_json(msg="Either the `group_name` or `gid` has to be specified.") # Get the potential missing parameters if gid is None: group_rep = kc.get_group_by_name(group_name, realm=realm, parents=parents) if group_rep is not None: - gid = group_rep['id'] + gid = group_rep["id"] else: - module.fail_json(msg=f'Could not fetch group {group_name}:') + module.fail_json(msg=f"Could not fetch group {group_name}:") else: group_rep = kc.get_group_by_groupid(gid, realm=realm) @@ -301,80 +305,86 @@ def main(): module.exit_json(msg="Nothing to do (no roles specified).") else: for role_index, role in enumerate(roles, start=0): - if role['name'] is None and role['id'] is None: - module.fail_json(msg='Either the `name` or `id` has to be specified on each role.') + if role["name"] is None and role["id"] is None: + module.fail_json(msg="Either the `name` or `id` has to be specified on each role.") # Fetch missing role_id - if role['id'] is None: - role_rep = kc.get_realm_role(role['name'], realm=realm) + if role["id"] is None: + role_rep = kc.get_realm_role(role["name"], realm=realm) if role_rep is not None: - role['id'] = role_rep['id'] + role["id"] = role_rep["id"] else: module.fail_json(msg=f"Could not fetch realm role {role['name']} by name:") # Fetch missing role_name else: for realm_role in kc.get_realm_roles(realm=realm): - if realm_role['id'] == role['id']: - role['name'] = realm_role['name'] + if realm_role["id"] == role["id"]: + role["name"] = realm_role["name"] break - if role['name'] is None: + if role["name"] is None: module.fail_json(msg=f"Could not fetch realm role {role['id']} by ID") - assigned_roles_before = group_rep.get('realmRoles', []) + assigned_roles_before = group_rep.get("realmRoles", []) - result['existing'] = assigned_roles_before - result['proposed'] = list(assigned_roles_before) if assigned_roles_before else [] + result["existing"] = assigned_roles_before + result["proposed"] = list(assigned_roles_before) if assigned_roles_before else [] update_roles = [] for role_index, role in enumerate(roles, start=0): # Fetch roles to assign if state present - if state == 'present': - if any(assigned == role['name'] for assigned in assigned_roles_before): + if state == "present": + if any(assigned == role["name"] for assigned in assigned_roles_before): pass else: - update_roles.append({ - 'id': role['id'], - 'name': role['name'], - }) - result['proposed'].append(role['name']) + update_roles.append( + { + "id": role["id"], + "name": role["name"], + } + ) + result["proposed"].append(role["name"]) # Fetch roles to remove if state absent else: - if any(assigned == role['name'] for assigned in assigned_roles_before): - update_roles.append({ - 'id': role['id'], - 'name': role['name'], - }) - if role['name'] in result['proposed']: # Handle double removal - result['proposed'].remove(role['name']) + if any(assigned == role["name"] for assigned in assigned_roles_before): + update_roles.append( + { + "id": role["id"], + "name": role["name"], + } + ) + if role["name"] in result["proposed"]: # Handle double removal + result["proposed"].remove(role["name"]) if len(update_roles): - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = 
dict(before=assigned_roles_before, after=result['proposed']) + result["diff"] = dict(before=assigned_roles_before, after=result["proposed"]) if module.check_mode: module.exit_json(**result) - if state == 'present': + if state == "present": # Assign roles kc.add_group_realm_rolemapping(gid=gid, role_rep=update_roles, realm=realm) - result['msg'] = f'Realm roles {update_roles} assigned to groupId {gid}.' + result["msg"] = f"Realm roles {update_roles} assigned to groupId {gid}." else: # Remove mapping of role kc.delete_group_realm_rolemapping(gid=gid, role_rep=update_roles, realm=realm) - result['msg'] = f'Realm roles {update_roles} removed from groupId {gid}.' + result["msg"] = f"Realm roles {update_roles} removed from groupId {gid}." if gid is None: - assigned_roles_after = kc.get_group_by_name(group_name, realm=realm, parents=parents).get('realmRoles', []) + assigned_roles_after = kc.get_group_by_name(group_name, realm=realm, parents=parents).get("realmRoles", []) else: - assigned_roles_after = kc.get_group_by_groupid(gid, realm=realm).get('realmRoles', []) - result['end_state'] = assigned_roles_after + assigned_roles_after = kc.get_group_by_groupid(gid, realm=realm).get("realmRoles", []) + result["end_state"] = assigned_roles_after module.exit_json(**result) # Do nothing else: - result['changed'] = False - result['msg'] = f"Nothing to do, roles {roles} are {'mapped' if state == 'present' else 'not mapped'} with group {group_name}." + result["changed"] = False + result["msg"] = ( + f"Nothing to do, roles {roles} are {'mapped' if state == 'present' else 'not mapped'} with group {group_name}." + ) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_role.py b/plugins/modules/keycloak_role.py index 143f7661007..ded6e080b8f 100644 --- a/plugins/modules/keycloak_role.py +++ b/plugins/modules/keycloak_role.py @@ -226,8 +226,14 @@ } """ -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError, is_struct_included +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, + camel, + keycloak_argument_spec, + get_token, + KeycloakError, + is_struct_included, +) from ansible.module_utils.basic import AnsibleModule import copy @@ -241,32 +247,35 @@ def main(): argument_spec = keycloak_argument_spec() composites_spec = dict( - name=dict(type='str', required=True), - client_id=dict(type='str', aliases=['clientId']), - state=dict(type='str', default='present', choices=['present', 'absent']) + name=dict(type="str", required=True), + client_id=dict(type="str", aliases=["clientId"]), + state=dict(type="str", default="present", choices=["present", "absent"]), ) meta_args = dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - name=dict(type='str', required=True), - description=dict(type='str'), - realm=dict(type='str', default='master'), - client_id=dict(type='str'), - attributes=dict(type='dict'), - composites=dict(type='list', default=[], options=composites_spec, elements='dict'), - composite=dict(type='bool', default=False), + state=dict(type="str", default="present", choices=["present", "absent"]), + name=dict(type="str", required=True), + description=dict(type="str"), + realm=dict(type="str", default="master"), + client_id=dict(type="str"), + attributes=dict(type="dict"), + composites=dict(type="list", default=[], 
options=composites_spec, elements="dict"), + composite=dict(type="bool", default=False), ) argument_spec.update(meta_args) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 'auth_realm'}, - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"]] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, + ) - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + result = dict(changed=False, msg="", diff={}, proposed={}, existing={}, end_state={}) # Obtain access token, initialize API try: @@ -276,22 +285,25 @@ def main(): kc = KeycloakAPI(module, connection_header) - realm = module.params.get('realm') - clientid = module.params.get('client_id') - name = module.params.get('name') - state = module.params.get('state') + realm = module.params.get("realm") + clientid = module.params.get("client_id") + name = module.params.get("name") + state = module.params.get("state") # attributes in Keycloak have their values returned as lists # using the API. attributes is a dict, so we'll transparently convert # the values to lists. - if module.params.get('attributes') is not None: - for key, val in module.params['attributes'].items(): - module.params['attributes'][key] = [val] if not isinstance(val, list) else val + if module.params.get("attributes") is not None: + for key, val in module.params["attributes"].items(): + module.params["attributes"][key] = [val] if not isinstance(val, list) else val # Filter and map the parameters names that apply to the role - role_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'client_id'] and - module.params.get(x) is not None] + role_params = [ + x + for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ["state", "realm", "client_id"] + and module.params.get(x) is not None + ] # See if it already exists in Keycloak if clientid is None: @@ -315,28 +327,28 @@ def main(): desired_role = copy.deepcopy(before_role) desired_role.update(changeset) - result['proposed'] = changeset - result['existing'] = before_role + result["proposed"] = changeset + result["existing"] = before_role # Cater for when it doesn't exist (an empty dict) if not before_role: - if state == 'absent': + if state == "absent": # Do nothing and exit if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = 'Role does not exist, doing nothing.' + result["diff"] = dict(before="", after="") + result["changed"] = False + result["end_state"] = {} + result["msg"] = "Role does not exist, doing nothing." 
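
The attribute handling reformatted in the hunk above deserves a note: the module's own comment says Keycloak returns every attribute value as a list, so scalar values from the playbook are wrapped before comparison. A minimal standalone sketch of that conversion (the helper name is illustrative, not part of the module):

def normalize_attributes(attributes):
    # Keycloak returns attribute values as lists, so wrap scalars before diffing.
    if attributes is None:
        return None
    return {key: val if isinstance(val, list) else [val] for key, val in attributes.items()}

print(normalize_attributes({"department": "it", "teams": ["a", "b"]}))  # {'department': ['it'], 'teams': ['a', 'b']}
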
module.exit_json(**result) # Process a creation - result['changed'] = True + result["changed"] = True if name is None: - module.fail_json(msg='name must be specified when creating a new role') + module.fail_json(msg="name must be specified when creating a new role") if module._diff: - result['diff'] = dict(before='', after=desired_role) + result["diff"] = dict(before="", after=desired_role) if module.check_mode: module.exit_json(**result) @@ -349,45 +361,49 @@ def main(): kc.create_client_role(desired_role, clientid, realm) after_role = kc.get_client_role(name, clientid, realm) - if after_role['composite']: - after_role['composites'] = kc.get_role_composites(rolerep=after_role, clientid=clientid, realm=realm) + if after_role["composite"]: + after_role["composites"] = kc.get_role_composites(rolerep=after_role, clientid=clientid, realm=realm) - result['end_state'] = after_role + result["end_state"] = after_role - result['msg'] = f'Role {name} has been created' + result["msg"] = f"Role {name} has been created" module.exit_json(**result) else: - if state == 'present': - compare_exclude = ['clientId'] - if 'composites' in desired_role and isinstance(desired_role['composites'], list) and len(desired_role['composites']) > 0: + if state == "present": + compare_exclude = ["clientId"] + if ( + "composites" in desired_role + and isinstance(desired_role["composites"], list) + and len(desired_role["composites"]) > 0 + ): composites = kc.get_role_composites(rolerep=before_role, clientid=clientid, realm=realm) - before_role['composites'] = [] + before_role["composites"] = [] for composite in composites: before_composite = {} - if composite['clientRole']: - composite_client = kc.get_client_by_id(id=composite['containerId'], realm=realm) - before_composite['client_id'] = composite_client['clientId'] + if composite["clientRole"]: + composite_client = kc.get_client_by_id(id=composite["containerId"], realm=realm) + before_composite["client_id"] = composite_client["clientId"] else: - before_composite['client_id'] = None - before_composite['name'] = composite['name'] - before_composite['state'] = 'present' - before_role['composites'].append(before_composite) + before_composite["client_id"] = None + before_composite["name"] = composite["name"] + before_composite["state"] = "present" + before_role["composites"].append(before_composite) else: - compare_exclude.append('composites') + compare_exclude.append("composites") # Process an update # no changes if is_struct_included(desired_role, before_role, exclude=compare_exclude): - result['changed'] = False - result['end_state'] = desired_role - result['msg'] = f"No changes required to role {name}." + result["changed"] = False + result["end_state"] = desired_role + result["msg"] = f"No changes required to role {name}." 
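
The no-change exit above hinges on is_struct_included from the collection's Keycloak module_utils: the update is skipped when the desired representation is already contained in the existing one, keys listed in compare_exclude aside. A simplified approximation of that containment test (the real helper also descends into lists of dicts):

def struct_included(desired, current, exclude=()):
    # Desired is "included" when current already matches every key desired sets.
    for key, value in desired.items():
        if key in exclude:
            continue
        if isinstance(value, dict) and isinstance(current.get(key), dict):
            if not struct_included(value, current[key]):
                return False
        elif current.get(key) != value:
            return False
    return True

print(struct_included({"description": "admins"}, {"description": "admins", "composite": False}))  # True
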
module.exit_json(**result) # doing an update - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = dict(before=before_role, after=desired_role) + result["diff"] = dict(before=before_role, after=desired_role) if module.check_mode: module.exit_json(**result) @@ -399,20 +415,20 @@ def main(): else: kc.update_client_role(desired_role, clientid, realm) after_role = kc.get_client_role(name, clientid, realm) - if after_role['composite']: - after_role['composites'] = kc.get_role_composites(rolerep=after_role, clientid=clientid, realm=realm) + if after_role["composite"]: + after_role["composites"] = kc.get_role_composites(rolerep=after_role, clientid=clientid, realm=realm) - result['end_state'] = after_role + result["end_state"] = after_role - result['msg'] = f"Role {name} has been updated" + result["msg"] = f"Role {name} has been updated" module.exit_json(**result) else: # Process a deletion (because state was not 'present') - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = dict(before=before_role, after='') + result["diff"] = dict(before=before_role, after="") if module.check_mode: module.exit_json(**result) @@ -423,12 +439,12 @@ def main(): else: kc.delete_client_role(name, clientid, realm) - result['end_state'] = {} + result["end_state"] = {} - result['msg'] = f"Role {name} has been deleted" + result["msg"] = f"Role {name} has been deleted" module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_user.py b/plugins/modules/keycloak_user.py index 8181e966d99..83825ac05ff 100644 --- a/plugins/modules/keycloak_user.py +++ b/plugins/modules/keycloak_user.py @@ -349,67 +349,77 @@ version_added: 12.0.0 """ -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError, is_struct_included +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, + camel, + keycloak_argument_spec, + get_token, + KeycloakError, + is_struct_included, +) from ansible.module_utils.basic import AnsibleModule import copy def main(): argument_spec = keycloak_argument_spec() - argument_spec['auth_username']['aliases'] = [] + argument_spec["auth_username"]["aliases"] = [] credential_spec = dict( - type=dict(type='str', required=True), - value=dict(type='str', required=True, no_log=True), - temporary=dict(type='bool', default=False) + type=dict(type="str", required=True), + value=dict(type="str", required=True, no_log=True), + temporary=dict(type="bool", default=False), ) client_consents_spec = dict( - client_id=dict(type='str', required=True, aliases=['clientId']), - roles=dict(type='list', elements='str', required=True) + client_id=dict(type="str", required=True, aliases=["clientId"]), + roles=dict(type="list", elements="str", required=True), ) attributes_spec = dict( - name=dict(type='str'), - values=dict(type='list', elements='str'), - state=dict(type='str', choices=['present', 'absent'], default='present') - ) - groups_spec = dict( - name=dict(type='str'), - state=dict(type='str', choices=['present', 'absent'], default='present') + name=dict(type="str"), + values=dict(type="list", elements="str"), + state=dict(type="str", choices=["present", "absent"], default="present"), ) + groups_spec = dict(name=dict(type="str"), state=dict(type="str", choices=["present", "absent"], default="present")) meta_args = dict( - 
realm=dict(type='str', default='master'), - self=dict(type='str'), - id=dict(type='str'), - username=dict(type='str', required=True), - first_name=dict(type='str', aliases=['firstName']), - last_name=dict(type='str', aliases=['lastName']), - email=dict(type='str'), - enabled=dict(type='bool'), - email_verified=dict(type='bool', default=False, aliases=['emailVerified']), - federation_link=dict(type='str', aliases=['federationLink']), - service_account_client_id=dict(type='str', aliases=['serviceAccountClientId']), - attributes=dict(type='list', elements='dict', options=attributes_spec), - access=dict(type='dict'), - groups=dict(type='list', default=[], elements='dict', options=groups_spec), - disableable_credential_types=dict(type='list', default=[], aliases=['disableableCredentialTypes'], elements='str'), - required_actions=dict(type='list', default=[], aliases=['requiredActions'], elements='str'), - credentials=dict(type='list', default=[], elements='dict', options=credential_spec), - federated_identities=dict(type='list', default=[], aliases=['federatedIdentities'], elements='str'), - client_consents=dict(type='list', default=[], aliases=['clientConsents'], elements='dict', options=client_consents_spec), - origin=dict(type='str'), - state=dict(choices=["absent", "present"], default='present'), - force=dict(type='bool', default=False), + realm=dict(type="str", default="master"), + self=dict(type="str"), + id=dict(type="str"), + username=dict(type="str", required=True), + first_name=dict(type="str", aliases=["firstName"]), + last_name=dict(type="str", aliases=["lastName"]), + email=dict(type="str"), + enabled=dict(type="bool"), + email_verified=dict(type="bool", default=False, aliases=["emailVerified"]), + federation_link=dict(type="str", aliases=["federationLink"]), + service_account_client_id=dict(type="str", aliases=["serviceAccountClientId"]), + attributes=dict(type="list", elements="dict", options=attributes_spec), + access=dict(type="dict"), + groups=dict(type="list", default=[], elements="dict", options=groups_spec), + disableable_credential_types=dict( + type="list", default=[], aliases=["disableableCredentialTypes"], elements="str" + ), + required_actions=dict(type="list", default=[], aliases=["requiredActions"], elements="str"), + credentials=dict(type="list", default=[], elements="dict", options=credential_spec), + federated_identities=dict(type="list", default=[], aliases=["federatedIdentities"], elements="str"), + client_consents=dict( + type="list", default=[], aliases=["clientConsents"], elements="dict", options=client_consents_spec + ), + origin=dict(type="str"), + state=dict(choices=["absent", "present"], default="present"), + force=dict(type="bool", default=False), ) argument_spec.update(meta_args) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 'auth_realm'}, - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"]] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, + ) - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + result = dict(changed=False, msg="", diff={}, proposed={}, 
existing={}, end_state={}) # Obtain access token, initialize API try: @@ -419,16 +429,19 @@ def main(): kc = KeycloakAPI(module, connection_header) - realm = module.params.get('realm') - state = module.params.get('state') - force = module.params.get('force') - username = module.params.get('username') - groups = module.params.get('groups') + realm = module.params.get("realm") + state = module.params.get("state") + force = module.params.get("force") + username = module.params.get("username") + groups = module.params.get("groups") # Filter and map the parameters names that apply to the user - user_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'force', 'groups'] and - module.params.get(x) is not None] + user_params = [ + x + for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ["state", "realm", "force", "groups"] + and module.params.get(x) is not None + ] before_user = kc.get_user_by_username(username=username, realm=realm) @@ -439,16 +452,16 @@ def main(): for param in user_params: new_param_value = module.params.get(param) - if param == 'attributes' and param in before_user: - old_value = kc.convert_keycloak_user_attributes_dict_to_module_list(attributes=before_user['attributes']) + if param == "attributes" and param in before_user: + old_value = kc.convert_keycloak_user_attributes_dict_to_module_list(attributes=before_user["attributes"]) else: old_value = before_user[param] if param in before_user else None if new_param_value != old_value: - if old_value is not None and param == 'attributes': + if old_value is not None and param == "attributes": for old_attribute in old_value: old_attribute_found = False for new_attribute in new_param_value: - if new_attribute['name'] == old_attribute['name']: + if new_attribute["name"] == old_attribute["name"]: old_attribute_found = True if not old_attribute_found: new_param_value.append(copy.deepcopy(old_attribute)) @@ -460,25 +473,25 @@ def main(): desired_user = copy.deepcopy(before_user) desired_user.update(changeset) - result['proposed'] = changeset - result['existing'] = before_user + result["proposed"] = changeset + result["existing"] = before_user # Default values for user_created - result['user_created'] = False + result["user_created"] = False changed = False # Cater for when it doesn't exist (an empty dict) - if state == 'absent': + if state == "absent": if not before_user: # Do nothing and exit if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = 'Role does not exist, doing nothing.' + result["diff"] = dict(before="", after="") + result["changed"] = False + result["end_state"] = {} + result["msg"] = "User does not exist, doing nothing." module.exit_json(**result) else: # Delete user - kc.delete_user(user_id=before_user['id'], realm=realm) + kc.delete_user(user_id=before_user["id"], realm=realm) result["msg"] = f"User {before_user['username']} deleted" changed = True @@ -493,24 +506,24 @@ def main(): changed = True if username is None: - module.fail_json(msg='username must be specified when creating a new user') + module.fail_json(msg="username must be specified when creating a new user") if module._diff: - result['diff'] = dict(before='', after=desired_user) + result["diff"] = dict(before="", after=desired_user) if module.check_mode: # Set user_created flag explicit for check_mode # create_user could have failed, but we don't know for sure until we try to create the user. - result['user_created'] = True + result["user_created"] = True module.exit_json(**result)
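
The check-mode branch above shows the contract these modules follow throughout: build the would-be result, mark it changed, and return before any write reaches the Keycloak API. A condensed restatement of the pattern (not a helper that exists in the collection):

def apply_change(module, result, do_write):
    # Report the pending change; under --check, exit before any API write happens.
    result["changed"] = True
    if module.check_mode:
        module.exit_json(**result)
    do_write()  # only reached outside check mode
    module.exit_json(**result)
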
# Create the user after_user = kc.create_user(userrep=desired_user, realm=realm) result["msg"] = f"User {desired_user['username']} created" # Add user ID to new representation - desired_user['id'] = after_user["id"] + desired_user["id"] = after_user["id"] # Set user_created flag - result['user_created'] = True + result["user_created"] = True else: excludes = [ "access", @@ -522,12 +535,15 @@ def main(): "groups", "clientConsents", "federatedIdentities", - "requiredActions"] + "requiredActions", + ] # Add user ID to new representation - desired_user['id'] = before_user["id"] + desired_user["id"] = before_user["id"] # Compare users - if not (is_struct_included(desired_user, before_user, excludes)): # If the new user does not introduce a change to the existing user + if not ( + is_struct_included(desired_user, before_user, excludes) + ): # If the new user introduces a change to the existing user # Update the user after_user = kc.update_user(userrep=desired_user, realm=realm) changed = True @@ -543,9 +559,9 @@ def main(): else: result["msg"] = f"No changes made for user {desired_user['username']}" - result['changed'] = changed + result["changed"] = changed module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_user_execute_actions_email.py b/plugins/modules/keycloak_user_execute_actions_email.py index c139c442c91..7e64a10dc27 100644 --- a/plugins/modules/keycloak_user_execute_actions_email.py +++ b/plugins/modules/keycloak_user_execute_actions_email.py @@ -127,30 +127,34 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( - keycloak_argument_spec, get_token, KeycloakError, KeycloakAPI) + keycloak_argument_spec, + get_token, + KeycloakError, + KeycloakAPI, +) def main(): argument_spec = keycloak_argument_spec() # Avoid alias collision as in keycloak_user: clear auth_username aliases locally - argument_spec['auth_username']['aliases'] = [] + argument_spec["auth_username"]["aliases"] = [] meta_args = dict( - realm=dict(type='str', default='master'), - id=dict(type='str'), - username=dict(type='str'), - actions=dict(type='list', elements='str', default=['UPDATE_PASSWORD']), - client_id=dict(type='str', aliases=['clientId']), - redirect_uri=dict(type='str', aliases=['redirectUri']), - lifespan=dict(type='int'), + realm=dict(type="str", default="master"), + id=dict(type="str"), + username=dict(type="str"), + actions=dict(type="list", elements="str", default=["UPDATE_PASSWORD"]), + client_id=dict(type="str", aliases=["clientId"]), + redirect_uri=dict(type="str", aliases=["redirectUri"]), + lifespan=dict(type="int"), ) argument_spec.update(meta_args) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - required_one_of=[['id', 'username']], - mutually_exclusive=[['id', 'username']], + required_one_of=[["id", "username"]], + mutually_exclusive=[["id", "username"]], ) try: @@ -160,23 +164,25 @@ def main(): kc = KeycloakAPI(module, connection_header) - realm = module.params.get('realm') - user_id = module.params.get('id') - username = module.params.get('username') - actions = module.params.get('actions') - client_id = module.params.get('client_id') - redirect_uri = module.params.get('redirect_uri') - lifespan = module.params.get('lifespan') + realm = module.params.get("realm") + user_id = 
module.params.get("id") + username = module.params.get("username") + actions = module.params.get("actions") + client_id = module.params.get("client_id") + redirect_uri = module.params.get("redirect_uri") + lifespan = module.params.get("lifespan") # Resolve user ID if only username is provided if user_id is None: user_obj = kc.get_user_by_username(username=username, realm=realm) if user_obj is None: module.fail_json(msg=f"User '{username}' not found in realm {realm}") - user_id = user_obj['id'] + user_id = user_obj["id"] if module.check_mode: - module.exit_json(changed=True, msg=f"Would send execute-actions email to user {user_id}", user_id=user_id, actions=actions) + module.exit_json( + changed=True, msg=f"Would send execute-actions email to user {user_id}", user_id=user_id, actions=actions + ) try: kc.send_execute_actions_email( @@ -185,13 +191,15 @@ def main(): client_id=client_id, data=actions, redirect_uri=redirect_uri, - lifespan=lifespan + lifespan=lifespan, ) except Exception as e: module.fail_json(msg=str(e)) - module.exit_json(changed=True, msg=f"Execute-actions email sent to user {user_id}", user_id=user_id, actions=actions) + module.exit_json( + changed=True, msg=f"Execute-actions email sent to user {user_id}", user_id=user_id, actions=actions + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py index 9ebcd22568a..6c282d0fbbe 100644 --- a/plugins/modules/keycloak_user_federation.py +++ b/plugins/modules/keycloak_user_federation.py @@ -718,34 +718,39 @@ } """ -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, + camel, + keycloak_argument_spec, + get_token, + KeycloakError, +) from ansible.module_utils.basic import AnsibleModule from urllib.parse import urlencode from copy import deepcopy def normalize_kc_comp(comp): - if 'config' in comp: + if "config" in comp: # kc completely removes the parameter `krbPrincipalAttribute` if it is set to `''`; the unset kc parameter is equivalent to `''`; # to make change detection and diff more accurate we set it again in the kc responses - if 'krbPrincipalAttribute' not in comp['config']: - comp['config']['krbPrincipalAttribute'] = [''] + if "krbPrincipalAttribute" not in comp["config"]: + comp["config"]["krbPrincipalAttribute"] = [""] # kc stores a timestamp of the last sync in `lastSync` to time the periodic sync, it is removed to minimize diff/changes - comp['config'].pop('lastSync', None) + comp["config"].pop("lastSync", None) def sanitize(comp): compcopy = deepcopy(comp) - if 'config' in compcopy: - compcopy['config'] = {k: v[0] for k, v in compcopy['config'].items()} - if 'bindCredential' in compcopy['config']: - compcopy['config']['bindCredential'] = '**********' - if 'mappers' in compcopy: - for mapper in compcopy['mappers']: - if 'config' in mapper: - mapper['config'] = {k: v[0] for k, v in mapper['config'].items()} + if "config" in compcopy: + compcopy["config"] = {k: v[0] for k, v in compcopy["config"].items()} + if "bindCredential" in compcopy["config"]: + compcopy["config"]["bindCredential"] = "**********" + if "mappers" in compcopy: + for mapper in compcopy["mappers"]: + if "config" in mapper: + mapper["config"] = {k: v[0] for k, v in mapper["config"].items()} return compcopy @@ 
-758,93 +763,102 @@ def main(): argument_spec = keycloak_argument_spec() config_spec = dict( - allowKerberosAuthentication=dict(type='bool', default=False), - allowPasswordAuthentication=dict(type='bool'), - authType=dict(type='str', choices=['none', 'simple'], default='none'), - batchSizeForSync=dict(type='int', default=1000), - bindCredential=dict(type='str', no_log=True), - bindDn=dict(type='str'), - cachePolicy=dict(type='str', choices=['DEFAULT', 'EVICT_DAILY', 'EVICT_WEEKLY', 'MAX_LIFESPAN', 'NO_CACHE'], default='DEFAULT'), - changedSyncPeriod=dict(type='int', default=-1), - connectionPooling=dict(type='bool', default=True), - connectionPoolingAuthentication=dict(type='str', choices=['none', 'simple', 'DIGEST-MD5']), - connectionPoolingDebug=dict(type='str'), - connectionPoolingInitSize=dict(type='int'), - connectionPoolingMaxSize=dict(type='int'), - connectionPoolingPrefSize=dict(type='int'), - connectionPoolingProtocol=dict(type='str'), - connectionPoolingTimeout=dict(type='int'), - connectionTimeout=dict(type='int'), - connectionUrl=dict(type='str'), - customUserSearchFilter=dict(type='str'), - debug=dict(type='bool'), - editMode=dict(type='str', choices=['READ_ONLY', 'WRITABLE', 'UNSYNCED']), - enabled=dict(type='bool', default=True), - evictionDay=dict(type='str'), - evictionHour=dict(type='str'), - evictionMinute=dict(type='str'), - fullSyncPeriod=dict(type='int', default=-1), - importEnabled=dict(type='bool', default=True), - kerberosRealm=dict(type='str'), - keyTab=dict(type='str', no_log=False), - maxLifespan=dict(type='int'), - pagination=dict(type='bool', default=True), - priority=dict(type='int', default=0), - rdnLDAPAttribute=dict(type='str'), - readTimeout=dict(type='int'), - referral=dict(type='str', choices=['ignore', 'follow']), - searchScope=dict(type='str', choices=['1', '2'], default='1'), - serverPrincipal=dict(type='str'), - krbPrincipalAttribute=dict(type='str'), - startTls=dict(type='bool', default=False), - syncRegistrations=dict(type='bool', default=False), - trustEmail=dict(type='bool', default=False), - updateProfileFirstLogin=dict(type='bool'), - useKerberosForPasswordAuthentication=dict(type='bool', default=False), - usePasswordModifyExtendedOp=dict(type='bool', default=False, no_log=False), - useTruststoreSpi=dict(type='str', choices=['always', 'ldapsOnly', 'never'], default='ldapsOnly'), - userObjectClasses=dict(type='str'), - usernameLDAPAttribute=dict(type='str'), - usersDn=dict(type='str'), - uuidLDAPAttribute=dict(type='str'), - validatePasswordPolicy=dict(type='bool', default=False), - vendor=dict(type='str'), + allowKerberosAuthentication=dict(type="bool", default=False), + allowPasswordAuthentication=dict(type="bool"), + authType=dict(type="str", choices=["none", "simple"], default="none"), + batchSizeForSync=dict(type="int", default=1000), + bindCredential=dict(type="str", no_log=True), + bindDn=dict(type="str"), + cachePolicy=dict( + type="str", + choices=["DEFAULT", "EVICT_DAILY", "EVICT_WEEKLY", "MAX_LIFESPAN", "NO_CACHE"], + default="DEFAULT", + ), + changedSyncPeriod=dict(type="int", default=-1), + connectionPooling=dict(type="bool", default=True), + connectionPoolingAuthentication=dict(type="str", choices=["none", "simple", "DIGEST-MD5"]), + connectionPoolingDebug=dict(type="str"), + connectionPoolingInitSize=dict(type="int"), + connectionPoolingMaxSize=dict(type="int"), + connectionPoolingPrefSize=dict(type="int"), + connectionPoolingProtocol=dict(type="str"), + connectionPoolingTimeout=dict(type="int"), + 
connectionTimeout=dict(type="int"), + connectionUrl=dict(type="str"), + customUserSearchFilter=dict(type="str"), + debug=dict(type="bool"), + editMode=dict(type="str", choices=["READ_ONLY", "WRITABLE", "UNSYNCED"]), + enabled=dict(type="bool", default=True), + evictionDay=dict(type="str"), + evictionHour=dict(type="str"), + evictionMinute=dict(type="str"), + fullSyncPeriod=dict(type="int", default=-1), + importEnabled=dict(type="bool", default=True), + kerberosRealm=dict(type="str"), + keyTab=dict(type="str", no_log=False), + maxLifespan=dict(type="int"), + pagination=dict(type="bool", default=True), + priority=dict(type="int", default=0), + rdnLDAPAttribute=dict(type="str"), + readTimeout=dict(type="int"), + referral=dict(type="str", choices=["ignore", "follow"]), + searchScope=dict(type="str", choices=["1", "2"], default="1"), + serverPrincipal=dict(type="str"), + krbPrincipalAttribute=dict(type="str"), + startTls=dict(type="bool", default=False), + syncRegistrations=dict(type="bool", default=False), + trustEmail=dict(type="bool", default=False), + updateProfileFirstLogin=dict(type="bool"), + useKerberosForPasswordAuthentication=dict(type="bool", default=False), + usePasswordModifyExtendedOp=dict(type="bool", default=False, no_log=False), + useTruststoreSpi=dict(type="str", choices=["always", "ldapsOnly", "never"], default="ldapsOnly"), + userObjectClasses=dict(type="str"), + usernameLDAPAttribute=dict(type="str"), + usersDn=dict(type="str"), + uuidLDAPAttribute=dict(type="str"), + validatePasswordPolicy=dict(type="bool", default=False), + vendor=dict(type="str"), ) mapper_spec = dict( - id=dict(type='str'), - name=dict(type='str'), - parentId=dict(type='str'), - providerId=dict(type='str'), - providerType=dict(type='str', default='org.keycloak.storage.ldap.mappers.LDAPStorageMapper'), - config=dict(type='dict'), + id=dict(type="str"), + name=dict(type="str"), + parentId=dict(type="str"), + providerId=dict(type="str"), + providerType=dict(type="str", default="org.keycloak.storage.ldap.mappers.LDAPStorageMapper"), + config=dict(type="dict"), ) meta_args = dict( - config=dict(type='dict', options=config_spec), - state=dict(type='str', default='present', choices=['present', 'absent']), - realm=dict(type='str', default='master'), - id=dict(type='str'), - name=dict(type='str'), - provider_id=dict(type='str', aliases=['providerId']), - provider_type=dict(type='str', aliases=['providerType'], default='org.keycloak.storage.UserStorageProvider'), - parent_id=dict(type='str', aliases=['parentId']), - remove_unspecified_mappers=dict(type='bool', default=True), - bind_credential_update_mode=dict(type='str', default='always', choices=['always', 'only_indirect']), - mappers=dict(type='list', elements='dict', options=mapper_spec), + config=dict(type="dict", options=config_spec), + state=dict(type="str", default="present", choices=["present", "absent"]), + realm=dict(type="str", default="master"), + id=dict(type="str"), + name=dict(type="str"), + provider_id=dict(type="str", aliases=["providerId"]), + provider_type=dict(type="str", aliases=["providerType"], default="org.keycloak.storage.UserStorageProvider"), + parent_id=dict(type="str", aliases=["parentId"]), + remove_unspecified_mappers=dict(type="bool", default=True), + bind_credential_update_mode=dict(type="str", default="always", choices=["always", "only_indirect"]), + mappers=dict(type="list", elements="dict", options=mapper_spec), ) argument_spec.update(meta_args) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - 
required_one_of=([['id', 'name'], - ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 'auth_realm'}, - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [ + ["id", "name"], + ["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"], + ] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, + ) - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + result = dict(changed=False, msg="", diff={}, proposed={}, existing={}, end_state={}) # Obtain access token, initialize API try: @@ -854,44 +868,48 @@ def main(): kc = KeycloakAPI(module, connection_header) - realm = module.params.get('realm') - state = module.params.get('state') - config = module.params.get('config') - mappers = module.params.get('mappers') - cid = module.params.get('id') - name = module.params.get('name') + realm = module.params.get("realm") + state = module.params.get("state") + config = module.params.get("config") + mappers = module.params.get("mappers") + cid = module.params.get("id") + name = module.params.get("name") # Keycloak API expects config parameters to be arrays containing a single string element if config is not None: - module.params['config'] = { - k: [str(v).lower() if not isinstance(v, str) else v] - for k, v in config.items() - if config[k] is not None + module.params["config"] = { + k: [str(v).lower() if not isinstance(v, str) else v] for k, v in config.items() if config[k] is not None } if mappers is not None: for mapper in mappers: - if mapper.get('config') is not None: - mapper['config'] = { + if mapper.get("config") is not None: + mapper["config"] = { k: [str(v).lower() if not isinstance(v, str) else v] - for k, v in mapper['config'].items() - if mapper['config'][k] is not None + for k, v in mapper["config"].items() + if mapper["config"][k] is not None } # Filter and map the parameters names that apply - comp_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) - + ['state', 'realm', 'mappers', 'remove_unspecified_mappers', 'bind_credential_update_mode'] - and module.params.get(x) is not None] + comp_params = [ + x + for x in module.params + if x + not in list(keycloak_argument_spec().keys()) + + ["state", "realm", "mappers", "remove_unspecified_mappers", "bind_credential_update_mode"] + and module.params.get(x) is not None + ] # See if it already exists in Keycloak if cid is None: - found = kc.get_components(urlencode(dict(type='org.keycloak.storage.UserStorageProvider', name=name)), realm) + found = kc.get_components(urlencode(dict(type="org.keycloak.storage.UserStorageProvider", name=name)), realm) if len(found) > 1: - module.fail_json(msg=f'No ID given and found multiple user federations with name `{name}`. Cannot continue.') + module.fail_json( + msg=f"No ID given and found multiple user federations with name `{name}`. Cannot continue." 
+ ) before_comp = next(iter(found), None) if before_comp is not None: - cid = before_comp['id'] + cid = before_comp["id"] else: before_comp = kc.get_component(cid, realm) @@ -900,7 +918,9 @@ def main(): # if user federation exists, get associated mappers if cid is not None and before_comp: - before_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name') or '') + before_comp["mappers"] = sorted( + kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get("name") or "" + ) normalize_kc_comp(before_comp) @@ -910,27 +930,38 @@ def main(): for param in comp_params: new_param_value = module.params.get(param) old_value = before_comp[camel(param)] if camel(param) in before_comp else None - if param == 'mappers': + if param == "mappers": new_param_value = [{k: v for k, v in x.items() if v is not None} for x in new_param_value] if new_param_value != old_value: changeset[camel(param)] = new_param_value # special handling of mappers list to allow change detection - if module.params.get('mappers') is not None: - if module.params['provider_id'] in ['kerberos', 'sssd']: + if module.params.get("mappers") is not None: + if module.params["provider_id"] in ["kerberos", "sssd"]: module.fail_json(msg=f"Cannot configure mappers for {module.params['provider_id']} provider.") - for change in module.params['mappers']: + for change in module.params["mappers"]: change = {k: v for k, v in change.items() if v is not None} - if change.get('id') is None and change.get('name') is None: - module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.') + if change.get("id") is None and change.get("name") is None: + module.fail_json(msg="Either `name` or `id` has to be specified on each mapper.") if cid is None: old_mapper = {} - elif change.get('id') is not None: - old_mapper = next((before_mapper for before_mapper in before_comp.get('mappers', []) if before_mapper["id"] == change['id']), None) + elif change.get("id") is not None: + old_mapper = next( + ( + before_mapper + for before_mapper in before_comp.get("mappers", []) + if before_mapper["id"] == change["id"] + ), + None, + ) if old_mapper is None: old_mapper = {} else: - found = [before_mapper for before_mapper in before_comp.get('mappers', []) if before_mapper['name'] == change['name']] + found = [ + before_mapper + for before_mapper in before_comp.get("mappers", []) + if before_mapper["name"] == change["name"] + ] if len(found) > 1: module.fail_json(msg=f"Found multiple mappers with name `{change['name']}`. 
Cannot continue.") if len(found) == 1: @@ -940,53 +971,57 @@ def main(): new_mapper = old_mapper.copy() new_mapper.update(change) # changeset contains all desired mappers: those existing, to update or to create - if changeset.get('mappers') is None: - changeset['mappers'] = list() - changeset['mappers'].append(new_mapper) - changeset['mappers'] = sorted(changeset['mappers'], key=lambda x: x.get('name') or '') + if changeset.get("mappers") is None: + changeset["mappers"] = list() + changeset["mappers"].append(new_mapper) + changeset["mappers"] = sorted(changeset["mappers"], key=lambda x: x.get("name") or "") # to keep unspecified existing mappers we add them to the desired mappers list, unless they're already present - if not module.params['remove_unspecified_mappers'] and 'mappers' in before_comp: - changeset_mapper_ids = [mapper['id'] for mapper in changeset['mappers'] if 'id' in mapper] - changeset['mappers'].extend([mapper for mapper in before_comp['mappers'] if mapper['id'] not in changeset_mapper_ids]) + if not module.params["remove_unspecified_mappers"] and "mappers" in before_comp: + changeset_mapper_ids = [mapper["id"] for mapper in changeset["mappers"] if "id" in mapper] + changeset["mappers"].extend( + [mapper for mapper in before_comp["mappers"] if mapper["id"] not in changeset_mapper_ids] + ) # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) desired_comp = before_comp.copy() desired_comp.update(changeset) - result['proposed'] = sanitize(changeset) - result['existing'] = sanitize(before_comp) + result["proposed"] = sanitize(changeset) + result["existing"] = sanitize(before_comp) # Cater for when it doesn't exist (an empty dict) if not before_comp: - if state == 'absent': + if state == "absent": # Do nothing and exit if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = 'User federation does not exist; doing nothing.' + result["diff"] = dict(before="", after="") + result["changed"] = False + result["end_state"] = {} + result["msg"] = "User federation does not exist; doing nothing." module.exit_json(**result) # Process a creation - result['changed'] = True + result["changed"] = True if module.check_mode: if module._diff: - result['diff'] = dict(before='', after=sanitize(desired_comp)) + result["diff"] = dict(before="", after=sanitize(desired_comp)) module.exit_json(**result) # create it - desired_mappers = desired_comp.pop('mappers', []) + desired_mappers = desired_comp.pop("mappers", []) after_comp = kc.create_component(desired_comp, realm) - cid = after_comp['id'] + cid = after_comp["id"] updated_mappers = [] # when creating a user federation, keycloak automatically creates default mappers default_mappers = kc.get_components(urlencode(dict(parent=cid)), realm) # create new mappers or update existing default mappers for desired_mapper in desired_mappers: - found = [default_mapper for default_mapper in default_mappers if default_mapper['name'] == desired_mapper['name']] + found = [ + default_mapper for default_mapper in default_mappers if default_mapper["name"] == desired_mapper["name"] + ] if len(found) > 1: module.fail_json(msg=f"Found multiple mappers with name `{desired_mapper['name']}`. 
Cannot continue.") if len(found) == 1: @@ -997,93 +1032,95 @@ def main(): new_mapper = old_mapper.copy() new_mapper.update(desired_mapper) - if new_mapper.get('id') is not None: + if new_mapper.get("id") is not None: kc.update_component(new_mapper, realm) updated_mappers.append(new_mapper) else: - if new_mapper.get('parentId') is None: - new_mapper['parentId'] = cid + if new_mapper.get("parentId") is None: + new_mapper["parentId"] = cid updated_mappers.append(kc.create_component(new_mapper, realm)) - if module.params['remove_unspecified_mappers']: + if module.params["remove_unspecified_mappers"]: # we remove all unwanted default mappers # we use ids so we dont accidently remove one of the previously updated default mapper for default_mapper in default_mappers: - if not default_mapper['id'] in [x['id'] for x in updated_mappers]: - kc.delete_component(default_mapper['id'], realm) + if not default_mapper["id"] in [x["id"] for x in updated_mappers]: + kc.delete_component(default_mapper["id"], realm) - after_comp['mappers'] = kc.get_components(urlencode(dict(parent=cid)), realm) + after_comp["mappers"] = kc.get_components(urlencode(dict(parent=cid)), realm) normalize_kc_comp(after_comp) if module._diff: - result['diff'] = dict(before='', after=sanitize(after_comp)) - result['end_state'] = sanitize(after_comp) - result['msg'] = f"User federation {cid} has been created" + result["diff"] = dict(before="", after=sanitize(after_comp)) + result["end_state"] = sanitize(after_comp) + result["msg"] = f"User federation {cid} has been created" module.exit_json(**result) else: - if state == 'present': + if state == "present": # Process an update desired_copy = deepcopy(desired_comp) before_copy = deepcopy(before_comp) # exclude bindCredential when checking wether an update is required, therefore # updating it only if there are other changes - if module.params['bind_credential_update_mode'] == 'only_indirect': - desired_copy.get('config', []).pop('bindCredential', None) - before_copy.get('config', []).pop('bindCredential', None) + if module.params["bind_credential_update_mode"] == "only_indirect": + desired_copy.get("config", []).pop("bindCredential", None) + before_copy.get("config", []).pop("bindCredential", None) # no changes if desired_copy == before_copy: - result['changed'] = False - result['end_state'] = sanitize(desired_comp) - result['msg'] = f"No changes required to user federation {cid}." + result["changed"] = False + result["end_state"] = sanitize(desired_comp) + result["msg"] = f"No changes required to user federation {cid}." 
module.exit_json(**result) # doing an update - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = dict(before=sanitize(before_comp), after=sanitize(desired_comp)) + result["diff"] = dict(before=sanitize(before_comp), after=sanitize(desired_comp)) if module.check_mode: module.exit_json(**result) # do the update - desired_mappers = desired_comp.pop('mappers', []) + desired_mappers = desired_comp.pop("mappers", []) kc.update_component(desired_comp, realm) - for before_mapper in before_comp.get('mappers', []): + for before_mapper in before_comp.get("mappers", []): # remove unwanted existing mappers that will not be updated - if not before_mapper['id'] in [x['id'] for x in desired_mappers if 'id' in x]: - kc.delete_component(before_mapper['id'], realm) + if not before_mapper["id"] in [x["id"] for x in desired_mappers if "id" in x]: + kc.delete_component(before_mapper["id"], realm) for mapper in desired_mappers: - if mapper in before_comp.get('mappers', []): + if mapper in before_comp.get("mappers", []): continue - if mapper.get('id') is not None: + if mapper.get("id") is not None: kc.update_component(mapper, realm) else: - if mapper.get('parentId') is None: - mapper['parentId'] = desired_comp['id'] + if mapper.get("parentId") is None: + mapper["parentId"] = desired_comp["id"] kc.create_component(mapper, realm) after_comp = kc.get_component(cid, realm) - after_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name') or '') + after_comp["mappers"] = sorted( + kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get("name") or "" + ) normalize_kc_comp(after_comp) after_comp_sanitized = sanitize(after_comp) before_comp_sanitized = sanitize(before_comp) - result['end_state'] = after_comp_sanitized + result["end_state"] = after_comp_sanitized if module._diff: - result['diff'] = dict(before=before_comp_sanitized, after=after_comp_sanitized) - result['changed'] = before_comp_sanitized != after_comp_sanitized - result['msg'] = f"User federation {cid} has been updated" + result["diff"] = dict(before=before_comp_sanitized, after=after_comp_sanitized) + result["changed"] = before_comp_sanitized != after_comp_sanitized + result["msg"] = f"User federation {cid} has been updated" module.exit_json(**result) - elif state == 'absent': + elif state == "absent": # Process a deletion - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = dict(before=sanitize(before_comp), after='') + result["diff"] = dict(before=sanitize(before_comp), after="") if module.check_mode: module.exit_json(**result) @@ -1091,12 +1128,12 @@ def main(): # delete it kc.delete_component(cid, realm) - result['end_state'] = {} + result["end_state"] = {} - result['msg'] = f"User federation {cid} has been deleted" + result["msg"] = f"User federation {cid} has been deleted" module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_user_rolemapping.py b/plugins/modules/keycloak_user_rolemapping.py index 41445cda425..7ad62aa0cbe 100644 --- a/plugins/modules/keycloak_user_rolemapping.py +++ b/plugins/modules/keycloak_user_rolemapping.py @@ -238,8 +238,12 @@ } """ -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ - keycloak_argument_spec, get_token, KeycloakError +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, + 
keycloak_argument_spec, + get_token, + KeycloakError, +) from ansible.module_utils.basic import AnsibleModule @@ -252,32 +256,37 @@ def main(): argument_spec = keycloak_argument_spec() roles_spec = dict( - name=dict(type='str'), - id=dict(type='str'), + name=dict(type="str"), + id=dict(type="str"), ) meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - realm=dict(default='master'), - uid=dict(type='str'), - target_username=dict(type='str'), - service_account_user_client_id=dict(type='str'), - cid=dict(type='str'), - client_id=dict(type='str'), - roles=dict(type='list', elements='dict', options=roles_spec), + state=dict(default="present", choices=["present", "absent"]), + realm=dict(default="master"), + uid=dict(type="str"), + target_username=dict(type="str"), + service_account_user_client_id=dict(type="str"), + cid=dict(type="str"), + client_id=dict(type="str"), + roles=dict(type="list", elements="dict", options=roles_spec), ) argument_spec.update(meta_args) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret'], - ['uid', 'target_username', 'service_account_user_client_id']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 'auth_realm'}, - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [ + ["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"], + ["uid", "target_username", "service_account_user_client_id"], + ] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, + ) - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + result = dict(changed=False, msg="", diff={}, proposed={}, existing={}, end_state={}) # Obtain access token, initialize API try: @@ -287,62 +296,70 @@ def main(): kc = KeycloakAPI(module, connection_header) - realm = module.params.get('realm') - state = module.params.get('state') - cid = module.params.get('cid') - client_id = module.params.get('client_id') - uid = module.params.get('uid') - target_username = module.params.get('target_username') - service_account_user_client_id = module.params.get('service_account_user_client_id') - roles = module.params.get('roles') + realm = module.params.get("realm") + state = module.params.get("state") + cid = module.params.get("cid") + client_id = module.params.get("client_id") + uid = module.params.get("uid") + target_username = module.params.get("target_username") + service_account_user_client_id = module.params.get("service_account_user_client_id") + roles = module.params.get("roles") # Check the parameters if uid is None and target_username is None and service_account_user_client_id is None: - module.fail_json(msg='Either the `target_username`, `uid` or `service_account_user_client_id` has to be specified.') + module.fail_json( + msg="Either the `target_username`, `uid` or `service_account_user_client_id` has to be specified." 
+ ) # Get the potential missing parameters if uid is None and service_account_user_client_id is None: user_rep = kc.get_user_by_username(username=target_username, realm=realm) if user_rep is not None: - uid = user_rep.get('id') + uid = user_rep.get("id") else: - module.fail_json(msg=f'Could not fetch user for username {target_username}:') + module.fail_json(msg=f"Could not fetch user for username {target_username}:") else: if uid is None and target_username is None: user_rep = kc.get_service_account_user_by_client_id(client_id=service_account_user_client_id, realm=realm) if user_rep is not None: - uid = user_rep['id'] + uid = user_rep["id"] else: - module.fail_json(msg=f'Could not fetch service-account-user for client_id {target_username}:') + module.fail_json(msg=f"Could not fetch service-account-user for client_id {service_account_user_client_id}:") if cid is None and client_id is not None: cid = kc.get_client_id(client_id=client_id, realm=realm) if cid is None: - module.fail_json(msg=f'Could not fetch client {client_id}:') + module.fail_json(msg=f"Could not fetch client {client_id}:") if roles is None: module.exit_json(msg="Nothing to do (no roles specified).") else: for role_index, role in enumerate(roles, start=0): - if role.get('name') is None and role.get('id') is None: - module.fail_json(msg='Either the `name` or `id` has to be specified on each role.') + if role.get("name") is None and role.get("id") is None: + module.fail_json(msg="Either the `name` or `id` has to be specified on each role.") # Fetch missing role_id - if role.get('id') is None: + if role.get("id") is None: if cid is None: - role_id = kc.get_realm_role(name=role.get('name'), realm=realm)['id'] + role_id = kc.get_realm_role(name=role.get("name"), realm=realm)["id"] else: - role_id = kc.get_client_role_id_by_name(cid=cid, name=role.get('name'), realm=realm) + role_id = kc.get_client_role_id_by_name(cid=cid, name=role.get("name"), realm=realm) if role_id is not None: - role['id'] = role_id + role["id"] = role_id else: - module.fail_json(msg=f"Could not fetch role {role.get('name')} for client_id {client_id} or realm {realm}") + module.fail_json( + msg=f"Could not fetch role {role.get('name')} for client_id {client_id} or realm {realm}" + ) # Fetch missing role_name else: if cid is None: - role['name'] = kc.get_realm_user_rolemapping_by_id(uid=uid, rid=role.get('id'), realm=realm)['name'] + role["name"] = kc.get_realm_user_rolemapping_by_id(uid=uid, rid=role.get("id"), realm=realm)["name"] else: - role['name'] = kc.get_client_user_rolemapping_by_id(uid=uid, cid=cid, rid=role.get('id'), realm=realm)['name'] - if role.get('name') is None: - module.fail_json(msg=f"Could not fetch role {role.get('id')} for client_id {client_id} or realm {realm}") + role["name"] = kc.get_client_user_rolemapping_by_id( + uid=uid, cid=cid, rid=role.get("id"), realm=realm + )["name"] + if role.get("name") is None: + module.fail_json( + msg=f"Could not fetch role {role.get('id')} for client_id {client_id} or realm {realm}" + ) # Get effective role mappings if cid is None: @@ -352,65 +369,69 @@ def main(): available_roles_before = kc.get_client_user_available_rolemappings(uid=uid, cid=cid, realm=realm) assigned_roles_before = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm) - result['existing'] = assigned_roles_before - result['proposed'] = roles + result["existing"] = assigned_roles_before + result["proposed"] = roles update_roles = [] for role_index, role in enumerate(roles, start=0): # Fetch roles to assign if state present - if 
state == 'present': + if state == "present": for available_role in available_roles_before: - if role.get('name') == available_role.get('name'): - update_roles.append({ - 'id': role.get('id'), - 'name': role.get('name'), - }) + if role.get("name") == available_role.get("name"): + update_roles.append( + { + "id": role.get("id"), + "name": role.get("name"), + } + ) # Fetch roles to remove if state absent else: for assigned_role in assigned_roles_before: - if role.get('name') == assigned_role.get('name'): - update_roles.append({ - 'id': role.get('id'), - 'name': role.get('name'), - }) + if role.get("name") == assigned_role.get("name"): + update_roles.append( + { + "id": role.get("id"), + "name": role.get("name"), + } + ) if len(update_roles): - if state == 'present': + if state == "present": # Assign roles - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = dict(before={"roles": assigned_roles_before}, after={"roles": update_roles}) + result["diff"] = dict(before={"roles": assigned_roles_before}, after={"roles": update_roles}) if module.check_mode: module.exit_json(**result) kc.add_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm) - result['msg'] = f'Roles {update_roles} assigned to userId {uid}.' + result["msg"] = f"Roles {update_roles} assigned to userId {uid}." if cid is None: assigned_roles_after = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm) else: assigned_roles_after = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm) - result['end_state'] = assigned_roles_after + result["end_state"] = assigned_roles_after module.exit_json(**result) else: # Remove mapping of role - result['changed'] = True + result["changed"] = True if module._diff: - result['diff'] = dict(before={"roles": assigned_roles_before}, after={"roles": update_roles}) + result["diff"] = dict(before={"roles": assigned_roles_before}, after={"roles": update_roles}) if module.check_mode: module.exit_json(**result) kc.delete_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm) - result['msg'] = f'Roles {update_roles} removed from userId {uid}.' + result["msg"] = f"Roles {update_roles} removed from userId {uid}." if cid is None: assigned_roles_after = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm) else: assigned_roles_after = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm) - result['end_state'] = assigned_roles_after + result["end_state"] = assigned_roles_after module.exit_json(**result) # Do nothing else: - result['changed'] = False - result['msg'] = f'Nothing to do, roles {roles} are correctly mapped to user for username {target_username}.' + result["changed"] = False + result["msg"] = f"Nothing to do, roles {roles} are correctly mapped to user for username {target_username}." 
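
A pattern worth naming in the hunks above: every read in this module dispatches on whether cid resolved, choosing between the realm-level and client-level endpoint families. A hypothetical condensation of that dispatch, reusing the module's own method names:

def composite_rolemappings(kc, uid, cid, realm):
    # cid is None -> realm-level mappings; otherwise client-level mappings.
    if cid is None:
        return kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm)
    return kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm)
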
module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keycloak_userprofile.py b/plugins/modules/keycloak_userprofile.py index fbc2041a767..1660d2d6fd6 100644 --- a/plugins/modules/keycloak_userprofile.py +++ b/plugins/modules/keycloak_userprofile.py @@ -405,8 +405,13 @@ type: dict """ -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, + camel, + keycloak_argument_spec, + get_token, + KeycloakError, +) from ansible.module_utils.basic import AnsibleModule from copy import deepcopy from urllib.parse import urlencode @@ -441,105 +446,109 @@ def main(): argument_spec = keycloak_argument_spec() meta_args = dict( - state=dict(type='str', choices=['present', 'absent'], default='present'), - parent_id=dict(type='str', aliases=['parentId', 'realm'], required=True), - provider_id=dict(type='str', aliases=['providerId'], default='declarative-user-profile', choices=['declarative-user-profile']), + state=dict(type="str", choices=["present", "absent"], default="present"), + parent_id=dict(type="str", aliases=["parentId", "realm"], required=True), + provider_id=dict( + type="str", aliases=["providerId"], default="declarative-user-profile", choices=["declarative-user-profile"] + ), provider_type=dict( - type='str', - aliases=['providerType'], - default='org.keycloak.userprofile.UserProfileProvider', - choices=['org.keycloak.userprofile.UserProfileProvider'] + type="str", + aliases=["providerType"], + default="org.keycloak.userprofile.UserProfileProvider", + choices=["org.keycloak.userprofile.UserProfileProvider"], ), config=dict( - type='dict', + type="dict", options={ - 'kc_user_profile_config': dict( - type='list', - aliases=['kcUserProfileConfig'], - elements='dict', + "kc_user_profile_config": dict( + type="list", + aliases=["kcUserProfileConfig"], + elements="dict", options={ - 'attributes': dict( - type='list', - elements='dict', + "attributes": dict( + type="list", + elements="dict", options={ - 'name': dict(type='str', required=True), - 'display_name': dict(type='str', aliases=['displayName'], required=True), - 'validations': dict( - type='dict', + "name": dict(type="str", required=True), + "display_name": dict(type="str", aliases=["displayName"], required=True), + "validations": dict( + type="dict", options={ - 'length': dict( - type='dict', - options={ - 'min': dict(type='int'), - 'max': dict(type='int', required=True) - } + "length": dict( + type="dict", + options={"min": dict(type="int"), "max": dict(type="int", required=True)}, + ), + "email": dict(type="dict"), + "username_prohibited_characters": dict( + type="dict", aliases=["usernameProhibitedCharacters"] + ), + "up_username_not_idn_homograph": dict( + type="dict", aliases=["upUsernameNotIdnHomograph"] ), - 'email': dict(type='dict'), - 'username_prohibited_characters': dict(type='dict', aliases=['usernameProhibitedCharacters']), - 'up_username_not_idn_homograph': dict(type='dict', aliases=['upUsernameNotIdnHomograph']), - 'person_name_prohibited_characters': dict(type='dict', aliases=['personNameProhibitedCharacters']), - 'uri': dict(type='dict'), - 'pattern': dict(type='dict'), - 'options': dict(type='dict') - } + "person_name_prohibited_characters": dict( + type="dict", aliases=["personNameProhibitedCharacters"] + ), + "uri": 
dict(type="dict"), + "pattern": dict(type="dict"), + "options": dict(type="dict"), + }, ), - 'annotations': dict(type='dict'), - 'group': dict(type='str'), - 'permissions': dict( - type='dict', + "annotations": dict(type="dict"), + "group": dict(type="str"), + "permissions": dict( + type="dict", options={ - 'view': dict(type='list', elements='str', default=['admin', 'user']), - 'edit': dict(type='list', elements='str', default=['admin', 'user']) - } + "view": dict(type="list", elements="str", default=["admin", "user"]), + "edit": dict(type="list", elements="str", default=["admin", "user"]), + }, ), - 'multivalued': dict(type='bool', default=False), - 'required': dict( - type='dict', - options={ - 'roles': dict(type='list', elements='str', default=['user']) - } - ) - } + "multivalued": dict(type="bool", default=False), + "required": dict( + type="dict", options={"roles": dict(type="list", elements="str", default=["user"])} + ), + }, ), - 'groups': dict( - type='list', - elements='dict', + "groups": dict( + type="list", + elements="dict", options={ - 'name': dict(type='str', required=True), - 'display_header': dict(type='str', aliases=['displayHeader'], required=True), - 'display_description': dict(type='str', aliases=['displayDescription']), - 'annotations': dict(type='dict') - } + "name": dict(type="str", required=True), + "display_header": dict(type="str", aliases=["displayHeader"], required=True), + "display_description": dict(type="str", aliases=["displayDescription"]), + "annotations": dict(type="dict"), + }, ), - 'unmanaged_attribute_policy': dict( - type='str', - aliases=['unmanagedAttributePolicy'], - choices=['ENABLED', 'ADMIN_EDIT', 'ADMIN_VIEW'], - - ) - } + "unmanaged_attribute_policy": dict( + type="str", + aliases=["unmanagedAttributePolicy"], + choices=["ENABLED", "ADMIN_EDIT", "ADMIN_VIEW"], + ), + }, ) - } - ) + }, + ), ) argument_spec.update(meta_args) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), - required_together=([['auth_username', 'auth_password']]), - required_by={'refresh_token': 'auth_realm'}, - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"]] + ), + required_together=([["auth_username", "auth_password"]]), + required_by={"refresh_token": "auth_realm"}, + ) # Initialize the result object. Only "changed" seems to have special # meaning for Ansible. - result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + result = dict(changed=False, msg="", end_state={}, diff=dict(before={}, after={})) # This will include the current state of the realm userprofile if it is already # present. This is only used for diff-mode. 
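A note for readers tracing the argument spec above into the API payload: option names are snake_case on the Ansible side but camelCase in Keycloak's REST API, and the camel() helper imported earlier performs that conversion before the result object below is filled in. A minimal sketch of the mapping (re-implemented here purely for illustration; the real helper lives in the keycloak module_utils):

    def camel(words):
        # provider_type -> providerType, parent_id -> parentId
        head, *rest = words.split("_")
        return head + "".join(part.capitalize() for part in rest)

    assert camel("provider_id") == "providerId"
    assert camel("kc_user_profile_config") == "kcUserProfileConfig"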
before_realm_userprofile = {} - before_realm_userprofile['config'] = {} + before_realm_userprofile["config"] = {} # Obtain access token, initialize API try: @@ -552,18 +561,14 @@ def main(): params_to_ignore = list(keycloak_argument_spec().keys()) + ["state"] # Filter and map the parameter names that apply to the role - component_params = [ - x - for x in module.params - if x not in params_to_ignore and module.params.get(x) is not None - ] + component_params = [x for x in module.params if x not in params_to_ignore and module.params.get(x) is not None] # Build a proposed changeset from parameters given to this module changeset = {} # Build the changeset with proper JSON serialization for kc_user_profile_config - config = module.params.get('config') - changeset['config'] = {} + config = module.params.get("config") + changeset["config"] = {} # Generate a JSON payload for Keycloak Admin API from the module # parameters. Parameters that do not belong to the JSON payload (e.g. @@ -575,17 +580,17 @@ def main(): # becomes providerId. It also handles some special cases, e.g. aliases. for component_param in component_params: # realm/parent_id parameter - if component_param == 'realm' or component_param == 'parent_id': - changeset['parent_id'] = module.params.get(component_param) + if component_param == "realm" or component_param == "parent_id": + changeset["parent_id"] = module.params.get(component_param) changeset.pop(component_param, None) # complex parameters in config suboptions - elif component_param == 'config': + elif component_param == "config": for config_param in config: # special parameter kc_user_profile_config - if config_param in ('kcUserProfileConfig', 'kc_user_profile_config'): + if config_param in ("kcUserProfileConfig", "kc_user_profile_config"): config_param_org = config_param # rename parameter to be accepted by Keycloak API - config_param = 'kc.user.profile.config' + config_param = "kc.user.profile.config" # make sure no null values are passed to Keycloak API kc_user_profile_config = remove_null_values(config[config_param_org]) changeset[camel(component_param)][config_param] = [] @@ -593,21 +598,21 @@ def main(): # convert aliases to camelCase kc_user_profile_config = camel_recursive(kc_user_profile_config) # rename validations to be accepted by Keycloak API - if 'attributes' in kc_user_profile_config[0]: - for attribute in kc_user_profile_config[0]['attributes']: - if 'validations' in attribute: - if 'usernameProhibitedCharacters' in attribute['validations']: - attribute['validations']['username-prohibited-characters'] = ( - attribute['validations'].pop('usernameProhibitedCharacters') - ) - if 'upUsernameNotIdnHomograph' in attribute['validations']: - attribute['validations']['up-username-not-idn-homograph'] = ( - attribute['validations'].pop('upUsernameNotIdnHomograph') - ) - if 'personNameProhibitedCharacters' in attribute['validations']: - attribute['validations']['person-name-prohibited-characters'] = ( - attribute['validations'].pop('personNameProhibitedCharacters') - ) + if "attributes" in kc_user_profile_config[0]: + for attribute in kc_user_profile_config[0]["attributes"]: + if "validations" in attribute: + if "usernameProhibitedCharacters" in attribute["validations"]: + attribute["validations"]["username-prohibited-characters"] = attribute[ + "validations" + ].pop("usernameProhibitedCharacters") + if "upUsernameNotIdnHomograph" in attribute["validations"]: + attribute["validations"]["up-username-not-idn-homograph"] = attribute[ + "validations" + 
].pop("upUsernameNotIdnHomograph") + if "personNameProhibitedCharacters" in attribute["validations"]: + attribute["validations"]["person-name-prohibited-characters"] = attribute[ + "validations" + ].pop("personNameProhibitedCharacters") changeset[camel(component_param)][config_param].append(kc_user_profile_config[0]) # usual camelCase parameters else: @@ -624,11 +629,11 @@ def main(): changeset[camel(component_param)] = new_param_value # Make it easier to refer to current module parameters - state = module.params.get('state') - enabled = module.params.get('enabled') - parent_id = module.params.get('parent_id') - provider_type = module.params.get('provider_type') - provider_id = module.params.get('provider_id') + state = module.params.get("state") + enabled = module.params.get("enabled") + parent_id = module.params.get("parent_id") + provider_type = module.params.get("provider_type") + provider_id = module.params.get("provider_id") # Make a deep copy of the changeset. This is used when determining # changes to the current state. @@ -645,90 +650,92 @@ def main(): changes = "" # This tells Ansible whether the userprofile was changed (added, removed, modified) - result['changed'] = False + result["changed"] = False # Loop through the list of components. If we encounter a component whose # name matches the value of the name parameter then assume the userprofile is # already present. for userprofile in realm_userprofiles: if provider_id == "declarative-user-profile": - userprofile_id = userprofile['id'] - changeset['id'] = userprofile_id - changeset_copy['id'] = userprofile_id + userprofile_id = userprofile["id"] + changeset["id"] = userprofile_id + changeset_copy["id"] = userprofile_id # keycloak returns kc.user.profile.config as a single JSON formatted string, so we have to deserialize it - if 'config' in userprofile and 'kc.user.profile.config' in userprofile['config']: - userprofile['config']['kc.user.profile.config'][0] = json.loads(userprofile['config']['kc.user.profile.config'][0]) + if "config" in userprofile and "kc.user.profile.config" in userprofile["config"]: + userprofile["config"]["kc.user.profile.config"][0] = json.loads( + userprofile["config"]["kc.user.profile.config"][0] + ) # Compare top-level parameters for param, value in changeset.items(): before_realm_userprofile[param] = userprofile[param] - if changeset_copy[param] != userprofile[param] and param != 'config': + if changeset_copy[param] != userprofile[param] and param != "config": changes += f"{param}: {userprofile[param]} -> {changeset_copy[param]}, " - result['changed'] = True + result["changed"] = True # Compare parameters under the "config" userprofile - for p, v in changeset_copy['config'].items(): - before_realm_userprofile['config'][p] = userprofile['config'][p] - if changeset_copy['config'][p] != userprofile['config'][p]: + for p, v in changeset_copy["config"].items(): + before_realm_userprofile["config"][p] = userprofile["config"][p] + if changeset_copy["config"][p] != userprofile["config"][p]: changes += f"config.{p}: {userprofile['config'][p]} -> {changeset_copy['config'][p]}, " - result['changed'] = True + result["changed"] = True # Check all the possible states of the resource and do what is needed to # converge current state with desired state (create, update or delete # the userprofile).
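The comments just above and below describe the same quirk from both directions: Keycloak transports kc.user.profile.config as a one-element list holding a JSON string, so the module deserializes it for field-by-field comparison and serializes it again before writing. A small sketch of that round-trip, with an assumed, simplified payload:

    import json

    # Assumed shape, for illustration only: one-element list with a JSON string.
    component = {"config": {"kc.user.profile.config": ['{"attributes": [{"name": "username"}]}']}}

    # On read: parse the string so fields can be compared individually.
    profile = json.loads(component["config"]["kc.user.profile.config"][0])
    component["config"]["kc.user.profile.config"][0] = profile

    # On write: dump it back to the single string Keycloak expects.
    component["config"]["kc.user.profile.config"][0] = json.dumps(profile)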
# keycloak expects kc.user.profile.config as a single JSON formatted string, so we have to serialize it - if 'config' in changeset and 'kc.user.profile.config' in changeset['config']: - changeset['config']['kc.user.profile.config'][0] = json.dumps(changeset['config']['kc.user.profile.config'][0]) - if userprofile_id and state == 'present': - if result['changed']: + if "config" in changeset and "kc.user.profile.config" in changeset["config"]: + changeset["config"]["kc.user.profile.config"][0] = json.dumps(changeset["config"]["kc.user.profile.config"][0]) + if userprofile_id and state == "present": + if result["changed"]: if module._diff: - result['diff'] = dict(before=before_realm_userprofile, after=changeset_copy) + result["diff"] = dict(before=before_realm_userprofile, after=changeset_copy) if module.check_mode: - result['msg'] = f"Userprofile {provider_id} would be changed: {changes.strip(', ')}" + result["msg"] = f"Userprofile {provider_id} would be changed: {changes.strip(', ')}" else: kc.update_component(changeset, parent_id) - result['msg'] = f"Userprofile {provider_id} changed: {changes.strip(', ')}" + result["msg"] = f"Userprofile {provider_id} changed: {changes.strip(', ')}" else: - result['msg'] = f"Userprofile {provider_id} was in sync" + result["msg"] = f"Userprofile {provider_id} was in sync" - result['end_state'] = changeset_copy - elif userprofile_id and state == 'absent': + result["end_state"] = changeset_copy + elif userprofile_id and state == "absent": if module._diff: - result['diff'] = dict(before=before_realm_userprofile, after={}) + result["diff"] = dict(before=before_realm_userprofile, after={}) if module.check_mode: - result['changed'] = True - result['msg'] = f"Userprofile {provider_id} would be deleted" + result["changed"] = True + result["msg"] = f"Userprofile {provider_id} would be deleted" else: kc.delete_component(userprofile_id, parent_id) - result['changed'] = True - result['msg'] = f"Userprofile {provider_id} deleted" + result["changed"] = True + result["msg"] = f"Userprofile {provider_id} deleted" - result['end_state'] = {} - elif not userprofile_id and state == 'present': + result["end_state"] = {} + elif not userprofile_id and state == "present": if module._diff: - result['diff'] = dict(before={}, after=changeset_copy) + result["diff"] = dict(before={}, after=changeset_copy) if module.check_mode: - result['changed'] = True - result['msg'] = f"Userprofile {provider_id} would be created" + result["changed"] = True + result["msg"] = f"Userprofile {provider_id} would be created" else: kc.create_component(changeset, parent_id) - result['changed'] = True - result['msg'] = f"Userprofile {provider_id} created" + result["changed"] = True + result["msg"] = f"Userprofile {provider_id} created" - result['end_state'] = changeset_copy - elif not userprofile_id and state == 'absent': - result['changed'] = False - result['msg'] = f"Userprofile {provider_id} not present" - result['end_state'] = {} + result["end_state"] = changeset_copy + elif not userprofile_id and state == "absent": + result["changed"] = False + result["msg"] = f"Userprofile {provider_id} not present" + result["end_state"] = {} module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/keyring.py b/plugins/modules/keyring.py index 9653ef1ad5e..c753c52e46a 100644 --- a/plugins/modules/keyring.py +++ b/plugins/modules/keyring.py @@ -103,7 +103,7 @@ def del_passphrase(module): except keyring.errors.KeyringLocked: delete_argument = ( f'echo 
"{quote(module.params["keyring_password"])}" | gnome-keyring-daemon --unlock\n' - f'keyring del {quote(module.params["service"])} {quote(module.params["username"])}\n' + f"keyring del {quote(module.params['service'])} {quote(module.params['username'])}\n" ) dummy, dummy, stderr = module.run_command( "dbus-run-session -- /bin/bash", @@ -133,7 +133,7 @@ def set_passphrase(module): except keyring.errors.KeyringLocked: set_argument = ( f'echo "{quote(module.params["keyring_password"])}" | gnome-keyring-daemon --unlock\n' - f'keyring set {quote(module.params["service"])} {quote(module.params["username"])}\n{quote(module.params["user_password"])}\n' + f"keyring set {quote(module.params['service'])} {quote(module.params['username'])}\n{quote(module.params['user_password'])}\n" ) dummy, dummy, stderr = module.run_command( "dbus-run-session -- /bin/bash", @@ -151,9 +151,7 @@ def get_passphrase(module): Attempt to retrieve passphrase from keyring using the Python API and fallback to using a shell. """ try: - passphrase = keyring.get_password( - module.params["service"], module.params["username"] - ) + passphrase = keyring.get_password(module.params["service"], module.params["username"]) return passphrase except keyring.errors.KeyringLocked: pass @@ -163,7 +161,7 @@ def get_passphrase(module): pass get_argument = ( f'echo "{quote(module.params["keyring_password"])}" | gnome-keyring-daemon --unlock\n' - f'keyring get {quote(module.params["service"])} {quote(module.params["username"])}\n' + f"keyring get {quote(module.params['service'])} {quote(module.params['username'])}\n" ) dummy, stdout, dummy = module.run_command( "dbus-run-session -- /bin/bash", @@ -190,12 +188,8 @@ def run_module(): service=dict(type="str", required=True), username=dict(type="str", required=True), keyring_password=dict(type="str", required=True, no_log=True), - user_password=dict( - type="str", no_log=True, aliases=["password"] - ), - state=dict( - type="str", default="present", choices=["absent", "present"] - ), + user_password=dict(type="str", no_log=True, aliases=["password"]), + state=dict(type="str", default="present", choices=["absent", "present"]), ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) @@ -212,14 +206,18 @@ def run_module(): set_result = set_passphrase(module) if set_result is None: result["changed"] = True - result["msg"] = f"Passphrase has been updated for {module.params['service']}@{module.params['username']}" + result["msg"] = ( + f"Passphrase has been updated for {module.params['service']}@{module.params['username']}" + ) if set_result is not None: module.fail_json(msg=set_result) if passphrase is None: set_result = set_passphrase(module) if set_result is None: result["changed"] = True - result["msg"] = f"Passphrase has been updated for {module.params['service']}@{module.params['username']}" + result["msg"] = ( + f"Passphrase has been updated for {module.params['service']}@{module.params['username']}" + ) if set_result is not None: module.fail_json(msg=set_result) @@ -230,7 +228,9 @@ def run_module(): del_result = del_passphrase(module) if del_result is None: result["changed"] = True - result["msg"] = f"Passphrase has been removed for {module.params['service']}@{module.params['username']}" + result["msg"] = ( + f"Passphrase has been removed for {module.params['service']}@{module.params['username']}" + ) if del_result is not None: module.fail_json(msg=del_result) diff --git a/plugins/modules/keyring_info.py b/plugins/modules/keyring_info.py index cddc2f973b4..4f45af10f67 100644 
--- a/plugins/modules/keyring_info.py +++ b/plugins/modules/keyring_info.py @@ -80,7 +80,7 @@ def _alternate_retrieval_method(module): get_argument = ( f'echo "{quote(module.params["keyring_password"])}" | gnome-keyring-daemon --unlock\n' - f'keyring get {quote(module.params["service"])} {quote(module.params["username"])}\n' + f"keyring get {quote(module.params['service'])} {quote(module.params['username'])}\n" ) dummy, stdout, dummy = module.run_command( "dbus-run-session -- /bin/bash", @@ -111,9 +111,7 @@ def run_module(): if not HAS_KEYRING: module.fail_json(msg=missing_required_lib("keyring"), exception=KEYRING_IMP_ERR) try: - passphrase = keyring.get_password( - module.params["service"], module.params["username"] - ) + passphrase = keyring.get_password(module.params["service"], module.params["username"]) except keyring.errors.KeyringLocked: pass except keyring.errors.InitError: diff --git a/plugins/modules/kibana_plugin.py b/plugins/modules/kibana_plugin.py index a7fef622fd6..0eb5a32ff70 100644 --- a/plugins/modules/kibana_plugin.py +++ b/plugins/modules/kibana_plugin.py @@ -119,10 +119,7 @@ from ansible_collections.community.general.plugins.module_utils.version import LooseVersion -PACKAGE_STATE_MAP = dict( - present="--install", - absent="--remove" -) +PACKAGE_STATE_MAP = dict(present="--install", absent="--remove") def parse_plugin_repo(string): @@ -139,7 +136,7 @@ def parse_plugin_repo(string): # remove es- prefix for string in ("elasticsearch-", "es-"): if repo.startswith(string): - return repo[len(string):] + return repo[len(string) :] return repo @@ -151,14 +148,14 @@ def is_plugin_present(plugin_dir, working_dir): def parse_error(string): reason = "reason: " try: - return string[string.index(reason) + len(reason):].strip() + return string[string.index(reason) + len(reason) :].strip() except ValueError: return string -def install_plugin(module, plugin_bin, plugin_name, url, timeout, allow_root, kibana_version='4.6'): - if LooseVersion(kibana_version) > LooseVersion('4.6'): - kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') +def install_plugin(module, plugin_bin, plugin_name, url, timeout, allow_root, kibana_version="4.6"): + if LooseVersion(kibana_version) > LooseVersion("4.6"): + kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), "kibana-plugin") cmd_args = [kibana_plugin_bin, "install"] if url: cmd_args.append(url) @@ -174,7 +171,7 @@ def install_plugin(module, plugin_bin, plugin_name, url, timeout, allow_root, ki cmd_args.extend(["--timeout", timeout]) if allow_root: - cmd_args.append('--allow-root') + cmd_args.append("--allow-root") if module.check_mode: return True, " ".join(cmd_args), "check mode", "" @@ -187,15 +184,15 @@ def install_plugin(module, plugin_bin, plugin_name, url, timeout, allow_root, ki return True, " ".join(cmd_args), out, err -def remove_plugin(module, plugin_bin, plugin_name, allow_root, kibana_version='4.6'): - if LooseVersion(kibana_version) > LooseVersion('4.6'): - kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') +def remove_plugin(module, plugin_bin, plugin_name, allow_root, kibana_version="4.6"): + if LooseVersion(kibana_version) > LooseVersion("4.6"): + kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), "kibana-plugin") cmd_args = [kibana_plugin_bin, "remove", plugin_name] else: cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name] if allow_root: - cmd_args.append('--allow-root') + cmd_args.append("--allow-root") if module.check_mode: return 
True, " ".join(cmd_args), "check mode", "" @@ -209,10 +206,10 @@ def remove_plugin(module, plugin_bin, plugin_name, allow_root, kibana_version='4 def get_kibana_version(module, plugin_bin, allow_root): - cmd_args = [plugin_bin, '--version'] + cmd_args = [plugin_bin, "--version"] if allow_root: - cmd_args.append('--allow-root') + cmd_args.append("--allow-root") rc, out, err = module.run_command(cmd_args) if rc != 0: @@ -247,7 +244,7 @@ def main(): force = module.params["force"] allow_root = module.params["allow_root"] - changed, cmd, out, err = False, '', '', '' + changed, cmd, out, err = False, "", "", "" kibana_version = get_kibana_version(module, plugin_bin, allow_root) @@ -271,5 +268,5 @@ def main(): module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/krb_ticket.py b/plugins/modules/krb_ticket.py index b187c17b55f..094ccf6f300 100644 --- a/plugins/modules/krb_ticket.py +++ b/plugins/modules/krb_ticket.py @@ -191,61 +191,61 @@ class IPAKeytab: def __init__(self, module, **kwargs): self.module = module - self.password = kwargs['password'] - self.principal = kwargs['principal'] - self.state = kwargs['state'] - self.kdestroy_all = kwargs['kdestroy_all'] - self.cache_name = kwargs['cache_name'] - self.start_time = kwargs['start_time'] - self.renewable = kwargs['renewable'] - self.forwardable = kwargs['forwardable'] - self.proxiable = kwargs['proxiable'] - self.address_restricted = kwargs['address_restricted'] - self.canonicalization = kwargs['canonicalization'] - self.enterprise = kwargs['enterprise'] - self.renewal = kwargs['renewal'] - self.validate = kwargs['validate'] - self.keytab = kwargs['keytab'] - self.keytab_path = kwargs['keytab_path'] + self.password = kwargs["password"] + self.principal = kwargs["principal"] + self.state = kwargs["state"] + self.kdestroy_all = kwargs["kdestroy_all"] + self.cache_name = kwargs["cache_name"] + self.start_time = kwargs["start_time"] + self.renewable = kwargs["renewable"] + self.forwardable = kwargs["forwardable"] + self.proxiable = kwargs["proxiable"] + self.address_restricted = kwargs["address_restricted"] + self.canonicalization = kwargs["canonicalization"] + self.enterprise = kwargs["enterprise"] + self.renewal = kwargs["renewal"] + self.validate = kwargs["validate"] + self.keytab = kwargs["keytab"] + self.keytab_path = kwargs["keytab_path"] self.kinit = CmdRunner( module, - command='kinit', + command="kinit", arg_formats=dict( - lifetime=cmd_runner_fmt.as_opt_val('-l'), - start_time=cmd_runner_fmt.as_opt_val('-s'), - renewable=cmd_runner_fmt.as_opt_val('-r'), - forwardable=cmd_runner_fmt.as_bool('-f', '-F', ignore_none=True), - proxiable=cmd_runner_fmt.as_bool('-p', '-P', ignore_none=True), - address_restricted=cmd_runner_fmt.as_bool('-a', '-A', ignore_none=True), - anonymous=cmd_runner_fmt.as_bool('-n'), - canonicalization=cmd_runner_fmt.as_bool('-C'), - enterprise=cmd_runner_fmt.as_bool('-E'), - renewal=cmd_runner_fmt.as_bool('-R'), - validate=cmd_runner_fmt.as_bool('-v'), - keytab=cmd_runner_fmt.as_bool('-k'), - keytab_path=cmd_runner_fmt.as_func(lambda v: ['-t', v] if v else ['-i']), - cache_name=cmd_runner_fmt.as_opt_val('-c'), + lifetime=cmd_runner_fmt.as_opt_val("-l"), + start_time=cmd_runner_fmt.as_opt_val("-s"), + renewable=cmd_runner_fmt.as_opt_val("-r"), + forwardable=cmd_runner_fmt.as_bool("-f", "-F", ignore_none=True), + proxiable=cmd_runner_fmt.as_bool("-p", "-P", 
ignore_none=True), + address_restricted=cmd_runner_fmt.as_bool("-a", "-A", ignore_none=True), + anonymous=cmd_runner_fmt.as_bool("-n"), + canonicalization=cmd_runner_fmt.as_bool("-C"), + enterprise=cmd_runner_fmt.as_bool("-E"), + renewal=cmd_runner_fmt.as_bool("-R"), + validate=cmd_runner_fmt.as_bool("-v"), + keytab=cmd_runner_fmt.as_bool("-k"), + keytab_path=cmd_runner_fmt.as_func(lambda v: ["-t", v] if v else ["-i"]), + cache_name=cmd_runner_fmt.as_opt_val("-c"), principal=cmd_runner_fmt.as_list(), - ) + ), ) self.kdestroy = CmdRunner( module, - command='kdestroy', + command="kdestroy", arg_formats=dict( - kdestroy_all=cmd_runner_fmt.as_bool('-A'), - cache_name=cmd_runner_fmt.as_opt_val('-c'), - principal=cmd_runner_fmt.as_opt_val('-p'), - ) + kdestroy_all=cmd_runner_fmt.as_bool("-A"), + cache_name=cmd_runner_fmt.as_opt_val("-c"), + principal=cmd_runner_fmt.as_opt_val("-p"), + ), ) self.klist = CmdRunner( module, - command='klist', + command="klist", arg_formats=dict( - show_list=cmd_runner_fmt.as_bool('-l'), - ) + show_list=cmd_runner_fmt.as_bool("-l"), + ), ) def exec_kinit(self): @@ -261,10 +261,7 @@ def exec_kinit(self): def exec_kdestroy(self): params = dict(self.module.params) - with self.kdestroy( - "kdestroy_all cache_name principal", - check_rc=True - ) as ctx: + with self.kdestroy("kdestroy_all cache_name principal", check_rc=True) as ctx: rc, out, err = ctx.run(**params) return out @@ -272,10 +269,7 @@ def exec_kdestroy(self): # Use check_rc = False because # if no tickets are present, the klist command always returns rc = 1 params = dict(show_list=show_list) - with self.klist( - "show_list", - check_rc=False - ) as ctx: + with self.klist("show_list", check_rc=False) as ctx: rc, out, err = ctx.run(**params) return rc, out, err @@ -300,71 +294,70 @@ def check_ticket_present(self): def main(): arg_spec = dict( - principal=dict(type='str'), - password=dict(type='str', no_log=True), - state=dict(default='present', choices=['present', 'absent']), - kdestroy_all=dict(type='bool'), - cache_name=dict(type='str', fallback=(env_fallback, ['KRB5CCNAME'])), - lifetime=dict(type='str'), - start_time=dict(type='str'), - renewable=dict(type='str'), - forwardable=dict(type='bool'), - proxiable=dict(type='bool'), - address_restricted=dict(type='bool'), - anonymous=dict(type='bool'), - canonicalization=dict(type='bool'), - enterprise=dict(type='bool'), - renewal=dict(type='bool'), - validate=dict(type='bool'), - keytab=dict(type='bool'), - keytab_path=dict(type='path'), + principal=dict(type="str"), + password=dict(type="str", no_log=True), + state=dict(default="present", choices=["present", "absent"]), + kdestroy_all=dict(type="bool"), + cache_name=dict(type="str", fallback=(env_fallback, ["KRB5CCNAME"])), + lifetime=dict(type="str"), + start_time=dict(type="str"), + renewable=dict(type="str"), + forwardable=dict(type="bool"), + proxiable=dict(type="bool"), + address_restricted=dict(type="bool"), + anonymous=dict(type="bool"), + canonicalization=dict(type="bool"), + enterprise=dict(type="bool"), + renewal=dict(type="bool"), + validate=dict(type="bool"), + keytab=dict(type="bool"), + keytab_path=dict(type="path"), ) module = AnsibleModule( argument_spec=arg_spec, supports_check_mode=True, - required_by={ - 'keytab_path': 'keytab' - }, + required_by={"keytab_path": "keytab"}, required_if=[ - ('state', 'present', ('password', 'keytab_path'), True), + ("state", "present", ("password", "keytab_path"), True), ], ) - state = module.params['state'] - kdestroy_all = module.params['kdestroy_all'] - 
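Stepping back from the diff for a moment, as a rough guide to what the arg_formats above produce: each formatter turns one module parameter into zero or more CLI arguments for kinit. A tiny sketch of just the as_bool behavior (re-implemented here under assumed semantics; the real formatters come from the collection's cmd_runner module_utils):

    def as_bool(true_flag, false_flag, value, ignore_none=True):
        # None with ignore_none=True contributes no argument at all.
        if value is None and ignore_none:
            return []
        return [true_flag] if value else [false_flag]

    print(as_bool("-f", "-F", True))   # forwardable=True  -> ['-f']
    print(as_bool("-p", "-P", False))  # proxiable=False   -> ['-P']
    print(as_bool("-a", "-A", None))   # parameter unset   -> []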
- keytab = IPAKeytab(module, - state=state, - kdestroy_all=kdestroy_all, - principal=module.params['principal'], - password=module.params['password'], - cache_name=module.params['cache_name'], - lifetime=module.params['lifetime'], - start_time=module.params['start_time'], - renewable=module.params['renewable'], - forwardable=module.params['forwardable'], - proxiable=module.params['proxiable'], - address_restricted=module.params['address_restricted'], - anonymous=module.params['anonymous'], - canonicalization=module.params['canonicalization'], - enterprise=module.params['enterprise'], - renewal=module.params['renewal'], - validate=module.params['validate'], - keytab=module.params['keytab'], - keytab_path=module.params['keytab_path'], - ) - - if module.params['keytab_path'] is not None and module.params['keytab'] is not True: + state = module.params["state"] + kdestroy_all = module.params["kdestroy_all"] + + keytab = IPAKeytab( + module, + state=state, + kdestroy_all=kdestroy_all, + principal=module.params["principal"], + password=module.params["password"], + cache_name=module.params["cache_name"], + lifetime=module.params["lifetime"], + start_time=module.params["start_time"], + renewable=module.params["renewable"], + forwardable=module.params["forwardable"], + proxiable=module.params["proxiable"], + address_restricted=module.params["address_restricted"], + anonymous=module.params["anonymous"], + canonicalization=module.params["canonicalization"], + enterprise=module.params["enterprise"], + renewal=module.params["renewal"], + validate=module.params["validate"], + keytab=module.params["keytab"], + keytab_path=module.params["keytab_path"], + ) + + if module.params["keytab_path"] is not None and module.params["keytab"] is not True: module.fail_json(msg="If keytab_path is specified then keytab parameter must be True") changed = False - if state == 'present': + if state == "present": if not keytab.check_ticket_present(): changed = True if not module.check_mode: keytab.exec_kinit() - if state == 'absent': + if state == "absent": if kdestroy_all: changed = True if not module.check_mode: @@ -377,5 +370,5 @@ def main(): module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/launchd.py b/plugins/modules/launchd.py index 47777e2732a..35422aa0ded 100644 --- a/plugins/modules/launchd.py +++ b/plugins/modules/launchd.py @@ -139,11 +139,11 @@ class ServiceState: @staticmethod def to_string(state): strings = { - ServiceState.UNKNOWN: 'unknown', - ServiceState.LOADED: 'loaded', - ServiceState.STOPPED: 'stopped', - ServiceState.STARTED: 'started', - ServiceState.UNLOADED: 'unloaded' + ServiceState.UNKNOWN: "unknown", + ServiceState.LOADED: "loaded", + ServiceState.STOPPED: "stopped", + ServiceState.STARTED: "started", + ServiceState.UNLOADED: "unloaded", } return strings[state] @@ -155,15 +155,15 @@ def __init__(self, module, service, filename=None): if filename is not None: self.__filename = filename else: - self.__filename = f'{service}.plist' + self.__filename = f"{service}.plist" state, pid, dummy, dummy = LaunchCtlList(module, self.__service).run() self.__file = self.__find_service_plist(self.__filename) if self.__file is None: - msg = f'Unable to find the plist file {self.__filename} for service {self.__service}' + msg = f"Unable to find the plist file {self.__filename} for service {self.__service}" if pid is None and state == ServiceState.UNLOADED: - msg += ' and it was not found among active services' + msg += " and it was not 
found among active services" module.fail_json(msg=msg) self.__update(module) @@ -172,11 +172,11 @@ def __find_service_plist(filename): """Finds the plist file associated with a service""" launchd_paths = [ - os.path.join(os.getenv('HOME'), 'Library/LaunchAgents'), - '/Library/LaunchAgents', - '/Library/LaunchDaemons', - '/System/Library/LaunchAgents', - '/System/Library/LaunchDaemons' + os.path.join(os.getenv("HOME"), "Library/LaunchAgents"), + "/Library/LaunchAgents", + "/Library/LaunchDaemons", + "/System/Library/LaunchAgents", + "/System/Library/LaunchDaemons", ] for path in launchd_paths: @@ -196,7 +196,7 @@ def __update(self, module): def __read_plist_file(self, module): service_plist = {} try: - with open(self.__file, 'rb') as plist_fp: + with open(self.__file, "rb") as plist_fp: service_plist = plistlib.load(plist_fp) except Exception as e: module.fail_json(msg=f"Failed to read plist file {self.__file} due to {e}") @@ -207,23 +207,23 @@ def __write_plist_file(self, module, service_plist=None): service_plist = {} try: - with open(self.__file, 'wb') as plist_fp: + with open(self.__file, "wb") as plist_fp: plistlib.dump(service_plist, plist_fp) except Exception as e: module.fail_json(msg=f"Failed to write to plist file {self.__file} due to {e}") def __handle_param_enabled(self, module): - if module.params['enabled'] is not None: + if module.params["enabled"] is not None: service_plist = self.__read_plist_file(module) # Enable/disable service startup at boot if requested # Launchctl does not expose functionality to set the RunAtLoad # attribute of a job definition. So we parse and modify the job # definition plist file directly for this purpose. - if module.params['enabled'] is not None: - enabled = service_plist.get('RunAtLoad', False) - if module.params['enabled'] != enabled: - service_plist['RunAtLoad'] = module.params['enabled'] + if module.params["enabled"] is not None: + enabled = service_plist.get("RunAtLoad", False) + if module.params["enabled"] != enabled: + service_plist["RunAtLoad"] = module.params["enabled"] # Update the plist with the change made. if not module.check_mode: @@ -231,15 +231,15 @@ def __handle_param_enabled(self, module): self.__changed = True def __handle_param_force_stop(self, module): - if module.params['force_stop'] is not None: + if module.params["force_stop"] is not None: service_plist = self.__read_plist_file(module) # Set KeepAlive to false in case force_stop is defined, so that # the service is not restarted when stopping was requested. - if module.params['force_stop'] is not None: - keep_alive = service_plist.get('KeepAlive', False) - if module.params['force_stop'] and keep_alive: - service_plist['KeepAlive'] = not module.params['force_stop'] + if module.params["force_stop"] is not None: + keep_alive = service_plist.get("KeepAlive", False) + if module.params["force_stop"] and keep_alive: + service_plist["KeepAlive"] = not module.params["force_stop"] # Update the plist with the change made. if not module.check_mode: @@ -260,7 +260,7 @@ def __init__(self, module, service, plist): self._module = module self._service = service self._plist = plist - self._launch = self._module.get_bin_path('launchctl', True) + self._launch = self._module.get_bin_path("launchctl", True) def run(self): """Runs a launchd command like 'load', 'unload', 'start', 'stop', etc. 
@@ -276,15 +276,14 @@ def runCommand(self): def get_state(self): rc, out, err = self._launchctl("list") if rc != 0: - self._module.fail_json( - msg=f'Failed to get status of {self._launch}') + self._module.fail_json(msg=f"Failed to get status of {self._launch}") state = ServiceState.UNLOADED service_pid = "-" status_code = None for line in out.splitlines(): if line.strip(): - pid, last_exit_code, label = line.split('\t') + pid, last_exit_code, label = line.split("\t") if label.strip() == self._service: service_pid = pid status_code = last_exit_code @@ -294,12 +293,12 @@ def get_state(self): # negative of the signal which killed the job. Thus, # "-15" would indicate that the job was terminated with # SIGTERM. - if last_exit_code not in ['0', '-2', '-3', '-9', '-15']: + if last_exit_code not in ["0", "-2", "-3", "-9", "-15"]: # Something strange happened and we have no clue in # which state the service is now. Therefore we mark # the service state as UNKNOWN. state = ServiceState.UNKNOWN - elif pid != '-': + elif pid != "-": # PID seems to be an integer so we assume the service # is started. state = ServiceState.STARTED @@ -339,11 +338,15 @@ def unload(self): return self._launchctl("unload") def _launchctl(self, command): - service_or_plist = self._plist.get_file() if command in [ - 'load', 'unload'] else self._service if command in ['start', 'stop'] else "" + service_or_plist = ( + self._plist.get_file() + if command in ["load", "unload"] + else self._service + if command in ["start", "stop"] + else "" + ) - rc, out, err = self._module.run_command( - f'{self._launch} {command} {service_or_plist}') + rc, out, err = self._module.run_command(f"{self._launch} {command} {service_or_plist}") if rc != 0: msg = f"Unable to {command} '{self._service}' ({self._plist.get_file()}): '{err}'" @@ -452,66 +455,68 @@ def runCommand(self): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), - plist=dict(type='str'), - state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped', 'unloaded']), - enabled=dict(type='bool'), - force_stop=dict(type='bool', default=False), + name=dict(type="str", required=True), + plist=dict(type="str"), + state=dict(type="str", choices=["reloaded", "restarted", "started", "stopped", "unloaded"]), + enabled=dict(type="bool"), + force_stop=dict(type="bool", default=False), ), supports_check_mode=True, required_one_of=[ - ['state', 'enabled'], + ["state", "enabled"], ], ) - service = module.params['name'] - plist_filename = module.params['plist'] - action = module.params['state'] + service = module.params["name"] + plist_filename = module.params["plist"] + action = module.params["state"] rc = 0 - out = err = '' + out = err = "" result = { - 'name': service, - 'changed': False, - 'status': {}, + "name": service, + "changed": False, + "status": {}, } # We will tailor the plist file in case one of the options # (enabled, force_stop) was specified. plist = Plist(module, service, plist_filename) - result['changed'] = plist.is_changed() + result["changed"] = plist.is_changed() # Gather information about the service to be controlled. 
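Before the state gathering that follows, a concrete note on the parsing in get_state() above: launchctl list prints one tab-separated line per job, with a PID (or '-' when the job is not running), the last exit code (negative values are the terminating signal), and the job label. A sketch with made-up output:

    # Hypothetical `launchctl list` output: PID, last exit code, label.
    out = "123\t0\tcom.example.running\n-\t-15\tcom.example.sigterm\n"

    for line in out.splitlines():
        if not line.strip():
            continue
        pid, last_exit_code, label = line.split("\t")
        # -15 is the negative of SIGTERM, which the module treats as a
        # normal stop rather than an unknown state.
        print(label, "started" if pid != "-" else f"stopped (exit {last_exit_code})")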
state, pid, dummy, dummy = LaunchCtlList(module, service).run() - result['status']['previous_state'] = ServiceState.to_string(state) - result['status']['previous_pid'] = pid + result["status"]["previous_state"] = ServiceState.to_string(state) + result["status"]["previous_pid"] = pid # Map the actions to specific tasks tasks = { - 'started': LaunchCtlStart(module, service, plist), - 'stopped': LaunchCtlStop(module, service, plist), - 'restarted': LaunchCtlRestart(module, service, plist), - 'reloaded': LaunchCtlReload(module, service, plist), - 'unloaded': LaunchCtlUnload(module, service, plist) + "started": LaunchCtlStart(module, service, plist), + "stopped": LaunchCtlStop(module, service, plist), + "restarted": LaunchCtlRestart(module, service, plist), + "reloaded": LaunchCtlReload(module, service, plist), + "unloaded": LaunchCtlUnload(module, service, plist), } - status_code = '0' + status_code = "0" # Run the requested task if not module.check_mode: state, pid, status_code, err = tasks[action].run() - result['status']['current_state'] = ServiceState.to_string(state) - result['status']['current_pid'] = pid - result['status']['status_code'] = status_code - result['status']['error'] = err + result["status"]["current_state"] = ServiceState.to_string(state) + result["status"]["current_pid"] = pid + result["status"]["status_code"] = status_code + result["status"]["error"] = err - if (result['status']['current_state'] != result['status']['previous_state'] or - result['status']['current_pid'] != result['status']['previous_pid']): - result['changed'] = True + if ( + result["status"]["current_state"] != result["status"]["previous_state"] + or result["status"]["current_pid"] != result["status"]["previous_pid"] + ): + result["changed"] = True if module.check_mode: - if result['status']['current_state'] != action: - result['changed'] = True + if result["status"]["current_state"] != action: + result["changed"] = True module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/layman.py b/plugins/modules/layman.py index 842432575e4..2432077efa3 100644 --- a/plugins/modules/layman.py +++ b/plugins/modules/layman.py @@ -86,6 +86,7 @@ try: from layman.api import LaymanAPI from layman.config import BareConfig + HAS_LAYMAN_API = True except ImportError: LAYMAN_IMP_ERR = traceback.format_exc() @@ -95,7 +96,7 @@ from ansible.module_utils.urls import fetch_url -USERAGENT = 'ansible-httpget' +USERAGENT = "ansible-httpget" class ModuleError(Exception): @@ -103,39 +104,39 @@ class ModuleError(Exception): def init_layman(config=None): - '''Returns the initialized ``LaymanAPI``. + """Returns the initialized ``LaymanAPI``. 
:param config: the layman's configuration to use (optional) - ''' + """ if config is None: config = BareConfig(read_configfile=True, quietness=1) return LaymanAPI(config) def download_url(module, url, dest): - ''' + """ :param url: the URL to download :param dest: the absolute path of where to save the downloaded content to; it must be writable and not a directory :raises ModuleError - ''' + """ # Hack to add params in the form that fetch_url expects - module.params['http_agent'] = USERAGENT + module.params["http_agent"] = USERAGENT response, info = fetch_url(module, url) - if info['status'] != 200: + if info["status"] != 200: raise ModuleError(f"Failed to get {url}: {info['msg']}") try: - with open(dest, 'w') as f: + with open(dest, "w") as f: shutil.copyfileobj(response, f) except IOError as e: raise ModuleError(f"Failed to write: {e}") def install_overlay(module, name, list_url=None): - '''Installs the overlay repository. If not on the central overlays list, + """Installs the overlay repository. If not on the central overlays list, then :list_url of an alternative list must be provided. The list will be fetched and saved under ``%(overlay_defs)/%(name.xml)`` (location of the ``overlay_defs`` is read from the Layman's configuration). @@ -147,7 +148,7 @@ def install_overlay(module, name, list_url=None): :returns: True if the overlay was installed, or False if it already exists (i.e. nothing has changed) :raises ModuleError - ''' + """ # read Layman configuration layman_conf = BareConfig(read_configfile=True) layman = init_layman(layman_conf) @@ -161,9 +162,11 @@ def install_overlay(module, name, list_url=None): if not layman.is_repo(name): if not list_url: - raise ModuleError(f"Overlay '{name}' is not on the list of known overlays and URL of the remote list was not provided.") + raise ModuleError( + f"Overlay '{name}' is not on the list of known overlays and URL of the remote list was not provided." + ) - overlay_defs = layman_conf.get_option('overlay_defs') + overlay_defs = layman_conf.get_option("overlay_defs") dest = path.join(overlay_defs, f"{name}.xml") download_url(module, list_url, dest) @@ -178,14 +181,14 @@ def install_overlay(module, name, list_url=None): def uninstall_overlay(module, name): - '''Uninstalls the given overlay repository from the system. + """Uninstalls the given overlay repository from the system. :param name: the overlay id to uninstall :returns: True if the overlay was uninstalled, or False if it doesn't exist (i.e. nothing has changed) :raises ModuleError - ''' + """ layman = init_layman() if not layman.is_installed(name): @@ -203,11 +206,11 @@ def uninstall_overlay(module, name): def sync_overlay(name): - '''Synchronizes the specified overlay repository. + """Synchronizes the specified overlay repository. :param name: the overlay repository id to sync :raises ModuleError - ''' + """ layman = init_layman() if not layman.sync(name): @@ -216,10 +219,10 @@ def sync_overlay(name): def sync_overlays(): - '''Synchronize all of the installed overlays. + """Synchronize all of the installed overlays. 
:raises ModuleError - ''' + """ layman = init_layman() for name in layman.get_installed(): @@ -231,25 +234,25 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), - list_url=dict(aliases=['url']), - state=dict(default="present", choices=['present', 'absent', 'updated']), - validate_certs=dict(default=True, type='bool'), + list_url=dict(aliases=["url"]), + state=dict(default="present", choices=["present", "absent", "updated"]), + validate_certs=dict(default=True, type="bool"), ), - supports_check_mode=True + supports_check_mode=True, ) if not HAS_LAYMAN_API: - module.fail_json(msg=missing_required_lib('Layman'), exception=LAYMAN_IMP_ERR) + module.fail_json(msg=missing_required_lib("Layman"), exception=LAYMAN_IMP_ERR) - state, name, url = (module.params[key] for key in ['state', 'name', 'list_url']) + state, name, url = (module.params[key] for key in ["state", "name", "list_url"]) changed = False try: - if state == 'present': + if state == "present": changed = install_overlay(module, name, url) - elif state == 'updated': - if name == 'ALL': + elif state == "updated": + if name == "ALL": sync_overlays() elif install_overlay(module, name, url): changed = True @@ -264,5 +267,5 @@ def main(): module.exit_json(changed=changed, name=name) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/lbu.py b/plugins/modules/lbu.py index 5fb2484a589..3996db08cbc 100644 --- a/plugins/modules/lbu.py +++ b/plugins/modules/lbu.py @@ -80,19 +80,17 @@ def run_module(): module = AnsibleModule( argument_spec={ - 'commit': {'type': 'bool'}, - 'exclude': {'type': 'list', 'elements': 'str'}, - 'include': {'type': 'list', 'elements': 'str'} + "commit": {"type": "bool"}, + "exclude": {"type": "list", "elements": "str"}, + "include": {"type": "list", "elements": "str"}, }, - supports_check_mode=True + supports_check_mode=True, ) changed = False def run_lbu(*args): - code, stdout, stderr = module.run_command( - [module.get_bin_path('lbu', required=True)] + list(args) - ) + code, stdout, stderr = module.run_command([module.get_bin_path("lbu", required=True)] + list(args)) if code: module.fail_json(changed=changed, msg=stderr) return stdout @@ -100,27 +98,27 @@ def run_lbu(*args): update = False commit = False - for param in ('include', 'exclude'): + for param in ("include", "exclude"): if module.params[param]: - paths = run_lbu(param, '-l').split('\n') + paths = run_lbu(param, "-l").split("\n") for path in module.params[param]: if os.path.normpath(f"/{path}")[1:] not in paths: update = True - if module.params['commit']: - commit = update or run_lbu('status') > '' + if module.params["commit"]: + commit = update or run_lbu("status") > "" if module.check_mode: module.exit_json(changed=update or commit) if update: - for param in ('include', 'exclude'): + for param in ("include", "exclude"): if module.params[param]: run_lbu(param, *module.params[param]) changed = True if commit: - run_lbu('commit') + run_lbu("commit") changed = True module.exit_json(changed=changed) @@ -130,5 +128,5 @@ def main(): run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ldap_attrs.py b/plugins/modules/ldap_attrs.py index 3597fc5b38f..386263ad987 100644 --- a/plugins/modules/ldap_attrs.py +++ b/plugins/modules/ldap_attrs.py @@ -163,7 +163,11 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_native, to_bytes, to_text -from 
ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together +from ansible_collections.community.general.plugins.module_utils.ldap import ( + LdapGeneric, + gen_specs, + ldap_required_together, +) import re @@ -183,30 +187,28 @@ def __init__(self, module): LdapGeneric.__init__(self, module) # Shortcuts - self.attrs = self.module.params['attributes'] - self.state = self.module.params['state'] - self.ordered = self.module.params['ordered'] + self.attrs = self.module.params["attributes"] + self.state = self.module.params["state"] + self.ordered = self.module.params["ordered"] def _order_values(self, values): - """ Prepend X-ORDERED index numbers to attribute's values. """ + """Prepend X-ORDERED index numbers to attribute's values.""" ordered_values = [] if isinstance(values, list): for index, value in enumerate(values): - cleaned_value = re.sub(r'^\{\d+\}', '', value) + cleaned_value = re.sub(r"^\{\d+\}", "", value) ordered_values.append(f"{{{index!s}}}{cleaned_value}") return ordered_values def _normalize_values(self, values): - """ Normalize attribute's values. """ + """Normalize attribute's values.""" norm_values = [] if isinstance(values, list): if self.ordered: - norm_values = list(map(to_bytes, - self._order_values(list(map(str, - values))))) + norm_values = list(map(to_bytes, self._order_values(list(map(str, values))))) else: norm_values = list(map(to_bytes, values)) else: @@ -217,7 +219,7 @@ def _normalize_values(self, values): def add(self): modlist = [] new_attrs = {} - for name, values in self.module.params['attributes'].items(): + for name, values in self.module.params["attributes"].items(): norm_values = self._normalize_values(values) added_values = [] for value in norm_values: @@ -232,7 +234,7 @@ def delete(self): modlist = [] old_attrs = {} new_attrs = {} - for name, values in self.module.params['attributes'].items(): + for name, values in self.module.params["attributes"].items(): norm_values = self._normalize_values(values) removed_values = [] for value in norm_values: @@ -248,11 +250,10 @@ def exact(self): modlist = [] old_attrs = {} new_attrs = {} - for name, values in self.module.params['attributes'].items(): + for name, values in self.module.params["attributes"].items(): norm_values = self._normalize_values(values) try: - results = self.connection.search_s( - self.dn, ldap.SCOPE_BASE, attrlist=[name]) + results = self.connection.search_s(self.dn, ldap.SCOPE_BASE, attrlist=[name]) except ldap.LDAPError as e: self.fail(f"Cannot search for attribute {name}", e) @@ -274,7 +275,7 @@ def exact(self): return modlist, old_attrs, new_attrs def _is_value_present(self, name, value): - """ True if the target attribute has the given value. """ + """True if the target attribute has the given value.""" try: escaped_value = ldap.filter.escape_filter_chars(to_text(value)) filterstr = f"({name}={escaped_value})" @@ -286,38 +287,37 @@ def _is_value_present(self, name, value): return is_present def _is_value_absent(self, name, value): - """ True if the target attribute doesn't have the given value. 
""" + """True if the target attribute doesn't have the given value.""" return not self._is_value_present(name, value) def main(): module = AnsibleModule( argument_spec=gen_specs( - attributes=dict(type='dict', required=True), - ordered=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['absent', 'exact', 'present']), + attributes=dict(type="dict", required=True), + ordered=dict(type="bool", default=False), + state=dict(type="str", default="present", choices=["absent", "exact", "present"]), ), supports_check_mode=True, required_together=ldap_required_together(), ) if not HAS_LDAP: - module.fail_json(msg=missing_required_lib('python-ldap'), - exception=LDAP_IMP_ERR) + module.fail_json(msg=missing_required_lib("python-ldap"), exception=LDAP_IMP_ERR) # Instantiate the LdapAttr object ldap = LdapAttrs(module) old_attrs = None new_attrs = None - state = module.params['state'] + state = module.params["state"] # Perform action - if state == 'present': + if state == "present": modlist, old_attrs, new_attrs = ldap.add() - elif state == 'absent': + elif state == "absent": modlist, old_attrs, new_attrs = ldap.delete() - elif state == 'exact': + elif state == "exact": modlist, old_attrs, new_attrs = ldap.exact() changed = False @@ -334,5 +334,5 @@ def main(): module.exit_json(changed=changed, modlist=modlist, diff={"before": old_attrs, "after": new_attrs}) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ldap_entry.py b/plugins/modules/ldap_entry.py index 05242304bd2..5d95aeeb33a 100644 --- a/plugins/modules/ldap_entry.py +++ b/plugins/modules/ldap_entry.py @@ -133,7 +133,11 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_native, to_bytes -from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together +from ansible_collections.community.general.plugins.module_utils.ldap import ( + LdapGeneric, + gen_specs, + ldap_required_together, +) LDAP_IMP_ERR = None try: @@ -151,22 +155,21 @@ def __init__(self, module): LdapGeneric.__init__(self, module) # Shortcuts - self.state = self.module.params['state'] - self.recursive = self.module.params['recursive'] + self.state = self.module.params["state"] + self.recursive = self.module.params["recursive"] # Add the objectClass into the list of attributes - self.module.params['attributes']['objectClass'] = ( - self.module.params['objectClass']) + self.module.params["attributes"]["objectClass"] = self.module.params["objectClass"] # Load attributes - if self.state == 'present': + if self.state == "present": self.attrs = self._load_attrs() def _load_attrs(self): - """ Turn attribute's value to array. """ + """Turn attribute's value to array.""" attrs = {} - for name, value in self.module.params['attributes'].items(): + for name, value in self.module.params["attributes"].items(): if isinstance(value, list): attrs[name] = list(map(to_bytes, value)) else: @@ -175,7 +178,8 @@ def _load_attrs(self): return attrs def add(self): - """ If self.dn does not exist, returns a callable that will add it. 
""" + """If self.dn does not exist, returns a callable that will add it.""" + def _add(): self.connection.add_s(self.dn, modlist) @@ -188,20 +192,21 @@ def _add(): return action def delete(self): - """ If self.dn exists, returns a callable that will delete either + """If self.dn exists, returns a callable that will delete either the item itself if the recursive option is not set or the whole branch - if it is. """ + if it is.""" + def _delete(): self.connection.delete_s(self.dn) def _delete_recursive(): - """ Attempt recursive deletion using the subtree-delete control. - If that fails, do it manually. """ + """Attempt recursive deletion using the subtree-delete control. + If that fails, do it manually.""" try: - subtree_delete = ldap.controls.ValueLessRequestControl('1.2.840.113556.1.4.805') + subtree_delete = ldap.controls.ValueLessRequestControl("1.2.840.113556.1.4.805") self.connection.delete_ext_s(self.dn, serverctrls=[subtree_delete]) except ldap.NOT_ALLOWED_ON_NONLEAF: - search = self.connection.search_s(self.dn, ldap.SCOPE_SUBTREE, attrlist=('dn',)) + search = self.connection.search_s(self.dn, ldap.SCOPE_SUBTREE, attrlist=("dn",)) search.reverse() for entry in search: self.connection.delete_s(entry[0]) @@ -230,29 +235,28 @@ def _is_entry_present(self): def main(): module = AnsibleModule( argument_spec=gen_specs( - attributes=dict(default={}, type='dict'), - objectClass=dict(type='list', elements='str'), - state=dict(default='present', choices=['present', 'absent']), - recursive=dict(default=False, type='bool'), + attributes=dict(default={}, type="dict"), + objectClass=dict(type="list", elements="str"), + state=dict(default="present", choices=["present", "absent"]), + recursive=dict(default=False, type="bool"), ), - required_if=[('state', 'present', ['objectClass'])], + required_if=[("state", "present", ["objectClass"])], supports_check_mode=True, required_together=ldap_required_together(), ) if not HAS_LDAP: - module.fail_json(msg=missing_required_lib('python-ldap'), - exception=LDAP_IMP_ERR) + module.fail_json(msg=missing_required_lib("python-ldap"), exception=LDAP_IMP_ERR) - state = module.params['state'] + state = module.params["state"] # Instantiate the LdapEntry object ldap = LdapEntry(module) # Get the action function - if state == 'present': + if state == "present": action = ldap.add() - elif state == 'absent': + elif state == "absent": action = ldap.delete() # Perform the action @@ -265,5 +269,5 @@ def main(): module.exit_json(changed=(action is not None)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ldap_inc.py b/plugins/modules/ldap_inc.py index 4fe0dc80fef..db2c6cb7c75 100644 --- a/plugins/modules/ldap_inc.py +++ b/plugins/modules/ldap_inc.py @@ -119,9 +119,13 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_native, to_bytes from ansible_collections.community.general.plugins.module_utils import deps -from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together +from ansible_collections.community.general.plugins.module_utils.ldap import ( + LdapGeneric, + gen_specs, + ldap_required_together, +) -with deps.declare("ldap", reason=missing_required_lib('python-ldap')): +with deps.declare("ldap", reason=missing_required_lib("python-ldap")): import ldap import ldap.controls.readentry @@ -130,16 +134,15 @@ class LdapInc(LdapGeneric): def __init__(self, module): 
LdapGeneric.__init__(self, module)

         # Shortcuts
-        self.attr = self.module.params['attribute']
-        self.increment = self.module.params['increment']
-        self.method = self.module.params['method']
+        self.attr = self.module.params["attribute"]
+        self.increment = self.module.params["increment"]
+        self.method = self.module.params["method"]

     def inc_rfc4525(self):
         return [(ldap.MOD_INCREMENT, self.attr, [to_bytes(str(self.increment))])]

     def inc_legacy(self, curr_val, new_val):
-        return [(ldap.MOD_DELETE, self.attr, [to_bytes(curr_val)]),
-                (ldap.MOD_ADD, self.attr, [to_bytes(new_val)])]
+        return [(ldap.MOD_DELETE, self.attr, [to_bytes(curr_val)]), (ldap.MOD_ADD, self.attr, [to_bytes(new_val)])]

     def serverControls(self):
         return [ldap.controls.readentry.PostReadControl(attrList=[self.attr])]
@@ -150,9 +153,9 @@ def serverControls(self):
 def main():
     module = AnsibleModule(
         argument_spec=gen_specs(
-            attribute=dict(type='str', required=True),
-            increment=dict(type='int', default=1),
-            method=dict(type='str', default='auto', choices=['auto', 'rfc4525', 'legacy']),
+            attribute=dict(type="str", required=True),
+            increment=dict(type="int", default=1),
+            method=dict(type="str", default="auto", choices=["auto", "rfc4525", "legacy"]),
         ),
         supports_check_mode=True,
         required_together=ldap_required_together(),
@@ -174,23 +177,18 @@ def main():
         if mod.method != "auto":
             rfc4525 = mod.method == "rfc4525"
         else:
-            rootDSE = mod.connection.search_ext_s(
-                base="",
-                scope=ldap.SCOPE_BASE,
-                attrlist=["*", "+"])
+            rootDSE = mod.connection.search_ext_s(base="", scope=ldap.SCOPE_BASE, attrlist=["*", "+"])
             if len(rootDSE) == 1:
                 if to_bytes(ldap.CONTROL_POST_READ) in rootDSE[0][1]["supportedControl"] and (
-                    mod.LDAP_MOD_INCREMENT in rootDSE[0][1]["supportedFeatures"] or
-                    mod.LDAP_MOD_INCREMENT in rootDSE[0][1]["supportedExtension"]
+                    mod.LDAP_MOD_INCREMENT in rootDSE[0][1]["supportedFeatures"]
+                    or mod.LDAP_MOD_INCREMENT in rootDSE[0][1]["supportedExtension"]
                 ):
                     rfc4525 = True

         if rfc4525:
             dummy, dummy, dummy, resp_ctrls = mod.connection.modify_ext_s(
-                dn=mod.dn,
-                modlist=mod.inc_rfc4525(),
-                serverctrls=mod.serverControls(),
-                clientctrls=None)
+                dn=mod.dn, modlist=mod.inc_rfc4525(), serverctrls=mod.serverControls(), clientctrls=None
+            )
             if len(resp_ctrls) == 1:
                 ret = resp_ctrls[0].entry[mod.attr][0]

@@ -200,19 +198,15 @@ def main():
             while tries < max_tries:
                 tries = tries + 1
                 result = mod.connection.search_ext_s(
-                    base=mod.dn,
-                    scope=ldap.SCOPE_BASE,
-                    filterstr=f"({mod.attr}=*)",
-                    attrlist=[mod.attr])
+                    base=mod.dn, scope=ldap.SCOPE_BASE, filterstr=f"({mod.attr}=*)", attrlist=[mod.attr]
+                )
                 if len(result) != 1:
                     module.fail_json(msg="The entry does not exist or does not contain the specified attribute.")
                     return
                 try:
                     ret = str(int(result[0][1][mod.attr][0]) + mod.increment)
                     # if the current value (first arg of inc_legacy) has changed, the modify will fail
-                    mod.connection.modify_s(
-                        dn=mod.dn,
-                        modlist=mod.inc_legacy(result[0][1][mod.attr][0], ret))
+                    mod.connection.modify_s(dn=mod.dn, modlist=mod.inc_legacy(result[0][1][mod.attr][0], ret))
                     break
                 except ldap.NO_SUCH_ATTRIBUTE:
                     if tries == max_tries:
@@ -221,10 +215,8 @@ def main():

         else:
             result = mod.connection.search_ext_s(
-                base=mod.dn,
-                scope=ldap.SCOPE_BASE,
-                filterstr=f"({mod.attr}=*)",
-                attrlist=[mod.attr])
+                base=mod.dn, scope=ldap.SCOPE_BASE, filterstr=f"({mod.attr}=*)", attrlist=[mod.attr]
+            )
             if len(result) == 1:
                 ret = str(int(result[0][1][mod.attr][0]) + mod.increment)
                 changed = mod.increment != 0
@@ -237,5 +229,5 @@ def main():
module.exit_json(changed=changed, incremented=changed, attribute=mod.attr, value=ret, rfc4525=rfc4525) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ldap_passwd.py b/plugins/modules/ldap_passwd.py index 86cd923c95d..23826cfe461 100644 --- a/plugins/modules/ldap_passwd.py +++ b/plugins/modules/ldap_passwd.py @@ -60,7 +60,11 @@ import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together +from ansible_collections.community.general.plugins.module_utils.ldap import ( + LdapGeneric, + gen_specs, + ldap_required_together, +) LDAP_IMP_ERR = None try: @@ -77,7 +81,7 @@ def __init__(self, module): LdapGeneric.__init__(self, module) # Shortcuts - self.passwd = self.module.params['passwd'] + self.passwd = self.module.params["passwd"] def passwd_check(self): try: @@ -125,8 +129,7 @@ def main(): ) if not HAS_LDAP: - module.fail_json(msg=missing_required_lib('python-ldap'), - exception=LDAP_IMP_ERR) + module.fail_json(msg=missing_required_lib("python-ldap"), exception=LDAP_IMP_ERR) ldap = LdapPasswd(module) @@ -136,5 +139,5 @@ def main(): module.exit_json(changed=ldap.passwd_set()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ldap_search.py b/plugins/modules/ldap_search.py index 61d37b44a9c..cc6fe4c8ff9 100644 --- a/plugins/modules/ldap_search.py +++ b/plugins/modules/ldap_search.py @@ -108,7 +108,11 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text -from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together +from ansible_collections.community.general.plugins.module_utils.ldap import ( + LdapGeneric, + gen_specs, + ldap_required_together, +) LDAP_IMP_ERR = None try: @@ -123,21 +127,20 @@ def main(): module = AnsibleModule( argument_spec=gen_specs( - dn=dict(type='str', required=True), - scope=dict(type='str', default='base', choices=['base', 'onelevel', 'subordinate', 'children']), - filter=dict(type='str', default='(objectClass=*)'), - attrs=dict(type='list', elements='str'), - schema=dict(type='bool', default=False), - page_size=dict(type='int', default=0), - base64_attributes=dict(type='list', elements='str'), + dn=dict(type="str", required=True), + scope=dict(type="str", default="base", choices=["base", "onelevel", "subordinate", "children"]), + filter=dict(type="str", default="(objectClass=*)"), + attrs=dict(type="list", elements="str"), + schema=dict(type="bool", default=False), + page_size=dict(type="int", default=0), + base64_attributes=dict(type="list", elements="str"), ), supports_check_mode=True, required_together=ldap_required_together(), ) if not HAS_LDAP: - module.fail_json(msg=missing_required_lib('python-ldap'), - exception=LDAP_IMP_ERR) + module.fail_json(msg=missing_required_lib("python-ldap"), exception=LDAP_IMP_ERR) try: LdapSearch(module).main() @@ -148,22 +151,22 @@ def main(): def _normalize_string(val, convert_to_base64): if isinstance(val, (str, bytes)): if isinstance(val, str): - val = to_bytes(val, encoding='utf-8') + val = to_bytes(val, encoding="utf-8") if convert_to_base64: val = to_text(base64.b64encode(val)) else: # See https://github.com/ansible/ansible/issues/80258#issuecomment-1477038952 for details. 
# We want to make sure that all strings are properly UTF-8 encoded, even if they were not, # or happened to be byte strings. - val = to_text(val, 'utf-8', errors='replace') + val = to_text(val, "utf-8", errors="replace") # See also https://github.com/ansible-collections/community.general/issues/5704. return val def _extract_entry(dn, attrs, base64_attributes): - extracted = {'dn': dn} + extracted = {"dn": dn} for attr, val in list(attrs.items()): - convert_to_base64 = '*' in base64_attributes or attr in base64_attributes + convert_to_base64 = "*" in base64_attributes or attr in base64_attributes if len(val) == 1: extracted[attr] = _normalize_string(val[0], convert_to_base64) else: @@ -175,16 +178,16 @@ class LdapSearch(LdapGeneric): def __init__(self, module): LdapGeneric.__init__(self, module) - self.filterstr = self.module.params['filter'] + self.filterstr = self.module.params["filter"] self.attrlist = [] - self.page_size = self.module.params['page_size'] + self.page_size = self.module.params["page_size"] self._load_scope() self._load_attrs() self._load_schema() - self._base64_attributes = set(self.module.params['base64_attributes'] or []) + self._base64_attributes = set(self.module.params["base64_attributes"] or []) def _load_schema(self): - self.schema = self.module.params['schema'] + self.schema = self.module.params["schema"] if self.schema: self.attrsonly = 1 else: @@ -197,10 +200,10 @@ def _load_scope(self): subordinate=ldap.SCOPE_SUBORDINATE, children=ldap.SCOPE_SUBTREE, ) - self.scope = spec[self.module.params['scope']] + self.scope = spec[self.module.params["scope"]] def _load_attrs(self): - self.attrlist = self.module.params['attrs'] or None + self.attrlist = self.module.params["attrs"] or None def main(self): results = self.perform_search() @@ -210,7 +213,7 @@ def perform_search(self): ldap_entries = [] controls = [] if self.page_size > 0: - controls.append(ldap.controls.libldap.SimplePagedResultsControl(True, size=self.page_size, cookie='')) + controls.append(ldap.controls.libldap.SimplePagedResultsControl(True, size=self.page_size, cookie="")) try: while True: response = self.connection.search_ext( @@ -228,7 +231,11 @@ def perform_search(self): ldap_entries.append(dict(dn=result[0], attrs=list(result[1].keys()))) else: ldap_entries.append(_extract_entry(result[0], result[1], self._base64_attributes)) - cookies = [c.cookie for c in serverctrls if c.controlType == ldap.controls.libldap.SimplePagedResultsControl.controlType] + cookies = [ + c.cookie + for c in serverctrls + if c.controlType == ldap.controls.libldap.SimplePagedResultsControl.controlType + ] if self.page_size > 0 and cookies and cookies[0]: controls[0].cookie = cookies[0] else: @@ -237,5 +244,5 @@ def perform_search(self): self.module.fail_json(msg=f"Base not found: {self.dn}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/librato_annotation.py b/plugins/modules/librato_annotation.py index d688e51a25f..c3ac6fa0b5f 100644 --- a/plugins/modules/librato_annotation.py +++ b/plugins/modules/librato_annotation.py @@ -112,39 +112,39 @@ def post_annotation(module): - user = module.params['user'] - api_key = module.params['api_key'] - name = module.params['name'] - title = module.params['title'] + user = module.params["user"] + api_key = module.params["api_key"] + name = module.params["name"] + title = module.params["title"] - url = f'https://metrics-api.librato.com/v1/annotations/{name}' + url = f"https://metrics-api.librato.com/v1/annotations/{name}" params = {} - 
params['title'] = title - - if module.params['source'] is not None: - params['source'] = module.params['source'] - if module.params['description'] is not None: - params['description'] = module.params['description'] - if module.params['start_time'] is not None: - params['start_time'] = module.params['start_time'] - if module.params['end_time'] is not None: - params['end_time'] = module.params['end_time'] - if module.params['links'] is not None: - params['links'] = module.params['links'] + params["title"] = title + + if module.params["source"] is not None: + params["source"] = module.params["source"] + if module.params["description"] is not None: + params["description"] = module.params["description"] + if module.params["start_time"] is not None: + params["start_time"] = module.params["start_time"] + if module.params["end_time"] is not None: + params["end_time"] = module.params["end_time"] + if module.params["links"] is not None: + params["links"] = module.params["links"] json_body = module.jsonify(params) headers = {} - headers['Content-Type'] = 'application/json' + headers["Content-Type"] = "application/json" # Hack send parameters the way fetch_url wants them - module.params['url_username'] = user - module.params['url_password'] = api_key + module.params["url_username"] = user + module.params["url_password"] = api_key response, info = fetch_url(module, url, data=json_body, headers=headers) - response_code = str(info['status']) - response_body = info['body'] - if info['status'] != 201: - if info['status'] >= 400: + response_code = str(info["status"]) + response_body = info["body"] + if info["status"] != 201: + if info["status"] >= 400: module.fail_json(msg=f"Request Failed. Response code: {response_code} Response body: {response_body}") else: module.fail_json(msg=f"Request Failed. 
Response code: {response_code}")
@@ -153,7 +153,6 @@ def post_annotation(module):


 def main():
-
     module = AnsibleModule(
         argument_spec=dict(
             user=dict(required=True),
@@ -162,14 +161,14 @@ def main():
             title=dict(required=True),
             source=dict(),
             description=dict(),
-            start_time=dict(type='int'),
-            end_time=dict(type='int'),
-            links=dict(type='list', elements='dict')
+            start_time=dict(type="int"),
+            end_time=dict(type="int"),
+            links=dict(type="list", elements="dict"),
         )
     )

     post_annotation(module)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/linode.py b/plugins/modules/linode.py
index 55a7dbaf3b9..8bdf01fee0b 100644
--- a/plugins/modules/linode.py
+++ b/plugins/modules/linode.py
@@ -278,6 +278,7 @@
 LINODE_IMP_ERR = None
 try:
     from linode import api as linode_api
+
     HAS_LINODE = True
 except ImportError:
     LINODE_IMP_ERR = traceback.format_exc()
@@ -287,54 +288,66 @@


 def randompass():
-    '''
+    """
     Generate a long random password that complies with Linode requirements
-    '''
+    """
     # Linode API currently requires the following:
     # It must contain at least two of these four character classes:
     # lower case letters - upper case letters - numbers - punctuation
     # we play it safe :)
     import random
     import string
+
     # as of python 2.4, this reseeds the PRNG from urandom
     random.seed()
-    lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6))
-    upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6))
-    number = ''.join(random.choice(string.digits) for x in range(6))
-    punct = ''.join(random.choice(string.punctuation) for x in range(6))
+    lower = "".join(random.choice(string.ascii_lowercase) for x in range(6))
+    upper = "".join(random.choice(string.ascii_uppercase) for x in range(6))
+    number = "".join(random.choice(string.digits) for x in range(6))
+    punct = "".join(random.choice(string.punctuation) for x in range(6))
     p = lower + upper + number + punct
-    return ''.join(random.sample(p, len(p)))
+    return "".join(random.sample(p, len(p)))


 def getInstanceDetails(api, server):
-    '''
+    """
     Return the details of an instance, populating IPs, etc.
- ''' - instance = {'id': server['LINODEID'], - 'name': server['LABEL'], - 'public': [], - 'private': []} + """ + instance = {"id": server["LINODEID"], "name": server["LABEL"], "public": [], "private": []} # Populate with ips - for ip in api.linode_ip_list(LinodeId=server['LINODEID']): - if ip['ISPUBLIC'] and 'ipv4' not in instance: - instance['ipv4'] = ip['IPADDRESS'] - instance['fqdn'] = ip['RDNS_NAME'] - if ip['ISPUBLIC']: - instance['public'].append({'ipv4': ip['IPADDRESS'], - 'fqdn': ip['RDNS_NAME'], - 'ip_id': ip['IPADDRESSID']}) + for ip in api.linode_ip_list(LinodeId=server["LINODEID"]): + if ip["ISPUBLIC"] and "ipv4" not in instance: + instance["ipv4"] = ip["IPADDRESS"] + instance["fqdn"] = ip["RDNS_NAME"] + if ip["ISPUBLIC"]: + instance["public"].append({"ipv4": ip["IPADDRESS"], "fqdn": ip["RDNS_NAME"], "ip_id": ip["IPADDRESSID"]}) else: - instance['private'].append({'ipv4': ip['IPADDRESS'], - 'fqdn': ip['RDNS_NAME'], - 'ip_id': ip['IPADDRESSID']}) + instance["private"].append({"ipv4": ip["IPADDRESS"], "fqdn": ip["RDNS_NAME"], "ip_id": ip["IPADDRESSID"]}) return instance -def linodeServers(module, api, state, name, - displaygroup, plan, additional_disks, distribution, - datacenter, kernel_id, linode_id, payment_term, password, - private_ip, ssh_pub_key, swap, wait, wait_timeout, watchdog, **kwargs): +def linodeServers( + module, + api, + state, + name, + displaygroup, + plan, + additional_disks, + distribution, + datacenter, + kernel_id, + linode_id, + payment_term, + password, + private_ip, + ssh_pub_key, + swap, + wait, + wait_timeout, + watchdog, + **kwargs, +): instances = [] changed = False new_server = False @@ -355,7 +368,7 @@ def linodeServers(module, api, state, name, configs = api.linode_config_list(LinodeId=linode_id) # Act on the state - if state in ('active', 'present', 'started'): + if state in ("active", "present", "started"): # TODO: validate all the plan / distribution / datacenter are valid # Multi step process/validation: @@ -367,19 +380,18 @@ def linodeServers(module, api, state, name, if not servers: for arg in (name, plan, distribution, datacenter): if not arg: - module.fail_json(msg=f'{arg} is required for {state} state') + module.fail_json(msg=f"{arg} is required for {state} state") # Create linode entity new_server = True # Get size of all individually listed disks to subtract from Distribution disk - used_disk_space = 0 if additional_disks is None else sum(disk['Size'] for disk in additional_disks) + used_disk_space = 0 if additional_disks is None else sum(disk["Size"] for disk in additional_disks) try: - res = api.linode_create(DatacenterID=datacenter, PlanID=plan, - PaymentTerm=payment_term) - linode_id = res['LinodeID'] + res = api.linode_create(DatacenterID=datacenter, PlanID=plan, PaymentTerm=payment_term) + linode_id = res["LinodeID"] # Update linode Label to match name - api.linode_update(LinodeId=linode_id, Label=f'{linode_id}-{name}') + api.linode_update(LinodeId=linode_id, Label=f"{linode_id}-{name}") # Update Linode with Ansible configuration options api.linode_update(LinodeId=linode_id, LPM_DISPLAYGROUP=displaygroup, WATCHDOG=watchdog, **kwargs) # Save server @@ -397,7 +409,7 @@ def linodeServers(module, api, state, name, if not disks: for arg in (name, linode_id, distribution): if not arg: - module.fail_json(msg=f'{arg} is required for {state} state') + module.fail_json(msg=f"{arg} is required for {state} state") # Create disks (1 from distrib, 1 for SWAP) new_server = True try: @@ -407,34 +419,41 @@ def linodeServers(module, api, state, 
name, if not swap: swap = 512 # Create data disk - size = servers[0]['TOTALHD'] - used_disk_space - swap + size = servers[0]["TOTALHD"] - used_disk_space - swap if ssh_pub_key: res = api.linode_disk_createfromdistribution( - LinodeId=linode_id, DistributionID=distribution, - rootPass=password, rootSSHKey=ssh_pub_key, - Label=f'{name} data disk (lid: {linode_id})', - Size=size) + LinodeId=linode_id, + DistributionID=distribution, + rootPass=password, + rootSSHKey=ssh_pub_key, + Label=f"{name} data disk (lid: {linode_id})", + Size=size, + ) else: res = api.linode_disk_createfromdistribution( - LinodeId=linode_id, DistributionID=distribution, + LinodeId=linode_id, + DistributionID=distribution, rootPass=password, - Label=f'{name} data disk (lid: {linode_id})', - Size=size) - jobs.append(res['JobID']) + Label=f"{name} data disk (lid: {linode_id})", + Size=size, + ) + jobs.append(res["JobID"]) # Create SWAP disk - res = api.linode_disk_create(LinodeId=linode_id, Type='swap', - Label=f'{name} swap disk (lid: {linode_id})', - Size=swap) + res = api.linode_disk_create( + LinodeId=linode_id, Type="swap", Label=f"{name} swap disk (lid: {linode_id})", Size=swap + ) # Create individually listed disks at specified size if additional_disks: for disk in additional_disks: # If a disk Type is not passed in, default to ext4 - if disk.get('Type') is None: - disk['Type'] = 'ext4' - res = api.linode_disk_create(LinodeID=linode_id, Label=disk['Label'], Size=disk['Size'], Type=disk['Type']) + if disk.get("Type") is None: + disk["Type"] = "ext4" + res = api.linode_disk_create( + LinodeID=linode_id, Label=disk["Label"], Size=disk["Size"], Type=disk["Type"] + ) - jobs.append(res['JobID']) + jobs.append(res["JobID"]) except Exception as e: # TODO: destroy linode ? module.fail_json(msg=f"{e.value[0]['ERRORMESSAGE']}", exception=traceback.format_exc()) @@ -442,42 +461,43 @@ def linodeServers(module, api, state, name, if not configs: for arg in (name, linode_id, distribution): if not arg: - module.fail_json(msg=f'{arg} is required for {state} state') + module.fail_json(msg=f"{arg} is required for {state} state") # Check architecture for distrib in api.avail_distributions(): - if distrib['DISTRIBUTIONID'] != distribution: + if distrib["DISTRIBUTIONID"] != distribution: continue - arch = '32' - if distrib['IS64BIT']: - arch = '64' + arch = "32" + if distrib["IS64BIT"]: + arch = "64" break # Get latest kernel matching arch if kernel_id is not specified if not kernel_id: for kernel in api.avail_kernels(): - if not kernel['LABEL'].startswith(f'Latest {arch}'): + if not kernel["LABEL"].startswith(f"Latest {arch}"): continue - kernel_id = kernel['KERNELID'] + kernel_id = kernel["KERNELID"] break # Get disk list disks_id = [] for disk in api.linode_disk_list(LinodeId=linode_id): - if disk['TYPE'] == 'ext3': - disks_id.insert(0, str(disk['DISKID'])) + if disk["TYPE"] == "ext3": + disks_id.insert(0, str(disk["DISKID"])) continue - disks_id.append(str(disk['DISKID'])) + disks_id.append(str(disk["DISKID"])) # Trick to get the 9 items in the list while len(disks_id) < 9: - disks_id.append('') - disks_list = ','.join(disks_id) + disks_id.append("") + disks_list = ",".join(disks_id) # Create config new_server = True try: - api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id, - Disklist=disks_list, Label=f'{name} config') + api.linode_config_create( + LinodeId=linode_id, KernelId=kernel_id, Disklist=disks_list, Label=f"{name} config" + ) configs = api.linode_config_list(LinodeId=linode_id) except Exception as e: 
module.fail_json(msg=f"{e.value[0]['ERRORMESSAGE']}", exception=traceback.format_exc()) @@ -485,85 +505,85 @@ def linodeServers(module, api, state, name, # Start / Ensure servers are running for server in servers: # Refresh server state - server = api.linode_list(LinodeId=server['LINODEID'])[0] + server = api.linode_list(LinodeId=server["LINODEID"])[0] # Ensure existing servers are up and running, boot if necessary - if server['STATUS'] != 1: + if server["STATUS"] != 1: res = api.linode_boot(LinodeId=linode_id) - jobs.append(res['JobID']) + jobs.append(res["JobID"]) changed = True # wait here until the instances are up wait_timeout = time.time() + wait_timeout while wait and wait_timeout > time.time(): # refresh the server details - server = api.linode_list(LinodeId=server['LINODEID'])[0] + server = api.linode_list(LinodeId=server["LINODEID"])[0] # status: # -2: Boot failed # 1: Running - if server['STATUS'] in (-2, 1): + if server["STATUS"] in (-2, 1): break time.sleep(5) if wait and wait_timeout <= time.time(): # waiting took too long module.fail_json(msg=f"Timeout waiting on {server['LABEL']} (lid: {server['LINODEID']})") # Get a fresh copy of the server details - server = api.linode_list(LinodeId=server['LINODEID'])[0] - if server['STATUS'] == -2: + server = api.linode_list(LinodeId=server["LINODEID"])[0] + if server["STATUS"] == -2: module.fail_json(msg=f"{server['LABEL']} (lid: {server['LINODEID']}) failed to boot") # From now on we know the task is a success # Build instance report instance = getInstanceDetails(api, server) # depending on wait flag select the status if wait: - instance['status'] = 'Running' + instance["status"] = "Running" else: - instance['status'] = 'Starting' + instance["status"] = "Starting" # Return the root password if this is a new box and no SSH key # has been provided if new_server and not ssh_pub_key: - instance['password'] = password + instance["password"] = password instances.append(instance) - elif state in ('stopped',): + elif state in ("stopped",): if not servers: - module.fail_json(msg=f'Server (lid: {linode_id}) not found') + module.fail_json(msg=f"Server (lid: {linode_id}) not found") for server in servers: instance = getInstanceDetails(api, server) - if server['STATUS'] != 2: + if server["STATUS"] != 2: try: res = api.linode_shutdown(LinodeId=linode_id) except Exception as e: module.fail_json(msg=f"{e.value[0]['ERRORMESSAGE']}", exception=traceback.format_exc()) - instance['status'] = 'Stopping' + instance["status"] = "Stopping" changed = True else: - instance['status'] = 'Stopped' + instance["status"] = "Stopped" instances.append(instance) - elif state in ('restarted',): + elif state in ("restarted",): if not servers: - module.fail_json(msg=f'Server (lid: {linode_id}) not found') + module.fail_json(msg=f"Server (lid: {linode_id}) not found") for server in servers: instance = getInstanceDetails(api, server) try: - res = api.linode_reboot(LinodeId=server['LINODEID']) + res = api.linode_reboot(LinodeId=server["LINODEID"]) except Exception as e: module.fail_json(msg=f"{e.value[0]['ERRORMESSAGE']}", exception=traceback.format_exc()) - instance['status'] = 'Restarting' + instance["status"] = "Restarting" changed = True instances.append(instance) - elif state in ('absent', 'deleted'): + elif state in ("absent", "deleted"): for server in servers: instance = getInstanceDetails(api, server) try: - api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True) + api.linode_delete(LinodeId=server["LINODEID"], skipChecks=True) except Exception as e: 
module.fail_json(msg=f"{e.value[0]['ERRORMESSAGE']}", exception=traceback.format_exc()) - instance['status'] = 'Deleting' + instance["status"] = "Deleting" changed = True instances.append(instance) @@ -577,77 +597,80 @@ def linodeServers(module, api, state, name, def main(): module = AnsibleModule( argument_spec=dict( - state=dict(type='str', default='present', - choices=['absent', 'active', 'deleted', 'present', 'restarted', 'started', 'stopped']), - api_key=dict(type='str', no_log=True, required=True, fallback=(env_fallback, ['LINODE_API_KEY'])), - name=dict(type='str', required=True), - alert_bwin_enabled=dict(type='bool'), - alert_bwin_threshold=dict(type='int'), - alert_bwout_enabled=dict(type='bool'), - alert_bwout_threshold=dict(type='int'), - alert_bwquota_enabled=dict(type='bool'), - alert_bwquota_threshold=dict(type='int'), - alert_cpu_enabled=dict(type='bool'), - alert_cpu_threshold=dict(type='int'), - alert_diskio_enabled=dict(type='bool'), - alert_diskio_threshold=dict(type='int'), - backupweeklyday=dict(type='int'), - backupwindow=dict(type='int'), - displaygroup=dict(type='str', default=''), - plan=dict(type='int'), - additional_disks=dict(type='list', elements='dict'), - distribution=dict(type='int'), - datacenter=dict(type='int'), - kernel_id=dict(type='int'), - linode_id=dict(type='int', aliases=['lid']), - payment_term=dict(type='int', default=1, choices=[1, 12, 24]), - password=dict(type='str', no_log=True), - private_ip=dict(type='bool'), - ssh_pub_key=dict(type='str'), - swap=dict(type='int', default=512), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=300), - watchdog=dict(type='bool', default=True), + state=dict( + type="str", + default="present", + choices=["absent", "active", "deleted", "present", "restarted", "started", "stopped"], + ), + api_key=dict(type="str", no_log=True, required=True, fallback=(env_fallback, ["LINODE_API_KEY"])), + name=dict(type="str", required=True), + alert_bwin_enabled=dict(type="bool"), + alert_bwin_threshold=dict(type="int"), + alert_bwout_enabled=dict(type="bool"), + alert_bwout_threshold=dict(type="int"), + alert_bwquota_enabled=dict(type="bool"), + alert_bwquota_threshold=dict(type="int"), + alert_cpu_enabled=dict(type="bool"), + alert_cpu_threshold=dict(type="int"), + alert_diskio_enabled=dict(type="bool"), + alert_diskio_threshold=dict(type="int"), + backupweeklyday=dict(type="int"), + backupwindow=dict(type="int"), + displaygroup=dict(type="str", default=""), + plan=dict(type="int"), + additional_disks=dict(type="list", elements="dict"), + distribution=dict(type="int"), + datacenter=dict(type="int"), + kernel_id=dict(type="int"), + linode_id=dict(type="int", aliases=["lid"]), + payment_term=dict(type="int", default=1, choices=[1, 12, 24]), + password=dict(type="str", no_log=True), + private_ip=dict(type="bool"), + ssh_pub_key=dict(type="str"), + swap=dict(type="int", default=512), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=300), + watchdog=dict(type="bool", default=True), ), required_if=[ - ('state', 'restarted', ['linode_id']), - ('state', 'stopped', ['linode_id']), - ] + ("state", "restarted", ["linode_id"]), + ("state", "stopped", ["linode_id"]), + ], ) if not HAS_LINODE: - module.fail_json(msg=missing_required_lib('linode-python'), exception=LINODE_IMP_ERR) - - state = module.params.get('state') - api_key = module.params.get('api_key') - name = module.params.get('name') - alert_bwin_enabled = module.params.get('alert_bwin_enabled') - alert_bwin_threshold = 
module.params.get('alert_bwin_threshold') - alert_bwout_enabled = module.params.get('alert_bwout_enabled') - alert_bwout_threshold = module.params.get('alert_bwout_threshold') - alert_bwquota_enabled = module.params.get('alert_bwquota_enabled') - alert_bwquota_threshold = module.params.get('alert_bwquota_threshold') - alert_cpu_enabled = module.params.get('alert_cpu_enabled') - alert_cpu_threshold = module.params.get('alert_cpu_threshold') - alert_diskio_enabled = module.params.get('alert_diskio_enabled') - alert_diskio_threshold = module.params.get('alert_diskio_threshold') - backupweeklyday = module.params.get('backupweeklyday') - backupwindow = module.params.get('backupwindow') - displaygroup = module.params.get('displaygroup') - plan = module.params.get('plan') - additional_disks = module.params.get('additional_disks') - distribution = module.params.get('distribution') - datacenter = module.params.get('datacenter') - kernel_id = module.params.get('kernel_id') - linode_id = module.params.get('linode_id') - payment_term = module.params.get('payment_term') - password = module.params.get('password') - private_ip = module.params.get('private_ip') - ssh_pub_key = module.params.get('ssh_pub_key') - swap = module.params.get('swap') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - watchdog = int(module.params.get('watchdog')) + module.fail_json(msg=missing_required_lib("linode-python"), exception=LINODE_IMP_ERR) + + state = module.params.get("state") + api_key = module.params.get("api_key") + name = module.params.get("name") + alert_bwin_enabled = module.params.get("alert_bwin_enabled") + alert_bwin_threshold = module.params.get("alert_bwin_threshold") + alert_bwout_enabled = module.params.get("alert_bwout_enabled") + alert_bwout_threshold = module.params.get("alert_bwout_threshold") + alert_bwquota_enabled = module.params.get("alert_bwquota_enabled") + alert_bwquota_threshold = module.params.get("alert_bwquota_threshold") + alert_cpu_enabled = module.params.get("alert_cpu_enabled") + alert_cpu_threshold = module.params.get("alert_cpu_threshold") + alert_diskio_enabled = module.params.get("alert_diskio_enabled") + alert_diskio_threshold = module.params.get("alert_diskio_threshold") + backupweeklyday = module.params.get("backupweeklyday") + backupwindow = module.params.get("backupwindow") + displaygroup = module.params.get("displaygroup") + plan = module.params.get("plan") + additional_disks = module.params.get("additional_disks") + distribution = module.params.get("distribution") + datacenter = module.params.get("datacenter") + kernel_id = module.params.get("kernel_id") + linode_id = module.params.get("linode_id") + payment_term = module.params.get("payment_term") + password = module.params.get("password") + private_ip = module.params.get("private_ip") + ssh_pub_key = module.params.get("ssh_pub_key") + swap = module.params.get("swap") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + watchdog = int(module.params.get("watchdog")) check_items = dict( alert_bwin_enabled=alert_bwin_enabled, @@ -673,12 +696,29 @@ def main(): except Exception as e: module.fail_json(msg=f"{e.value[0]['ERRORMESSAGE']}", exception=traceback.format_exc()) - linodeServers(module, api, state, name, - displaygroup, plan, - additional_disks, distribution, datacenter, kernel_id, linode_id, - payment_term, password, private_ip, ssh_pub_key, swap, wait, - wait_timeout, watchdog, **kwargs) + linodeServers( + module, + api, + state, + name, + 
displaygroup, + plan, + additional_disks, + distribution, + datacenter, + kernel_id, + linode_id, + payment_term, + password, + private_ip, + ssh_pub_key, + swap, + wait, + wait_timeout, + watchdog, + **kwargs, + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/linode_v4.py b/plugins/modules/linode_v4.py index b69a1659c10..cf9cfbeadc5 100644 --- a/plugins/modules/linode_v4.py +++ b/plugins/modules/linode_v4.py @@ -176,6 +176,7 @@ LINODE_IMP_ERR = None try: from linode_api4 import Instance, LinodeClient + HAS_LINODE_DEPENDENCY = True except ImportError: LINODE_IMP_ERR = traceback.format_exc() @@ -184,83 +185,73 @@ def create_linode(module, client, **kwargs): """Creates a Linode instance and handles return format.""" - if kwargs['root_pass'] is None: - kwargs.pop('root_pass') + if kwargs["root_pass"] is None: + kwargs.pop("root_pass") try: response = client.linode.instance_create(**kwargs) except Exception as exception: - module.fail_json(msg=f'Unable to query the Linode API. Saw: {exception}') + module.fail_json(msg=f"Unable to query the Linode API. Saw: {exception}") try: if isinstance(response, tuple): instance, root_pass = response instance_json = instance._raw_json - instance_json.update({'root_pass': root_pass}) + instance_json.update({"root_pass": root_pass}) return instance_json else: return response._raw_json except TypeError: - module.fail_json(msg='Unable to parse Linode instance creation response. Please raise a bug against this' - ' module on https://github.com/ansible-collections/community.general/issues' - ) + module.fail_json( + msg="Unable to parse Linode instance creation response. Please raise a bug against this" + " module on https://github.com/ansible-collections/community.general/issues" + ) def maybe_instance_from_label(module, client): """Try to retrieve an instance based on a label.""" try: - label = module.params['label'] + label = module.params["label"] result = client.linode.instances(Instance.label == label) return result[0] except IndexError: return None except Exception as exception: - module.fail_json(msg=f'Unable to query the Linode API. Saw: {exception}') + module.fail_json(msg=f"Unable to query the Linode API. 
Saw: {exception}") def initialise_module(): """Initialise the module parameter specification.""" return AnsibleModule( argument_spec=dict( - label=dict(type='str', required=True), - state=dict( - type='str', - required=True, - choices=['present', 'absent'] - ), + label=dict(type="str", required=True), + state=dict(type="str", required=True, choices=["present", "absent"]), access_token=dict( - type='str', + type="str", required=True, no_log=True, - fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']), + fallback=(env_fallback, ["LINODE_ACCESS_TOKEN"]), ), - authorized_keys=dict(type='list', elements='str', no_log=False), - group=dict(type='str'), - image=dict(type='str'), - private_ip=dict(type='bool', default=False), - region=dict(type='str'), - root_pass=dict(type='str', no_log=True), - tags=dict(type='list', elements='str'), - type=dict(type='str'), - stackscript_id=dict(type='int'), - stackscript_data=dict(type='dict'), + authorized_keys=dict(type="list", elements="str", no_log=False), + group=dict(type="str"), + image=dict(type="str"), + private_ip=dict(type="bool", default=False), + region=dict(type="str"), + root_pass=dict(type="str", no_log=True), + tags=dict(type="list", elements="str"), + type=dict(type="str"), + stackscript_id=dict(type="int"), + stackscript_data=dict(type="dict"), ), supports_check_mode=False, - required_one_of=( - ['state', 'label'], - ), - required_together=( - ['region', 'image', 'type'], - ) + required_one_of=(["state", "label"],), + required_together=(["region", "image", "type"],), ) def build_client(module): """Build a LinodeClient.""" - return LinodeClient( - module.params['access_token'], - user_agent=get_user_agent('linode_v4_module') - ) + return LinodeClient(module.params["access_token"], user_agent=get_user_agent("linode_v4_module")) def main(): @@ -268,36 +259,37 @@ def main(): module = initialise_module() if not HAS_LINODE_DEPENDENCY: - module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR) + module.fail_json(msg=missing_required_lib("linode-api4"), exception=LINODE_IMP_ERR) client = build_client(module) instance = maybe_instance_from_label(module, client) - if module.params['state'] == 'present' and instance is not None: + if module.params["state"] == "present" and instance is not None: module.exit_json(changed=False, instance=instance._raw_json) - elif module.params['state'] == 'present' and instance is None: + elif module.params["state"] == "present" and instance is None: instance_json = create_linode( - module, client, - authorized_keys=module.params['authorized_keys'], - group=module.params['group'], - image=module.params['image'], - label=module.params['label'], - private_ip=module.params['private_ip'], - region=module.params['region'], - root_pass=module.params['root_pass'], - tags=module.params['tags'], - ltype=module.params['type'], - stackscript=module.params['stackscript_id'], - stackscript_data=module.params['stackscript_data'], + module, + client, + authorized_keys=module.params["authorized_keys"], + group=module.params["group"], + image=module.params["image"], + label=module.params["label"], + private_ip=module.params["private_ip"], + region=module.params["region"], + root_pass=module.params["root_pass"], + tags=module.params["tags"], + ltype=module.params["type"], + stackscript=module.params["stackscript_id"], + stackscript_data=module.params["stackscript_data"], ) module.exit_json(changed=True, instance=instance_json) - elif module.params['state'] == 'absent' and instance is not None: + elif 
module.params["state"] == "absent" and instance is not None: instance.delete() module.exit_json(changed=True, instance=instance._raw_json) - elif module.params['state'] == 'absent' and instance is None: + elif module.params["state"] == "absent" and instance is None: module.exit_json(changed=False, instance={}) diff --git a/plugins/modules/listen_ports_facts.py b/plugins/modules/listen_ports_facts.py index 73728bc9086..c0631903ae0 100644 --- a/plugins/modules/listen_ports_facts.py +++ b/plugins/modules/listen_ports_facts.py @@ -226,8 +226,14 @@ def netStatParse(raw): pid_and_name = "" process = "" formatted_line = line.split() - protocol, recv_q, send_q, address, foreign_address, rest = \ - formatted_line[0], formatted_line[1], formatted_line[2], formatted_line[3], formatted_line[4], formatted_line[5:] + protocol, recv_q, send_q, address, foreign_address, rest = ( + formatted_line[0], + formatted_line[1], + formatted_line[2], + formatted_line[3], + formatted_line[4], + formatted_line[5:], + ) address, port = address.rsplit(":", 1) if protocol.startswith("tcp"): @@ -248,13 +254,13 @@ def netStatParse(raw): pid, name = split_pid_name(pid_name=pid_and_name) result = { - 'protocol': protocol, - 'state': state, - 'address': address, - 'foreign_address': foreign_address, - 'port': int(port), - 'name': name, - 'pid': int(pid), + "protocol": protocol, + "state": state, + "address": address, + "foreign_address": foreign_address, + "port": int(port), + "name": name, + "pid": int(pid), } if result not in results: results.append(result) @@ -270,14 +276,14 @@ def ss_parse(raw): connection. """ results = list() - regex_conns = re.compile(pattern=r'\[?(.+?)\]?:([0-9]+)$') + regex_conns = re.compile(pattern=r"\[?(.+?)\]?:([0-9]+)$") regex_pid = re.compile(pattern=r'"(.*?)",pid=(\d+)') lines = raw.splitlines() - if len(lines) == 0 or not lines[0].startswith('Netid '): + if len(lines) == 0 or not lines[0].startswith("Netid "): # unexpected stdout from ss - raise EnvironmentError(f'Unknown stdout format of `ss`: {raw}') + raise EnvironmentError(f"Unknown stdout format of `ss`: {raw}") # skip headers (-H arg is not present on e.g. 
Ubuntu 16) lines = lines[1:] @@ -312,80 +318,74 @@ def ss_parse(raw): port = conns.group(2) for name, pid in pids: result = { - 'protocol': protocol, - 'state': state, - 'address': address, - 'foreign_address': peer_addr_port, - 'port': int(port), - 'name': name, - 'pid': int(pid), + "protocol": protocol, + "state": state, + "address": address, + "foreign_address": peer_addr_port, + "port": int(port), + "name": name, + "pid": int(pid), } results.append(result) return results def main(): - command_args = ['-p', '-l', '-u', '-n', '-t'] + command_args = ["-p", "-l", "-u", "-n", "-t"] commands_map = { - 'netstat': { - 'args': [], - 'parse_func': netStatParse - }, - 'ss': { - 'args': [], - 'parse_func': ss_parse - }, + "netstat": {"args": [], "parse_func": netStatParse}, + "ss": {"args": [], "parse_func": ss_parse}, } module = AnsibleModule( argument_spec=dict( - command=dict(type='str', choices=list(sorted(commands_map))), - include_non_listening=dict(default=False, type='bool'), + command=dict(type="str", choices=list(sorted(commands_map))), + include_non_listening=dict(default=False, type="bool"), ), supports_check_mode=True, ) - if module.params['include_non_listening']: - command_args = ['-p', '-u', '-n', '-t', '-a'] + if module.params["include_non_listening"]: + command_args = ["-p", "-u", "-n", "-t", "-a"] - commands_map['netstat']['args'] = command_args - commands_map['ss']['args'] = command_args + commands_map["netstat"]["args"] = command_args + commands_map["ss"]["args"] = command_args - if platform.system() != 'Linux': - module.fail_json(msg='This module requires Linux.') + if platform.system() != "Linux": + module.fail_json(msg="This module requires Linux.") def getPidSTime(pid): - ps_cmd = module.get_bin_path('ps', True) - rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'lstart', '-p', str(pid)]) - stime = '' + ps_cmd = module.get_bin_path("ps", True) + rc, ps_output, stderr = module.run_command([ps_cmd, "-o", "lstart", "-p", str(pid)]) + stime = "" if rc == 0: for line in ps_output.splitlines(): - if 'started' not in line: + if "started" not in line: stime = line return stime def getPidUser(pid): - ps_cmd = module.get_bin_path('ps', True) - rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'user', '-p', str(pid)]) - user = '' + ps_cmd = module.get_bin_path("ps", True) + rc, ps_output, stderr = module.run_command([ps_cmd, "-o", "user", "-p", str(pid)]) + user = "" if rc == 0: for line in ps_output.splitlines(): - if line != 'USER': + if line != "USER": user = line return user result = { - 'changed': False, - 'ansible_facts': { - 'tcp_listen': [], - 'udp_listen': [], + "changed": False, + "ansible_facts": { + "tcp_listen": [], + "udp_listen": [], }, } try: command = None bin_path = None - if module.params['command'] is not None: - command = module.params['command'] + if module.params["command"] is not None: + command = module.params["command"] bin_path = module.get_bin_path(command, required=True) else: for c in sorted(commands_map): @@ -395,31 +395,33 @@ def getPidUser(pid): break if bin_path is None: - raise EnvironmentError(f"Unable to find any of the supported commands in PATH: {', '.join(sorted(commands_map))}") + raise EnvironmentError( + f"Unable to find any of the supported commands in PATH: {', '.join(sorted(commands_map))}" + ) # which ports are listening for connections? 
- args = commands_map[command]['args'] + args = commands_map[command]["args"] rc, stdout, stderr = module.run_command([bin_path] + args) if rc == 0: - parse_func = commands_map[command]['parse_func'] + parse_func = commands_map[command]["parse_func"] results = parse_func(stdout) for connection in results: # only display state and foreign_address for include_non_listening. - if not module.params['include_non_listening']: - connection.pop('state', None) - connection.pop('foreign_address', None) - connection['stime'] = getPidSTime(connection['pid']) - connection['user'] = getPidUser(connection['pid']) - if connection['protocol'].startswith('tcp'): - result['ansible_facts']['tcp_listen'].append(connection) - elif connection['protocol'].startswith('udp'): - result['ansible_facts']['udp_listen'].append(connection) + if not module.params["include_non_listening"]: + connection.pop("state", None) + connection.pop("foreign_address", None) + connection["stime"] = getPidSTime(connection["pid"]) + connection["user"] = getPidUser(connection["pid"]) + if connection["protocol"].startswith("tcp"): + result["ansible_facts"]["tcp_listen"].append(connection) + elif connection["protocol"].startswith("udp"): + result["ansible_facts"]["udp_listen"].append(connection) except (KeyError, EnvironmentError) as e: module.fail_json(msg=to_native(e)) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/lldp.py b/plugins/modules/lldp.py index 4035a63b9eb..c8030fd550d 100644 --- a/plugins/modules/lldp.py +++ b/plugins/modules/lldp.py @@ -51,7 +51,7 @@ def gather_lldp(module): - cmd = [module.get_bin_path('lldpctl'), '-f', 'keyvalue'] + cmd = [module.get_bin_path("lldpctl"), "-f", "keyvalue"] rc, output, err = module.run_command(cmd) if output: output_dict = {} @@ -60,7 +60,7 @@ def gather_lldp(module): final = "" for entry in lldp_entries: - if entry.startswith('lldp'): + if entry.startswith("lldp"): path, value = entry.strip().split("=", 1) path = path.split(".") path_components, final = path[:-1], path[-1] @@ -77,14 +77,14 @@ def gather_lldp(module): for path_component in path_components: current_dict[path_component] = current_dict.get(path_component, {}) if not isinstance(current_dict[path_component], dict): - current_dict[path_component] = {'value': current_dict[path_component]} + current_dict[path_component] = {"value": current_dict[path_component]} current_dict = current_dict[path_component] - if final in current_dict and isinstance(current_dict[final], dict) and module.params['multivalues']: + if final in current_dict and isinstance(current_dict[final], dict) and module.params["multivalues"]: current_dict = current_dict[final] - final = 'value' + final = "value" - if final not in current_dict or not module.params['multivalues']: + if final not in current_dict or not module.params["multivalues"]: current_dict[final] = value elif isinstance(current_dict[final], str): current_dict[final] = [current_dict[final], value] @@ -95,18 +95,16 @@ def gather_lldp(module): def main(): - module_args = dict( - multivalues=dict(type='bool', default=False) - ) + module_args = dict(multivalues=dict(type="bool", default=False)) module = AnsibleModule(module_args) lldp_output = gather_lldp(module) try: - data = {'lldp': lldp_output['lldp']} + data = {"lldp": lldp_output["lldp"]} module.exit_json(ansible_facts=data) except TypeError: module.fail_json(msg="lldpctl command failed. 
is lldpd running?") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/locale_gen.py b/plugins/modules/locale_gen.py index 3a538e6c03a..b555fe0d66a 100644 --- a/plugins/modules/locale_gen.py +++ b/plugins/modules/locale_gen.py @@ -105,7 +105,7 @@ class LocaleGen(StateModuleHelper): module = dict( argument_spec=dict( name=dict(type="list", elements="str", required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), + state=dict(type="str", default="present", choices=["absent", "present"]), ), supports_check_mode=True, ) @@ -132,7 +132,8 @@ def __init_module__(self): "On this machine mechanism=ubuntu_legacy is used. This mechanism is deprecated and will be removed from" " in community.general 13.0.0. If you see this message on a modern Debian or Ubuntu version," " please create an issue in the community.general repository", - version="13.0.0", collection_name="community.general" + version="13.0.0", + collection_name="community.general", ) else: self.do_raise(f'{VAR_LIB_LOCALES} and {ETC_LOCALE_GEN} are missing. Is the package "locales" installed?') @@ -155,11 +156,11 @@ def assert_available(self): checking either : * if the locale is present in /etc/locales.gen * or if the locale is present in /usr/share/i18n/SUPPORTED""" - regexp = r'^\s*#?\s*(?P\S+[\._\S]+) (?P\S+)\s*$' + regexp = r"^\s*#?\s*(?P\S+[\._\S]+) (?P\S+)\s*$" locales_available = self.MECHANISMS[self.vars.mechanism]["available"] re_compiled = re.compile(regexp) - with open(locales_available, 'r') as fd: + with open(locales_available, "r") as fd: lines = fd.readlines() res = [re_compiled.match(line) for line in lines] self.vars.set("available_lines", lines, verbosity=4) @@ -174,7 +175,9 @@ def assert_available(self): locales_not_found = self.locale_get_not_present(locales_not_found) if locales_not_found: - self.do_raise(f"The following locales you have entered are not available on your system: {', '.join(locales_not_found)}") + self.do_raise( + f"The following locales you have entered are not available on your system: {', '.join(locales_not_found)}" + ) def is_present(self): return not self.locale_get_not_present(self.vars.name) @@ -201,18 +204,18 @@ def fix_case(self, name): return name def set_locale_glibc(self, names, enabled=True): - """ Sets the state of the locale. Defaults to enabled. """ - with open(ETC_LOCALE_GEN, 'r') as fr: + """Sets the state of the locale. Defaults to enabled.""" + with open(ETC_LOCALE_GEN, "r") as fr: lines = fr.readlines() locale_regexes = [] for name in names: - search_string = rf'^#?\s*{re.escape(name)} (?P.+)' + search_string = rf"^#?\s*{re.escape(name)} (?P.+)" if enabled: - new_string = rf'{name} \g' + new_string = rf"{name} \g" else: - new_string = rf'# {name} \g' + new_string = rf"# {name} \g" re_search = re.compile(search_string) locale_regexes.append([re_search, new_string]) @@ -221,7 +224,7 @@ def set_locale_glibc(self, names, enabled=True): lines[i] = search.sub(replace, lines[i]) # Write the modified content back to the file - with open(ETC_LOCALE_GEN, 'w') as fw: + with open(ETC_LOCALE_GEN, "w") as fw: fw.writelines(lines) def apply_change_glibc(self, targetState, names): @@ -258,7 +261,7 @@ def apply_change_ubuntu_legacy(self, targetState, names): content = fr.readlines() with open(VAR_LIB_LOCALES_LOCAL, "w") as fw: for line in content: - locale, charset = line.split(' ') + locale, charset = line.split(" ") if locale not in names: fw.write(line) # Purge locales and regenerate. 
@@ -277,5 +280,5 @@ def main(): LocaleGen.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/logentries.py b/plugins/modules/logentries.py index eebd743f068..85d71243382 100644 --- a/plugins/modules/logentries.py +++ b/plugins/modules/logentries.py @@ -66,7 +66,7 @@ def query_log_status(module, le_path, path, state="present"): - """ Returns whether a log is followed or not. """ + """Returns whether a log is followed or not.""" if state == "present": rc, out, err = module.run_command([le_path, "followed", path]) @@ -77,7 +77,7 @@ def query_log_status(module, le_path, path, state="present"): def follow_log(module, le_path, logs, name=None, logtype=None): - """ Follows one or more logs if not already followed. """ + """Follows one or more logs if not already followed.""" followed_count = 0 @@ -88,11 +88,11 @@ def follow_log(module, le_path, logs, name=None, logtype=None): if module.check_mode: module.exit_json(changed=True) - cmd = [le_path, 'follow', log] + cmd = [le_path, "follow", log] if name: - cmd.extend(['--name', name]) + cmd.extend(["--name", name]) if logtype: - cmd.extend(['--type', logtype]) + cmd.extend(["--type", logtype]) rc, out, err = module.run_command(cmd) if not query_log_status(module, le_path, log): @@ -107,7 +107,7 @@ def follow_log(module, le_path, logs, name=None, logtype=None): def unfollow_log(module, le_path, logs): - """ Unfollows one or more logs if followed. """ + """Unfollows one or more logs if followed.""" removed_count = 0 @@ -119,7 +119,7 @@ def unfollow_log(module, le_path, logs): if module.check_mode: module.exit_json(changed=True) - rc, out, err = module.run_command([le_path, 'rm', log]) + rc, out, err = module.run_command([le_path, "rm", log]) if query_log_status(module, le_path, log): module.fail_json(msg=f"failed to remove '{log}': {err.strip()}") @@ -137,13 +137,13 @@ def main(): argument_spec=dict( path=dict(required=True), state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]), - name=dict(type='str'), - logtype=dict(type='str', aliases=['type']) + name=dict(type="str"), + logtype=dict(type="str", aliases=["type"]), ), - supports_check_mode=True + supports_check_mode=True, ) - le_path = module.get_bin_path('le', True, ['/usr/local/bin']) + le_path = module.get_bin_path("le", True, ["/usr/local/bin"]) p = module.params @@ -152,11 +152,11 @@ def main(): logs = [_f for _f in logs if _f] if p["state"] in ["present", "followed"]: - follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype']) + follow_log(module, le_path, logs, name=p["name"], logtype=p["logtype"]) elif p["state"] in ["absent", "unfollowed"]: unfollow_log(module, le_path, logs) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/logentries_msg.py b/plugins/modules/logentries_msg.py index 9e4a0981792..543d71af0ae 100644 --- a/plugins/modules/logentries_msg.py +++ b/plugins/modules/logentries_msg.py @@ -58,7 +58,6 @@ def send_msg(module, token, msg, api, port): - message = f"{token} {msg}\n" api_ip = socket.gethostbyname(api) @@ -76,11 +75,12 @@ def send_msg(module, token, msg, api, port): def main(): module = AnsibleModule( argument_spec=dict( - token=dict(type='str', required=True, no_log=True), - msg=dict(type='str', required=True), - api=dict(type='str', default="data.logentries.com"), - port=dict(type='int', default=80)), - supports_check_mode=True + token=dict(type="str", required=True, no_log=True), + msg=dict(type="str", required=True), + 
api=dict(type="str", default="data.logentries.com"), + port=dict(type="int", default=80), + ), + supports_check_mode=True, ) token = module.params["token"] @@ -98,5 +98,5 @@ def main(): module.exit_json(changed=changed, msg=msg) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/logstash_plugin.py b/plugins/modules/logstash_plugin.py index b09cc04436b..8da8c4ec6d4 100644 --- a/plugins/modules/logstash_plugin.py +++ b/plugins/modules/logstash_plugin.py @@ -78,10 +78,7 @@ from ansible.module_utils.basic import AnsibleModule -PACKAGE_STATE_MAP = dict( - present="install", - absent="remove" -) +PACKAGE_STATE_MAP = dict(present="install", absent="remove") def is_plugin_present(module, plugin_bin, plugin_name): @@ -93,7 +90,7 @@ def is_plugin_present(module, plugin_bin, plugin_name): def parse_error(string): reason = "reason: " try: - return string[string.index(reason) + len(reason):].strip() + return string[string.index(reason) + len(reason) :].strip() except ValueError: return string @@ -146,9 +143,9 @@ def main(): plugin_bin=dict(default="/usr/share/logstash/bin/logstash-plugin", type="path"), proxy_host=dict(), proxy_port=dict(), - version=dict() + version=dict(), ), - supports_check_mode=True + supports_check_mode=True, ) name = module.params["name"] @@ -172,5 +169,5 @@ def main(): module.exit_json(changed=changed, cmd=cmd, name=name, state=state, stdout=out, stderr=err) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/lvg.py b/plugins/modules/lvg.py index 8723ab7c888..4b1fc434abd 100644 --- a/plugins/modules/lvg.py +++ b/plugins/modules/lvg.py @@ -175,24 +175,26 @@ from ansible.module_utils.basic import AnsibleModule -VG_AUTOACTIVATION_OPT = '--setautoactivation' +VG_AUTOACTIVATION_OPT = "--setautoactivation" def parse_vgs(data): vgs = [] for line in data.splitlines(): - parts = line.strip().split(';') - vgs.append({ - 'name': parts[0], - 'pv_count': int(parts[1]), - 'lv_count': int(parts[2]), - }) + parts = line.strip().split(";") + vgs.append( + { + "name": parts[0], + "pv_count": int(parts[1]), + "lv_count": int(parts[2]), + } + ) return vgs def find_mapper_device_name(module, dm_device): - dmsetup_cmd = module.get_bin_path('dmsetup', True) - mapper_prefix = '/dev/mapper/' + dmsetup_cmd = module.get_bin_path("dmsetup", True) + mapper_prefix = "/dev/mapper/" rc, dm_name, err = module.run_command([dmsetup_cmd, "info", "-C", "--noheadings", "-o", "name", dm_device]) if rc != 0: module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err) @@ -202,28 +204,32 @@ def find_mapper_device_name(module, dm_device): def parse_pvs(module, data): pvs = [] - dm_prefix = '/dev/dm-' + dm_prefix = "/dev/dm-" for line in data.splitlines(): - parts = line.strip().split(';') + parts = line.strip().split(";") if parts[0].startswith(dm_prefix): parts[0] = find_mapper_device_name(module, parts[0]) - pvs.append({ - 'name': parts[0], - 'vg_name': parts[1], - }) + pvs.append( + { + "name": parts[0], + "vg_name": parts[1], + } + ) return pvs def find_vg(module, vg): if not vg: return None - vgs_cmd = module.get_bin_path('vgs', True) - dummy, current_vgs, dummy = module.run_command([vgs_cmd, "--noheadings", "-o", "vg_name,pv_count,lv_count", "--separator", ";"], check_rc=True) + vgs_cmd = module.get_bin_path("vgs", True) + dummy, current_vgs, dummy = module.run_command( + [vgs_cmd, "--noheadings", "-o", "vg_name,pv_count,lv_count", "--separator", ";"], check_rc=True + ) vgs = parse_vgs(current_vgs) for test_vg in vgs: - 
if test_vg['name'] == vg: + if test_vg["name"] == vg: this_vg = test_vg break else: @@ -234,7 +240,7 @@ def find_vg(module, vg): def is_autoactivation_supported(module, vg_cmd): autoactivation_supported = False - dummy, vgchange_opts, dummy = module.run_command([vg_cmd, '--help'], check_rc=True) + dummy, vgchange_opts, dummy = module.run_command([vg_cmd, "--help"], check_rc=True) if VG_AUTOACTIVATION_OPT in vgchange_opts: autoactivation_supported = True @@ -244,36 +250,36 @@ def is_autoactivation_supported(module, vg_cmd): def activate_vg(module, vg, active): changed = False - vgchange_cmd = module.get_bin_path('vgchange', True) - vgs_cmd = module.get_bin_path('vgs', True) - vgs_fields = ['lv_attr'] + vgchange_cmd = module.get_bin_path("vgchange", True) + vgs_cmd = module.get_bin_path("vgs", True) + vgs_fields = ["lv_attr"] autoactivation_enabled = False autoactivation_supported = is_autoactivation_supported(module=module, vg_cmd=vgchange_cmd) if autoactivation_supported: - vgs_fields.append('autoactivation') + vgs_fields.append("autoactivation") - vgs_cmd_with_opts = [vgs_cmd, '--noheadings', '-o', ','.join(vgs_fields), '--separator', ';', vg] + vgs_cmd_with_opts = [vgs_cmd, "--noheadings", "-o", ",".join(vgs_fields), "--separator", ";", vg] dummy, current_vg_lv_states, dummy = module.run_command(vgs_cmd_with_opts, check_rc=True) lv_active_count = 0 lv_inactive_count = 0 for line in current_vg_lv_states.splitlines(): - parts = line.strip().split(';') - if parts[0][4] == 'a': + parts = line.strip().split(";") + if parts[0][4] == "a": lv_active_count += 1 else: lv_inactive_count += 1 if autoactivation_supported: - autoactivation_enabled = autoactivation_enabled or parts[1] == 'enabled' + autoactivation_enabled = autoactivation_enabled or parts[1] == "enabled" activate_flag = None if active and lv_inactive_count > 0: - activate_flag = 'y' + activate_flag = "y" elif not active and lv_active_count > 0: - activate_flag = 'n' + activate_flag = "n" # Extra logic necessary because vgchange returns error when autoactivation is already set if autoactivation_supported: @@ -281,46 +287,56 @@ def activate_vg(module, vg, active): if module.check_mode: changed = True else: - module.run_command([vgchange_cmd, VG_AUTOACTIVATION_OPT, 'y', vg], check_rc=True) + module.run_command([vgchange_cmd, VG_AUTOACTIVATION_OPT, "y", vg], check_rc=True) changed = True elif not active and autoactivation_enabled: if module.check_mode: changed = True else: - module.run_command([vgchange_cmd, VG_AUTOACTIVATION_OPT, 'n', vg], check_rc=True) + module.run_command([vgchange_cmd, VG_AUTOACTIVATION_OPT, "n", vg], check_rc=True) changed = True if activate_flag is not None: if module.check_mode: changed = True else: - module.run_command([vgchange_cmd, '--activate', activate_flag, vg], check_rc=True) + module.run_command([vgchange_cmd, "--activate", activate_flag, vg], check_rc=True) changed = True return changed def append_vgcreate_options(module, state, vgoptions): - vgcreate_cmd = module.get_bin_path('vgcreate', True) + vgcreate_cmd = module.get_bin_path("vgcreate", True) autoactivation_supported = is_autoactivation_supported(module=module, vg_cmd=vgcreate_cmd) - if autoactivation_supported and state in ['active', 'inactive']: + if autoactivation_supported and state in ["active", "inactive"]: if VG_AUTOACTIVATION_OPT not in vgoptions: - if state == 'active': - vgoptions += [VG_AUTOACTIVATION_OPT, 'y'] + if state == "active": + vgoptions += [VG_AUTOACTIVATION_OPT, "y"] else: - vgoptions += [VG_AUTOACTIVATION_OPT, 'n'] + vgoptions 
+= [VG_AUTOACTIVATION_OPT, "n"] def get_pv_values_for_resize(module, device): - pvdisplay_cmd = module.get_bin_path('pvdisplay', True) - pvdisplay_ops = ["--units", "b", "--columns", "--noheadings", "--nosuffix", "--separator", ";", "-o", "dev_size,pv_size,pe_start,vg_extent_size"] + pvdisplay_cmd = module.get_bin_path("pvdisplay", True) + pvdisplay_ops = [ + "--units", + "b", + "--columns", + "--noheadings", + "--nosuffix", + "--separator", + ";", + "-o", + "dev_size,pv_size,pe_start,vg_extent_size", + ] pvdisplay_cmd_device_options = [pvdisplay_cmd, device] + pvdisplay_ops dummy, pv_values, dummy = module.run_command(pvdisplay_cmd_device_options, check_rc=True) - values = pv_values.strip().split(';') + values = pv_values.strip().split(";") dev_size = int(values[0]) pv_size = int(values[1]) @@ -332,7 +348,7 @@ def get_pv_values_for_resize(module, device): def resize_pv(module, device): changed = False - pvresize_cmd = module.get_bin_path('pvresize', True) + pvresize_cmd = module.get_bin_path("pvresize", True) dev_size, pv_size, pe_start, vg_extent_size = get_pv_values_for_resize(module=module, device=device) if (dev_size - (pe_start + pv_size)) > vg_extent_size: @@ -352,10 +368,10 @@ def resize_pv(module, device): def reset_uuid_pv(module, device): changed = False - pvs_cmd = module.get_bin_path('pvs', True) - pvs_cmd_with_opts = [pvs_cmd, '--noheadings', '-o', 'uuid', device] - pvchange_cmd = module.get_bin_path('pvchange', True) - pvchange_cmd_with_opts = [pvchange_cmd, '-u', device] + pvs_cmd = module.get_bin_path("pvs", True) + pvs_cmd_with_opts = [pvs_cmd, "--noheadings", "-o", "uuid", device] + pvchange_cmd = module.get_bin_path("pvchange", True) + pvchange_cmd_with_opts = [pvchange_cmd, "-u", device] dummy, orig_uuid, dummy = module.run_command(pvs_cmd_with_opts, check_rc=True) @@ -366,7 +382,9 @@ def reset_uuid_pv(module, device): pvchange_rc, pvchange_out, pvchange_err = module.run_command(pvchange_cmd_with_opts) dummy, new_uuid, dummy = module.run_command(pvs_cmd_with_opts, check_rc=True) if orig_uuid.strip() == new_uuid.strip(): - module.fail_json(msg=f"PV ({device}) UUID change failed", rc=pvchange_rc, err=pvchange_err, out=pvchange_out) + module.fail_json( + msg=f"PV ({device}) UUID change failed", rc=pvchange_rc, err=pvchange_err, out=pvchange_out + ) else: changed = True @@ -375,8 +393,8 @@ def reset_uuid_pv(module, device): def reset_uuid_vg(module, vg): changed = False - vgchange_cmd = module.get_bin_path('vgchange', True) - vgchange_cmd_with_opts = [vgchange_cmd, '-u', vg] + vgchange_cmd = module.get_bin_path("vgchange", True) + vgchange_cmd_with_opts = [vgchange_cmd, "-u", vg] if module.check_mode: changed = True else: @@ -389,43 +407,43 @@ def reset_uuid_vg(module, vg): def main(): module = AnsibleModule( argument_spec=dict( - vg=dict(type='str', required=True), - pvs=dict(type='list', elements='str'), - pesize=dict(type='str', default='4'), - pv_options=dict(type='str', default=''), - pvresize=dict(type='bool', default=False), - vg_options=dict(type='str', default=''), - state=dict(type='str', default='present', choices=['absent', 'present', 'active', 'inactive']), - force=dict(type='bool', default=False), - reset_vg_uuid=dict(type='bool', default=False), - reset_pv_uuid=dict(type='bool', default=False), + vg=dict(type="str", required=True), + pvs=dict(type="list", elements="str"), + pesize=dict(type="str", default="4"), + pv_options=dict(type="str", default=""), + pvresize=dict(type="bool", default=False), + vg_options=dict(type="str", default=""), + 
state=dict(type="str", default="present", choices=["absent", "present", "active", "inactive"]), + force=dict(type="bool", default=False), + reset_vg_uuid=dict(type="bool", default=False), + reset_pv_uuid=dict(type="bool", default=False), remove_extra_pvs=dict(type="bool", default=True), ), required_if=[ - ['reset_pv_uuid', True, ['pvs']], + ["reset_pv_uuid", True, ["pvs"]], ], supports_check_mode=True, ) - vg = module.params['vg'] - state = module.params['state'] - force = module.boolean(module.params['force']) - pvresize = module.boolean(module.params['pvresize']) - pesize = module.params['pesize'] - pvoptions = module.params['pv_options'].split() - vgoptions = module.params['vg_options'].split() - reset_vg_uuid = module.boolean(module.params['reset_vg_uuid']) - reset_pv_uuid = module.boolean(module.params['reset_pv_uuid']) + vg = module.params["vg"] + state = module.params["state"] + force = module.boolean(module.params["force"]) + pvresize = module.boolean(module.params["pvresize"]) + pesize = module.params["pesize"] + pvoptions = module.params["pv_options"].split() + vgoptions = module.params["vg_options"].split() + reset_vg_uuid = module.boolean(module.params["reset_vg_uuid"]) + reset_pv_uuid = module.boolean(module.params["reset_pv_uuid"]) remove_extra_pvs = module.boolean(module.params["remove_extra_pvs"]) this_vg = find_vg(module=module, vg=vg) - present_state = state in ['present', 'active', 'inactive'] + present_state = state in ["present", "active", "inactive"] pvs_required = present_state and this_vg is None changed = False dev_list = [] - if module.params['pvs']: - dev_list = list(module.params['pvs']) + if module.params["pvs"]: + dev_list = list(module.params["pvs"]) elif pvs_required: module.fail_json(msg="No physical volumes given.") @@ -440,23 +458,22 @@ def main(): module.fail_json(msg=f"Device {test_dev} not found.") # get pv list - pvs_cmd = module.get_bin_path('pvs', True) + pvs_cmd = module.get_bin_path("pvs", True) if dev_list: - pvs_filter_pv_name = ' || '.join( - f'pv_name = {x}' - for x in itertools.chain(dev_list, module.params['pvs']) - ) - pvs_filter_vg_name = f'vg_name = {vg}' + pvs_filter_pv_name = " || ".join(f"pv_name = {x}" for x in itertools.chain(dev_list, module.params["pvs"])) + pvs_filter_vg_name = f"vg_name = {vg}" pvs_filter = ["--select", f"{pvs_filter_pv_name} || {pvs_filter_vg_name}"] else: pvs_filter = [] - rc, current_pvs, err = module.run_command([pvs_cmd, "--noheadings", "-o", "pv_name,vg_name", "--separator", ";"] + pvs_filter) + rc, current_pvs, err = module.run_command( + [pvs_cmd, "--noheadings", "-o", "pv_name,vg_name", "--separator", ";"] + pvs_filter + ) if rc != 0: module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err) # check pv for devices pvs = parse_pvs(module, current_pvs) - used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg] + used_pvs = [pv for pv in pvs if pv["name"] in dev_list and pv["vg_name"] and pv["vg_name"] != vg] if used_pvs: module.fail_json(msg=f"Device {used_pvs[0]['name']} is already in {used_pvs[0]['vg_name']} volume group.") @@ -468,27 +485,27 @@ def main(): changed = True else: # create PV - pvcreate_cmd = module.get_bin_path('pvcreate', True) + pvcreate_cmd = module.get_bin_path("pvcreate", True) for current_dev in dev_list: - rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ["-f", str(current_dev)]) if rc == 0: changed = True else: 
module.fail_json(msg=f"Creating physical volume '{current_dev}' failed", rc=rc, err=err) - vgcreate_cmd = module.get_bin_path('vgcreate') - rc, dummy, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list) + vgcreate_cmd = module.get_bin_path("vgcreate") + rc, dummy, err = module.run_command([vgcreate_cmd] + vgoptions + ["-s", pesize, vg] + dev_list) if rc == 0: changed = True else: module.fail_json(msg=f"Creating volume group '{vg}' failed", rc=rc, err=err) else: - if state == 'absent': + if state == "absent": if module.check_mode: module.exit_json(changed=True) else: - if this_vg['lv_count'] == 0 or force: + if this_vg["lv_count"] == 0 or force: # remove VG - vgremove_cmd = module.get_bin_path('vgremove', True) + vgremove_cmd = module.get_bin_path("vgremove", True) rc, dummy, err = module.run_command([vgremove_cmd, "--force", vg]) if rc == 0: module.exit_json(changed=True) @@ -497,9 +514,9 @@ def main(): else: module.fail_json(msg=f"Refuse to remove non-empty volume group {vg} without force=true") # activate/deactivate existing VG - elif state == 'active': + elif state == "active": changed = activate_vg(module=module, vg=vg, active=True) - elif state == 'inactive': + elif state == "inactive": changed = activate_vg(module=module, vg=vg, active=False) # reset VG uuid @@ -508,7 +525,7 @@ def main(): # resize VG if dev_list: - current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg] + current_devs = [os.path.realpath(pv["name"]) for pv in pvs if pv["vg_name"] == vg] devs_to_remove = list(set(current_devs) - set(dev_list)) devs_to_add = list(set(dev_list) - set(current_devs)) @@ -529,15 +546,15 @@ def main(): else: if devs_to_add: # create PV - pvcreate_cmd = module.get_bin_path('pvcreate', True) + pvcreate_cmd = module.get_bin_path("pvcreate", True) for current_dev in devs_to_add: - rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ["-f", str(current_dev)]) if rc == 0: changed = True else: module.fail_json(msg=f"Creating physical volume '{current_dev}' failed", rc=rc, err=err) # add PV to our VG - vgextend_cmd = module.get_bin_path('vgextend', True) + vgextend_cmd = module.get_bin_path("vgextend", True) rc, dummy, err = module.run_command([vgextend_cmd, vg] + devs_to_add) if rc == 0: changed = True @@ -546,15 +563,17 @@ def main(): # remove some PV from our VG if devs_to_remove: - vgreduce_cmd = module.get_bin_path('vgreduce', True) + vgreduce_cmd = module.get_bin_path("vgreduce", True) rc, dummy, err = module.run_command([vgreduce_cmd, "--force", vg] + devs_to_remove) if rc == 0: changed = True else: - module.fail_json(msg=f"Unable to reduce {vg} by {' '.join(devs_to_remove)}.", rc=rc, err=err) + module.fail_json( + msg=f"Unable to reduce {vg} by {' '.join(devs_to_remove)}.", rc=rc, err=err + ) module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/lvg_rename.py b/plugins/modules/lvg_rename.py index 01768ff0193..ebed27ff18e 100644 --- a/plugins/modules/lvg_rename.py +++ b/plugins/modules/lvg_rename.py @@ -55,20 +55,20 @@ from ansible.module_utils.basic import AnsibleModule argument_spec = dict( - vg=dict(type='str', required=True), - vg_new=dict(type='str', required=True), + vg=dict(type="str", required=True), + vg_new=dict(type="str", required=True), ) class LvgRename: def __init__(self, module): - ''' + """ Orchestrates the lvg_rename module logic. 
:param module: An AnsibleModule instance. - ''' + """ self.module = module - self.result = {'changed': False} + self.result = {"changed": False} self.vg_list = [] self._load_params() @@ -82,49 +82,49 @@ def run(self): if old_vg_exists: if new_vg_exists: - self.module.fail_json(msg=f'The new VG name ({self.vg_new}) is already in use.') + self.module.fail_json(msg=f"The new VG name ({self.vg_new}) is already in use.") else: self._rename_vg() else: if new_vg_exists: - self.result['msg'] = f'The new VG ({self.vg_new}) already exists, nothing to do.' + self.result["msg"] = f"The new VG ({self.vg_new}) already exists, nothing to do." self.module.exit_json(**self.result) else: - self.module.fail_json(msg=f'Both current ({self.vg}) and new ({self.vg_new}) VG are missing.') + self.module.fail_json(msg=f"Both current ({self.vg}) and new ({self.vg_new}) VG are missing.") self.module.exit_json(**self.result) def _load_params(self): """Load the parameters from the module.""" - self.vg = self.module.params['vg'] - self.vg_new = self.module.params['vg_new'] + self.vg = self.module.params["vg"] + self.vg_new = self.module.params["vg_new"] def _load_vg_list(self): """Load the VGs from the system.""" - vgs_cmd = self.module.get_bin_path('vgs', required=True) - vgs_cmd_with_opts = [vgs_cmd, '--noheadings', '--separator', ';', '-o', 'vg_name,vg_uuid'] + vgs_cmd = self.module.get_bin_path("vgs", required=True) + vgs_cmd_with_opts = [vgs_cmd, "--noheadings", "--separator", ";", "-o", "vg_name,vg_uuid"] dummy, vg_raw_list, dummy = self.module.run_command(vgs_cmd_with_opts, check_rc=True) for vg_info in vg_raw_list.splitlines(): - vg_name, vg_uuid = vg_info.strip().split(';') + vg_name, vg_uuid = vg_info.strip().split(";") self.vg_list.append(vg_name) self.vg_list.append(vg_uuid) def _is_vg_exists(self, vg): - ''' + """ Checks VG existence by name or UUID. It removes the '/dev/' prefix before checking. :param vg: A string with the name or UUID of the VG. :returns: A boolean indicating whether the VG exists. - ''' + """ vg_found = False - dev_prefix = '/dev/' + dev_prefix = "/dev/" if vg.startswith(dev_prefix): - vg_id = vg[len(dev_prefix):] + vg_id = vg[len(dev_prefix) :] else: vg_id = vg @@ -135,25 +135,24 @@ def _is_vg_exists(self, vg): def _rename_vg(self): """Renames the volume group.""" - vgrename_cmd = self.module.get_bin_path('vgrename', required=True) + vgrename_cmd = self.module.get_bin_path("vgrename", required=True) if self.module._diff: - self.result['diff'] = {'before': {'vg': self.vg}, 'after': {'vg': self.vg_new}} + self.result["diff"] = {"before": {"vg": self.vg}, "after": {"vg": self.vg_new}} if self.module.check_mode: - self.result['msg'] = f"Running in check mode. The module would rename VG {self.vg} to {self.vg_new}." - self.result['changed'] = True + self.result["msg"] = f"Running in check mode. The module would rename VG {self.vg} to {self.vg_new}."
+ self.result["changed"] = True else: vgrename_cmd_with_opts = [vgrename_cmd, self.vg, self.vg_new] dummy, vg_rename_out, dummy = self.module.run_command(vgrename_cmd_with_opts, check_rc=True) - self.result['msg'] = vg_rename_out - self.result['changed'] = True + self.result["msg"] = vg_rename_out + self.result["changed"] = True def setup_module_object(): - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) return module @@ -163,5 +162,5 @@ def main(): lvg_rename.run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/lvm_pv.py b/plugins/modules/lvm_pv.py index 8f5108ef833..53a39799ac3 100644 --- a/plugins/modules/lvm_pv.py +++ b/plugins/modules/lvm_pv.py @@ -76,13 +76,13 @@ def get_pv_status(module, device): """Check if the device is already a PV.""" - cmd = ['pvs', '--noheadings', '--readonly', device] + cmd = ["pvs", "--noheadings", "--readonly", device] return module.run_command(cmd)[0] == 0 def get_pv_size(module, device): """Get current PV size in bytes.""" - cmd = ['pvs', '--noheadings', '--nosuffix', '--units', 'b', '-o', 'pv_size', device] + cmd = ["pvs", "--noheadings", "--nosuffix", "--units", "b", "-o", "pv_size", device] rc, out, err = module.run_command(cmd, check_rc=True) return int(out.strip()) @@ -96,17 +96,18 @@ def rescan_device(module, device): parent_device = base_device if os.path.exists(is_partition): parent_device = ( - base_device.rpartition('p')[0] if base_device.startswith('nvme') - else base_device.rstrip('0123456789') + base_device.rpartition("p")[0] if base_device.startswith("nvme") else base_device.rstrip("0123456789") ) # Determine rescan path - rescan_path = f"/sys/block/{parent_device}/device/{'rescan_controller' if base_device.startswith('nvme') else 'rescan'}" + rescan_path = ( + f"/sys/block/{parent_device}/device/{'rescan_controller' if base_device.startswith('nvme') else 'rescan'}" + ) if os.path.exists(rescan_path): try: - with open(rescan_path, 'w') as f: - f.write('1') + with open(rescan_path, "w") as f: + f.write("1") return True except IOError as e: module.warn(f"Failed to rescan device {device}: {e!s}") @@ -118,41 +119,41 @@ def rescan_device(module, device): def main(): module = AnsibleModule( argument_spec=dict( - device=dict(type='path', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - force=dict(type='bool', default=False), - resize=dict(type='bool', default=False), + device=dict(type="path", required=True), + state=dict(type="str", default="present", choices=["present", "absent"]), + force=dict(type="bool", default=False), + resize=dict(type="bool", default=False), ), supports_check_mode=True, ) - device = module.params['device'] - state = module.params['state'] - force = module.params['force'] - resize = module.params['resize'] + device = module.params["device"] + state = module.params["state"] + force = module.params["force"] + resize = module.params["resize"] changed = False actions = [] # Validate device existence for present state - if state == 'present' and not os.path.exists(device): + if state == "present" and not os.path.exists(device): module.fail_json(msg=f"Device {device} not found") is_pv = get_pv_status(module, device) - if state == 'present': + if state == "present": # Create PV if needed if not is_pv: if module.check_mode: changed = True - actions.append('would be created') + actions.append("would be created") else: - cmd = 
['pvcreate'] + cmd = ["pvcreate"] if force: - cmd.append('-f') + cmd.append("-f") cmd.append(device) rc, out, err = module.run_command(cmd, check_rc=True) changed = True - actions.append('created') + actions.append("created") is_pv = True # Handle resizing @@ -160,31 +161,31 @@ def main(): if module.check_mode: # In check mode, assume resize would change changed = True - actions.append('would be resized') + actions.append("would be resized") else: # Perform a device rescan each time if rescan_device(module, device): - actions.append('rescanned') + actions.append("rescanned") original_size = get_pv_size(module, device) - rc, out, err = module.run_command(['pvresize', device], check_rc=True) + rc, out, err = module.run_command(["pvresize", device], check_rc=True) new_size = get_pv_size(module, device) if new_size != original_size: changed = True - actions.append('resized') + actions.append("resized") - elif state == 'absent': + elif state == "absent": if is_pv: if module.check_mode: changed = True - actions.append('would be removed') + actions.append("would be removed") else: - cmd = ['pvremove', '-y'] + cmd = ["pvremove", "-y"] if force: - cmd.append('-ff') + cmd.append("-ff") changed = True cmd.append(device) rc, out, err = module.run_command(cmd, check_rc=True) - actions.append('removed') + actions.append("removed") # Generate final message if actions: @@ -194,5 +195,5 @@ def main(): module.exit_json(changed=changed, msg=msg) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/lvm_pv_move_data.py b/plugins/modules/lvm_pv_move_data.py index 361f7410c6e..f048525fb23 100644 --- a/plugins/modules/lvm_pv_move_data.py +++ b/plugins/modules/lvm_pv_move_data.py @@ -79,11 +79,11 @@ def main(): module = AnsibleModule( argument_spec=dict( - source=dict(type='path', required=True), - destination=dict(type='path', required=True), - auto_answer=dict(type='bool', default=False), - atomic=dict(type='bool', default=True), - autobackup=dict(type='bool', default=True), + source=dict(type="path", required=True), + destination=dict(type="path", required=True), + auto_answer=dict(type="bool", default=False), + atomic=dict(type="bool", default=True), + autobackup=dict(type="bool", default=True), ), supports_check_mode=True, ) @@ -98,14 +98,14 @@ def main(): pv_pe_alloc_count=cmd_runner_fmt.as_fixed("-o", "pv_pe_alloc_count"), pv_pe_count=cmd_runner_fmt.as_fixed("-o", "pv_pe_count"), device=cmd_runner_fmt.as_list(), - ) + ), ) - source = module.params['source'] - destination = module.params['destination'] + source = module.params["source"] + destination = module.params["destination"] changed = False actions = [] - result = {'changed': False} + result = {"changed": False} # Validate device existence if not os.path.exists(source): @@ -155,7 +155,7 @@ def get_allocated_pe(device): allocated = get_allocated_pe(source) if allocated == 0: - actions.append('no allocated extents to move') + actions.append("no allocated extents to move") else: # Check destination has enough free space def get_total_pe(device): @@ -170,13 +170,15 @@ def get_free_pe(device): free_pe_dest = get_free_pe(destination) if free_pe_dest < allocated: module.fail_json( - msg=(f"Destination device {destination} has only {int(free_pe_dest)} free physical extents, but " - f"source device {source} has {int(allocated)} allocated extents.
Not enough space.") + msg=( + f"Destination device {destination} has only {int(free_pe_dest)} free physical extents, but " + f"source device {source} has {int(allocated)} allocated extents. Not enough space." + ) ) if module.check_mode: changed = True - actions.append(f'would move data from {source} to {destination}') + actions.append(f"would move data from {source} to {destination}") else: pvmove_runner = CmdRunner( module, @@ -184,35 +186,31 @@ def get_free_pe(device): arg_formats=dict( auto_answer=cmd_runner_fmt.as_bool("-y"), atomic=cmd_runner_fmt.as_bool("--atomic"), - autobackup=cmd_runner_fmt.as_fixed("--autobackup", "y" if module.params['autobackup'] else "n"), + autobackup=cmd_runner_fmt.as_fixed("--autobackup", "y" if module.params["autobackup"] else "n"), verbosity=cmd_runner_fmt.as_func(lambda v: [f"-{'v' * v}"] if v > 0 else []), source=cmd_runner_fmt.as_list(), destination=cmd_runner_fmt.as_list(), - ) + ), ) verbosity = module._verbosity with pvmove_runner("auto_answer atomic autobackup verbosity source destination") as ctx: - rc, out, err = ctx.run( - verbosity=verbosity, - source=source, - destination=destination - ) - result['stdout'] = out - result['stderr'] = err + rc, out, err = ctx.run(verbosity=verbosity, source=source, destination=destination) + result["stdout"] = out + result["stderr"] = err changed = True - actions.append(f'moved data from {source} to {destination}') + actions.append(f"moved data from {source} to {destination}") - result['changed'] = changed - result['actions'] = actions + result["changed"] = changed + result["actions"] = actions if actions: - result['msg'] = f"PV data move: {', '.join(actions)}" + result["msg"] = f"PV data move: {', '.join(actions)}" else: - result['msg'] = f"No data to move from {source}" + result["msg"] = f"No data to move from {source}" module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/lvol.py b/plugins/modules/lvol.py index 27ea66985fb..f8f035875be 100644 --- a/plugins/modules/lvol.py +++ b/plugins/modules/lvol.py @@ -239,10 +239,10 @@ LVOL_ENV_VARS = dict( # make sure we use the C locale when running lvol-related commands - LANG='C', - LC_ALL='C', - LC_MESSAGES='C', - LC_CTYPE='C', + LANG="C", + LC_ALL="C", + LC_MESSAGES="C", + LC_CTYPE="C", ) @@ -253,27 +253,24 @@ def mkversion(major, minor, patch): def parse_lvs(data): lvs = [] for line in data.splitlines(): - parts = line.strip().split(';') - lvs.append({ - 'name': parts[0].replace('[', '').replace(']', ''), - 'size': float(parts[1]), - 'active': (parts[2][4] == 'a'), - 'thinpool': (parts[2][0] == 't'), - 'thinvol': (parts[2][0] == 'V'), - }) + parts = line.strip().split(";") + lvs.append( + { + "name": parts[0].replace("[", "").replace("]", ""), + "size": float(parts[1]), + "active": (parts[2][4] == "a"), + "thinpool": (parts[2][0] == "t"), + "thinvol": (parts[2][0] == "V"), + } + ) return lvs def parse_vgs(data): vgs = [] for line in data.splitlines(): - parts = line.strip().split(';') - vgs.append({ - 'name': parts[0], - 'size': float(parts[1]), - 'free': float(parts[2]), - 'ext_size': float(parts[3]) - }) + parts = line.strip().split(";") + vgs.append({"name": parts[0], "size": float(parts[1]), "free": float(parts[2]), "ext_size": float(parts[3])}) return vgs @@ -291,23 +288,21 @@ def get_lvm_version(module): def main(): module = AnsibleModule( argument_spec=dict( - vg=dict(type='str', required=True), - lv=dict(type='str'), - size=dict(type='str'), - opts=dict(type='str'), - state=dict(type='str', 
default='present', choices=['absent', 'present']), - force=dict(type='bool', default=False), - shrink=dict(type='bool', default=True), - active=dict(type='bool', default=True), - snapshot=dict(type='str'), - pvs=dict(type='list', elements='str'), - resizefs=dict(type='bool', default=False), - thinpool=dict(type='str'), + vg=dict(type="str", required=True), + lv=dict(type="str"), + size=dict(type="str"), + opts=dict(type="str"), + state=dict(type="str", default="present", choices=["absent", "present"]), + force=dict(type="bool", default=False), + shrink=dict(type="bool", default=True), + active=dict(type="bool", default=True), + snapshot=dict(type="str"), + pvs=dict(type="list", elements="str"), + resizefs=dict(type="bool", default=False), + thinpool=dict(type="str"), ), supports_check_mode=True, - required_one_of=( - ['lv', 'thinpool'], - ), + required_one_of=(["lv", "thinpool"],), ) module.run_command_environ_update = LVOL_ENV_VARS @@ -322,55 +317,55 @@ def main(): else: yesopt = [] - vg = module.params['vg'] - lv = module.params['lv'] - size = module.params['size'] - opts = shlex.split(module.params['opts'] or '') - state = module.params['state'] - force = module.boolean(module.params['force']) - shrink = module.boolean(module.params['shrink']) - active = module.boolean(module.params['active']) - resizefs = module.boolean(module.params['resizefs']) - thinpool = module.params['thinpool'] - size_opt = 'L' - size_unit = 'm' + vg = module.params["vg"] + lv = module.params["lv"] + size = module.params["size"] + opts = shlex.split(module.params["opts"] or "") + state = module.params["state"] + force = module.boolean(module.params["force"]) + shrink = module.boolean(module.params["shrink"]) + active = module.boolean(module.params["active"]) + resizefs = module.boolean(module.params["resizefs"]) + thinpool = module.params["thinpool"] + size_opt = "L" + size_unit = "m" size_operator = None - snapshot = module.params['snapshot'] - pvs = module.params['pvs'] or [] + snapshot = module.params["snapshot"] + pvs = module.params["pvs"] or [] # Add --test option when running in check-mode if module.check_mode: - test_opt = ['--test'] + test_opt = ["--test"] else: test_opt = [] if size: # LVEXTEND(8)/LVREDUCE(8) -l, -L options: Check for relative value for resizing - if size.startswith('+'): - size_operator = '+' + if size.startswith("+"): + size_operator = "+" size = size[1:] - elif size.startswith('-'): - size_operator = '-' + elif size.startswith("-"): + size_operator = "-" size = size[1:] # LVCREATE(8) does not support [+-] # LVCREATE(8)/LVEXTEND(8)/LVREDUCE(8) -l --extents option with percentage - if '%' in size: - size_parts = size.split('%', 1) + if "%" in size: + size_parts = size.split("%", 1) size_percent = int(size_parts[0]) if size_percent > 100: module.fail_json(msg="Size percentage cannot be larger than 100%") size_whole = size_parts[1] - if size_whole == 'ORIGIN' and snapshot is None: + if size_whole == "ORIGIN" and snapshot is None: module.fail_json(msg="Percentage of ORIGIN supported only for snapshot volumes") - elif size_whole not in ['VG', 'PVS', 'FREE', 'ORIGIN']: + elif size_whole not in ["VG", "PVS", "FREE", "ORIGIN"]: module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE|ORIGIN") - size_opt = 'l' - size_unit = '' + size_opt = "l" + size_unit = "" # LVCREATE(8)/LVEXTEND(8)/LVREDUCE(8) -L --size option unit - if '%' not in size: - if size[-1].lower() in 'bskmgtpe': + if "%" not in size: + if size[-1].lower() in "bskmgtpe": size_unit = size[-1] size = size[0:-1] @@ 
-382,18 +377,30 @@ def main(): module.fail_json(msg=f"Bad size specification of '{size}'") # when no unit, megabytes by default - if size_opt == 'l': - unit = 'm' + if size_opt == "l": + unit = "m" else: unit = size_unit # Get information on volume group requested vgs_cmd = module.get_bin_path("vgs", required=True) rc, current_vgs, err = module.run_command( - [vgs_cmd, "--noheadings", "--nosuffix", "-o", "vg_name,size,free,vg_extent_size", "--units", unit.lower(), "--separator", ";", vg]) + [ + vgs_cmd, + "--noheadings", + "--nosuffix", + "-o", + "vg_name,size,free,vg_extent_size", + "--units", + unit.lower(), + "--separator", + ";", + vg, + ] + ) if rc != 0: - if state == 'absent': + if state == "absent": module.exit_json(changed=False, stdout=f"Volume group {vg} does not exist.") else: module.fail_json(msg=f"Volume group {vg} does not exist.", rc=rc, err=err) @@ -404,10 +411,23 @@ def main(): # Get information on logical volume requested lvs_cmd = module.get_bin_path("lvs", required=True) rc, current_lvs, err = module.run_command( - [lvs_cmd, "-a", "--noheadings", "--nosuffix", "-o", "lv_name,size,lv_attr", "--units", unit.lower(), "--separator", ";", vg]) + [ + lvs_cmd, + "-a", + "--noheadings", + "--nosuffix", + "-o", + "lv_name,size,lv_attr", + "--units", + unit.lower(), + "--separator", + ";", + vg, + ] + ) if rc != 0: - if state == 'absent': + if state == "absent": module.exit_json(changed=False, stdout=f"Volume group {vg} does not exist.") else: module.fail_json(msg=f"Volume group {vg} does not exist.", rc=rc, err=err) @@ -419,8 +439,8 @@ def main(): if snapshot: # Check snapshot pre-conditions for test_lv in lvs: - if test_lv['name'] == lv or test_lv['name'] == thinpool: - if not test_lv['thinpool'] and not thinpool: + if test_lv["name"] == lv or test_lv["name"] == thinpool: + if not test_lv["thinpool"] and not thinpool: break else: module.fail_json(msg="Snapshots of thin pool LVs are not supported.") @@ -431,7 +451,7 @@ def main(): if lv: # Check thin volume pre-conditions for test_lv in lvs: - if test_lv['name'] == thinpool: + if test_lv["name"] == thinpool: break else: module.fail_json(msg=f"Thin pool LV {thinpool} does not exist in volume group {vg}.") @@ -442,22 +462,22 @@ def main(): check_lv = lv for test_lv in lvs: - if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]): + if test_lv["name"] in (check_lv, check_lv.rsplit("/", 1)[-1]): this_lv = test_lv break else: this_lv = None - msg = '' + msg = "" if this_lv is None: - if state == 'present': + if state == "present": if size_operator is not None: if size_operator == "-" or (size_whole not in ["VG", "PVS", "FREE", "ORIGIN", None]): module.fail_json(msg=f"Bad size specification of '{size_operator}{size}' for creating LV") # Require size argument except for snapshot of thin volumes if (lv or thinpool) and not size: for test_lv in lvs: - if test_lv['name'] == lv and test_lv['thinvol'] and snapshot: + if test_lv["name"] == lv and test_lv["thinvol"] and snapshot: break else: module.fail_json(msg="No size given.") @@ -471,9 +491,9 @@ def main(): cmd += ["-s", "-n", snapshot] + opts + [f"{vg}/{lv}"] elif thinpool: if lv: - if size_opt == 'l': + if size_opt == "l": module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.") - size_opt = 'V' + size_opt = "V" cmd += ["-n", lv] cmd += [f"-{size_opt}", f"{size}{size_unit}"] cmd += opts + ["-T", f"{vg}/{thinpool}"] @@ -487,7 +507,7 @@ def main(): else: module.fail_json(msg=f"Creating logical volume '{lv}' failed", rc=rc, err=err) else: - if state 
== 'absent': + if state == "absent": # remove LV if not force: module.fail_json(msg=f"Sorry, no removal of logical volume {this_lv['name']} without force=true.") @@ -501,42 +521,44 @@ def main(): elif not size: pass - elif size_opt == 'l': + elif size_opt == "l": # Resize LV based on % value tool = None - size_free = this_vg['free'] - if size_whole == 'VG' or size_whole == 'PVS': - size_requested = size_percent * this_vg['size'] / 100 + size_free = this_vg["free"] + if size_whole == "VG" or size_whole == "PVS": + size_requested = size_percent * this_vg["size"] / 100 else: # size_whole == 'FREE': - size_requested = size_percent * this_vg['free'] / 100 + size_requested = size_percent * this_vg["free"] / 100 - if size_operator == '+': - size_requested += this_lv['size'] - elif size_operator == '-': - size_requested = this_lv['size'] - size_requested + if size_operator == "+": + size_requested += this_lv["size"] + elif size_operator == "-": + size_requested = this_lv["size"] - size_requested # According to latest documentation (LVM2-2.03.11) all tools round down - size_requested -= (size_requested % this_vg['ext_size']) + size_requested -= size_requested % this_vg["ext_size"] - if this_lv['size'] < size_requested: - if (size_free > 0) and (size_free >= (size_requested - this_lv['size'])): + if this_lv["size"] < size_requested: + if (size_free > 0) and (size_free >= (size_requested - this_lv["size"])): tool = [module.get_bin_path("lvextend", required=True)] else: module.fail_json( - msg=(f"Logical Volume {this_lv['name']} could not be extended. Not enough free space left " - f"({size_requested - this_lv['size']}{unit} required / {size_free}{unit} available)") + msg=( + f"Logical Volume {this_lv['name']} could not be extended. Not enough free space left " + f"({size_requested - this_lv['size']}{unit} required / {size_free}{unit} available)" + ) ) - elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large + elif shrink and this_lv["size"] > size_requested + this_vg["ext_size"]: # more than an extent too large if size_requested < 1: module.fail_json(msg=f"Sorry, no shrinking of {this_lv['name']} to 0 permitted.") elif not force: module.fail_json(msg=f"Sorry, no shrinking of {this_lv['name']} without force=true") else: - tool = [module.get_bin_path("lvreduce", required=True), '--force'] + tool = [module.get_bin_path("lvreduce", required=True), "--force"] if tool: if resizefs: - tool += ['--resizefs'] + tool += ["--resizefs"] cmd = tool + test_opt if size_operator: cmd += [f"-{size_opt}", f"{size_operator}{size}{size_unit}"] @@ -550,28 +572,35 @@ def main(): changed = True msg = f"Volume {this_lv['name']} resized to {size_requested}{unit}" elif "matches existing size" in err or "matches existing size" in out: - module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) + module.exit_json(changed=False, vg=vg, lv=this_lv["name"], size=this_lv["size"]) elif "not larger than existing size" in err or "not larger than existing size" in out: - module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) + module.exit_json( + changed=False, + vg=vg, + lv=this_lv["name"], + size=this_lv["size"], + msg="Original size is larger than requested size", + err=err, + ) else: module.fail_json(msg=f"Unable to resize {lv} to {size}{size_unit}", rc=rc, err=err) else: # resize LV based on absolute values tool = None - if float(size) > this_lv['size'] or size_operator == 
'+': + if float(size) > this_lv["size"] or size_operator == "+": tool = [module.get_bin_path("lvextend", required=True)] - elif shrink and float(size) < this_lv['size'] or size_operator == '-': + elif shrink and float(size) < this_lv["size"] or size_operator == "-": if float(size) == 0: module.fail_json(msg=f"Sorry, no shrinking of {this_lv['name']} to 0 permitted.") if not force: module.fail_json(msg=f"Sorry, no shrinking of {this_lv['name']} without force=true.") else: - tool = [module.get_bin_path("lvreduce", required=True), '--force'] + tool = [module.get_bin_path("lvreduce", required=True), "--force"] if tool: if resizefs: - tool += ['--resizefs'] + tool += ["--resizefs"] cmd = tool + test_opt if size_operator: cmd += [f"-{size_opt}", f"{size_operator}{size}{size_unit}"] @@ -584,9 +613,16 @@ def main(): elif rc == 0: changed = True elif "matches existing size" in err or "matches existing size" in out: - module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) + module.exit_json(changed=False, vg=vg, lv=this_lv["name"], size=this_lv["size"]) elif "not larger than existing size" in err or "not larger than existing size" in out: - module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) + module.exit_json( + changed=False, + vg=vg, + lv=this_lv["name"], + size=this_lv["size"], + msg="Original size is larger than requested size", + err=err, + ) else: module.fail_json(msg=f"Unable to resize {lv} to {size}{size_unit}", rc=rc, err=err) @@ -595,19 +631,23 @@ def main(): lvchange_cmd = module.get_bin_path("lvchange", required=True) rc, dummy, err = module.run_command([lvchange_cmd, "-ay", f"{vg}/{this_lv['name']}"]) if rc == 0: - module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) + module.exit_json( + changed=((not this_lv["active"]) or changed), vg=vg, lv=this_lv["name"], size=this_lv["size"] + ) else: module.fail_json(msg=f"Failed to activate logical volume {lv}", rc=rc, err=err) else: lvchange_cmd = module.get_bin_path("lvchange", required=True) rc, dummy, err = module.run_command([lvchange_cmd, "-an", f"{vg}/{this_lv['name']}"]) if rc == 0: - module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) + module.exit_json( + changed=(this_lv["active"] or changed), vg=vg, lv=this_lv["name"], size=this_lv["size"] + ) else: module.fail_json(msg=f"Failed to deactivate logical volume {lv}", rc=rc, err=err) module.exit_json(changed=changed, msg=msg) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/lxc_container.py b/plugins/modules/lxc_container.py index 6326fe12294..cedff97330b 100644 --- a/plugins/modules/lxc_container.py +++ b/plugins/modules/lxc_container.py @@ -433,100 +433,79 @@ # LXC_COMPRESSION_MAP is a map of available compression types when creating # an archive of a container. LXC_COMPRESSION_MAP = { - 'gzip': { - 'extension': 'tar.tgz', - 'argument': '-czf' - }, - 'bzip2': { - 'extension': 'tar.bz2', - 'argument': '-cjf' - }, - 'none': { - 'extension': 'tar', - 'argument': '-cf' - } + "gzip": {"extension": "tar.tgz", "argument": "-czf"}, + "bzip2": {"extension": "tar.bz2", "argument": "-cjf"}, + "none": {"extension": "tar", "argument": "-cf"}, } # LXC_COMMAND_MAP is a map of variables that are available to a method based # on the state the container is in. 
LXC_COMMAND_MAP = { - 'create': { - 'variables': { - 'config': '--config', - 'template': '--template', - 'backing_store': '--bdev', - 'lxc_path': '--lxcpath', - 'lv_name': '--lvname', - 'vg_name': '--vgname', - 'thinpool': '--thinpool', - 'fs_type': '--fstype', - 'fs_size': '--fssize', - 'directory': '--dir', - 'zfs_root': '--zfsroot' + "create": { + "variables": { + "config": "--config", + "template": "--template", + "backing_store": "--bdev", + "lxc_path": "--lxcpath", + "lv_name": "--lvname", + "vg_name": "--vgname", + "thinpool": "--thinpool", + "fs_type": "--fstype", + "fs_size": "--fssize", + "directory": "--dir", + "zfs_root": "--zfsroot", } }, - 'clone': { - 'variables-lxc-copy': { - 'backing_store': '--backingstorage', - 'lxc_path': '--lxcpath', - 'fs_size': '--fssize', - 'name': '--name', - 'clone_name': '--newname' + "clone": { + "variables-lxc-copy": { + "backing_store": "--backingstorage", + "lxc_path": "--lxcpath", + "fs_size": "--fssize", + "name": "--name", + "clone_name": "--newname", }, # lxc-clone is deprecated in favor of lxc-copy - 'variables-lxc-clone': { - 'backing_store': '--backingstore', - 'lxc_path': '--lxcpath', - 'fs_size': '--fssize', - 'name': '--orig', - 'clone_name': '--new' - } - } + "variables-lxc-clone": { + "backing_store": "--backingstore", + "lxc_path": "--lxcpath", + "fs_size": "--fssize", + "name": "--orig", + "clone_name": "--new", + }, + }, } # LXC_BACKING_STORE is a map of available storage backends and options that # are incompatible with the given storage backend. LXC_BACKING_STORE = { - 'dir': [ - 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool' - ], - 'lvm': [ - 'zfs_root' - ], - 'btrfs': [ - 'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size' - ], - 'loop': [ - 'lv_name', 'vg_name', 'thinpool', 'zfs_root' - ], - 'overlayfs': [ - 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root' - ], - 'zfs': [ - 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool' - ] + "dir": ["lv_name", "vg_name", "fs_type", "fs_size", "thinpool"], + "lvm": ["zfs_root"], + "btrfs": ["lv_name", "vg_name", "thinpool", "zfs_root", "fs_type", "fs_size"], + "loop": ["lv_name", "vg_name", "thinpool", "zfs_root"], + "overlayfs": ["lv_name", "vg_name", "fs_type", "fs_size", "thinpool", "zfs_root"], + "zfs": ["lv_name", "vg_name", "fs_type", "fs_size", "thinpool"], } # LXC_LOGGING_LEVELS is a map of available log levels LXC_LOGGING_LEVELS = { - 'INFO': ['info', 'INFO', 'Info'], - 'ERROR': ['error', 'ERROR', 'Error'], - 'DEBUG': ['debug', 'DEBUG', 'Debug'] + "INFO": ["info", "INFO", "Info"], + "ERROR": ["error", "ERROR", "Error"], + "DEBUG": ["debug", "DEBUG", "Debug"], } # LXC_ANSIBLE_STATES is a map of states that contain values of methods used # when a particular state is evoked. 
LXC_ANSIBLE_STATES = { - 'started': '_started', - 'stopped': '_stopped', - 'restarted': '_restarted', - 'absent': '_destroyed', - 'frozen': '_frozen', - 'clone': '_clone' + "started": "_started", + "stopped": "_stopped", + "restarted": "_restarted", + "absent": "_destroyed", + "frozen": "_frozen", + "clone": "_clone", } @@ -559,30 +538,26 @@ def create_script(command): :type command: ``str`` """ - (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script') - f = os.fdopen(fd, 'wb') + (fd, script_file) = tempfile.mkstemp(prefix="lxc-attach-script") + f = os.fdopen(fd, "wb") try: - f.write(to_bytes(ATTACH_TEMPLATE % {'container_command': command}, errors='surrogate_or_strict')) + f.write(to_bytes(ATTACH_TEMPLATE % {"container_command": command}, errors="surrogate_or_strict")) f.flush() finally: f.close() # Ensure the script is executable. - os.chmod(script_file, int('0700', 8)) + os.chmod(script_file, int("0700", 8)) # Output log file. - stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab') + stdout_file = os.fdopen(tempfile.mkstemp(prefix="lxc-attach-script-log")[0], "ab") # Error log file. - stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab') + stderr_file = os.fdopen(tempfile.mkstemp(prefix="lxc-attach-script-err")[0], "ab") # Execute the script command. try: - subprocess.Popen( - [script_file], - stdout=stdout_file, - stderr=stderr_file - ).communicate() + subprocess.Popen([script_file], stdout=stdout_file, stderr=stderr_file).communicate() finally: # Close the log files. stderr_file.close() @@ -600,11 +575,11 @@ def __init__(self, module): :type module: ``object`` """ self.module = module - self.state = self.module.params['state'] + self.state = self.module.params["state"] self.state_change = False self.lxc_vg = None - self.lxc_path = self.module.params['lxc_path'] - self.container_name = self.module.params['name'] + self.lxc_path = self.module.params["lxc_path"] + self.container_name = self.module.params["name"] self.container = self.get_container_bind() self.archive_info = None self.clone_info = None @@ -621,7 +596,7 @@ def _roundup(num): :returns: Rounded up number. :rtype: ``int`` """ - num, part = str(num).split('.') + num, part = str(num).split(".") num = int(num) if int(part) != 0: num += 1 @@ -665,15 +640,11 @@ def _get_vars(self, variables): # Remove incompatible storage backend options. variables = variables.copy() - for v in LXC_BACKING_STORE[self.module.params['backing_store']]: + for v in LXC_BACKING_STORE[self.module.params["backing_store"]]: variables.pop(v, None) - false_values = BOOLEANS_FALSE.union([None, '']) - result = { - v: self.module.params[k] - for k, v in variables.items() - if self.module.params[k] not in false_values - } + false_values = BOOLEANS_FALSE.union([None, ""]) + result = {v: self.module.params[k] for k, v in variables.items() if self.module.params[k] not in false_values} return result def _config(self): @@ -684,26 +655,26 @@ def _config(self): restart the container upon completion. 
""" - _container_config = self.module.params['container_config'] + _container_config = self.module.params["container_config"] if not _container_config: return False container_config_file = self.container.config_file_name - with open(container_config_file, 'rb') as f: - container_config = to_text(f.read(), errors='surrogate_or_strict').splitlines(True) + with open(container_config_file, "rb") as f: + container_config = to_text(f.read(), errors="surrogate_or_strict").splitlines(True) - parsed_options = [i.split('=', 1) for i in _container_config] + parsed_options = [i.split("=", 1) for i in _container_config] config_change = False for key, value in parsed_options: key = key.strip() value = value.strip() - new_entry = f'{key} = {value}\n' - keyre = re.compile(rf'{key}(\s+)?=') + new_entry = f"{key} = {value}\n" + keyre = re.compile(rf"{key}(\s+)?=") for option_line in container_config: # Look for key in config if keyre.match(option_line): - dummy, _value = option_line.split('=', 1) - config_value = ' '.join(_value.split()) + dummy, _value = option_line.split("=", 1) + config_value = " ".join(_value.split()) line_index = container_config.index(option_line) # If the sanitized values don't match replace them if value != config_value: @@ -720,16 +691,16 @@ def _config(self): # If the config changed restart the container. if config_change: container_state = self._get_state() - if container_state != 'stopped': + if container_state != "stopped": self.container.stop() - with open(container_config_file, 'wb') as f: - f.writelines([to_bytes(line, errors='surrogate_or_strict') for line in container_config]) + with open(container_config_file, "wb") as f: + f.writelines([to_bytes(line, errors="surrogate_or_strict") for line in container_config]) self.state_change = True - if container_state == 'running': + if container_state == "running": self._container_startup() - elif container_state == 'frozen': + elif container_state == "frozen": self._container_startup() self.container.freeze() @@ -751,52 +722,45 @@ def _container_create_clone(self): # Ensure that the state of the original container is stopped container_state = self._get_state() - if container_state != 'stopped': + if container_state != "stopped": self.state_change = True self.container.stop() # lxc-clone is deprecated in favor of lxc-copy - clone_vars = 'variables-lxc-copy' - clone_cmd = self.module.get_bin_path('lxc-copy') + clone_vars = "variables-lxc-copy" + clone_cmd = self.module.get_bin_path("lxc-copy") if not clone_cmd: - clone_vars = 'variables-lxc-clone' - clone_cmd = self.module.get_bin_path('lxc-clone', True) + clone_vars = "variables-lxc-clone" + clone_cmd = self.module.get_bin_path("lxc-clone", True) build_command = [ clone_cmd, ] build_command = self._add_variables( - variables_dict=self._get_vars( - variables=LXC_COMMAND_MAP['clone'][clone_vars] - ), - build_command=build_command + variables_dict=self._get_vars(variables=LXC_COMMAND_MAP["clone"][clone_vars]), build_command=build_command ) # Load logging for the instance when creating it. - if self.module.params['clone_snapshot']: - build_command.append('--snapshot') + if self.module.params["clone_snapshot"]: + build_command.append("--snapshot") # Check for backing_store == overlayfs if so force the use of snapshot # If overlay fs is used and snapshot is unset the clone command will # fail with an unsupported type. 
- elif self.module.params['backing_store'] == 'overlayfs': - build_command.append('--snapshot') + elif self.module.params["backing_store"] == "overlayfs": + build_command.append("--snapshot") rc, return_data, err = self.module.run_command(build_command) if rc != 0: message = f"Failed executing {os.path.basename(clone_cmd)}." - self.failure( - err=err, rc=rc, msg=message, command=' '.join( - build_command - ) - ) + self.failure(err=err, rc=rc, msg=message, command=" ".join(build_command)) else: self.state_change = True # Restore the original state of the origin container if it was # not in a stopped state. - if container_state == 'running': + if container_state == "running": self.container.start() - elif container_state == 'frozen': + elif container_state == "frozen": self.container.start() self.container.freeze() @@ -813,56 +777,45 @@ def _create(self): LXC containers with block devices. """ - build_command = [ - self.module.get_bin_path('lxc-create', True), - '--name', self.container_name, - '--quiet' - ] + build_command = [self.module.get_bin_path("lxc-create", True), "--name", self.container_name, "--quiet"] build_command = self._add_variables( - variables_dict=self._get_vars( - variables=LXC_COMMAND_MAP['create']['variables'] - ), - build_command=build_command + variables_dict=self._get_vars(variables=LXC_COMMAND_MAP["create"]["variables"]), build_command=build_command ) # Load logging for the instance when creating it. - if self.module.params['container_log']: + if self.module.params["container_log"]: # Set the logging path to the /var/log/lxc if uid is root. else # set it to the home folder of the user executing. try: if os.getuid() != 0: - log_path = os.getenv('HOME') + log_path = os.getenv("HOME") else: - if not os.path.isdir('/var/log/lxc/'): - os.makedirs('/var/log/lxc/') - log_path = '/var/log/lxc/' + if not os.path.isdir("/var/log/lxc/"): + os.makedirs("/var/log/lxc/") + log_path = "/var/log/lxc/" except OSError: - log_path = os.getenv('HOME') - - build_command.extend([ - '--logfile', - os.path.join( - log_path, f'lxc-{self.container_name}.log' - ), - '--logpriority', - self.module.params.get( - 'container_log_level' - ).upper() - ]) + log_path = os.getenv("HOME") + + build_command.extend( + [ + "--logfile", + os.path.join(log_path, f"lxc-{self.container_name}.log"), + "--logpriority", + self.module.params.get("container_log_level").upper(), + ] + ) # Add the template commands to the end of the command if there are any - template_options = self.module.params['template_options'] + template_options = self.module.params["template_options"] if template_options: - build_command.append('--') + build_command.append("--") build_command += shlex.split(template_options) rc, return_data, err = self.module.run_command(build_command) if rc != 0: message = "Failed executing lxc-create." 
- self.failure( - err=err, rc=rc, msg=message, command=' '.join(build_command) - ) + self.failure(err=err, rc=rc, msg=message, command=" ".join(build_command)) else: self.state_change = True @@ -874,11 +827,11 @@ def _container_data(self): """ return { - 'interfaces': self.container.get_interfaces(), - 'ips': self.container.get_ips(), - 'state': self._get_state(), - 'init_pid': int(self.container.init_pid), - 'name': self.container_name, + "interfaces": self.container.get_interfaces(), + "ips": self.container.get_ips(), + "state": self._get_state(), + "init_pid": int(self.container.init_pid), + "name": self.container_name, } def _unfreeze(self): @@ -904,17 +857,17 @@ def _get_state(self): if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): return str(self.container.state).lower() - return str('absent') + return str("absent") def _execute_command(self): """Execute a shell command.""" - container_command = self.module.params['container_command'] + container_command = self.module.params["container_command"] if container_command: container_state = self._get_state() - if container_state == 'frozen': + if container_state == "frozen": self._unfreeze() - elif container_state == 'stopped': + elif container_state == "stopped": self._container_startup() self.container.attach_wait(create_script, container_command) @@ -929,7 +882,7 @@ def _container_startup(self, timeout=60): self.container = self.get_container_bind() for dummy in range(timeout): - if self._get_state() == 'running': + if self._get_state() == "running": return True self.container.start() @@ -938,9 +891,9 @@ def _container_startup(self, timeout=60): time.sleep(1) self.failure( lxc_container=self._container_data(), - error=f'Failed to start container [ {self.container_name} ]', + error=f"Failed to start container [ {self.container_name} ]", rc=1, - msg=f'The container [ {self.container_name} ] failed to start. Check to lxc is available and that the container is in a functional state.' + msg=f"The container [ {self.container_name} ] failed to start. Check that lxc is available and that the container is in a functional state.", ) def _check_archive(self): @@ -949,10 +902,8 @@ def _check_archive(self): This will store the archive information as self.archive_info """ - if self.module.params['archive']: - self.archive_info = { - 'archive': self._container_create_tar() - } + if self.module.params["archive"]: + self.archive_info = {"archive": self._container_create_tar()} def _check_clone(self): """Clone the container if requested. This will store the clone information as self.clone_info """ - clone_name = self.module.params['clone_name'] + clone_name = self.module.params["clone_name"] if clone_name: if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path): - self.clone_info = { - 'cloned': self._container_create_clone() - } + self.clone_info = {"cloned": self._container_create_clone()} else: - self.clone_info = { - 'cloned': False - } + self.clone_info = {"cloned": False} def _destroyed(self, timeout=60): """Ensure a container is destroyed.
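The LXC_ANSIBLE_STATES table earlier in this file stores handler method names as strings, so a requested state is presumably resolved to a bound method by name and then called. A minimal, self-contained sketch of that dispatch pattern (hypothetical class and handler bodies, for illustration only, not this module's literal code):

    # Sketch: dispatch a requested state through a state-to-method-name map.
    class Manager:
        STATES = {"started": "_started", "stopped": "_stopped"}

        def run(self, state):
            # Resolve the handler name for the requested state, then call it.
            handler = getattr(self, self.STATES[state])
            return handler()

        def _started(self):
            return "ensure running"

        def _stopped(self):
            return "ensure stopped"

    print(Manager().run("started"))  # -> ensure running

Keeping the state set and its handlers in one table means supporting a new state only takes a new map entry plus a matching method, without touching any dispatch logic.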
@@ -988,7 +935,7 @@ def _destroyed(self, timeout=60): # Check if the container is to be cloned self._check_clone() - if self._get_state() != 'stopped': + if self._get_state() != "stopped": self.state_change = True self.container.stop() @@ -1000,10 +947,12 @@ def _destroyed(self, timeout=60): else: self.failure( lxc_container=self._container_data(), - error=f'Failed to destroy container [ {self.container_name} ]', + error=f"Failed to destroy container [ {self.container_name} ]", rc=1, - msg=(f'The container [ {self.container_name} ] failed to be destroyed. ' - 'Check that lxc is available and that the container is in a functional state.') + msg=( + f"The container [ {self.container_name} ] failed to be destroyed. " + "Check that lxc is available and that the container is in a functional state." + ), ) def _frozen(self, count=0): @@ -1015,7 +964,7 @@ def _frozen(self, count=0): :type count: ``int`` """ - self.check_count(count=count, method='frozen') + self.check_count(count=count, method="frozen") if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): self._execute_command() @@ -1023,9 +972,9 @@ def _frozen(self, count=0): self._config() container_state = self._get_state() - if container_state == 'frozen': + if container_state == "frozen": pass - elif container_state == 'running': + elif container_state == "running": self.container.freeze() self.state_change = True else: @@ -1052,14 +1001,14 @@ def _restarted(self, count=0): :type count: ``int`` """ - self.check_count(count=count, method='restart') + self.check_count(count=count, method="restart") if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): self._execute_command() # Perform any configuration updates self._config() - if self._get_state() != 'stopped': + if self._get_state() != "stopped": self.container.stop() self.state_change = True @@ -1085,14 +1034,14 @@ def _stopped(self, count=0): :type count: ``int`` """ - self.check_count(count=count, method='stop') + self.check_count(count=count, method="stop") if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): self._execute_command() # Perform any configuration updates self._config() - if self._get_state() != 'stopped': + if self._get_state() != "stopped": self.container.stop() self.state_change = True @@ -1115,19 +1064,19 @@ def _started(self, count=0): :type count: ``int`` """ - self.check_count(count=count, method='start') + self.check_count(count=count, method="start") if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): container_state = self._get_state() - if container_state == 'running': + if container_state == "running": pass - elif container_state == 'frozen': + elif container_state == "frozen": self._unfreeze() elif not self._container_startup(): self.failure( lxc_container=self._container_data(), - error=f'Failed to start container [ {self.container_name} ]', + error=f"Failed to start container [ {self.container_name} ]", rc=1, - msg=f'The container [ {self.container_name} ] failed to start. Check to lxc is available and that the container is in a functional state.' + msg=f"The container [ {self.container_name} ] failed to start. 
Check that lxc is available and that the container is in a functional state.", ) # Return data @@ -1149,18 +1098,10 @@ def _started(self, count=0): def _get_lxc_vg(self): """Return the name of the Volume Group used in LXC.""" - build_command = [ - self.module.get_bin_path('lxc-config', True), - "lxc.bdev.lvm.vg" - ] + build_command = [self.module.get_bin_path("lxc-config", True), "lxc.bdev.lvm.vg"] rc, vg, err = self.module.run_command(build_command) if rc != 0: - self.failure( - err=err, - rc=rc, - msg='Failed to read LVM VG from LXC config', - command=' '.join(build_command) - ) + self.failure(err=err, rc=rc, msg="Failed to read LVM VG from LXC config", command=" ".join(build_command)) else: return str(vg.strip()) @@ -1168,17 +1109,10 @@ def _lvm_lv_list(self): """Return a list of all lv in a current vg.""" vg = self._get_lxc_vg() - build_command = [ - self.module.get_bin_path('lvs', True) - ] + build_command = [self.module.get_bin_path("lvs", True)] rc, stdout, err = self.module.run_command(build_command) if rc != 0: - self.failure( - err=err, - rc=rc, - msg='Failed to get list of LVs', - command=' '.join(build_command) - ) + self.failure(err=err, rc=rc, msg="Failed to get list of LVs", command=" ".join(build_command)) all_lvms = [i.split() for i in stdout.splitlines()][1:] return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg] @@ -1192,23 +1126,13 @@ def _get_vg_free_pe(self, vg_name): :type: ``tuple`` """ - build_command = [ - 'vgdisplay', - vg_name, - '--units', - 'g' - ] + build_command = ["vgdisplay", vg_name, "--units", "g"] rc, stdout, err = self.module.run_command(build_command) if rc != 0: - self.failure( - err=err, - rc=rc, - msg=f'failed to read vg {vg_name}', - command=' '.join(build_command) - ) + self.failure(err=err, rc=rc, msg=f"failed to read vg {vg_name}", command=" ".join(build_command)) vg_info = [i.strip() for i in stdout.splitlines()][1:] - free_pe = [i for i in vg_info if i.startswith('Free')] + free_pe = [i for i in vg_info if i.startswith("Free")] _free_pe = free_pe[0].split() return float(_free_pe[-2]), _free_pe[-1] @@ -1223,28 +1147,17 @@ def _get_lv_size(self, lv_name): vg = self._get_lxc_vg() lv = os.path.join(vg, lv_name) - build_command = [ - 'lvdisplay', - lv, - '--units', - 'g' - ] + build_command = ["lvdisplay", lv, "--units", "g"] rc, stdout, err = self.module.run_command(build_command) if rc != 0: - self.failure( - err=err, - rc=rc, - msg=f'failed to read lv {lv}', - command=' '.join(build_command) - ) + self.failure(err=err, rc=rc, msg=f"failed to read lv {lv}", command=" ".join(build_command)) lv_info = [i.strip() for i in stdout.splitlines()][1:] - _free_pe = [i for i in lv_info if i.startswith('LV Size')] + _free_pe = [i for i in lv_info if i.startswith("LV Size")] free_pe = _free_pe[0].split() return self._roundup(float(free_pe[-2])), free_pe[-1] - def _lvm_snapshot_create(self, source_lv, snapshot_name, - snapshot_size_gb=5): + def _lvm_snapshot_create(self, source_lv, snapshot_name, snapshot_size_gb=5): """Create an LVM snapshot.
:param source_lv: Name of lv to snapshot @@ -1260,30 +1173,22 @@ def _lvm_snapshot_create(self, source_lv, snapshot_name, if free_space < float(snapshot_size_gb): message = ( - f'Snapshot size [ {snapshot_size_gb} ] is > greater than [ {free_space} ] on volume group [ {vg} ]' - ) - self.failure( - error='Not enough space to create snapshot', - rc=2, - msg=message + f"Snapshot size [ {snapshot_size_gb} ] is greater than [ {free_space} ] on volume group [ {vg} ]" ) + self.failure(error="Not enough space to create snapshot", rc=2, msg=message) # Create LVM Snapshot build_command = [ - self.module.get_bin_path('lvcreate', True), + self.module.get_bin_path("lvcreate", True), "-n", snapshot_name, "-s", os.path.join(vg, source_lv), - f"-L{snapshot_size_gb}g" + f"-L{snapshot_size_gb}g", ] rc, stdout, err = self.module.run_command(build_command) if rc != 0: - self.failure( - err=err, - rc=rc, - msg=f'Failed to Create LVM snapshot {vg}/{source_lv} --> {snapshot_name}' - ) + self.failure(err=err, rc=rc, msg=f"Failed to create LVM snapshot {vg}/{source_lv} --> {snapshot_name}") def _lvm_lv_mount(self, lv_name, mount_point): """mount an lv. @@ -1297,17 +1202,13 @@ vg = self._get_lxc_vg() build_command = [ - self.module.get_bin_path('mount', True), + self.module.get_bin_path("mount", True), f"/dev/{vg}/{lv_name}", mount_point, ] rc, stdout, err = self.module.run_command(build_command) if rc != 0: - self.failure( - err=err, - rc=rc, - msg=f'failed to mountlvm lv {vg}/{lv_name} to {mount_point}' - ) + self.failure(err=err, rc=rc, msg=f"failed to mount lvm lv {vg}/{lv_name} to {mount_point}") def _create_tar(self, source_dir): """Create an archive of a given ``source_dir`` to ``output_path``. :param source_dir: Full path to the directory to be archived. :type source_dir: ``str`` """ - old_umask = os.umask(int('0077', 8)) + old_umask = os.umask(int("0077", 8)) - archive_path = self.module.params['archive_path'] + archive_path = self.module.params["archive_path"] if not os.path.isdir(archive_path): os.makedirs(archive_path) - archive_compression = self.module.params['archive_compression'] + archive_compression = self.module.params["archive_compression"] compression_type = LXC_COMPRESSION_MAP[archive_compression] # remove trailing / if present. archive_name = f"{os.path.join(archive_path, self.container_name)}.{compression_type['extension']}" build_command = [ - self.module.get_bin_path('tar', True), - f'--directory={os.path.realpath(source_dir)}', - compression_type['argument'], + self.module.get_bin_path("tar", True), + f"--directory={os.path.realpath(source_dir)}", + compression_type["argument"], archive_name, - '.'
+ ".", ] - rc, stdout, err = self.module.run_command( - build_command - ) + rc, stdout, err = self.module.run_command(build_command) os.umask(old_umask) if rc != 0: - self.failure( - err=err, - rc=rc, - msg='failed to create tar archive', - command=' '.join(build_command) - ) + self.failure(err=err, rc=rc, msg="failed to create tar archive", command=" ".join(build_command)) return archive_name @@ -1361,18 +1255,13 @@ def _lvm_lv_remove(self, lv_name): vg = self._get_lxc_vg() build_command = [ - self.module.get_bin_path('lvremove', True), + self.module.get_bin_path("lvremove", True), "-f", f"{vg}/{lv_name}", ] rc, stdout, err = self.module.run_command(build_command) if rc != 0: - self.failure( - err=err, - rc=rc, - msg=f'Failed to remove LVM LV {vg}/{lv_name}', - command=' '.join(build_command) - ) + self.failure(err=err, rc=rc, msg=f"Failed to remove LVM LV {vg}/{lv_name}", command=" ".join(build_command)) def _rsync_data(self, container_path, temp_dir): """Sync the container directory to the temp directory. @@ -1384,9 +1273,9 @@ def _rsync_data(self, container_path, temp_dir): """ # This loop is created to support overlayfs archives. This should # squash all of the layers into a single archive. - fs_paths = container_path.split(':') - if 'overlayfs' in fs_paths: - fs_paths.pop(fs_paths.index('overlayfs')) + fs_paths = container_path.split(":") + if "overlayfs" in fs_paths: + fs_paths.pop(fs_paths.index("overlayfs")) for fs_path in fs_paths: # Set the path to the container data @@ -1394,8 +1283,8 @@ def _rsync_data(self, container_path, temp_dir): # Run the sync command build_command = [ - self.module.get_bin_path('rsync', True), - '-aHAX', + self.module.get_bin_path("rsync", True), + "-aHAX", fs_path, temp_dir, ] @@ -1403,12 +1292,7 @@ def _rsync_data(self, container_path, temp_dir): build_command, ) if rc != 0: - self.failure( - err=err, - rc=rc, - msg='failed to perform archive', - command=' '.join(build_command) - ) + self.failure(err=err, rc=rc, msg="failed to perform archive", command=" ".join(build_command)) def _unmount(self, mount_point): """Unmount a file system. @@ -1418,17 +1302,12 @@ def _unmount(self, mount_point): """ build_command = [ - self.module.get_bin_path('umount', True), + self.module.get_bin_path("umount", True), mount_point, ] rc, stdout, err = self.module.run_command(build_command) if rc != 0: - self.failure( - err=err, - rc=rc, - msg=f'failed to unmount [ {mount_point} ]', - command=' '.join(build_command) - ) + self.failure(err=err, rc=rc, msg=f"failed to unmount [ {mount_point} ]", command=" ".join(build_command)) def _overlayfs_mount(self, lowerdir, upperdir, mount_point): """mount an lv. 
@@ -1442,10 +1321,12 @@ def _overlayfs_mount(self, lowerdir, upperdir, mount_point): """ build_command = [ - self.module.get_bin_path('mount', True), - '-t', 'overlayfs', - '-o', f'lowerdir={lowerdir},upperdir={upperdir}', - 'overlayfs', + self.module.get_bin_path("mount", True), + "-t", + "overlayfs", + "-o", + f"lowerdir={lowerdir},upperdir={upperdir}", + "overlayfs", mount_point, ] rc, stdout, err = self.module.run_command(build_command) @@ -1453,7 +1334,7 @@ def _overlayfs_mount(self, lowerdir, upperdir, mount_point): self.failure( err=err, rc=rc, - msg=f'failed to mount overlayfs:{lowerdir}:{upperdir} to {mount_point} -- Command: {build_command}' + msg=f"failed to mount overlayfs:{lowerdir}:{upperdir} to {mount_point} -- Command: {build_command}", ) def _container_create_tar(self): @@ -1478,24 +1359,24 @@ def _container_create_tar(self): work_dir = os.path.join(temp_dir, self.container_name) # LXC container rootfs - lxc_rootfs = self.container.get_config_item('lxc.rootfs') + lxc_rootfs = self.container.get_config_item("lxc.rootfs") # Test if the containers rootfs is a block device - block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev')) + block_backed = lxc_rootfs.startswith(os.path.join(os.sep, "dev")) # Test if the container is using overlayfs - overlayfs_backed = lxc_rootfs.startswith('overlayfs') + overlayfs_backed = lxc_rootfs.startswith("overlayfs") - mount_point = os.path.join(work_dir, 'rootfs') + mount_point = os.path.join(work_dir, "rootfs") # Set the snapshot name if needed - snapshot_name = f'{self.container_name}_lxc_snapshot' + snapshot_name = f"{self.container_name}_lxc_snapshot" container_state = self._get_state() try: # Ensure the original container is stopped or frozen - if container_state not in ['stopped', 'frozen']: - if container_state == 'running': + if container_state not in ["stopped", "frozen"]: + if container_state == "running": self.container.freeze() else: self.container.stop() @@ -1509,33 +1390,22 @@ def _container_create_tar(self): os.makedirs(mount_point) # Take snapshot - size, measurement = self._get_lv_size( - lv_name=self.container_name - ) + size, measurement = self._get_lv_size(lv_name=self.container_name) self._lvm_snapshot_create( - source_lv=self.container_name, - snapshot_name=snapshot_name, - snapshot_size_gb=size + source_lv=self.container_name, snapshot_name=snapshot_name, snapshot_size_gb=size ) # Mount snapshot - self._lvm_lv_mount( - lv_name=snapshot_name, - mount_point=mount_point - ) + self._lvm_lv_mount(lv_name=snapshot_name, mount_point=mount_point) else: self.failure( - err=f'snapshot [ {snapshot_name} ] already exists', + err=f"snapshot [ {snapshot_name} ] already exists", rc=1, - msg=f'The snapshot [ {snapshot_name} ] already exists. Please clean up old snapshot of containers before continuing.' + msg=f"The snapshot [ {snapshot_name} ] already exists. 
Please clean up old snapshots of containers before continuing.", ) elif overlayfs_backed: - lowerdir, upperdir = lxc_rootfs.split(':')[1:] - self._overlayfs_mount( - lowerdir=lowerdir, - upperdir=upperdir, - mount_point=mount_point - ) + lowerdir, upperdir = lxc_rootfs.split(":")[1:] + self._overlayfs_mount(lowerdir=lowerdir, upperdir=upperdir, mount_point=mount_point) # Set the state as changed and set a new fact self.state_change = True @@ -1550,8 +1420,8 @@ def _container_create_tar(self): self._lvm_lv_remove(snapshot_name) # Restore original state of container - if container_state == 'running': - if self._get_state() == 'frozen': + if container_state == "running": + if self._get_state() == "frozen": self.container.unfreeze() else: self.container.start() @@ -1562,9 +1432,9 @@ def check_count(self, count, method): if count > 1: self.failure( - error=f'Failed to {method} container', + error=f"Failed to {method} container", rc=1, - msg=f'The container [ {self.container_name} ] failed to {method}. Check to lxc is available and that the container is in a functional state.' + msg=f"The container [ {self.container_name} ] failed to {method}. Check that lxc is available and that the container is in a functional state.", ) def failure(self, **kwargs): @@ -1590,10 +1460,7 @@ def run(self): if self.clone_info: outcome.update(self.clone_info) - self.module.exit_json( - changed=self.state_change, - lxc_container=outcome - ) + self.module.exit_json(changed=self.state_change, lxc_container=outcome) def main(): @@ -1601,107 +1468,49 @@ module = AnsibleModule( argument_spec=dict( - name=dict( - type='str', - required=True - ), - template=dict( - type='str', - default='ubuntu' - ), - backing_store=dict( - type='str', - choices=list(LXC_BACKING_STORE.keys()), - default='dir' - ), - template_options=dict( - type='str' - ), + name=dict(type="str", required=True), + template=dict(type="str", default="ubuntu"), + backing_store=dict(type="str", choices=list(LXC_BACKING_STORE.keys()), default="dir"), + template_options=dict(type="str"), config=dict( - type='path', - ), - vg_name=dict( - type='str', - default='lxc' - ), - thinpool=dict( - type='str' - ), - fs_type=dict( - type='str', - default='ext4' - ), - fs_size=dict( - type='str', - default='5G' - ), - directory=dict( - type='path' - ), - zfs_root=dict( - type='str' - ), - lv_name=dict( - type='str' - ), - lxc_path=dict( - type='path' - ), - state=dict( - choices=list(LXC_ANSIBLE_STATES.keys()), - default='started' - ), - container_command=dict( - type='str' - ), - container_config=dict( - type='list', - elements='str' - ), - container_log=dict( - type='bool', - default=False - ), - container_log_level=dict( - choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i], - default='INFO' + type="path", ), + vg_name=dict(type="str", default="lxc"), + thinpool=dict(type="str"), + fs_type=dict(type="str", default="ext4"), + fs_size=dict(type="str", default="5G"), + directory=dict(type="path"), + zfs_root=dict(type="str"), + lv_name=dict(type="str"), + lxc_path=dict(type="path"), + state=dict(choices=list(LXC_ANSIBLE_STATES.keys()), default="started"), + container_command=dict(type="str"), + container_config=dict(type="list", elements="str"), + container_log=dict(type="bool", default=False), + container_log_level=dict(choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i], default="INFO"), clone_name=dict( - type='str', - ), - clone_snapshot=dict( - type='bool', - default='false' - ), - archive=dict( - type='bool', -
default=False + type="str", ), + clone_snapshot=dict(type="bool", default="false"), + archive=dict(type="bool", default=False), archive_path=dict( - type='path', + type="path", ), - archive_compression=dict( - choices=list(LXC_COMPRESSION_MAP.keys()), - default='gzip' - ) + archive_compression=dict(choices=list(LXC_COMPRESSION_MAP.keys()), default="gzip"), ), supports_check_mode=False, - required_if=([ - ('archive', True, ['archive_path']) - ]), + required_if=([("archive", True, ["archive_path"])]), ) if not HAS_LXC: - module.fail_json( - msg='The `lxc` module is not importable. Check the requirements.' - ) + module.fail_json(msg="The `lxc` module is not importable. Check the requirements.") - if not module.params['lv_name']: - module.params['lv_name'] = module.params['name'] + if not module.params["lv_name"]: + module.params["lv_name"] = module.params["name"] lxc_manage = LxcContainerManagement(module=module) lxc_manage.run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/lxca_cmms.py b/plugins/modules/lxca_cmms.py index 9078cd272ab..55995c77f5e 100644 --- a/plugins/modules/lxca_cmms.py +++ b/plugins/modules/lxca_cmms.py @@ -92,15 +92,20 @@ import traceback from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object +from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import ( + LXCA_COMMON_ARGS, + has_pylxca, + connection_object, +) + try: from pylxca import cmms except ImportError: pass -UUID_REQUIRED = 'UUID of device is required for cmms_by_uuid command.' -CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for cmms_by_chassis_uuid command.' +UUID_REQUIRED = "UUID of device is required for cmms_by_uuid command." +CHASSIS_UUID_REQUIRED = "UUID of chassis is required for cmms_by_chassis_uuid command." 
SUCCESS_MSG = "Success %s result" @@ -109,15 +114,15 @@ def _cmms(module, lxca_con): def _cmms_by_uuid(module, lxca_con): - if not module.params['uuid']: + if not module.params["uuid"]: module.fail_json(msg=UUID_REQUIRED) - return cmms(lxca_con, module.params['uuid']) + return cmms(lxca_con, module.params["uuid"]) def _cmms_by_chassis_uuid(module, lxca_con): - if not module.params['chassis']: + if not module.params["chassis"]: module.fail_json(msg=CHASSIS_UUID_REQUIRED) - return cmms(lxca_con, chassis=module.params['chassis']) + return cmms(lxca_con, chassis=module.params["chassis"]) def setup_module_object(): @@ -133,17 +138,16 @@ def setup_module_object(): FUNC_DICT = { - 'cmms': _cmms, - 'cmms_by_uuid': _cmms_by_uuid, - 'cmms_by_chassis_uuid': _cmms_by_chassis_uuid, + "cmms": _cmms, + "cmms_by_uuid": _cmms_by_uuid, + "cmms_by_chassis_uuid": _cmms_by_chassis_uuid, } INPUT_ARG_SPEC = dict( - command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid', - 'cmms_by_chassis_uuid']), + command_options=dict(default="cmms", choices=["cmms", "cmms_by_uuid", "cmms_by_chassis_uuid"]), uuid=dict(), - chassis=dict() + chassis=dict(), ) @@ -154,12 +158,10 @@ def execute_module(module): """ try: with connection_object(module) as lxca_con: - result = FUNC_DICT[module.params['command_options']](module, lxca_con) - module.exit_json(changed=False, - msg=SUCCESS_MSG % module.params['command_options'], - result=result) + result = FUNC_DICT[module.params["command_options"]](module, lxca_con) + module.exit_json(changed=False, msg=SUCCESS_MSG % module.params["command_options"], result=result) except Exception as exception: - error_msg = '; '.join((e) for e in exception.args) + error_msg = "; ".join((e) for e in exception.args) module.fail_json(msg=error_msg, exception=traceback.format_exc()) @@ -169,5 +171,5 @@ def main(): execute_module(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/lxca_nodes.py b/plugins/modules/lxca_nodes.py index 010f1896295..0cc40aa4a1d 100644 --- a/plugins/modules/lxca_nodes.py +++ b/plugins/modules/lxca_nodes.py @@ -111,15 +111,20 @@ import traceback from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object +from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import ( + LXCA_COMMON_ARGS, + has_pylxca, + connection_object, +) + try: from pylxca import nodes except ImportError: pass -UUID_REQUIRED = 'UUID of device is required for nodes_by_uuid command.' -CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for nodes_by_chassis_uuid command.' +UUID_REQUIRED = "UUID of device is required for nodes_by_uuid command." +CHASSIS_UUID_REQUIRED = "UUID of chassis is required for nodes_by_chassis_uuid command." 
SUCCESS_MSG = "Success %s result" @@ -128,23 +133,23 @@ def _nodes(module, lxca_con): def _nodes_by_uuid(module, lxca_con): - if not module.params['uuid']: + if not module.params["uuid"]: module.fail_json(msg=UUID_REQUIRED) - return nodes(lxca_con, module.params['uuid']) + return nodes(lxca_con, module.params["uuid"]) def _nodes_by_chassis_uuid(module, lxca_con): - if not module.params['chassis']: + if not module.params["chassis"]: module.fail_json(msg=CHASSIS_UUID_REQUIRED) - return nodes(lxca_con, chassis=module.params['chassis']) + return nodes(lxca_con, chassis=module.params["chassis"]) def _nodes_status_managed(module, lxca_con): - return nodes(lxca_con, status='managed') + return nodes(lxca_con, status="managed") def _nodes_status_unmanaged(module, lxca_con): - return nodes(lxca_con, status='unmanaged') + return nodes(lxca_con, status="unmanaged") def setup_module_object(): @@ -160,20 +165,21 @@ def setup_module_object(): FUNC_DICT = { - 'nodes': _nodes, - 'nodes_by_uuid': _nodes_by_uuid, - 'nodes_by_chassis_uuid': _nodes_by_chassis_uuid, - 'nodes_status_managed': _nodes_status_managed, - 'nodes_status_unmanaged': _nodes_status_unmanaged, + "nodes": _nodes, + "nodes_by_uuid": _nodes_by_uuid, + "nodes_by_chassis_uuid": _nodes_by_chassis_uuid, + "nodes_status_managed": _nodes_status_managed, + "nodes_status_unmanaged": _nodes_status_unmanaged, } INPUT_ARG_SPEC = dict( - command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid', - 'nodes_by_chassis_uuid', - 'nodes_status_managed', - 'nodes_status_unmanaged']), - uuid=dict(), chassis=dict() + command_options=dict( + default="nodes", + choices=["nodes", "nodes_by_uuid", "nodes_by_chassis_uuid", "nodes_status_managed", "nodes_status_unmanaged"], + ), + uuid=dict(), + chassis=dict(), ) @@ -184,12 +190,10 @@ def execute_module(module): """ try: with connection_object(module) as lxca_con: - result = FUNC_DICT[module.params['command_options']](module, lxca_con) - module.exit_json(changed=False, - msg=SUCCESS_MSG % module.params['command_options'], - result=result) + result = FUNC_DICT[module.params["command_options"]](module, lxca_con) + module.exit_json(changed=False, msg=SUCCESS_MSG % module.params["command_options"], result=result) except Exception as exception: - error_msg = '; '.join(exception.args) + error_msg = "; ".join(exception.args) module.fail_json(msg=error_msg, exception=traceback.format_exc()) @@ -199,5 +203,5 @@ def main(): execute_module(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/lxd_container.py b/plugins/modules/lxd_container.py index e62869d750f..b73485c182f 100644 --- a/plugins/modules/lxd_container.py +++ b/plugins/modules/lxd_container.py @@ -426,32 +426,30 @@ # LXD_ANSIBLE_STATES is a map of states that contain values of methods used # when a particular state is evoked. LXD_ANSIBLE_STATES = { - 'started': '_started', - 'stopped': '_stopped', - 'restarted': '_restarted', - 'absent': '_destroyed', - 'frozen': '_frozen', + "started": "_started", + "stopped": "_stopped", + "restarted": "_restarted", + "absent": "_destroyed", + "frozen": "_frozen", } # ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible # lxc_container module state parameter value. 
ANSIBLE_LXD_STATES = { - 'Running': 'started', - 'Stopped': 'stopped', - 'Frozen': 'frozen', + "Running": "started", + "Stopped": "stopped", + "Frozen": "frozen", } # ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint -ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket' +ANSIBLE_LXD_DEFAULT_URL = "unix:/var/lib/lxd/unix.socket" # CONFIG_PARAMS is a list of config attribute names. -CONFIG_PARAMS = [ - 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source', 'type' -] +CONFIG_PARAMS = ["architecture", "config", "devices", "ephemeral", "profiles", "source", "type"] # CONFIG_CREATION_PARAMS is a list of attribute names that are only applied # on instance creation. -CONFIG_CREATION_PARAMS = ['source', 'type'] +CONFIG_CREATION_PARAMS = ["source", "type"] class LXDContainerManagement: @@ -462,61 +460,58 @@ def __init__(self, module): :type module: ``object`` """ self.module = module - self.name = self.module.params['name'] - self.project = self.module.params['project'] + self.name = self.module.params["name"] + self.project = self.module.params["project"] self._build_config() - self.state = self.module.params['state'] + self.state = self.module.params["state"] - self.timeout = self.module.params['timeout'] - self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses'] - self.force_stop = self.module.params['force_stop'] + self.timeout = self.module.params["timeout"] + self.wait_for_ipv4_addresses = self.module.params["wait_for_ipv4_addresses"] + self.force_stop = self.module.params["force_stop"] self.addresses = None - self.target = self.module.params['target'] - self.wait_for_container = self.module.params['wait_for_container'] + self.target = self.module.params["target"] + self.wait_for_container = self.module.params["wait_for_container"] - self.type = self.module.params['type'] + self.type = self.module.params["type"] - self.key_file = self.module.params.get('client_key') + self.key_file = self.module.params.get("client_key") if self.key_file is None: self.key_file = f"{os.environ['HOME']}/.config/lxc/client.key" - self.cert_file = self.module.params.get('client_cert') + self.cert_file = self.module.params.get("client_cert") if self.cert_file is None: self.cert_file = f"{os.environ['HOME']}/.config/lxc/client.crt" self.debug = self.module._verbosity >= 4 try: - if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL: - self.url = self.module.params['url'] - elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')): - self.url = self.module.params['snap_url'] + if self.module.params["url"] != ANSIBLE_LXD_DEFAULT_URL: + self.url = self.module.params["url"] + elif os.path.exists(self.module.params["snap_url"].replace("unix:", "")): + self.url = self.module.params["snap_url"] else: - self.url = self.module.params['url'] + self.url = self.module.params["url"] except Exception as e: self.module.fail_json(msg=e.msg) try: - self.client = LXDClient( - self.url, key_file=self.key_file, cert_file=self.cert_file, - debug=self.debug - ) + self.client = LXDClient(self.url, key_file=self.key_file, cert_file=self.cert_file, debug=self.debug) except LXDClientException as e: self.module.fail_json(msg=e.msg) # LXD (3.19) Rest API provides instances endpoint, failback to containers and virtual-machines # https://documentation.ubuntu.com/lxd/en/latest/rest-api/#instances-containers-and-virtual-machines - self.api_endpoint = '/1.0/instances' - check_api_endpoint = self.client.do('GET', f'{self.api_endpoint}?project=', ok_error_codes=[404]) + 
self.api_endpoint = "/1.0/instances" + check_api_endpoint = self.client.do("GET", f"{self.api_endpoint}?project=", ok_error_codes=[404]) - if check_api_endpoint['error_code'] == 404: - if self.type == 'container': - self.api_endpoint = '/1.0/containers' - elif self.type == 'virtual-machine': - self.api_endpoint = '/1.0/virtual-machines' + if check_api_endpoint["error_code"] == 404: + if self.type == "container": + self.api_endpoint = "/1.0/containers" + elif self.type == "virtual-machine": + self.api_endpoint = "/1.0/virtual-machines" - self.trust_password = self.module.params.get('trust_password', None) + self.trust_password = self.module.params.get("trust_password", None) self.actions = [] - self.diff = {'before': {}, 'after': {}} + self.diff = {"before": {}, "after": {}} self.old_instance_json = {} self.old_sections = {} @@ -528,90 +523,83 @@ def _build_config(self): self.config[attr] = param_val def _get_instance_json(self): - url = f'{self.api_endpoint}/{self.name}' + url = f"{self.api_endpoint}/{self.name}" if self.project: - url = f'{url}?{urlencode(dict(project=self.project))}' - return self.client.do('GET', url, ok_error_codes=[404]) + url = f"{url}?{urlencode(dict(project=self.project))}" + return self.client.do("GET", url, ok_error_codes=[404]) def _get_instance_state_json(self): - url = f'{self.api_endpoint}/{self.name}/state' + url = f"{self.api_endpoint}/{self.name}/state" if self.project: - url = f'{url}?{urlencode(dict(project=self.project))}' - return self.client.do('GET', url, ok_error_codes=[404]) + url = f"{url}?{urlencode(dict(project=self.project))}" + return self.client.do("GET", url, ok_error_codes=[404]) @staticmethod def _instance_json_to_module_state(resp_json): - if resp_json['type'] == 'error': - return 'absent' - return ANSIBLE_LXD_STATES[resp_json['metadata']['status']] + if resp_json["type"] == "error": + return "absent" + return ANSIBLE_LXD_STATES[resp_json["metadata"]["status"]] def _change_state(self, action, force_stop=False): - url = f'{self.api_endpoint}/{self.name}/state' + url = f"{self.api_endpoint}/{self.name}/state" if self.project: - url = f'{url}?{urlencode(dict(project=self.project))}' - body_json = {'action': action, 'timeout': self.timeout} + url = f"{url}?{urlencode(dict(project=self.project))}" + body_json = {"action": action, "timeout": self.timeout} if force_stop: - body_json['force'] = True + body_json["force"] = True if not self.module.check_mode: - return self.client.do('PUT', url, body_json=body_json) + return self.client.do("PUT", url, body_json=body_json) def _create_instance(self): url = self.api_endpoint url_params = dict() if self.target: - url_params['target'] = self.target + url_params["target"] = self.target if self.project: - url_params['project'] = self.project + url_params["project"] = self.project if url_params: - url = f'{url}?{urlencode(url_params)}' + url = f"{url}?{urlencode(url_params)}" config = self.config.copy() - config['name'] = self.name + config["name"] = self.name if self.type not in self.api_endpoint: - config['type'] = self.type + config["type"] = self.type if not self.module.check_mode: - self.client.do('POST', url, config, wait_for_container=self.wait_for_container) - self.actions.append('create') + self.client.do("POST", url, config, wait_for_container=self.wait_for_container) + self.actions.append("create") def _start_instance(self): - self._change_state('start') - self.actions.append('start') + self._change_state("start") + self.actions.append("start") def _stop_instance(self): - 
self._change_state('stop', self.force_stop) - self.actions.append('stop') + self._change_state("stop", self.force_stop) + self.actions.append("stop") def _restart_instance(self): - self._change_state('restart', self.force_stop) - self.actions.append('restart') + self._change_state("restart", self.force_stop) + self.actions.append("restart") def _delete_instance(self): - url = f'{self.api_endpoint}/{self.name}' + url = f"{self.api_endpoint}/{self.name}" if self.project: - url = f'{url}?{urlencode(dict(project=self.project))}' + url = f"{url}?{urlencode(dict(project=self.project))}" if not self.module.check_mode: - self.client.do('DELETE', url) - self.actions.append('delete') + self.client.do("DELETE", url) + self.actions.append("delete") def _freeze_instance(self): - self._change_state('freeze') - self.actions.append('freeze') + self._change_state("freeze") + self.actions.append("freeze") def _unfreeze_instance(self): - self._change_state('unfreeze') - self.actions.append('unfreeze') + self._change_state("unfreeze") + self.actions.append("unfreeze") def _instance_ipv4_addresses(self, ignore_devices=None): - ignore_devices = ['lo'] if ignore_devices is None else ignore_devices - data = (self._get_instance_state_json() or {}).get('metadata', None) or {} - network = { - k: v - for k, v in (data.get('network') or {}).items() - if k not in ignore_devices - } - addresses = { - k: [a['address'] for a in v['addresses'] if a['family'] == 'inet'] - for k, v in network.items() - } + ignore_devices = ["lo"] if ignore_devices is None else ignore_devices + data = (self._get_instance_state_json() or {}).get("metadata", None) or {} + network = {k: v for k, v in (data.get("network") or {}).items() if k not in ignore_devices} + addresses = {k: [a["address"] for a in v["addresses"] if a["family"] == "inet"] for k, v in network.items()} return addresses @staticmethod @@ -628,17 +616,17 @@ def _get_addresses(self): self.addresses = addresses return except LXDClientException as e: - e.msg = 'timeout for getting IPv4 addresses' + e.msg = "timeout for getting IPv4 addresses" raise def _started(self): - if self.old_state == 'absent': + if self.old_state == "absent": self._create_instance() self._start_instance() else: - if self.old_state == 'frozen': + if self.old_state == "frozen": self._unfreeze_instance() - elif self.old_state == 'stopped': + elif self.old_state == "stopped": self._start_instance() if self._needs_to_apply_instance_configs(): self._apply_instance_configs() @@ -646,27 +634,27 @@ def _started(self): self._get_addresses() def _stopped(self): - if self.old_state == 'absent': + if self.old_state == "absent": self._create_instance() else: - if self.old_state == 'stopped': + if self.old_state == "stopped": if self._needs_to_apply_instance_configs(): self._start_instance() self._apply_instance_configs() self._stop_instance() else: - if self.old_state == 'frozen': + if self.old_state == "frozen": self._unfreeze_instance() if self._needs_to_apply_instance_configs(): self._apply_instance_configs() self._stop_instance() def _restarted(self): - if self.old_state == 'absent': + if self.old_state == "absent": self._create_instance() self._start_instance() else: - if self.old_state == 'frozen': + if self.old_state == "frozen": self._unfreeze_instance() if self._needs_to_apply_instance_configs(): self._apply_instance_configs() @@ -675,20 +663,20 @@ def _restarted(self): self._get_addresses() def _destroyed(self): - if self.old_state != 'absent': - if self.old_state == 'frozen': + if self.old_state != "absent": + 
if self.old_state == "frozen": self._unfreeze_instance() - if self.old_state != 'stopped': + if self.old_state != "stopped": self._stop_instance() self._delete_instance() def _frozen(self): - if self.old_state == 'absent': + if self.old_state == "absent": self._create_instance() self._start_instance() self._freeze_instance() else: - if self.old_state == 'stopped': + if self.old_state == "stopped": self._start_instance() if self._needs_to_apply_instance_configs(): self._apply_instance_configs() @@ -698,10 +686,10 @@ def _needs_to_change_instance_config(self, key): if key not in self.config: return False - if key == 'config': + if key == "config": # self.old_sections is already filtered for volatile keys if necessary old_configs = dict(self.old_sections.get(key, None) or {}) - for k, v in self.config['config'].items(): + for k, v in self.config["config"].items(): if k not in old_configs: return True if old_configs[k] != v: @@ -718,81 +706,85 @@ def _needs_to_apply_instance_configs(self): return False def _apply_instance_configs(self): - old_metadata = copy.deepcopy(self.old_instance_json).get('metadata', None) or {} + old_metadata = copy.deepcopy(self.old_instance_json).get("metadata", None) or {} body_json = {} for param in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS): if param in old_metadata: body_json[param] = old_metadata[param] if self._needs_to_change_instance_config(param): - if param == 'config': - body_json['config'] = body_json.get('config', None) or {} - for k, v in self.config['config'].items(): - body_json['config'][k] = v + if param == "config": + body_json["config"] = body_json.get("config", None) or {} + for k, v in self.config["config"].items(): + body_json["config"][k] = v else: body_json[param] = self.config[param] - self.diff['after']['instance'] = body_json - url = f'{self.api_endpoint}/{self.name}' + self.diff["after"]["instance"] = body_json + url = f"{self.api_endpoint}/{self.name}" if self.project: - url = f'{url}?{urlencode(dict(project=self.project))}' + url = f"{url}?{urlencode(dict(project=self.project))}" if not self.module.check_mode: - self.client.do('PUT', url, body_json=body_json) - self.actions.append('apply_instance_configs') + self.client.do("PUT", url, body_json=body_json) + self.actions.append("apply_instance_configs") def run(self): """Run the main method.""" def adjust_content(content): - return content if not isinstance(content, dict) else { - k: v for k, v in content.items() if not (self.ignore_volatile_options and k.startswith('volatile.')) - } + return ( + content + if not isinstance(content, dict) + else { + k: v for k, v in content.items() if not (self.ignore_volatile_options and k.startswith("volatile.")) + } + ) try: if self.trust_password is not None: self.client.authenticate(self.trust_password) - self.ignore_volatile_options = self.module.params.get('ignore_volatile_options') + self.ignore_volatile_options = self.module.params.get("ignore_volatile_options") self.old_instance_json = self._get_instance_json() self.old_sections = { section: adjust_content(content) - for section, content in (self.old_instance_json.get('metadata') or {}).items() + for section, content in (self.old_instance_json.get("metadata") or {}).items() if section in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS) } - self.diff['before']['instance'] = self.old_sections + self.diff["before"]["instance"] = self.old_sections # preliminary, will be overwritten in _apply_instance_configs() if called - self.diff['after']['instance'] = self.config + 
self.diff["after"]["instance"] = self.config self.old_state = self._instance_json_to_module_state(self.old_instance_json) - self.diff['before']['state'] = self.old_state - self.diff['after']['state'] = self.state + self.diff["before"]["state"] = self.old_state + self.diff["after"]["state"] = self.state action = getattr(self, LXD_ANSIBLE_STATES[self.state]) action() state_changed = len(self.actions) > 0 result_json = { - 'log_verbosity': self.module._verbosity, - 'changed': state_changed, - 'old_state': self.old_state, - 'actions': self.actions, - 'diff': self.diff, + "log_verbosity": self.module._verbosity, + "changed": state_changed, + "old_state": self.old_state, + "actions": self.actions, + "diff": self.diff, } if self.client.debug: - result_json['logs'] = self.client.logs + result_json["logs"] = self.client.logs if self.addresses is not None: - result_json['addresses'] = self.addresses + result_json["addresses"] = self.addresses self.module.exit_json(**result_json) except LXDClientException as e: state_changed = len(self.actions) > 0 fail_params = { - 'msg': e.msg, - 'changed': state_changed, - 'actions': self.actions, - 'diff': self.diff, + "msg": e.msg, + "changed": state_changed, + "actions": self.actions, + "diff": self.diff, } if self.client.debug: - fail_params['logs'] = e.kwargs['logs'] + fail_params["logs"] = e.kwargs["logs"] self.module.fail_json(**fail_params) @@ -802,80 +794,77 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict( - type='str', + type="str", required=True, ), project=dict( - type='str', + type="str", ), architecture=dict( - type='str', + type="str", ), config=dict( - type='dict', + type="dict", ), ignore_volatile_options=dict( - type='bool', + type="bool", default=False, ), devices=dict( - type='dict', + type="dict", ), ephemeral=dict( - type='bool', + type="bool", ), profiles=dict( - type='list', - elements='str', + type="list", + elements="str", ), source=dict( - type='dict', + type="dict", ), state=dict( choices=list(LXD_ANSIBLE_STATES.keys()), - default='started', + default="started", ), target=dict( - type='str', - ), - timeout=dict( - type='int', - default=30 + type="str", ), + timeout=dict(type="int", default=30), type=dict( - type='str', - default='container', - choices=['container', 'virtual-machine'], + type="str", + default="container", + choices=["container", "virtual-machine"], ), wait_for_container=dict( - type='bool', + type="bool", default=False, ), wait_for_ipv4_addresses=dict( - type='bool', + type="bool", default=False, ), force_stop=dict( - type='bool', + type="bool", default=False, ), url=dict( - type='str', + type="str", default=ANSIBLE_LXD_DEFAULT_URL, ), snap_url=dict( - type='str', - default='unix:/var/snap/lxd/common/lxd/unix.socket', + type="str", + default="unix:/var/snap/lxd/common/lxd/unix.socket", ), client_key=dict( - type='path', - aliases=['key_file'], + type="path", + aliases=["key_file"], ), client_cert=dict( - type='path', - aliases=['cert_file'], + type="path", + aliases=["cert_file"], ), - trust_password=dict(type='str', no_log=True), + trust_password=dict(type="str", no_log=True), ), supports_check_mode=True, ) @@ -884,5 +873,5 @@ def main(): lxd_manage.run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/lxd_profile.py b/plugins/modules/lxd_profile.py index 1eb0f07ac12..69e60d149e9 100644 --- a/plugins/modules/lxd_profile.py +++ b/plugins/modules/lxd_profile.py @@ -227,17 +227,13 @@ from ansible_collections.community.general.plugins.module_utils.lxd import 
LXDClient, LXDClientException # ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint -ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket' +ANSIBLE_LXD_DEFAULT_URL = "unix:/var/lib/lxd/unix.socket" # PROFILE_STATES is a list for states supported -PROFILES_STATES = [ - 'present', 'absent' -] +PROFILES_STATES = ["present", "absent"] # CONFIG_PARAMS is a list of config attribute names. -CONFIG_PARAMS = [ - 'config', 'description', 'devices' -] +CONFIG_PARAMS = ["config", "description", "devices"] class LXDProfileManagement: @@ -248,38 +244,35 @@ def __init__(self, module): :type module: ``object`` """ self.module = module - self.name = self.module.params['name'] - self.project = self.module.params['project'] + self.name = self.module.params["name"] + self.project = self.module.params["project"] self._build_config() - self.state = self.module.params['state'] - self.new_name = self.module.params.get('new_name', None) + self.state = self.module.params["state"] + self.new_name = self.module.params.get("new_name", None) - self.key_file = self.module.params.get('client_key') + self.key_file = self.module.params.get("client_key") if self.key_file is None: self.key_file = f"{os.environ['HOME']}/.config/lxc/client.key" - self.cert_file = self.module.params.get('client_cert') + self.cert_file = self.module.params.get("client_cert") if self.cert_file is None: self.cert_file = f"{os.environ['HOME']}/.config/lxc/client.crt" self.debug = self.module._verbosity >= 4 try: - if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL: - self.url = self.module.params['url'] - elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')): - self.url = self.module.params['snap_url'] + if self.module.params["url"] != ANSIBLE_LXD_DEFAULT_URL: + self.url = self.module.params["url"] + elif os.path.exists(self.module.params["snap_url"].replace("unix:", "")): + self.url = self.module.params["snap_url"] else: - self.url = self.module.params['url'] + self.url = self.module.params["url"] except Exception as e: self.module.fail_json(msg=e.msg) try: - self.client = LXDClient( - self.url, key_file=self.key_file, cert_file=self.cert_file, - debug=self.debug - ) + self.client = LXDClient(self.url, key_file=self.key_file, cert_file=self.cert_file, debug=self.debug) except LXDClientException as e: self.module.fail_json(msg=e.msg) - self.trust_password = self.module.params.get('trust_password', None) + self.trust_password = self.module.params.get("trust_password", None) self.actions = [] def _build_config(self): @@ -290,69 +283,71 @@ def _build_config(self): self.config[attr] = param_val def _get_profile_json(self): - url = f'/1.0/profiles/{self.name}' + url = f"/1.0/profiles/{self.name}" if self.project: - url = f'{url}?{urlencode(dict(project=self.project))}' - return self.client.do('GET', url, ok_error_codes=[404]) + url = f"{url}?{urlencode(dict(project=self.project))}" + return self.client.do("GET", url, ok_error_codes=[404]) @staticmethod def _profile_json_to_module_state(resp_json): - if resp_json['type'] == 'error': - return 'absent' - return 'present' + if resp_json["type"] == "error": + return "absent" + return "present" def _update_profile(self): - if self.state == 'present': - if self.old_state == 'absent': + if self.state == "present": + if self.old_state == "absent": if self.new_name is None: self._create_profile() else: self.module.fail_json( - msg='new_name must not be set when the profile does not exist and the state is present', - changed=False) + msg="new_name must not be set when the 
profile does not exist and the state is present", + changed=False, + ) else: if self.new_name is not None and self.new_name != self.name: self._rename_profile() if self._needs_to_apply_profile_configs(): self._apply_profile_configs() - elif self.state == 'absent': - if self.old_state == 'present': + elif self.state == "absent": + if self.old_state == "present": if self.new_name is None: self._delete_profile() else: self.module.fail_json( - msg='new_name must not be set when the profile exists and the specified state is absent', - changed=False) + msg="new_name must not be set when the profile exists and the specified state is absent", + changed=False, + ) def _create_profile(self): - url = '/1.0/profiles' + url = "/1.0/profiles" if self.project: - url = f'{url}?{urlencode(dict(project=self.project))}' + url = f"{url}?{urlencode(dict(project=self.project))}" config = self.config.copy() - config['name'] = self.name - self.client.do('POST', url, config) - self.actions.append('create') + config["name"] = self.name + self.client.do("POST", url, config) + self.actions.append("create") def _rename_profile(self): - url = f'/1.0/profiles/{self.name}' + url = f"/1.0/profiles/{self.name}" if self.project: - url = f'{url}?{urlencode(dict(project=self.project))}' - config = {'name': self.new_name} - self.client.do('POST', url, config) - self.actions.append('rename') + url = f"{url}?{urlencode(dict(project=self.project))}" + config = {"name": self.new_name} + self.client.do("POST", url, config) + self.actions.append("rename") self.name = self.new_name def _needs_to_change_profile_config(self, key): if key not in self.config: return False - old_configs = self.old_profile_json['metadata'].get(key, None) + old_configs = self.old_profile_json["metadata"].get(key, None) return self.config[key] != old_configs def _needs_to_apply_profile_configs(self): return ( - self._needs_to_change_profile_config('config') or - self._needs_to_change_profile_config('description') or - self._needs_to_change_profile_config('devices') + self._needs_to_change_profile_config("config") + or self._needs_to_change_profile_config("description") + or self._needs_to_change_profile_config("devices") ) def _merge_dicts(self, source, destination): @@ -380,7 +375,7 @@ def _merge_dicts(self, source, destination): return destination def _merge_config(self, config): - """ merge profile + """merge profile Merge Configuration of the present profile and the new desired configitems @@ -393,16 +388,16 @@ def _merge_config(self, config): Returns: dict(config): new config""" # merge or copy the sections from the existing profile to 'config' - for item in ['config', 'description', 'devices', 'name', 'used_by']: + for item in ["config", "description", "devices", "name", "used_by"]: if item in config: - config[item] = self._merge_dicts(config['metadata'][item], config[item]) + config[item] = self._merge_dicts(config["metadata"][item], config[item]) else: - config[item] = config['metadata'][item] + config[item] = config["metadata"][item] # merge or copy the sections from the ansible-task to 'config' return self._merge_dicts(self.config, config) def _generate_new_config(self, config): - """ rebuild profile + """rebuild profile Rebuild the Profile by the configuration provided in the play. Existing configurations are discarded. 
@@ -422,7 +417,7 @@ def _generate_new_config(self, config): return config def _apply_profile_configs(self): - """ Selection of the procedure: rebuild or merge + """Selection of the procedure: rebuild or merge The standard behavior is that all information not contained in the play is discarded. @@ -439,24 +434,24 @@ def _apply_profile_configs(self): Returns: None""" config = self.old_profile_json.copy() - if self.module.params['merge_profile']: + if self.module.params["merge_profile"]: config = self._merge_config(config) else: config = self._generate_new_config(config) # upload config to lxd - url = f'/1.0/profiles/{self.name}' + url = f"/1.0/profiles/{self.name}" if self.project: - url = f'{url}?{urlencode(dict(project=self.project))}' - self.client.do('PUT', url, config) - self.actions.append('apply_profile_configs') + url = f"{url}?{urlencode(dict(project=self.project))}" + self.client.do("PUT", url, config) + self.actions.append("apply_profile_configs") def _delete_profile(self): - url = f'/1.0/profiles/{self.name}' + url = f"/1.0/profiles/{self.name}" if self.project: - url = f'{url}?{urlencode(dict(project=self.project))}' - self.client.do('DELETE', url) - self.actions.append('delete') + url = f"{url}?{urlencode(dict(project=self.project))}" + self.client.do("DELETE", url) + self.actions.append("delete") def run(self): """Run the main method.""" @@ -470,23 +465,15 @@ def run(self): self._update_profile() state_changed = len(self.actions) > 0 - result_json = { - 'changed': state_changed, - 'old_state': self.old_state, - 'actions': self.actions - } + result_json = {"changed": state_changed, "old_state": self.old_state, "actions": self.actions} if self.client.debug: - result_json['logs'] = self.client.logs + result_json["logs"] = self.client.logs self.module.exit_json(**result_json) except LXDClientException as e: state_changed = len(self.actions) > 0 - fail_params = { - 'msg': e.msg, - 'changed': state_changed, - 'actions': self.actions - } + fail_params = {"msg": e.msg, "changed": state_changed, "actions": self.actions} if self.client.debug: - fail_params['logs'] = e.kwargs['logs'] + fail_params["logs"] = e.kwargs["logs"] self.module.fail_json(**fail_params) @@ -495,50 +482,29 @@ def main(): module = AnsibleModule( argument_spec=dict( - name=dict( - type='str', - required=True - ), + name=dict(type="str", required=True), project=dict( - type='str', + type="str", ), new_name=dict( - type='str', + type="str", ), config=dict( - type='dict', + type="dict", ), description=dict( - type='str', + type="str", ), devices=dict( - type='dict', - ), - merge_profile=dict( - type='bool', - default=False - ), - state=dict( - choices=PROFILES_STATES, - default='present' - ), - url=dict( - type='str', - default=ANSIBLE_LXD_DEFAULT_URL - ), - snap_url=dict( - type='str', - default='unix:/var/snap/lxd/common/lxd/unix.socket' - ), - client_key=dict( - type='path', - aliases=['key_file'] - ), - client_cert=dict( - type='path', - aliases=['cert_file'] + type="dict", ), - trust_password=dict(type='str', no_log=True) + merge_profile=dict(type="bool", default=False), + state=dict(choices=PROFILES_STATES, default="present"), + url=dict(type="str", default=ANSIBLE_LXD_DEFAULT_URL), + snap_url=dict(type="str", default="unix:/var/snap/lxd/common/lxd/unix.socket"), + client_key=dict(type="path", aliases=["key_file"]), + client_cert=dict(type="path", aliases=["cert_file"]), + trust_password=dict(type="str", no_log=True), ), supports_check_mode=False, ) @@ -547,5 +513,5 @@ def main(): lxd_manage.run() -if __name__ == 
'__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/lxd_project.py b/plugins/modules/lxd_project.py index f5013b73408..a592cb45a64 100644 --- a/plugins/modules/lxd_project.py +++ b/plugins/modules/lxd_project.py @@ -177,23 +177,22 @@ """ from ansible_collections.community.general.plugins.module_utils.lxd import ( - LXDClient, LXDClientException, default_key_file, default_cert_file + LXDClient, + LXDClientException, + default_key_file, + default_cert_file, ) from ansible.module_utils.basic import AnsibleModule import os # ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint -ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket' +ANSIBLE_LXD_DEFAULT_URL = "unix:/var/lib/lxd/unix.socket" # PROJECTS_STATES is a list for states supported -PROJECTS_STATES = [ - 'present', 'absent' -] +PROJECTS_STATES = ["present", "absent"] # CONFIG_PARAMS is a list of config attribute names. -CONFIG_PARAMS = [ - 'config', 'description' -] +CONFIG_PARAMS = ["config", "description"] class LXDProjectManagement: @@ -204,37 +203,34 @@ def __init__(self, module): :type module: ``object`` """ self.module = module - self.name = self.module.params['name'] + self.name = self.module.params["name"] self._build_config() - self.state = self.module.params['state'] - self.new_name = self.module.params.get('new_name', None) + self.state = self.module.params["state"] + self.new_name = self.module.params.get("new_name", None) - self.key_file = self.module.params.get('client_key') + self.key_file = self.module.params.get("client_key") if self.key_file is None: self.key_file = default_key_file() - self.cert_file = self.module.params.get('client_cert') + self.cert_file = self.module.params.get("client_cert") if self.cert_file is None: self.cert_file = default_cert_file() self.debug = self.module._verbosity >= 4 try: - if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL: - self.url = self.module.params['url'] - elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')): - self.url = self.module.params['snap_url'] + if self.module.params["url"] != ANSIBLE_LXD_DEFAULT_URL: + self.url = self.module.params["url"] + elif os.path.exists(self.module.params["snap_url"].replace("unix:", "")): + self.url = self.module.params["snap_url"] else: - self.url = self.module.params['url'] + self.url = self.module.params["url"] except Exception as e: self.module.fail_json(msg=e.msg) try: - self.client = LXDClient( - self.url, key_file=self.key_file, cert_file=self.cert_file, - debug=self.debug - ) + self.client = LXDClient(self.url, key_file=self.key_file, cert_file=self.cert_file, debug=self.debug) except LXDClientException as e: self.module.fail_json(msg=e.msg) - self.trust_password = self.module.params.get('trust_password', None) + self.trust_password = self.module.params.get("trust_password", None) self.actions = [] def _build_config(self): @@ -245,66 +241,62 @@ def _build_config(self): self.config[attr] = param_val def _get_project_json(self): - return self.client.do( - 'GET', f'/1.0/projects/{self.name}', - ok_error_codes=[404] - ) + return self.client.do("GET", f"/1.0/projects/{self.name}", ok_error_codes=[404]) @staticmethod def _project_json_to_module_state(resp_json): - if resp_json['type'] == 'error': - return 'absent' - return 'present' + if resp_json["type"] == "error": + return "absent" + return "present" def _update_project(self): - if self.state == 'present': - if self.old_state == 'absent': + if self.state == "present": + if self.old_state == "absent": if self.new_name is None: 
self._create_project() else: self.module.fail_json( - msg='new_name must not be set when the project does not exist and the state is present', - changed=False) + msg="new_name must not be set when the project does not exist and the state is present", + changed=False, + ) else: if self.new_name is not None and self.new_name != self.name: self._rename_project() if self._needs_to_apply_project_configs(): self._apply_project_configs() - elif self.state == 'absent': - if self.old_state == 'present': + elif self.state == "absent": + if self.old_state == "present": if self.new_name is None: self._delete_project() else: self.module.fail_json( - msg='new_name must not be set when the project exists and the specified state is absent', - changed=False) + msg="new_name must not be set when the project exists and the specified state is absent", + changed=False, + ) def _create_project(self): config = self.config.copy() - config['name'] = self.name - self.client.do('POST', '/1.0/projects', config) - self.actions.append('create') + config["name"] = self.name + self.client.do("POST", "/1.0/projects", config) + self.actions.append("create") def _rename_project(self): - config = {'name': self.new_name} - self.client.do('POST', f'/1.0/projects/{self.name}', config) - self.actions.append('rename') + config = {"name": self.new_name} + self.client.do("POST", f"/1.0/projects/{self.name}", config) + self.actions.append("rename") self.name = self.new_name def _needs_to_change_project_config(self, key): if key not in self.config: return False - old_configs = self.old_project_json['metadata'].get(key, None) + old_configs = self.old_project_json["metadata"].get(key, None) return self.config[key] != old_configs def _needs_to_apply_project_configs(self): - return ( - self._needs_to_change_project_config('config') or - self._needs_to_change_project_config('description') - ) + return self._needs_to_change_project_config("config") or self._needs_to_change_project_config("description") def _merge_dicts(self, source, destination): - """ Return a new dict that merge two dict, + """Return a new dict that merges two dicts, with values in the source dict overwriting those in the destination dict Args: @@ -327,7 +319,7 @@ def _apply_project_configs(self): return result def _apply_project_configs(self): - """ Selection of the procedure: rebuild or merge + """Selection of the procedure: rebuild or merge The standard behavior is that all information not contained in the play is discarded.
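To make the merge behavior described in the _merge_dicts docstring concrete, here is a minimal illustration (a sketch only: the data is hypothetical, and it assumes the helper recurses into nested dicts while letting values from source win over values in destination):

    # Hypothetical example: merging a play's values over an existing config.
    source = {"description": "from play", "config": {"features.images": "true"}}
    destination = {"description": "old", "config": {"features.images": "false", "features.profiles": "true"}}
    # Merging source into destination keeps "features.profiles" from the old
    # config and takes "description" and "features.images" from the play:
    # {"description": "from play",
    #  "config": {"features.images": "true", "features.profiles": "true"}}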
@@ -344,11 +336,11 @@ def _apply_project_configs(self): Returns: None""" old_config = dict() - old_metadata = self.old_project_json['metadata'].copy() + old_metadata = self.old_project_json["metadata"].copy() for attr in CONFIG_PARAMS: old_config[attr] = old_metadata[attr] - if self.module.params['merge_project']: + if self.module.params["merge_project"]: config = self._merge_dicts(self.config, old_config) if config == old_config: # no need to call api if merged config is the same @@ -357,12 +349,12 @@ def _apply_project_configs(self): else: config = self.config.copy() # upload config to lxd - self.client.do('PUT', f'/1.0/projects/{self.name}', config) - self.actions.append('apply_projects_configs') + self.client.do("PUT", f"/1.0/projects/{self.name}", config) + self.actions.append("apply_projects_configs") def _delete_project(self): - self.client.do('DELETE', f'/1.0/projects/{self.name}') - self.actions.append('delete') + self.client.do("DELETE", f"/1.0/projects/{self.name}") + self.actions.append("delete") def run(self): """Run the main method.""" @@ -372,28 +364,19 @@ def run(self): self.client.authenticate(self.trust_password) self.old_project_json = self._get_project_json() - self.old_state = self._project_json_to_module_state( - self.old_project_json) + self.old_state = self._project_json_to_module_state(self.old_project_json) self._update_project() state_changed = len(self.actions) > 0 - result_json = { - 'changed': state_changed, - 'old_state': self.old_state, - 'actions': self.actions - } + result_json = {"changed": state_changed, "old_state": self.old_state, "actions": self.actions} if self.client.debug: - result_json['logs'] = self.client.logs + result_json["logs"] = self.client.logs self.module.exit_json(**result_json) except LXDClientException as e: state_changed = len(self.actions) > 0 - fail_params = { - 'msg': e.msg, - 'changed': state_changed, - 'actions': self.actions - } + fail_params = {"msg": e.msg, "changed": state_changed, "actions": self.actions} if self.client.debug: - fail_params['logs'] = e.kwargs['logs'] + fail_params["logs"] = e.kwargs["logs"] self.module.fail_json(**fail_params) @@ -402,44 +385,23 @@ def main(): module = AnsibleModule( argument_spec=dict( - name=dict( - type='str', - required=True - ), + name=dict(type="str", required=True), new_name=dict( - type='str', + type="str", ), config=dict( - type='dict', + type="dict", ), description=dict( - type='str', - ), - merge_project=dict( - type='bool', - default=False - ), - state=dict( - choices=PROJECTS_STATES, - default='present' - ), - url=dict( - type='str', - default=ANSIBLE_LXD_DEFAULT_URL - ), - snap_url=dict( - type='str', - default='unix:/var/snap/lxd/common/lxd/unix.socket' - ), - client_key=dict( - type='path', - aliases=['key_file'] - ), - client_cert=dict( - type='path', - aliases=['cert_file'] + type="str", ), - trust_password=dict(type='str', no_log=True) + merge_project=dict(type="bool", default=False), + state=dict(choices=PROJECTS_STATES, default="present"), + url=dict(type="str", default=ANSIBLE_LXD_DEFAULT_URL), + snap_url=dict(type="str", default="unix:/var/snap/lxd/common/lxd/unix.socket"), + client_key=dict(type="path", aliases=["key_file"]), + client_cert=dict(type="path", aliases=["cert_file"]), + trust_password=dict(type="str", no_log=True), ), supports_check_mode=False, ) @@ -448,5 +410,5 @@ def main(): lxd_manage.run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/macports.py b/plugins/modules/macports.py index 
fce5e51330f..0294b672b4d 100644 --- a/plugins/modules/macports.py +++ b/plugins/modules/macports.py @@ -107,15 +107,15 @@ def selfupdate(module, port_path): - """ Update Macports and the ports tree. """ + """Update Macports and the ports tree.""" rc, out, err = module.run_command([port_path, "-v", "selfupdate"]) if rc == 0: updated = any( - re.search(r'Total number of ports parsed:\s+[^0]', s.strip()) or - re.search(r'Installing new Macports release', s.strip()) - for s in out.split('\n') + re.search(r"Total number of ports parsed:\s+[^0]", s.strip()) + or re.search(r"Installing new Macports release", s.strip()) + for s in out.split("\n") if s ) if updated: @@ -131,7 +131,7 @@ def selfupdate(module, port_path): def upgrade(module, port_path): - """ Upgrade outdated ports. """ + """Upgrade outdated ports.""" rc, out, err = module.run_command([port_path, "upgrade", "outdated"]) @@ -149,10 +149,9 @@ def upgrade(module, port_path): def query_port(module, port_path, name, state="present"): - """ Returns whether a port is installed or not. """ + """Returns whether a port is installed or not.""" if state == "present": - rc, out, err = module.run_command([port_path, "-q", "installed", name]) if rc == 0 and out.strip().startswith(f"{name} "): @@ -161,7 +160,6 @@ def query_port(module, port_path, name, state="present"): return False elif state == "active": - rc, out, err = module.run_command([port_path, "-q", "installed", name]) if rc == 0 and "(active)" in out: @@ -171,7 +169,7 @@ def query_port(module, port_path, name, state="present"): def remove_ports(module, port_path, ports, stdout, stderr): - """ Uninstalls one or more ports if installed. """ + """Uninstalls one or more ports if installed.""" remove_c = 0 # Using a for loop in case of error, we can report the port that failed @@ -189,14 +187,13 @@ def remove_ports(module, port_path, ports, stdout, stderr): remove_c += 1 if remove_c > 0: - module.exit_json(changed=True, msg=f"Removed {remove_c} port(s)", stdout=stdout, stderr=stderr) module.exit_json(changed=False, msg="Port(s) already absent", stdout=stdout, stderr=stderr) def install_ports(module, port_path, ports, variant, stdout, stderr): - """ Installs one or more ports if not already installed. """ + """Installs one or more ports if not already installed.""" install_c = 0 @@ -219,7 +216,7 @@ def install_ports(module, port_path, ports, variant, stdout, stderr): def activate_ports(module, port_path, ports, stdout, stderr): - """ Activate a port if it is inactive. """ + """Activate a port if it is inactive.""" activate_c = 0 @@ -246,7 +243,7 @@ def activate_ports(module, port_path, ports, stdout, stderr): def deactivate_ports(module, port_path, ports, stdout, stderr): - """ Deactivate a port if it is active. 
""" + """Deactivate a port if it is active.""" deactivated_c = 0 @@ -274,18 +271,18 @@ def deactivate_ports(module, port_path, ports, stdout, stderr): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='list', elements='str', aliases=["port"]), - selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type='bool'), + name=dict(type="list", elements="str", aliases=["port"]), + selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type="bool"), state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]), - upgrade=dict(default=False, type='bool'), - variant=dict(aliases=["variants"], type='str') + upgrade=dict(default=False, type="bool"), + variant=dict(aliases=["variants"], type="str"), ) ) stdout = "" stderr = "" - port_path = module.get_bin_path('port', True, ['/opt/local/bin']) + port_path = module.get_bin_path("port", True, ["/opt/local/bin"]) p = module.params @@ -320,5 +317,5 @@ def main(): deactivate_ports(module, port_path, pkgs, stdout, stderr) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/mail.py b/plugins/modules/mail.py index 6b1a7c1a695..08ce8e51306 100644 --- a/plugins/modules/mail.py +++ b/plugins/modules/mail.py @@ -231,49 +231,48 @@ def main(): - module = AnsibleModule( argument_spec=dict( - username=dict(type='str'), - password=dict(type='str', no_log=True), - host=dict(type='str', default='localhost'), - port=dict(type='int', default=25), - ehlohost=dict(type='str'), - sender=dict(type='str', default='root', aliases=['from']), - to=dict(type='list', elements='str', default=['root'], aliases=['recipients']), - cc=dict(type='list', elements='str', default=[]), - bcc=dict(type='list', elements='str', default=[]), - subject=dict(type='str', required=True, aliases=['msg']), - body=dict(type='str'), - attach=dict(type='list', elements='path', default=[]), - headers=dict(type='list', elements='str', default=[]), - charset=dict(type='str', default='utf-8'), - subtype=dict(type='str', default='plain', choices=['html', 'plain']), - secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']), - timeout=dict(type='int', default=20), - message_id_domain=dict(type='str', default='ansible'), + username=dict(type="str"), + password=dict(type="str", no_log=True), + host=dict(type="str", default="localhost"), + port=dict(type="int", default=25), + ehlohost=dict(type="str"), + sender=dict(type="str", default="root", aliases=["from"]), + to=dict(type="list", elements="str", default=["root"], aliases=["recipients"]), + cc=dict(type="list", elements="str", default=[]), + bcc=dict(type="list", elements="str", default=[]), + subject=dict(type="str", required=True, aliases=["msg"]), + body=dict(type="str"), + attach=dict(type="list", elements="path", default=[]), + headers=dict(type="list", elements="str", default=[]), + charset=dict(type="str", default="utf-8"), + subtype=dict(type="str", default="plain", choices=["html", "plain"]), + secure=dict(type="str", default="try", choices=["always", "never", "starttls", "try"]), + timeout=dict(type="int", default=20), + message_id_domain=dict(type="str", default="ansible"), ), - required_together=[['password', 'username']], + required_together=[["password", "username"]], ) - username = module.params.get('username') - password = module.params.get('password') - host = module.params.get('host') - port = module.params.get('port') - local_hostname = module.params.get('ehlohost') - 
sender = module.params.get('sender') - recipients = module.params.get('to') - copies = module.params.get('cc') - blindcopies = module.params.get('bcc') - subject = module.params.get('subject') - body = module.params.get('body') - attach_files = module.params.get('attach') - headers = module.params.get('headers') - charset = module.params.get('charset') - subtype = module.params.get('subtype') - secure = module.params.get('secure') - timeout = module.params.get('timeout') - message_id_domain = module.params['message_id_domain'] + username = module.params.get("username") + password = module.params.get("password") + host = module.params.get("host") + port = module.params.get("port") + local_hostname = module.params.get("ehlohost") + sender = module.params.get("sender") + recipients = module.params.get("to") + copies = module.params.get("cc") + blindcopies = module.params.get("bcc") + subject = module.params.get("subject") + body = module.params.get("body") + attach_files = module.params.get("attach") + headers = module.params.get("headers") + charset = module.params.get("charset") + subtype = module.params.get("subtype") + secure = module.params.get("secure") + timeout = module.params.get("timeout") + message_id_domain = module.params["message_id_domain"] code = 0 secure_state = False @@ -283,14 +282,18 @@ def main(): body = subject try: - if secure != 'never': + if secure != "never": try: smtp = smtplib.SMTP_SSL(host=host, port=port, local_hostname=local_hostname, timeout=timeout) code, smtpmessage = smtp.connect(host, port) secure_state = True except ssl.SSLError as e: - if secure == 'always': - module.fail_json(rc=1, msg=f'Unable to start an encrypted session to {host}:{port}: {to_native(e)}', exception=traceback.format_exc()) + if secure == "always": + module.fail_json( + rc=1, + msg=f"Unable to start an encrypted session to {host}:{port}: {to_native(e)}", + exception=traceback.format_exc(), + ) except Exception: pass @@ -299,78 +302,88 @@ def main(): code, smtpmessage = smtp.connect(host, port) except smtplib.SMTPException as e: - module.fail_json(rc=1, msg=f'Unable to Connect {host}:{port}: {to_native(e)}', exception=traceback.format_exc()) + module.fail_json(rc=1, msg=f"Unable to connect to {host}:{port}: {to_native(e)}", exception=traceback.format_exc()) try: smtp.ehlo() except smtplib.SMTPException as e: - module.fail_json(rc=1, msg=f'Helo failed for host {host}:{port}: {to_native(e)}', exception=traceback.format_exc()) + module.fail_json( + rc=1, msg=f"Helo failed for host {host}:{port}: {to_native(e)}", exception=traceback.format_exc() + ) if int(code) > 0: - if not secure_state and secure in ('starttls', 'try'): - if smtp.has_extn('STARTTLS'): + if not secure_state and secure in ("starttls", "try"): + if smtp.has_extn("STARTTLS"): try: smtp.starttls() secure_state = True except smtplib.SMTPException as e: - module.fail_json(rc=1, msg=f'Unable to start an encrypted session to {host}:{port}: {e}', exception=traceback.format_exc()) + module.fail_json( + rc=1, + msg=f"Unable to start an encrypted session to {host}:{port}: {e}", + exception=traceback.format_exc(), + ) try: smtp.ehlo() except smtplib.SMTPException as e: - module.fail_json(rc=1, msg=f'Helo failed for host {host}:{port}: {e}', exception=traceback.format_exc()) + module.fail_json( + rc=1, msg=f"Helo failed for host {host}:{port}: {e}", exception=traceback.format_exc() + ) else: - if secure == 'starttls': - module.fail_json(rc=1, msg=f'StartTLS is not offered on server {host}:{port}') + if secure == "starttls": +
module.fail_json(rc=1, msg=f"StartTLS is not offered on server {host}:{port}") if username and password: - if smtp.has_extn('AUTH'): + if smtp.has_extn("AUTH"): try: smtp.login(username, password) except smtplib.SMTPAuthenticationError: - module.fail_json(rc=1, msg=f'Authentication to {host}:{port} failed, please check your username and/or password') + module.fail_json( + rc=1, msg=f"Authentication to {host}:{port} failed, please check your username and/or password" + ) except smtplib.SMTPException: - module.fail_json(rc=1, msg=f'No Suitable authentication method was found on {host}:{port}') + module.fail_json(rc=1, msg=f"No Suitable authentication method was found on {host}:{port}") else: module.fail_json(rc=1, msg=f"No Authentication on the server at {host}:{port}") if not secure_state and (username and password): - module.warn('Username and Password was sent without encryption') + module.warn("Username and Password was sent without encryption") msg = MIMEMultipart(_charset=charset) - msg['From'] = formataddr((sender_phrase, sender_addr)) - msg['Date'] = formatdate(localtime=True) - msg['Subject'] = Header(subject, charset) - msg['Message-ID'] = make_msgid(domain=message_id_domain) + msg["From"] = formataddr((sender_phrase, sender_addr)) + msg["Date"] = formatdate(localtime=True) + msg["Subject"] = Header(subject, charset) + msg["Message-ID"] = make_msgid(domain=message_id_domain) msg.preamble = "Multipart message" for header in headers: # NOTE: Backward compatible with old syntax using '|' as delimiter - for hdr in [x.strip() for x in header.split('|')]: + for hdr in [x.strip() for x in header.split("|")]: try: - h_key, h_val = hdr.split('=', 1) + h_key, h_val = hdr.split("=", 1) h_val = to_native(Header(h_val, charset)) msg.add_header(h_key, h_val) except Exception: module.warn(f"Skipping header '{hdr}', unable to parse") - if 'X-Mailer' not in msg: - msg.add_header('X-Mailer', 'Ansible mail module') + if "X-Mailer" not in msg: + msg.add_header("X-Mailer", "Ansible mail module") addr_list = [] for addr in [x.strip() for x in blindcopies]: - addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase + addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase to_list = [] for addr in [x.strip() for x in recipients]: to_list.append(formataddr(parseaddr(addr))) - addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase - msg['To'] = ", ".join(to_list) + addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase + msg["To"] = ", ".join(to_list) cc_list = [] for addr in [x.strip() for x in copies]: cc_list.append(formataddr(parseaddr(addr))) - addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase - msg['Cc'] = ", ".join(cc_list) + addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase + msg["Cc"] = ", ".join(cc_list) part = MIMEText(f"{body}\n\n", _subtype=subtype, _charset=charset) msg.attach(part) @@ -379,31 +392,37 @@ def main(): # This breaks files with spaces in it :-( for filename in attach_files: try: - part = MIMEBase('application', 'octet-stream') - with open(filename, 'rb') as fp: + part = MIMEBase("application", "octet-stream") + with open(filename, "rb") as fp: part.set_payload(fp.read()) encoders.encode_base64(part) - part.add_header('Content-disposition', 'attachment', filename=os.path.basename(filename)) + part.add_header("Content-disposition", "attachment", filename=os.path.basename(filename)) msg.attach(part) except Exception as e: - module.fail_json(rc=1, msg=f"Failed to send community.general.mail: can't attach file {filename}: 
{e}", exception=traceback.format_exc()) + module.fail_json( + rc=1, + msg=f"Failed to send community.general.mail: can't attach file {filename}: {e}", + exception=traceback.format_exc(), + ) composed = msg.as_string() try: result = smtp.sendmail(sender_addr, set(addr_list), composed) except Exception as e: - module.fail_json(rc=1, msg=f"Failed to send mail to '{', '.join(set(addr_list))}': {e}", exception=traceback.format_exc()) + module.fail_json( + rc=1, msg=f"Failed to send mail to '{', '.join(set(addr_list))}': {e}", exception=traceback.format_exc() + ) smtp.quit() if result: for key in result: module.warn(f"Failed to send mail to '{key}': {result[key][0]} {result[key][1]}") - module.exit_json(msg='Failed to send mail to at least one recipient', result=result) + module.exit_json(msg="Failed to send mail to at least one recipient", result=result) - module.exit_json(msg='Mail sent successfully', result=result) + module.exit_json(msg="Mail sent successfully", result=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/make.py b/plugins/modules/make.py index 87cd0e96022..482a74774b7 100644 --- a/plugins/modules/make.py +++ b/plugins/modules/make.py @@ -155,7 +155,7 @@ def run_command(command, module, check_rc=True): :param module: Ansible make module instance :return: return code, stdout content, stderr content """ - rc, out, err = module.run_command(command, check_rc=check_rc, cwd=module.params['chdir']) + rc, out, err = module.run_command(command, check_rc=check_rc, cwd=module.params["chdir"]) return rc, sanitize_output(out), sanitize_output(err) @@ -170,7 +170,7 @@ def sanitize_output(output): :return: sanitized output """ if output is None: - return '' + return "" else: return output.rstrip("\r\n") @@ -178,55 +178,55 @@ def sanitize_output(output): def main(): module = AnsibleModule( argument_spec=dict( - target=dict(type='str'), - targets=dict(type='list', elements='str'), - params=dict(type='dict'), - chdir=dict(type='path', required=True), - file=dict(type='path'), - make=dict(type='path'), - jobs=dict(type='int'), + target=dict(type="str"), + targets=dict(type="list", elements="str"), + params=dict(type="dict"), + chdir=dict(type="path", required=True), + file=dict(type="path"), + make=dict(type="path"), + jobs=dict(type="int"), ), - mutually_exclusive=[('target', 'targets')], + mutually_exclusive=[("target", "targets")], supports_check_mode=True, ) - make_path = module.params['make'] + make_path = module.params["make"] if make_path is None: # Build up the invocation of `make` we are going to use # For non-Linux OSes, prefer gmake (GNU make) over make - make_path = module.get_bin_path('gmake', required=False) + make_path = module.get_bin_path("gmake", required=False) if not make_path: # Fall back to system make - make_path = module.get_bin_path('make', required=True) - if module.params['params'] is not None: - make_parameters = [k + (f"={v!s}" if v is not None else '') for k, v in module.params['params'].items()] + make_path = module.get_bin_path("make", required=True) + if module.params["params"] is not None: + make_parameters = [k + (f"={v!s}" if v is not None else "") for k, v in module.params["params"].items()] else: make_parameters = [] # build command: # handle any make specific arguments included in params base_command = [make_path] - if module.params['jobs'] is not None: - jobs = str(module.params['jobs']) + if module.params["jobs"] is not None: + jobs = str(module.params["jobs"]) base_command.extend(["-j", jobs]) - if 
module.params['file'] is not None: - base_command.extend(["-f", module.params['file']]) + if module.params["file"] is not None: + base_command.extend(["-f", module.params["file"]]) # add make target - if module.params['target']: - base_command.append(module.params['target']) - elif module.params['targets']: - base_command.extend(module.params['targets']) + if module.params["target"]: + base_command.append(module.params["target"]) + elif module.params["targets"]: + base_command.extend(module.params["targets"]) # add makefile parameters base_command.extend(make_parameters) # Check if the target is already up to date - rc, out, err = run_command(base_command + ['-q'], module, check_rc=False) + rc, out, err = run_command(base_command + ["-q"], module, check_rc=False) if module.check_mode: # If we've been asked to do a dry run, we only need # to report whether or not the target is up to date - changed = (rc != 0) + changed = rc != 0 else: if rc == 0: # The target is up to date, so we don't have to @@ -247,15 +247,15 @@ def main(): failed=False, stdout=out, stderr=err, - target=module.params['target'], - targets=module.params['targets'], - params=module.params['params'], - chdir=module.params['chdir'], - file=module.params['file'], - jobs=module.params['jobs'], - command=' '.join([shlex_quote(part) for part in base_command]), + target=module.params["target"], + targets=module.params["targets"], + params=module.params["params"], + chdir=module.params["chdir"], + file=module.params["file"], + jobs=module.params["jobs"], + command=" ".join([shlex_quote(part) for part in base_command]), ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/manageiq_alert_profiles.py b/plugins/modules/manageiq_alert_profiles.py index ac3b8f32922..620f1096a07 100644 --- a/plugins/modules/manageiq_alert_profiles.py +++ b/plugins/modules/manageiq_alert_profiles.py @@ -87,8 +87,7 @@ class ManageIQAlertProfiles: - """ Object to execute alert profile management operations in manageiq. - """ + """Object to execute alert profile management operations in manageiq.""" def __init__(self, manageiq): self.manageiq = manageiq @@ -96,41 +95,36 @@ def __init__(self, manageiq): self.module = self.manageiq.module self.api_url = self.manageiq.api_url self.client = self.manageiq.client - self.url = f'{self.api_url}/alert_definition_profiles' + self.url = f"{self.api_url}/alert_definition_profiles" def get_profiles(self): - """ Get all alert profiles from ManageIQ - """ + """Get all alert profiles from ManageIQ""" try: response = self.client.get(f"{self.url}?expand=alert_definitions,resources") except Exception as e: self.module.fail_json(msg=f"Failed to query alert profiles: {e}") - return response.get('resources') or [] + return response.get("resources") or [] def get_alerts(self, alert_descriptions): - """ Get a list of alert hrefs from a list of alert descriptions - """ + """Get a list of alert hrefs from a list of alert descriptions""" alerts = [] for alert_description in alert_descriptions: alert = self.manageiq.find_collection_resource_or_fail("alert_definitions", description=alert_description) - alerts.append(alert['href']) + alerts.append(alert["href"]) return alerts def add_profile(self, profile): - """ Add a new alert profile to ManageIQ - """ + """Add a new alert profile to ManageIQ""" # find all alerts to add to the profile # we do this first to fail early if one is missing. 
- alerts = self.get_alerts(profile['alerts']) + alerts = self.get_alerts(profile["alerts"]) # build the profile dict to send to the server - profile_dict = dict(name=profile['name'], - description=profile['name'], - mode=profile['resource_type']) - if profile['notes']: - profile_dict['set_data'] = dict(notes=profile['notes']) + profile_dict = dict(name=profile["name"], description=profile["name"], mode=profile["resource_type"]) + if profile["notes"]: + profile_dict["set_data"] = dict(notes=profile["notes"]) # send it to the server try: @@ -139,17 +133,16 @@ def add_profile(self, profile): self.module.fail_json(msg=f"Creating profile failed {e}") # now that it has been created, we can assign the alerts - self.assign_or_unassign(result['results'][0], alerts, "assign") + self.assign_or_unassign(result["results"][0], alerts, "assign") msg = "Profile {name} created successfully" - msg = msg.format(name=profile['name']) + msg = msg.format(name=profile["name"]) return dict(changed=True, msg=msg) def delete_profile(self, profile): - """ Delete an alert profile from ManageIQ - """ + """Delete an alert profile from ManageIQ""" try: - self.client.post(profile['href'], action="delete") + self.client.post(profile["href"], action="delete") except Exception as e: self.module.fail_json(msg=f"Deleting profile failed: {e}") @@ -157,47 +150,41 @@ def get_alert_href(self, alert): - """ Get an absolute href for an alert - """ + """Get an absolute href for an alert""" return f"{self.api_url}/alert_definitions/{alert['id']}" def assign_or_unassign(self, profile, resources, action): - """ Assign or unassign alerts to profile, and validate the result. - """ + """Assign or unassign alerts to profile, and validate the result.""" alerts = [dict(href=href) for href in resources] subcollection_url = f"{profile['href']}/alert_definitions" try: result = self.client.post(subcollection_url, resources=alerts, action=action) - if len(result['results']) != len(alerts): + if len(result["results"]) != len(alerts): msg = "Failed to {action} alerts to profile '{name}', expected {expected} alerts to be {action}ed, but only {changed} were {action}ed" - msg = msg.format(action=action, - name=profile['name'], - expected=len(alerts), - changed=result['results']) + msg = msg.format(action=action, name=profile["name"], expected=len(alerts), changed=result["results"]) self.module.fail_json(msg=msg) except Exception as e: msg = "Failed to {action} alerts to profile '{name}': {error}" - msg = msg.format(action=action, name=profile['name'], error=e) + msg = msg.format(action=action, name=profile["name"], error=e) self.module.fail_json(msg=msg) - return result['results'] + return result["results"] def update_profile(self, old_profile, desired_profile): - """ Update alert profile in ManageIQ - """ + """Update alert profile in ManageIQ""" changed = False # we need to use client.get to query the alert definitions old_profile = self.client.get(f"{old_profile['href']}?expand=alert_definitions") # figure out which alerts we need to assign / unassign # alerts listed by the user: - desired_alerts = set(self.get_alerts(desired_profile['alerts'])) + desired_alerts = set(self.get_alerts(desired_profile["alerts"])) # alerts which currently exist in the profile - if 'alert_definitions' in old_profile: + if "alert_definitions" in old_profile: # we use get_alert_href to have a direct href to the alert - existing_alerts = set(self.get_alert_href(alert) for alert in
old_profile['alert_definitions']) + existing_alerts = set(self.get_alert_href(alert) for alert in old_profile["alert_definitions"]) else: # no alerts in this profile existing_alerts = set() @@ -217,26 +204,24 @@ def update_profile(self, old_profile, desired_profile): # update other properties profile_dict = dict() - if old_profile['mode'] != desired_profile['resource_type']: + if old_profile["mode"] != desired_profile["resource_type"]: # mode needs to be updated - profile_dict['mode'] = desired_profile['resource_type'] + profile_dict["mode"] = desired_profile["resource_type"] # check if notes need to be updated - old_notes = old_profile.get('set_data', {}).get('notes') + old_notes = old_profile.get("set_data", {}).get("notes") - if desired_profile['notes'] != old_notes: - profile_dict['set_data'] = dict(notes=desired_profile['notes']) + if desired_profile["notes"] != old_notes: + profile_dict["set_data"] = dict(notes=desired_profile["notes"]) if profile_dict: # if we have any updated values changed = True try: - result = self.client.post(old_profile['href'], - resource=profile_dict, - action="edit") + result = self.client.post(old_profile["href"], resource=profile_dict, action="edit") except Exception as e: msg = "Updating profile '{name}' failed: {error}" - msg = msg.format(name=old_profile['name'], error=e) + msg = msg.format(name=old_profile["name"], error=e) self.module.fail_json(msg=msg) if changed: @@ -248,27 +233,31 @@ def update_profile(self, old_profile, desired_profile): def main(): argument_spec = dict( - name=dict(type='str', required=True), - resource_type=dict(type='str', choices=['Vm', - 'ContainerNode', - 'MiqServer', - 'Host', - 'Storage', - 'EmsCluster', - 'ExtManagementSystem', - 'MiddlewareServer']), - alerts=dict(type='list', elements='str'), - notes=dict(type='str'), - state=dict(default='present', choices=['present', 'absent']), + name=dict(type="str", required=True), + resource_type=dict( + type="str", + choices=[ + "Vm", + "ContainerNode", + "MiqServer", + "Host", + "Storage", + "EmsCluster", + "ExtManagementSystem", + "MiddlewareServer", + ], + ), + alerts=dict(type="list", elements="str"), + notes=dict(type="str"), + state=dict(default="present", choices=["present", "absent"]), ) # add the manageiq connection arguments to the arguments argument_spec.update(manageiq_argument_spec()) - module = AnsibleModule(argument_spec=argument_spec, - required_if=[('state', 'present', ['resource_type', 'alerts'])]) + module = AnsibleModule(argument_spec=argument_spec, required_if=[("state", "present", ["resource_type", "alerts"])]) - state = module.params['state'] - name = module.params['name'] + state = module.params["state"] + name = module.params["name"] manageiq = ManageIQ(module) manageiq_alert_profiles = ManageIQAlertProfiles(manageiq) diff --git a/plugins/modules/manageiq_alerts.py b/plugins/modules/manageiq_alerts.py index 3909aea576c..3ec8dc6a732 100644 --- a/plugins/modules/manageiq_alerts.py +++ b/plugins/modules/manageiq_alerts.py @@ -132,36 +132,35 @@ class ManageIQAlert: - """ Represent a ManageIQ alert. Can be initialized with both the format + """Represent a ManageIQ alert. Can be initialized with both the format we receive from the server and the format we get from the user. 
""" + def __init__(self, alert): - self.description = alert['description'] - self.db = alert['db'] - self.enabled = alert['enabled'] - self.options = alert['options'] + self.description = alert["description"] + self.db = alert["db"] + self.enabled = alert["enabled"] + self.options = alert["options"] self.hash_expression = None self.miq_expressipn = None - if 'hash_expression' in alert: - self.hash_expression = alert['hash_expression'] - if 'miq_expression' in alert: - self.miq_expression = alert['miq_expression'] - if 'exp' in self.miq_expression: + if "hash_expression" in alert: + self.hash_expression = alert["hash_expression"] + if "miq_expression" in alert: + self.miq_expression = alert["miq_expression"] + if "exp" in self.miq_expression: # miq_expression is a field that needs a special case, because # it is returned surrounded by a dict named exp even though we don't # send it with that dict. - self.miq_expression = self.miq_expression['exp'] + self.miq_expression = self.miq_expression["exp"] def __eq__(self, other): - """ Compare two ManageIQAlert objects - """ + """Compare two ManageIQAlert objects""" return self.__dict__ == other.__dict__ class ManageIQAlerts: - """ Object to execute alert management operations in manageiq. - """ + """Object to execute alert management operations in manageiq.""" def __init__(self, manageiq): self.manageiq = manageiq @@ -169,84 +168,80 @@ def __init__(self, manageiq): self.module = self.manageiq.module self.api_url = self.manageiq.api_url self.client = self.manageiq.client - self.alerts_url = f'{self.api_url}/alert_definitions' + self.alerts_url = f"{self.api_url}/alert_definitions" def get_alerts(self): - """ Get all alerts from ManageIQ - """ + """Get all alerts from ManageIQ""" try: response = self.client.get(f"{self.alerts_url}?expand=resources") except Exception as e: self.module.fail_json(msg=f"Failed to query alerts: {e}") - return response.get('resources', []) + return response.get("resources", []) def validate_hash_expression(self, expression): - """ Validate a 'hash expression' alert definition - """ + """Validate a 'hash expression' alert definition""" # hash expressions must have the following fields - for key in ['options', 'eval_method', 'mode']: + for key in ["options", "eval_method", "mode"]: if key not in expression: msg = f"Hash expression is missing required field {key}" self.module.fail_json(msg) def create_alert_dict(self, params): - """ Create a dict representing an alert - """ - if params['expression_type'] == 'hash': + """Create a dict representing an alert""" + if params["expression_type"] == "hash": # hash expression supports depends on https://github.com/ManageIQ/manageiq-api/pull/76 - self.validate_hash_expression(params['expression']) - expression_type = 'hash_expression' + self.validate_hash_expression(params["expression"]) + expression_type = "hash_expression" else: # actually miq_expression, but we call it "expression" for backwards-compatibility - expression_type = 'expression' + expression_type = "expression" # build the alret - alert = dict(description=params['description'], - db=params['resource_type'], - options=params['options'], - enabled=params['enabled']) + alert = dict( + description=params["description"], + db=params["resource_type"], + options=params["options"], + enabled=params["enabled"], + ) # add the actual expression. 
- alert.update({expression_type: params['expression']}) + alert.update({expression_type: params["expression"]}) return alert def add_alert(self, alert): - """ Add a new alert to ManageIQ - """ + """Add a new alert to ManageIQ""" try: - result = self.client.post(self.alerts_url, action='create', resource=alert) + result = self.client.post(self.alerts_url, action="create", resource=alert) msg = "Alert {description} created successfully: {details}" - msg = msg.format(description=alert['description'], details=result) + msg = msg.format(description=alert["description"], details=result) return dict(changed=True, msg=msg) except Exception as e: msg = "Creating alert {description} failed: {error}" if "Resource expression needs be specified" in str(e): # Running on an older version of ManageIQ and trying to create a hash expression - msg = msg.format(description=alert['description'], - error="Your version of ManageIQ does not support hash_expression") + msg = msg.format( + description=alert["description"], error="Your version of ManageIQ does not support hash_expression" + ) else: - msg = msg.format(description=alert['description'], error=e) + msg = msg.format(description=alert["description"], error=e) self.module.fail_json(msg=msg) def delete_alert(self, alert): - """ Delete an alert - """ + """Delete an alert""" try: - result = self.client.post(f"{self.alerts_url}/{alert['id']}", - action="delete") + result = self.client.post(f"{self.alerts_url}/{alert['id']}", action="delete") msg = "Alert {description} deleted: {details}" - msg = msg.format(description=alert['description'], details=result) + msg = msg.format(description=alert["description"], details=result) return dict(changed=True, msg=msg) except Exception as e: msg = "Deleting alert {description} failed: {error}" - msg = msg.format(description=alert['description'], error=e) + msg = msg.format(description=alert["description"], error=e) self.module.fail_json(msg=msg) def update_alert(self, existing_alert, new_alert): - """ Update an existing alert with the values from `new_alert` - """ + """Update an existing alert with the values from `new_alert`""" new_alert_obj = ManageIQAlert(new_alert) if new_alert_obj == ManageIQAlert(existing_alert): # no change needed - alerts are identical @@ -261,13 +256,13 @@ def update_alert(self, existing_alert, new_alert): if new_alert_obj == ManageIQAlert(result): # success! 
msg = "Alert {description} updated successfully: {details}" - msg = msg.format(description=existing_alert['description'], details=result) + msg = msg.format(description=existing_alert["description"], details=result) return dict(changed=True, msg=msg) else: # unexpected result msg = "Updating alert {description} failed, unexpected result {details}" - msg = msg.format(description=existing_alert['description'], details=result) + msg = msg.format(description=existing_alert["description"], details=result) self.module.fail_json(msg=msg) @@ -275,50 +270,55 @@ def update_alert(self, existing_alert, new_alert): msg = "Updating alert {description} failed: {error}" if "Resource expression needs be specified" in str(e): # Running on an older version of ManageIQ and trying to update a hash expression - msg = msg.format(description=existing_alert['description'], - error="Your version of ManageIQ does not support hash_expression") + msg = msg.format( + description=existing_alert["description"], + error="Your version of ManageIQ does not support hash_expression", + ) else: - msg = msg.format(description=existing_alert['description'], error=e) + msg = msg.format(description=existing_alert["description"], error=e) self.module.fail_json(msg=msg) def main(): argument_spec = dict( - description=dict(type='str'), - resource_type=dict(type='str', choices=['Vm', - 'ContainerNode', - 'MiqServer', - 'Host', - 'Storage', - 'EmsCluster', - 'ExtManagementSystem', - 'MiddlewareServer']), - expression_type=dict(type='str', default='hash', choices=['miq', 'hash']), - expression=dict(type='dict'), - options=dict(type='dict'), - enabled=dict(type='bool'), - state=dict(default='present', - choices=['present', 'absent']), + description=dict(type="str"), + resource_type=dict( + type="str", + choices=[ + "Vm", + "ContainerNode", + "MiqServer", + "Host", + "Storage", + "EmsCluster", + "ExtManagementSystem", + "MiddlewareServer", + ], + ), + expression_type=dict(type="str", default="hash", choices=["miq", "hash"]), + expression=dict(type="dict"), + options=dict(type="dict"), + enabled=dict(type="bool"), + state=dict(default="present", choices=["present", "absent"]), ) # add the manageiq connection arguments to the arguments argument_spec.update(manageiq_argument_spec()) - module = AnsibleModule(argument_spec=argument_spec, - required_if=[('state', 'present', ['description', - 'resource_type', - 'expression', - 'enabled', - 'options']), - ('state', 'absent', ['description'])]) + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ("state", "present", ["description", "resource_type", "expression", "enabled", "options"]), + ("state", "absent", ["description"]), + ], + ) - state = module.params['state'] - description = module.params['description'] + state = module.params["state"] + description = module.params["description"] manageiq = ManageIQ(module) manageiq_alerts = ManageIQAlerts(manageiq) - existing_alert = manageiq.find_collection_resource_by("alert_definitions", - description=description) + existing_alert = manageiq.find_collection_resource_by("alert_definitions", description=description) # we need to add or update the alert if state == "present": diff --git a/plugins/modules/manageiq_group.py b/plugins/modules/manageiq_group.py index bfaa8e0bbe3..1cd24cc8718 100644 --- a/plugins/modules/manageiq_group.py +++ b/plugins/modules/manageiq_group.py @@ -211,7 +211,7 @@ class ManageIQgroup: """ - Object to execute group management operations in manageiq. 
+ Object to execute group management operations in manageiq. """ def __init__(self, manageiq): @@ -222,7 +222,7 @@ def __init__(self, manageiq): self.client = self.manageiq.client def group(self, description): - """ Search for group object by description. + """Search for group object by description. Returns: the group, or None if group was not found. """ @@ -233,13 +233,13 @@ def group(self, description): return groups[0] def tenant(self, tenant_id, tenant_name): - """ Search for tenant entity by name or id + """Search for tenant entity by name or id Returns: the tenant entity, None if no id or name was supplied """ if tenant_id: - tenant = self.client.get_entity('tenants', tenant_id) + tenant = self.client.get_entity("tenants", tenant_id) if not tenant: self.module.fail_json(msg=f"Tenant with id '{tenant_id}' not found in manageiq") return tenant @@ -257,14 +257,14 @@ def tenant(self, tenant_id, tenant_name): return None def role(self, role_id, role_name): - """ Search for a role object by name or id. + """Search for a role object by name or id. Returns: the role entity, None if no id or name was supplied the role, or send a module Fail signal if role not found. """ if role_id: - role = self.client.get_entity('roles', role_id) + role = self.client.get_entity("roles", role_id) if not role: self.module.fail_json(msg=f"Role with id '{role_id}' not found in manageiq") return role @@ -282,7 +282,7 @@ def role(self, role_id, role_name): @staticmethod def merge_dict_values(norm_current_values, norm_updated_values): - """ Create an merged update object for manageiq group filters. + """Create a merged update object for manageiq group filters. The input dicts contain the tag values per category. If the new values contain the category, all tags for that category are replaced @@ -308,7 +308,7 @@ def merge_dict_values(norm_current_values, norm_updated_values): return res def delete_group(self, group): - """ Deletes a group from manageiq. + """Deletes a group from manageiq. Returns: a dict of: @@ -317,20 +317,27 @@ def delete_group(self, group): """ try: url = f"{self.api_url}/groups/{group['id']}" - result = self.client.post(url, action='delete') + result = self.client.post(url, action="delete") except Exception as e: self.module.fail_json(msg=f"failed to delete group {group['description']}: {e}") - if result['success'] is False: - self.module.fail_json(msg=result['message']) + if result["success"] is False: + self.module.fail_json(msg=result["message"]) - return dict( - changed=True, - msg=f"deleted group {group['description']} with id {group['id']}") + return dict(changed=True, msg=f"deleted group {group['description']} with id {group['id']}") - def edit_group(self, group, description, role, tenant, norm_managed_filters, managed_filters_merge_mode, - belongsto_filters, belongsto_filters_merge_mode): - """ Edit a manageiq group. + def edit_group( + self, + group, + description, + role, + tenant, + norm_managed_filters, + managed_filters_merge_mode, + belongsto_filters, + belongsto_filters_merge_mode, + ): + """Edit a manageiq group.
Returns: a dict of: @@ -339,66 +346,71 @@ def edit_group(self, group, description, role, tenant, norm_managed_filters, man """ if role or norm_managed_filters or belongsto_filters: - group.reload(attributes=['miq_user_role_name', 'entitlement']) + group.reload(attributes=["miq_user_role_name", "entitlement"]) try: - current_role = group['miq_user_role_name'] + current_role = group["miq_user_role_name"] except AttributeError: current_role = None changed = False resource = {} - if description and group['description'] != description: - resource['description'] = description + if description and group["description"] != description: + resource["description"] = description changed = True - if tenant and group['tenant_id'] != tenant['id']: - resource['tenant'] = dict(id=tenant['id']) + if tenant and group["tenant_id"] != tenant["id"]: + resource["tenant"] = dict(id=tenant["id"]) changed = True - if role and current_role != role['name']: - resource['role'] = dict(id=role['id']) + if role and current_role != role["name"]: + resource["role"] = dict(id=role["id"]) changed = True if norm_managed_filters or belongsto_filters: - # Only compare if filters are supplied - entitlement = group['entitlement'] + entitlement = group["entitlement"] - if 'filters' not in entitlement: + if "filters" not in entitlement: # No existing filters exist, use supplied filters managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters) - resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters} + resource["filters"] = {"managed": managed_tag_filters_post_body, "belongsto": belongsto_filters} changed = True else: - current_filters = entitlement['filters'] - new_filters = self.edit_group_edit_filters(current_filters, - norm_managed_filters, managed_filters_merge_mode, - belongsto_filters, belongsto_filters_merge_mode) + current_filters = entitlement["filters"] + new_filters = self.edit_group_edit_filters( + current_filters, + norm_managed_filters, + managed_filters_merge_mode, + belongsto_filters, + belongsto_filters_merge_mode, + ) if new_filters: - resource['filters'] = new_filters + resource["filters"] = new_filters changed = True if not changed: - return dict( - changed=False, - msg=f"group {group['description']} is not changed.") + return dict(changed=False, msg=f"group {group['description']} is not changed.") # try to update group try: - self.client.post(group['href'], action='edit', resource=resource) + self.client.post(group["href"], action="edit", resource=resource) changed = True except Exception as e: self.module.fail_json(msg=f"failed to update group {group['name']}: {e!s}") - return dict( - changed=changed, - msg=f"successfully updated the group {group['description']} with id {group['id']}") + return dict(changed=changed, msg=f"successfully updated the group {group['description']} with id {group['id']}") - def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed_filters_merge_mode, - belongsto_filters, belongsto_filters_merge_mode): - """ Edit a manageiq group filters. + def edit_group_edit_filters( + self, + current_filters, + norm_managed_filters, + managed_filters_merge_mode, + belongsto_filters, + belongsto_filters_merge_mode, + ): + """Edit a manageiq group's filters.
Returns: None if the group was not updated @@ -407,7 +419,7 @@ def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed filters_updated = False new_filters_resource = {} - current_belongsto_set = current_filters.get('belongsto', set()) + current_belongsto_set = current_filters.get("belongsto", set()) if belongsto_filters: new_belongsto_set = set(belongsto_filters) @@ -415,13 +427,13 @@ def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed new_belongsto_set = set() if current_belongsto_set == new_belongsto_set: - new_filters_resource['belongsto'] = current_filters['belongsto'] + new_filters_resource["belongsto"] = current_filters["belongsto"] else: - if belongsto_filters_merge_mode == 'merge': + if belongsto_filters_merge_mode == "merge": current_belongsto_set.update(new_belongsto_set) - new_filters_resource['belongsto'] = list(current_belongsto_set) + new_filters_resource["belongsto"] = list(current_belongsto_set) else: - new_filters_resource['belongsto'] = list(new_belongsto_set) + new_filters_resource["belongsto"] = list(new_belongsto_set) filters_updated = True # Process belongsto managed filter tags @@ -432,14 +444,14 @@ def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed norm_current_filters = self.manageiq_filters_to_sorted_dict(current_filters) if norm_current_filters == norm_managed_filters: - if 'managed' in current_filters: - new_filters_resource['managed'] = current_filters['managed'] + if "managed" in current_filters: + new_filters_resource["managed"] = current_filters["managed"] else: - if managed_filters_merge_mode == 'merge': + if managed_filters_merge_mode == "merge": merged_dict = self.merge_dict_values(norm_current_filters, norm_managed_filters) - new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(merged_dict) + new_filters_resource["managed"] = self.normalized_managed_tag_filters_to_miq(merged_dict) else: - new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(norm_managed_filters) + new_filters_resource["managed"] = self.normalized_managed_tag_filters_to_miq(norm_managed_filters) filters_updated = True if not filters_updated: @@ -448,7 +460,7 @@ def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed return new_filters_resource def create_group(self, description, role, tenant, norm_managed_filters, belongsto_filters): - """ Creates the group in manageiq. + """Creates the group in manageiq.
Returns: the created group id, name, created_on timestamp, @@ -456,33 +468,29 @@ def create_group(self, description, role, tenant, norm_managed_filters, belongst """ # check for required arguments for key, value in dict(description=description).items(): - if value in (None, ''): + if value in (None, ""): self.module.fail_json(msg=f"missing required argument: {key}") - url = f'{self.api_url}/groups' + url = f"{self.api_url}/groups" - resource = {'description': description} + resource = {"description": description} if role is not None: - resource['role'] = dict(id=role['id']) + resource["role"] = dict(id=role["id"]) if tenant is not None: - resource['tenant'] = dict(id=tenant['id']) + resource["tenant"] = dict(id=tenant["id"]) if norm_managed_filters or belongsto_filters: managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters) - resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters} + resource["filters"] = {"managed": managed_tag_filters_post_body, "belongsto": belongsto_filters} try: - result = self.client.post(url, action='create', resource=resource) + result = self.client.post(url, action="create", resource=resource) except Exception as e: self.module.fail_json(msg=f"failed to create group {description}: {e}") - return dict( - changed=True, - msg=f"successfully created group {description}", - group_id=result['results'][0]['id'] - ) + return dict(changed=True, msg=f"successfully created group {description}", group_id=result["results"][0]["id"]) @staticmethod def normalized_managed_tag_filters_to_miq(norm_managed_filters): @@ -493,14 +501,14 @@ def normalized_managed_tag_filters_to_miq(norm_managed_filters): @staticmethod def manageiq_filters_to_sorted_dict(current_filters): - current_managed_filters = current_filters.get('managed') + current_managed_filters = current_filters.get("managed") if not current_managed_filters: return None res = {} for tag_list in current_managed_filters: tag_list.sort() - key = tag_list[0].split('/')[2] + key = tag_list[0].split("/")[2] res[key] = tag_list return res @@ -526,74 +534,72 @@ def normalize_user_managed_filters_to_sorted_dict(managed_filters, module): @staticmethod def create_result_group(group): - """ Creates the ansible result object from a manageiq group entity + """Creates the ansible result object from a manageiq group entity Returns: a dict with the group id, description, role, tenant, filters, group_type, created_on, updated_on """ try: - role_name = group['miq_user_role_name'] + role_name = group["miq_user_role_name"] except AttributeError: role_name = None managed_filters = None belongsto_filters = None - if 'filters' in group['entitlement']: - filters = group['entitlement']['filters'] - belongsto_filters = filters.get('belongsto') - group_managed_filters = filters.get('managed') + if "filters" in group["entitlement"]: + filters = group["entitlement"]["filters"] + belongsto_filters = filters.get("belongsto") + group_managed_filters = filters.get("managed") if group_managed_filters: managed_filters = {} for tag_list in group_managed_filters: - key = tag_list[0].split('/')[2] + key = tag_list[0].split("/")[2] tags = [] for t in tag_list: - tags.append(t.split('/')[3]) + tags.append(t.split("/")[3]) managed_filters[key] = tags return dict( - id=group['id'], - description=group['description'], + id=group["id"], + description=group["description"], role=role_name, - tenant=group['tenant']['name'], + tenant=group["tenant"]["name"], managed_filters=managed_filters, 
belongsto_filters=belongsto_filters, - group_type=group['group_type'], - created_on=group['created_on'], - updated_on=group['updated_on'], + group_type=group["group_type"], + created_on=group["created_on"], + updated_on=group["updated_on"], ) def main(): argument_spec = dict( - description=dict(required=True, type='str'), - state=dict(choices=['absent', 'present'], default='present'), - role_id=dict(type='int'), - role=dict(type='str'), - tenant_id=dict(type='int'), - tenant=dict(type='str'), - managed_filters=dict(type='dict'), - managed_filters_merge_mode=dict(choices=['merge', 'replace'], default='replace'), - belongsto_filters=dict(type='list', elements='str'), - belongsto_filters_merge_mode=dict(choices=['merge', 'replace'], default='replace'), + description=dict(required=True, type="str"), + state=dict(choices=["absent", "present"], default="present"), + role_id=dict(type="int"), + role=dict(type="str"), + tenant_id=dict(type="int"), + tenant=dict(type="str"), + managed_filters=dict(type="dict"), + managed_filters_merge_mode=dict(choices=["merge", "replace"], default="replace"), + belongsto_filters=dict(type="list", elements="str"), + belongsto_filters_merge_mode=dict(choices=["merge", "replace"], default="replace"), ) # add the manageiq connection arguments to the arguments argument_spec.update(manageiq_argument_spec()) - module = AnsibleModule( - argument_spec=argument_spec - ) + module = AnsibleModule(argument_spec=argument_spec) - description = module.params['description'] - state = module.params['state'] - role_id = module.params['role_id'] - role_name = module.params['role'] - tenant_id = module.params['tenant_id'] - tenant_name = module.params['tenant'] - managed_filters = module.params['managed_filters'] - managed_filters_merge_mode = module.params['managed_filters_merge_mode'] - belongsto_filters = module.params['belongsto_filters'] - belongsto_filters_merge_mode = module.params['belongsto_filters_merge_mode'] + description = module.params["description"] + state = module.params["state"] + role_id = module.params["role_id"] + role_name = module.params["role"] + tenant_id = module.params["tenant_id"] + tenant_name = module.params["tenant"] + managed_filters = module.params["managed_filters"] + managed_filters_merge_mode = module.params["managed_filters_merge_mode"] + belongsto_filters = module.params["belongsto_filters"] + belongsto_filters_merge_mode = module.params["belongsto_filters_merge_mode"] manageiq = ManageIQ(module) manageiq_group = ManageIQgroup(manageiq) @@ -607,29 +613,33 @@ def main(): res_args = manageiq_group.delete_group(group) # if we do not have a group, nothing to do else: - res_args = dict( - changed=False, - msg=f"group '{description}' does not exist in manageiq") + res_args = dict(changed=False, msg=f"group '{description}' does not exist in manageiq") # group should exist if state == "present": - tenant = manageiq_group.tenant(tenant_id, tenant_name) role = manageiq_group.role(role_id, role_name) norm_managed_filters = manageiq_group.normalize_user_managed_filters_to_sorted_dict(managed_filters, module) # if we have a group, edit it if group: - res_args = manageiq_group.edit_group(group, description, role, tenant, - norm_managed_filters, managed_filters_merge_mode, - belongsto_filters, belongsto_filters_merge_mode) + res_args = manageiq_group.edit_group( + group, + description, + role, + tenant, + norm_managed_filters, + managed_filters_merge_mode, + belongsto_filters, + belongsto_filters_merge_mode, + ) # if we do not have a group, create it else: 
res_args = manageiq_group.create_group(description, role, tenant, norm_managed_filters, belongsto_filters) - group = manageiq.client.get_entity('groups', res_args['group_id']) + group = manageiq.client.get_entity("groups", res_args["group_id"]) - group.reload(expand='resources', attributes=['miq_user_role_name', 'tenant', 'entitlement']) - res_args['group'] = manageiq_group.create_result_group(group) + group.reload(expand="resources", attributes=["miq_user_role_name", "tenant", "entitlement"]) + res_args["group"] = manageiq_group.create_result_group(group) module.exit_json(**res_args) diff --git a/plugins/modules/manageiq_policies.py b/plugins/modules/manageiq_policies.py index a5539724dc3..02ca0934011 100644 --- a/plugins/modules/manageiq_policies.py +++ b/plugins/modules/manageiq_policies.py @@ -134,19 +134,21 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities +from ansible_collections.community.general.plugins.module_utils.manageiq import ( + ManageIQ, + manageiq_argument_spec, + manageiq_entities, +) def main(): - actions = {'present': 'assign', 'absent': 'unassign'} + actions = {"present": "assign", "absent": "unassign"} argument_spec = dict( - policy_profiles=dict(type='list', elements='dict'), - resource_id=dict(type='int'), - resource_name=dict(type='str'), - resource_type=dict(required=True, type='str', - choices=list(manageiq_entities().keys())), - state=dict(type='str', - choices=['present', 'absent'], default='present'), + policy_profiles=dict(type="list", elements="dict"), + resource_id=dict(type="int"), + resource_name=dict(type="str"), + resource_type=dict(required=True, type="str", choices=list(manageiq_entities().keys())), + state=dict(type="str", choices=["present", "absent"], default="present"), ) # add the manageiq connection arguments to the arguments argument_spec.update(manageiq_argument_spec()) @@ -155,17 +157,14 @@ def main(): argument_spec=argument_spec, mutually_exclusive=[["resource_id", "resource_name"]], required_one_of=[["resource_id", "resource_name"]], - required_if=[ - ('state', 'present', ['policy_profiles']), - ('state', 'absent', ['policy_profiles']) - ], + required_if=[("state", "present", ["policy_profiles"]), ("state", "absent", ["policy_profiles"])], ) - policy_profiles = module.params['policy_profiles'] - resource_id = module.params['resource_id'] - resource_type_key = module.params['resource_type'] - resource_name = module.params['resource_name'] - state = module.params['state'] + policy_profiles = module.params["policy_profiles"] + resource_id = module.params["resource_id"] + resource_type_key = module.params["resource_type"] + resource_name = module.params["resource_name"] + state = module.params["state"] # get the action and resource type action = actions[state] diff --git a/plugins/modules/manageiq_policies_info.py b/plugins/modules/manageiq_policies_info.py index bf96679e297..f925148b7d8 100644 --- a/plugins/modules/manageiq_policies_info.py +++ b/plugins/modules/manageiq_policies_info.py @@ -89,15 +89,18 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities +from ansible_collections.community.general.plugins.module_utils.manageiq import ( + ManageIQ, + manageiq_argument_spec, + manageiq_entities, +) def main(): argument_spec = dict( - resource_id=dict(type='int'), - 
resource_name=dict(type='str'), - resource_type=dict(required=True, type='str', - choices=list(manageiq_entities().keys())), + resource_id=dict(type="int"), + resource_name=dict(type="str"), + resource_type=dict(required=True, type="str", choices=list(manageiq_entities().keys())), ) # add the manageiq connection arguments to the arguments argument_spec.update(manageiq_argument_spec()) @@ -109,9 +112,9 @@ def main(): supports_check_mode=True, ) - resource_id = module.params['resource_id'] - resource_type_key = module.params['resource_type'] - resource_name = module.params['resource_name'] + resource_id = module.params["resource_id"] + resource_type_key = module.params["resource_type"] + resource_name = module.params["resource_name"] # get the resource type resource_type = manageiq_entities()[resource_type_key] diff --git a/plugins/modules/manageiq_provider.py b/plugins/modules/manageiq_provider.py index db1cd3af7fe..90495d86be4 100644 --- a/plugins/modules/manageiq_provider.py +++ b/plugins/modules/manageiq_provider.py @@ -515,45 +515,42 @@ def supported_providers(): return dict( Openshift=dict( - class_name='ManageIQ::Providers::Openshift::ContainerManager', - authtype='bearer', - default_role='default', - metrics_role='prometheus', - alerts_role='prometheus_alerts', + class_name="ManageIQ::Providers::Openshift::ContainerManager", + authtype="bearer", + default_role="default", + metrics_role="prometheus", + alerts_role="prometheus_alerts", ), Amazon=dict( - class_name='ManageIQ::Providers::Amazon::CloudManager', + class_name="ManageIQ::Providers::Amazon::CloudManager", ), oVirt=dict( - class_name='ManageIQ::Providers::Redhat::InfraManager', - default_role='default', - metrics_role='metrics', + class_name="ManageIQ::Providers::Redhat::InfraManager", + default_role="default", + metrics_role="metrics", ), VMware=dict( - class_name='ManageIQ::Providers::Vmware::InfraManager', + class_name="ManageIQ::Providers::Vmware::InfraManager", ), Azure=dict( - class_name='ManageIQ::Providers::Azure::CloudManager', - ), - Director=dict( - class_name='ManageIQ::Providers::Openstack::InfraManager', - ssh_keypair_role="ssh_keypair" + class_name="ManageIQ::Providers::Azure::CloudManager", ), + Director=dict(class_name="ManageIQ::Providers::Openstack::InfraManager", ssh_keypair_role="ssh_keypair"), OpenStack=dict( - class_name='ManageIQ::Providers::Openstack::CloudManager', + class_name="ManageIQ::Providers::Openstack::CloudManager", ), GCE=dict( - class_name='ManageIQ::Providers::Google::CloudManager', + class_name="ManageIQ::Providers::Google::CloudManager", ), ) def endpoint_list_spec(): return dict( - provider=dict(type='dict', options=endpoint_argument_spec()), - metrics=dict(type='dict', options=endpoint_argument_spec()), - alerts=dict(type='dict', options=endpoint_argument_spec()), - ssh_keypair=dict(type='dict', options=endpoint_argument_spec(), no_log=False), + provider=dict(type="dict", options=endpoint_argument_spec()), + metrics=dict(type="dict", options=endpoint_argument_spec()), + alerts=dict(type="dict", options=endpoint_argument_spec()), + ssh_keypair=dict(type="dict", options=endpoint_argument_spec(), no_log=False), ) @@ -561,15 +558,15 @@ def endpoint_argument_spec(): return dict( role=dict(), hostname=dict(required=True), - port=dict(type='int'), - validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']), + port=dict(type="int"), + validate_certs=dict(default=True, type="bool", aliases=["verify_ssl"]), certificate_authority=dict(), security_protocol=dict( choices=[ - 
'ssl-with-validation', - 'ssl-with-validation-custom-ca', - 'ssl-without-validation', - 'non-ssl', + "ssl-with-validation", + "ssl-with-validation-custom-ca", + "ssl-without-validation", + "non-ssl", ], ), userid=dict(), @@ -583,7 +580,7 @@ def endpoint_argument_spec(): def delete_nulls(h): - """ Remove null entries from a hash + """Remove null entries from a hash Returns: a hash without nulls @@ -598,7 +595,7 @@ def delete_nulls(h): class ManageIQProvider: """ - Object to execute provider management operations in manageiq. + Object to execute provider management operations in manageiq. """ def __init__(self, manageiq): @@ -609,40 +606,39 @@ def __init__(self, manageiq): self.client = self.manageiq.client def class_name_to_type(self, class_name): - """ Convert class_name to type + """Convert class_name to type Returns: the type """ - out = [k for k, v in supported_providers().items() if v['class_name'] == class_name] + out = [k for k, v in supported_providers().items() if v["class_name"] == class_name] if len(out) == 1: return out[0] return None def zone_id(self, name): - """ Search for zone id by zone name. + """Search for zone id by zone name. Returns: the zone id, or send a module Fail signal if zone not found. """ - zone = self.manageiq.find_collection_resource_by('zones', name=name) + zone = self.manageiq.find_collection_resource_by("zones", name=name) if not zone: # zone doesn't exist - self.module.fail_json( - msg=f"zone {name} does not exist in manageiq") + self.module.fail_json(msg=f"zone {name} does not exist in manageiq") - return zone['id'] + return zone["id"] def provider(self, name): - """ Search for provider object by name. + """Search for provider object by name. Returns: the provider, or None if provider not found. """ - return self.manageiq.find_collection_resource_by('providers', name=name) + return self.manageiq.find_collection_resource_by("providers", name=name) def build_connection_configurations(self, provider_type, endpoints): - """ Build "connection_configurations" objects from + """Build "connection_configurations" objects from requested endpoints provided by user Returns: @@ -653,59 +649,74 @@ def build_connection_configurations(self, provider_type, endpoints): provider_defaults = supported_providers().get(provider_type, {}) # get endpoint defaults - endpoint = endpoints.get('provider') - default_auth_key = endpoint.get('auth_key') + endpoint = endpoints.get("provider") + default_auth_key = endpoint.get("auth_key") # build a connection_configuration object for each endpoint for endpoint_key in endpoint_keys: endpoint = endpoints.get(endpoint_key) if endpoint: # get role and authtype - role = endpoint.get('role') or provider_defaults.get(f"{endpoint_key}_role", 'default') - if role == 'default': - authtype = provider_defaults.get('authtype') or role + role = endpoint.get("role") or provider_defaults.get(f"{endpoint_key}_role", "default") + if role == "default": + authtype = provider_defaults.get("authtype") or role else: authtype = role # set a connection_configuration - connection_configurations.append({ - 'endpoint': { - 'role': role, - 'hostname': endpoint.get('hostname'), - 'port': endpoint.get('port'), - 'verify_ssl': [0, 1][endpoint.get('validate_certs', True)], - 'security_protocol': endpoint.get('security_protocol'), - 'certificate_authority': endpoint.get('certificate_authority'), - 'path': endpoint.get('path'), - }, - 'authentication': { - 'authtype': authtype, - 'userid': endpoint.get('userid'), - 'password': endpoint.get('password'), - 'auth_key': 
endpoint.get('auth_key') or default_auth_key, + connection_configurations.append( + { + "endpoint": { + "role": role, + "hostname": endpoint.get("hostname"), + "port": endpoint.get("port"), + "verify_ssl": [0, 1][endpoint.get("validate_certs", True)], + "security_protocol": endpoint.get("security_protocol"), + "certificate_authority": endpoint.get("certificate_authority"), + "path": endpoint.get("path"), + }, + "authentication": { + "authtype": authtype, + "userid": endpoint.get("userid"), + "password": endpoint.get("password"), + "auth_key": endpoint.get("auth_key") or default_auth_key, + }, } - }) + ) return connection_configurations def delete_provider(self, provider): - """ Deletes a provider from manageiq. + """Deletes a provider from manageiq. Returns: a short message describing the operation executed. """ try: url = f"{self.api_url}/providers/{provider['id']}" - result = self.client.post(url, action='delete') + result = self.client.post(url, action="delete") except Exception as e: self.module.fail_json(msg=f"failed to delete provider {provider['name']}: {e}") - return dict(changed=True, msg=result['message']) - - def edit_provider(self, provider, name, provider_type, endpoints, zone_id, provider_region, - host_default_vnc_port_start, host_default_vnc_port_end, - subscription, project, uid_ems, tenant_mapping_enabled, api_version): - """ Edit a provider from manageiq. + return dict(changed=True, msg=result["message"]) + + def edit_provider( + self, + provider, + name, + provider_type, + endpoints, + zone_id, + provider_region, + host_default_vnc_port_start, + host_default_vnc_port_end, + subscription, + project, + uid_ems, + tenant_mapping_enabled, + api_version, + ): + """Edit a provider from manageiq. Returns: a short message describing the operation executed. @@ -714,7 +725,7 @@ def edit_provider(self, provider, name, provider_type, endpoints, zone_id, provi resource = dict( name=name, - zone={'id': zone_id}, + zone={"id": zone_id}, provider_region=provider_region, connection_configurations=endpoints, host_default_vnc_port_start=host_default_vnc_port_start, @@ -737,25 +748,35 @@ def edit_provider(self, provider, name, provider_type, endpoints, zone_id, provi # try to update provider try: - result = self.client.post(url, action='edit', resource=resource) + result = self.client.post(url, action="edit", resource=resource) except Exception as e: self.module.fail_json(msg=f"failed to update provider {provider['name']}: {e}") - return dict( - changed=True, - msg=f"successfully updated the provider {provider['name']}: {result}") - - def create_provider(self, name, provider_type, endpoints, zone_id, provider_region, - host_default_vnc_port_start, host_default_vnc_port_end, - subscription, project, uid_ems, tenant_mapping_enabled, api_version): - """ Creates the provider in manageiq. + return dict(changed=True, msg=f"successfully updated the provider {provider['name']}: {result}") + + def create_provider( + self, + name, + provider_type, + endpoints, + zone_id, + provider_region, + host_default_vnc_port_start, + host_default_vnc_port_end, + subscription, + project, + uid_ems, + tenant_mapping_enabled, + api_version, + ): + """Creates the provider in manageiq. Returns: a short message describing the operation executed. 
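The [0, 1][...] indexing in build_connection_configurations above maps the boolean validate_certs option to the integer verify_ssl value the ManageIQ API expects; a small sketch of the idiom:

    # bool is an int subclass (False == 0, True == 1), so indexing a
    # two-element list with it selects the matching integer.
    for validate_certs in (True, False):
        verify_ssl = [0, 1][validate_certs]
        print(validate_certs, "->", verify_ssl)  # True -> 1, False -> 0

int(validate_certs) would express the same conversion more directly.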
""" resource = dict( name=name, - zone={'id': zone_id}, + zone={"id": zone_id}, provider_region=provider_region, host_default_vnc_port_start=host_default_vnc_port_start, host_default_vnc_port_end=host_default_vnc_port_end, @@ -772,47 +793,43 @@ def create_provider(self, name, provider_type, endpoints, zone_id, provider_regi # try to create a new provider try: - url = f'{self.api_url}/providers' - result = self.client.post(url, type=supported_providers()[provider_type]['class_name'], **resource) + url = f"{self.api_url}/providers" + result = self.client.post(url, type=supported_providers()[provider_type]["class_name"], **resource) except Exception as e: self.module.fail_json(msg=f"failed to create provider {name}: {e}") - return dict( - changed=True, - msg=f"successfully created the provider {name}: {result['results']}") + return dict(changed=True, msg=f"successfully created the provider {name}: {result['results']}") def refresh(self, provider, name): - """ Trigger provider refresh. + """Trigger provider refresh. Returns: a short message describing the operation executed. """ try: url = f"{self.api_url}/providers/{provider['id']}" - result = self.client.post(url, action='refresh') + result = self.client.post(url, action="refresh") except Exception as e: self.module.fail_json(msg=f"failed to refresh provider {name}: {e}") - return dict( - changed=True, - msg=f"refreshing provider {name}") + return dict(changed=True, msg=f"refreshing provider {name}") def main(): zone_id = None endpoints = [] argument_spec = dict( - state=dict(choices=['absent', 'present', 'refresh'], default='present'), + state=dict(choices=["absent", "present", "refresh"], default="present"), name=dict(required=True), - zone=dict(default='default'), + zone=dict(default="default"), provider_region=dict(), host_default_vnc_port_start=dict(), host_default_vnc_port_end=dict(), subscription=dict(), project=dict(), - azure_tenant_id=dict(aliases=['keystone_v3_domain_id']), - tenant_mapping_enabled=dict(default=False, type='bool'), - api_version=dict(choices=['v2', 'v3']), + azure_tenant_id=dict(aliases=["keystone_v3_domain_id"]), + tenant_mapping_enabled=dict(default=False, type="bool"), + api_version=dict(choices=["v2", "v3"]), type=dict(choices=list(supported_providers().keys())), ) # add the manageiq connection arguments to the arguments @@ -822,27 +839,23 @@ def main(): module = AnsibleModule( argument_spec=argument_spec, - required_if=[ - ('state', 'present', ['provider']), - ('state', 'refresh', ['name'])], - required_together=[ - ['host_default_vnc_port_start', 'host_default_vnc_port_end'] - ], + required_if=[("state", "present", ["provider"]), ("state", "refresh", ["name"])], + required_together=[["host_default_vnc_port_start", "host_default_vnc_port_end"]], ) - name = module.params['name'] - zone_name = module.params['zone'] - provider_type = module.params['type'] + name = module.params["name"] + zone_name = module.params["zone"] + provider_type = module.params["type"] raw_endpoints = module.params - provider_region = module.params['provider_region'] - host_default_vnc_port_start = module.params['host_default_vnc_port_start'] - host_default_vnc_port_end = module.params['host_default_vnc_port_end'] - subscription = module.params['subscription'] - uid_ems = module.params['azure_tenant_id'] - project = module.params['project'] - tenant_mapping_enabled = module.params['tenant_mapping_enabled'] - api_version = module.params['api_version'] - state = module.params['state'] + provider_region = module.params["provider_region"] + 
host_default_vnc_port_start = module.params["host_default_vnc_port_start"] + host_default_vnc_port_end = module.params["host_default_vnc_port_end"] + subscription = module.params["subscription"] + uid_ems = module.params["azure_tenant_id"] + project = module.params["project"] + tenant_mapping_enabled = module.params["tenant_mapping_enabled"] + api_version = module.params["api_version"] + state = module.params["state"] manageiq = ManageIQ(module) manageiq_provider = ManageIQProvider(manageiq) @@ -856,9 +869,7 @@ def main(): res_args = manageiq_provider.delete_provider(provider) # if we do not have a provider, nothing to do else: - res_args = dict( - changed=False, - msg=f"provider {name}: does not exist in manageiq") + res_args = dict(changed=False, msg=f"provider {name}: does not exist in manageiq") # provider should exist if state == "present": @@ -868,17 +879,15 @@ def main(): # if we do not have a provider_type, use the current provider_type if provider and not provider_type: - provider_type = manageiq_provider.class_name_to_type(provider['type']) + provider_type = manageiq_provider.class_name_to_type(provider["type"]) # check supported_providers types if not provider_type: - manageiq_provider.module.fail_json( - msg="missing required argument: provider_type") + manageiq_provider.module.fail_json(msg="missing required argument: provider_type") # check supported_providers types if provider_type not in supported_providers().keys(): - manageiq_provider.module.fail_json( - msg=f"provider_type {provider_type} is not supported") + manageiq_provider.module.fail_json(msg=f"provider_type {provider_type} is not supported") # build "connection_configurations" objects from user requested endpoints # "provider" is a required endpoint, if we have it, we have endpoints @@ -887,23 +896,44 @@ def main(): # if we have a provider, edit it if provider: - res_args = manageiq_provider.edit_provider(provider, name, provider_type, endpoints, zone_id, provider_region, - host_default_vnc_port_start, host_default_vnc_port_end, - subscription, project, uid_ems, tenant_mapping_enabled, api_version) + res_args = manageiq_provider.edit_provider( + provider, + name, + provider_type, + endpoints, + zone_id, + provider_region, + host_default_vnc_port_start, + host_default_vnc_port_end, + subscription, + project, + uid_ems, + tenant_mapping_enabled, + api_version, + ) # if we do not have a provider, create it else: - res_args = manageiq_provider.create_provider(name, provider_type, endpoints, zone_id, provider_region, - host_default_vnc_port_start, host_default_vnc_port_end, - subscription, project, uid_ems, tenant_mapping_enabled, api_version) + res_args = manageiq_provider.create_provider( + name, + provider_type, + endpoints, + zone_id, + provider_region, + host_default_vnc_port_start, + host_default_vnc_port_end, + subscription, + project, + uid_ems, + tenant_mapping_enabled, + api_version, + ) # refresh provider (trigger sync) if state == "refresh": if provider: res_args = manageiq_provider.refresh(provider, name) else: - res_args = dict( - changed=False, - msg=f"provider {name}: does not exist in manageiq") + res_args = dict(changed=False, msg=f"provider {name}: does not exist in manageiq") module.exit_json(**res_args) diff --git a/plugins/modules/manageiq_tags.py b/plugins/modules/manageiq_tags.py index 7715a04288e..adf1dd7aead 100644 --- a/plugins/modules/manageiq_tags.py +++ b/plugins/modules/manageiq_tags.py @@ -124,20 +124,21 @@ from ansible.module_utils.basic import AnsibleModule from 
ansible_collections.community.general.plugins.module_utils.manageiq import ( - ManageIQ, ManageIQTags, manageiq_argument_spec, manageiq_entities + ManageIQ, + ManageIQTags, + manageiq_argument_spec, + manageiq_entities, ) def main(): - actions = {'present': 'assign', 'absent': 'unassign'} + actions = {"present": "assign", "absent": "unassign"} argument_spec = dict( - tags=dict(type='list', elements='dict'), - resource_id=dict(type='int'), - resource_name=dict(type='str'), - resource_type=dict(required=True, type='str', - choices=list(manageiq_entities().keys())), - state=dict(type='str', - choices=['present', 'absent'], default='present'), + tags=dict(type="list", elements="dict"), + resource_id=dict(type="int"), + resource_name=dict(type="str"), + resource_type=dict(required=True, type="str", choices=list(manageiq_entities().keys())), + state=dict(type="str", choices=["present", "absent"], default="present"), ) # add the manageiq connection arguments to the arguments argument_spec.update(manageiq_argument_spec()) @@ -146,17 +147,14 @@ def main(): argument_spec=argument_spec, mutually_exclusive=[["resource_id", "resource_name"]], required_one_of=[["resource_id", "resource_name"]], - required_if=[ - ('state', 'present', ['tags']), - ('state', 'absent', ['tags']) - ], + required_if=[("state", "present", ["tags"]), ("state", "absent", ["tags"])], ) - tags = module.params['tags'] - resource_id = module.params['resource_id'] - resource_type_key = module.params['resource_type'] - resource_name = module.params['resource_name'] - state = module.params['state'] + tags = module.params["tags"] + resource_id = module.params["resource_id"] + resource_type_key = module.params["resource_type"] + resource_name = module.params["resource_name"] + state = module.params["state"] # get the action and resource type action = actions[state] diff --git a/plugins/modules/manageiq_tags_info.py b/plugins/modules/manageiq_tags_info.py index eeb2e74685b..8083d744562 100644 --- a/plugins/modules/manageiq_tags_info.py +++ b/plugins/modules/manageiq_tags_info.py @@ -74,16 +74,18 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ( - ManageIQ, ManageIQTags, manageiq_argument_spec, manageiq_entities + ManageIQ, + ManageIQTags, + manageiq_argument_spec, + manageiq_entities, ) def main(): argument_spec = dict( - resource_id=dict(type='int'), - resource_name=dict(type='str'), - resource_type=dict(required=True, type='str', - choices=list(manageiq_entities().keys())), + resource_id=dict(type="int"), + resource_name=dict(type="str"), + resource_type=dict(required=True, type="str", choices=list(manageiq_entities().keys())), ) # add the manageiq connection arguments to the arguments argument_spec.update(manageiq_argument_spec()) @@ -95,9 +97,9 @@ def main(): supports_check_mode=True, ) - resource_id = module.params['resource_id'] - resource_type_key = module.params['resource_type'] - resource_name = module.params['resource_name'] + resource_id = module.params["resource_id"] + resource_type_key = module.params["resource_type"] + resource_name = module.params["resource_name"] # get the action and resource type resource_type = manageiq_entities()[resource_type_key] diff --git a/plugins/modules/manageiq_tenant.py b/plugins/modules/manageiq_tenant.py index b543170bf9e..1811c07bb7d 100644 --- a/plugins/modules/manageiq_tenant.py +++ b/plugins/modules/manageiq_tenant.py @@ -168,7 +168,7 @@ class ManageIQTenant: """ - Object to execute tenant management 
operations in manageiq. + Object to execute tenant management operations in manageiq. """ def __init__(self, manageiq): @@ -179,7 +179,7 @@ def __init__(self, manageiq): self.client = self.manageiq.client def tenant(self, name, parent_id, parent): - """ Search for tenant object by name and parent_id or parent + """Search for tenant object by name and parent_id or parent or the root tenant if no parent or parent_id is supplied. Returns: the parent tenant, None for the root tenant @@ -195,7 +195,7 @@ def tenant(self, name, parent_id, parent): for tenant in tenants: try: - ancestry = tenant['ancestry'] + ancestry = tenant["ancestry"] except AttributeError: ancestry = None @@ -215,12 +215,12 @@ def tenant(self, name, parent_id, parent): self.module.fail_json(msg=f"Multiple parent tenants not found in manageiq with name '{parent}'") parent_tenant = parent_tenant_res[0] - parent_id = int(parent_tenant['id']) + parent_id = int(parent_tenant["id"]) tenants = self.client.collections.tenants.find_by(name=name) for tenant in tenants: try: - ancestry = tenant['ancestry'] + ancestry = tenant["ancestry"] except AttributeError: ancestry = None @@ -235,37 +235,34 @@ def tenant(self, name, parent_id, parent): return None, self.client.collections.tenants.find_by(ancestry=None)[0] def compare_tenant(self, tenant, name, description): - """ Compare tenant fields with new field values. + """Compare tenant fields with new field values. Returns: false if tenant fields have some difference from new fields, true o/w. """ - found_difference = ( - (name and tenant['name'] != name) or - (description and tenant['description'] != description) - ) + found_difference = (name and tenant["name"] != name) or (description and tenant["description"] != description) return not found_difference def delete_tenant(self, tenant): - """ Deletes a tenant from manageiq. + """Deletes a tenant from manageiq. Returns: dict with `msg` and `changed` """ try: url = f"{self.api_url}/tenants/{tenant['id']}" - result = self.client.post(url, action='delete') + result = self.client.post(url, action="delete") except Exception as e: self.module.fail_json(msg=f"failed to delete tenant {tenant['name']}: {e}") - if result['success'] is False: - self.module.fail_json(msg=result['message']) + if result["success"] is False: + self.module.fail_json(msg=result["message"]) - return dict(changed=True, msg=result['message']) + return dict(changed=True, msg=result["message"]) def edit_tenant(self, tenant, name, description): - """ Edit a manageiq tenant. + """Edit a manageiq tenant. 
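On the ancestry checks above: ManageIQ reports a tenant's ancestor ids as a "/"-joined string and omits the attribute entirely for the root tenant, so the direct parent is the last path component. A sketch with hypothetical ids:

    # Hypothetical ancestry value for a tenant nested under tenants 1 and 2.
    ancestry = "1/2"
    parent_id = int(ancestry.split("/")[-1])  # 2 == the direct parent's id
    print(parent_id)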
Returns: dict with `msg` and `changed` @@ -274,70 +271,64 @@ def edit_tenant(self, tenant, name, description): # check if we need to update ( compare_tenant is true is no difference found ) if self.compare_tenant(tenant, name, description): - return dict( - changed=False, - msg=f"tenant {tenant['name']} is not changed.", - tenant=tenant['_data']) + return dict(changed=False, msg=f"tenant {tenant['name']} is not changed.", tenant=tenant["_data"]) # try to update tenant try: - result = self.client.post(tenant['href'], action='edit', resource=resource) + result = self.client.post(tenant["href"], action="edit", resource=resource) except Exception as e: self.module.fail_json(msg=f"failed to update tenant {tenant['name']}: {e}") - return dict( - changed=True, - msg=f"successfully updated the tenant with id {tenant['id']}") + return dict(changed=True, msg=f"successfully updated the tenant with id {tenant['id']}") def create_tenant(self, name, description, parent_tenant): - """ Creates the tenant in manageiq. + """Creates the tenant in manageiq. Returns: dict with `msg`, `changed` and `tenant_id` """ - parent_id = parent_tenant['id'] + parent_id = parent_tenant["id"] # check for required arguments for key, value in dict(name=name, description=description, parent_id=parent_id).items(): - if value in (None, ''): + if value in (None, ""): self.module.fail_json(msg=f"missing required argument: {key}") - url = f'{self.api_url}/tenants' + url = f"{self.api_url}/tenants" - resource = {'name': name, 'description': description, 'parent': {'id': parent_id}} + resource = {"name": name, "description": description, "parent": {"id": parent_id}} try: - result = self.client.post(url, action='create', resource=resource) - tenant_id = result['results'][0]['id'] + result = self.client.post(url, action="create", resource=resource) + tenant_id = result["results"][0]["id"] except Exception as e: self.module.fail_json(msg=f"failed to create tenant {name}: {e}") return dict( - changed=True, - msg=f"successfully created tenant '{name}' with id '{tenant_id}'", - tenant_id=tenant_id) + changed=True, msg=f"successfully created tenant '{name}' with id '{tenant_id}'", tenant_id=tenant_id + ) def tenant_quota(self, tenant, quota_key): - """ Search for tenant quota object by tenant and quota_key. + """Search for tenant quota object by tenant and quota_key. Returns: the quota for the tenant, or None if the tenant quota was not found. """ tenant_quotas = self.client.get(f"{tenant['href']}/quotas?expand=resources&filter[]=name={quota_key}") - return tenant_quotas['resources'] + return tenant_quotas["resources"] def tenant_quotas(self, tenant): - """ Search for tenant quotas object by tenant. + """Search for tenant quotas object by tenant. Returns: the quotas for the tenant, or None if no tenant quotas were not found. """ tenant_quotas = self.client.get(f"{tenant['href']}/quotas?expand=resources") - return tenant_quotas['resources'] + return tenant_quotas["resources"] def update_tenant_quotas(self, tenant, quotas): - """ Creates the tenant quotas in manageiq. + """Creates the tenant quotas in manageiq. 
        Returns:
            dict with `msg` and `changed`
@@ -354,7 +345,7 @@ def update_tenant_quotas(self, tenant, quotas):
 
             if quota_value:
                 # Change the byte values to GB
-                if quota_key in ['storage_allocated', 'mem_allocated']:
+                if quota_key in ["storage_allocated", "mem_allocated"]:
                     quota_value_int = int(quota_value) * 1024 * 1024 * 1024
                 else:
                     quota_value_int = int(quota_value)
@@ -368,96 +359,87 @@ def update_tenant_quotas(self, tenant, quotas):
             else:
                 res = dict(changed=False, msg=f"tenant quota '{quota_key}' does not exist")
 
-            if res['changed']:
+            if res["changed"]:
                 changed = True
 
-            messages.append(res['msg'])
+            messages.append(res["msg"])
 
-        return dict(
-            changed=changed,
-            msg=', '.join(messages))
+        return dict(changed=changed, msg=", ".join(messages))
 
     def edit_tenant_quota(self, tenant, current_quota, quota_key, quota_value):
-        """ Update the tenant quotas in manageiq.
+        """Update the tenant quotas in manageiq.
 
         Returns:
             result
         """
-        if current_quota['value'] == quota_value:
-            return dict(
-                changed=False,
-                msg=f"tenant quota {quota_key} already has value {quota_value}")
+        if current_quota["value"] == quota_value:
+            return dict(changed=False, msg=f"tenant quota {quota_key} already has value {quota_value}")
         else:
             url = f"{tenant['href']}/quotas/{current_quota['id']}"
-            resource = {'value': quota_value}
+            resource = {"value": quota_value}
 
             try:
-                self.client.post(url, action='edit', resource=resource)
+                self.client.post(url, action="edit", resource=resource)
             except Exception as e:
                 self.module.fail_json(msg=f"failed to update tenant quota {quota_key}: {e}")
 
-            return dict(
-                changed=True,
-                msg=f"successfully updated tenant quota {quota_key}")
+            return dict(changed=True, msg=f"successfully updated tenant quota {quota_key}")
 
     def create_tenant_quota(self, tenant, quota_key, quota_value):
-        """ Creates the tenant quotas in manageiq.
+        """Creates the tenant quotas in manageiq.
 
         Returns:
             result
         """
         url = f"{tenant['href']}/quotas"
 
-        resource = {'name': quota_key, 'value': quota_value}
+        resource = {"name": quota_key, "value": quota_value}
 
         try:
-            self.client.post(url, action='create', resource=resource)
+            self.client.post(url, action="create", resource=resource)
         except Exception as e:
            self.module.fail_json(msg=f"failed to create tenant quota {quota_key}: {e}")
 
-        return dict(
-            changed=True,
-            msg=f"successfully created tenant quota {quota_key}")
+        return dict(changed=True, msg=f"successfully created tenant quota {quota_key}")
 
     def delete_tenant_quota(self, tenant, quota):
-        """ deletes the tenant quotas in manageiq.
+        """deletes the tenant quotas in manageiq.
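A quick sketch of the GB/byte round trip used by update_tenant_quotas above and create_tenant_quotas_response below: byte-based quotas (storage_allocated, mem_allocated) are accepted in GB, stored in bytes, and converted back for the result object:

    # Same arithmetic as the module: GB in, bytes stored, GB out.
    GIB = 1024 * 1024 * 1024

    def to_bytes(gb_value):
        return int(gb_value) * GIB

    def to_gb(byte_value):
        return float(byte_value) / GIB

    assert to_gb(to_bytes(10)) == 10.0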
Returns: result """ try: - result = self.client.post(quota['href'], action='delete') + result = self.client.post(quota["href"], action="delete") except Exception as e: self.module.fail_json(msg=f"failed to delete tenant quota '{quota['name']}': {e}") - return dict(changed=True, msg=result['message']) + return dict(changed=True, msg=result["message"]) def create_tenant_response(self, tenant, parent_tenant): - """ Creates the ansible result object from a manageiq tenant entity + """Creates the ansible result object from a manageiq tenant entity Returns: a dict with the tenant id, name, description, parent id, quota's """ - tenant_quotas = self.create_tenant_quotas_response(tenant['tenant_quotas']) + tenant_quotas = self.create_tenant_quotas_response(tenant["tenant_quotas"]) try: - ancestry = tenant['ancestry'] + ancestry = tenant["ancestry"] tenant_parent_id = ancestry.split("/")[-1] except AttributeError: # The root tenant does not return the ancestry attribute tenant_parent_id = None return dict( - id=tenant['id'], - name=tenant['name'], - description=tenant['description'], + id=tenant["id"], + name=tenant["name"], + description=tenant["description"], parent_id=tenant_parent_id, - quotas=tenant_quotas + quotas=tenant_quotas, ) @staticmethod def create_tenant_quotas_response(tenant_quotas): - """ Creates the ansible result object from a manageiq tenant_quotas entity + """Creates the ansible result object from a manageiq tenant_quotas entity Returns: a dict with the applied quotas, name and value @@ -468,36 +450,34 @@ def create_tenant_quotas_response(tenant_quotas): result = {} for quota in tenant_quotas: - if quota['unit'] == 'bytes': - value = float(quota['value']) / (1024 * 1024 * 1024) + if quota["unit"] == "bytes": + value = float(quota["value"]) / (1024 * 1024 * 1024) else: - value = quota['value'] - result[quota['name']] = value + value = quota["value"] + result[quota["name"]] = value return result def main(): argument_spec = dict( - name=dict(required=True, type='str'), - description=dict(required=True, type='str'), - parent_id=dict(type='int'), - parent=dict(type='str'), - state=dict(choices=['absent', 'present'], default='present'), - quotas=dict(type='dict', default={}) + name=dict(required=True, type="str"), + description=dict(required=True, type="str"), + parent_id=dict(type="int"), + parent=dict(type="str"), + state=dict(choices=["absent", "present"], default="present"), + quotas=dict(type="dict", default={}), ) # add the manageiq connection arguments to the arguments argument_spec.update(manageiq_argument_spec()) - module = AnsibleModule( - argument_spec=argument_spec - ) + module = AnsibleModule(argument_spec=argument_spec) - name = module.params['name'] - description = module.params['description'] - parent_id = module.params['parent_id'] - parent = module.params['parent'] - state = module.params['state'] - quotas = module.params['quotas'] + name = module.params["name"] + description = module.params["description"] + parent_id = module.params["parent_id"] + parent = module.params["parent"] + state = module.params["state"] + quotas = module.params["quotas"] manageiq = ManageIQ(module) manageiq_tenant = ManageIQTenant(manageiq) @@ -516,9 +496,7 @@ def main(): else: msg = f"tenant '{name}' with parent '{parent}' does not exist in manageiq" - res_args = dict( - changed=False, - msg=msg) + res_args = dict(changed=False, msg=msg) # tenant should exist if state == "present": @@ -529,17 +507,17 @@ def main(): # if we do not have a tenant, create it else: res_args = 
manageiq_tenant.create_tenant(name, description, parent_tenant) - tenant = manageiq.client.get_entity('tenants', res_args['tenant_id']) + tenant = manageiq.client.get_entity("tenants", res_args["tenant_id"]) # quotas as supplied and we have a tenant if quotas: tenant_quotas_res = manageiq_tenant.update_tenant_quotas(tenant, quotas) - if tenant_quotas_res['changed']: - res_args['changed'] = True - res_args['tenant_quotas_msg'] = tenant_quotas_res['msg'] + if tenant_quotas_res["changed"]: + res_args["changed"] = True + res_args["tenant_quotas_msg"] = tenant_quotas_res["msg"] - tenant.reload(expand='resources', attributes=['tenant_quotas']) - res_args['tenant'] = manageiq_tenant.create_tenant_response(tenant, parent_tenant) + tenant.reload(expand="resources", attributes=["tenant_quotas"]) + res_args["tenant"] = manageiq_tenant.create_tenant_response(tenant, parent_tenant) module.exit_json(**res_args) diff --git a/plugins/modules/manageiq_user.py b/plugins/modules/manageiq_user.py index d40bea2ed4e..1353a527251 100644 --- a/plugins/modules/manageiq_user.py +++ b/plugins/modules/manageiq_user.py @@ -135,7 +135,7 @@ class ManageIQUser: """ - Object to execute user management operations in manageiq. + Object to execute user management operations in manageiq. """ def __init__(self, manageiq): @@ -146,57 +146,56 @@ def __init__(self, manageiq): self.client = self.manageiq.client def group_id(self, description): - """ Search for group id by group description. + """Search for group id by group description. Returns: the group id, or send a module Fail signal if group not found. """ - group = self.manageiq.find_collection_resource_by('groups', description=description) + group = self.manageiq.find_collection_resource_by("groups", description=description) if not group: # group doesn't exist - self.module.fail_json( - msg=f"group {description} does not exist in manageiq") + self.module.fail_json(msg=f"group {description} does not exist in manageiq") - return group['id'] + return group["id"] def user(self, userid): - """ Search for user object by userid. + """Search for user object by userid. Returns: the user, or None if user not found. """ - return self.manageiq.find_collection_resource_by('users', userid=userid) + return self.manageiq.find_collection_resource_by("users", userid=userid) def compare_user(self, user, name, group_id, password, email): - """ Compare user fields with new field values. + """Compare user fields with new field values. Returns: false if user fields have some difference from new fields, true o/w. """ found_difference = ( - (name and user['name'] != name) or - (password is not None) or - (email and user['email'] != email) or - (group_id and user['current_group_id'] != group_id) + (name and user["name"] != name) + or (password is not None) + or (email and user["email"] != email) + or (group_id and user["current_group_id"] != group_id) ) return not found_difference def delete_user(self, user): - """ Deletes a user from manageiq. + """Deletes a user from manageiq. Returns: a short message describing the operation executed. """ try: url = f"{self.api_url}/users/{user['id']}" - result = self.client.post(url, action='delete') + result = self.client.post(url, action="delete") except Exception as e: self.module.fail_json(msg=f"failed to delete user {user['userid']}: {e}") - return dict(changed=True, msg=result['message']) + return dict(changed=True, msg=result["message"]) def edit_user(self, user, name, group, password, email): - """ Edit a user from manageiq. 
+ """Edit a user from manageiq. Returns: a short message describing the operation executed. @@ -204,40 +203,36 @@ def edit_user(self, user, name, group, password, email): group_id = None url = f"{self.api_url}/users/{user['id']}" - resource = dict(userid=user['userid']) + resource = dict(userid=user["userid"]) if group is not None: group_id = self.group_id(group) - resource['group'] = dict(id=group_id) + resource["group"] = dict(id=group_id) if name is not None: - resource['name'] = name + resource["name"] = name if email is not None: - resource['email'] = email + resource["email"] = email # if there is a password param, but 'update_password' is 'on_create' # then discard the password (since we're editing an existing user) - if self.module.params['update_password'] == 'on_create': + if self.module.params["update_password"] == "on_create": password = None if password is not None: - resource['password'] = password + resource["password"] = password # check if we need to update ( compare_user is true is no difference found ) if self.compare_user(user, name, group_id, password, email): - return dict( - changed=False, - msg=f"user {user['userid']} is not changed.") + return dict(changed=False, msg=f"user {user['userid']} is not changed.") # try to update user try: - result = self.client.post(url, action='edit', resource=resource) + result = self.client.post(url, action="edit", resource=resource) except Exception as e: self.module.fail_json(msg=f"failed to update user {user['userid']}: {e}") - return dict( - changed=True, - msg=f"successfully updated the user {user['userid']}: {result}") + return dict(changed=True, msg=f"successfully updated the user {user['userid']}: {result}") def create_user(self, userid, name, group, password, email): - """ Creates the user in manageiq. + """Creates the user in manageiq. 
Returns: the created user id, name, created_on timestamp, @@ -245,37 +240,34 @@ def create_user(self, userid, name, group, password, email): """ # check for required arguments for key, value in dict(name=name, group=group, password=password).items(): - if value in (None, ''): + if value in (None, ""): self.module.fail_json(msg=f"missing required argument: {key}") group_id = self.group_id(group) - url = f'{self.api_url}/users' + url = f"{self.api_url}/users" - resource = {'userid': userid, 'name': name, 'password': password, 'group': {'id': group_id}} + resource = {"userid": userid, "name": name, "password": password, "group": {"id": group_id}} if email is not None: - resource['email'] = email + resource["email"] = email # try to create a new user try: - result = self.client.post(url, action='create', resource=resource) + result = self.client.post(url, action="create", resource=resource) except Exception as e: self.module.fail_json(msg=f"failed to create user {userid}: {e}") - return dict( - changed=True, - msg=f"successfully created the user {userid}: {result['results']}") + return dict(changed=True, msg=f"successfully created the user {userid}: {result['results']}") def main(): argument_spec = dict( - userid=dict(required=True, type='str'), + userid=dict(required=True, type="str"), name=dict(), password=dict(no_log=True), group=dict(), email=dict(), - state=dict(choices=['absent', 'present'], default='present'), - update_password=dict(choices=['always', 'on_create'], - default='always'), + state=dict(choices=["absent", "present"], default="present"), + update_password=dict(choices=["always", "on_create"], default="always"), ) # add the manageiq connection arguments to the arguments argument_spec.update(manageiq_argument_spec()) @@ -284,12 +276,12 @@ def main(): argument_spec=argument_spec, ) - userid = module.params['userid'] - name = module.params['name'] - password = module.params['password'] - group = module.params['group'] - email = module.params['email'] - state = module.params['state'] + userid = module.params["userid"] + name = module.params["name"] + password = module.params["password"] + group = module.params["group"] + email = module.params["email"] + state = module.params["state"] manageiq = ManageIQ(module) manageiq_user = ManageIQUser(manageiq) @@ -303,9 +295,7 @@ def main(): res_args = manageiq_user.delete_user(user) # if we do not have a user, nothing to do else: - res_args = dict( - changed=False, - msg=f"user {userid}: does not exist in manageiq") + res_args = dict(changed=False, msg=f"user {userid}: does not exist in manageiq") # user should exist if state == "present": diff --git a/plugins/modules/mas.py b/plugins/modules/mas.py index 88c9eae18cd..8ecf0326096 100644 --- a/plugins/modules/mas.py +++ b/plugins/modules/mas.py @@ -107,91 +107,87 @@ from ansible_collections.community.general.plugins.module_utils.version import LooseVersion import platform -NOT_WORKING_MAC_VERSION_MAS_ACCOUNT = '12.0' +NOT_WORKING_MAC_VERSION_MAS_ACCOUNT = "12.0" -class Mas: +class Mas: def __init__(self, module): self.module = module # Initialize data properties - self.mas_path = self.module.get_bin_path('mas') + self.mas_path = self.module.get_bin_path("mas") self._checked_signin = False - self._mac_version = platform.mac_ver()[0] or '0.0' + self._mac_version = platform.mac_ver()[0] or "0.0" self._installed = None # Populated only if needed - self._outdated = None # Populated only if needed + self._outdated = None # Populated only if needed self.count_install = 0 self.count_upgrade = 0 
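For the version gates in check_mas_tool and check_signin below, a sketch assuming the collection's vendored LooseVersion matches distutils.version.LooseVersion semantics (distutils is gone on Python 3.12+, which is why the collection ships its own copy):

    from distutils.version import LooseVersion  # stand-in for the vendored class

    print(LooseVersion("1.8.6") >= LooseVersion("1.5.0"))   # True: mas is new enough
    print(LooseVersion("12.4") >= LooseVersion("12.0"))     # True: skip `mas account`
    print(LooseVersion("10.15.7") >= LooseVersion("12.0"))  # False: account check runs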
self.count_uninstall = 0 - self.result = { - 'changed': False - } + self.result = {"changed": False} self.check_mas_tool() def app_command(self, command, id): - ''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' ''' + """Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall'""" if not self.module.check_mode: - if command != 'uninstall': + if command != "uninstall": self.check_signin() rc, out, err = self.run([command, str(id)]) if rc != 0: - self.module.fail_json( - msg=f"Error running command '{command}' on app '{id}': {out.rstrip()}" - ) + self.module.fail_json(msg=f"Error running command '{command}' on app '{id}': {out.rstrip()}") # No error or dry run self.__dict__[f"count_{command}"] += 1 def check_mas_tool(self): - ''' Verifies that the `mas` tool is available in a recent version ''' + """Verifies that the `mas` tool is available in a recent version""" # Is the `mas` tool available at all? if not self.mas_path: - self.module.fail_json(msg='Required `mas` tool is not installed') + self.module.fail_json(msg="Required `mas` tool is not installed") # Is the version recent enough? - rc, out, err = self.run(['version']) - if rc != 0 or not out.strip() or LooseVersion(out.strip()) < LooseVersion('1.5.0'): + rc, out, err = self.run(["version"]) + if rc != 0 or not out.strip() or LooseVersion(out.strip()) < LooseVersion("1.5.0"): self.module.fail_json(msg=f"`mas` tool in version 1.5.0+ needed, got {out.strip()}") def check_signin(self): - ''' Verifies that the user is signed in to the Mac App Store ''' + """Verifies that the user is signed in to the Mac App Store""" # Only check this once per execution if self._checked_signin: return if LooseVersion(self._mac_version) >= LooseVersion(NOT_WORKING_MAC_VERSION_MAS_ACCOUNT): # Checking if user is signed-in is disabled due to https://github.com/mas-cli/mas/issues/417 - self.module.log('WARNING: You must be signed in via the Mac App Store GUI beforehand else error will occur') + self.module.log("WARNING: You must be signed in via the Mac App Store GUI beforehand else error will occur") else: - rc, out, err = self.run(['account']) - if out.split("\n", 1)[0].rstrip() == 'Not signed in': - self.module.fail_json(msg='You must be signed in to the Mac App Store') + rc, out, err = self.run(["account"]) + if out.split("\n", 1)[0].rstrip() == "Not signed in": + self.module.fail_json(msg="You must be signed in to the Mac App Store") self._checked_signin = True def exit(self): - ''' Exit with the data we have collected over time ''' + """Exit with the data we have collected over time""" msgs = [] if self.count_install > 0: - msgs.append(f'Installed {self.count_install} app(s)') + msgs.append(f"Installed {self.count_install} app(s)") if self.count_upgrade > 0: - msgs.append(f'Upgraded {self.count_upgrade} app(s)') + msgs.append(f"Upgraded {self.count_upgrade} app(s)") if self.count_uninstall > 0: - msgs.append(f'Uninstalled {self.count_uninstall} app(s)') + msgs.append(f"Uninstalled {self.count_uninstall} app(s)") if msgs: - self.result['changed'] = True - self.result['msg'] = ', '.join(msgs) + self.result["changed"] = True + self.result["msg"] = ", ".join(msgs) self.module.exit_json(**self.result) def get_current_state(self, command): - ''' Returns the list of all app IDs; command can either be 'list' or 'outdated' ''' + """Returns the list of all app IDs; command can either be 'list' or 'outdated'""" rc, raw_apps, err = self.run([command]) rows = raw_apps.split("\n") @@ -200,55 +196,55 
@@ def get_current_state(self, command): apps = [] for r in rows: # Format: "123456789 App Name" - r = r.split(' ', 1) + r = r.split(" ", 1) if len(r) == 2: apps.append(int(r[0])) return apps def installed(self): - ''' Returns the list of installed apps ''' + """Returns the list of installed apps""" # Populate cache if not already done if self._installed is None: - self._installed = self.get_current_state('list') + self._installed = self.get_current_state("list") return self._installed def is_installed(self, id): - ''' Checks whether the given app is installed ''' + """Checks whether the given app is installed""" return int(id) in self.installed() def is_outdated(self, id): - ''' Checks whether the given app is installed, but outdated ''' + """Checks whether the given app is installed, but outdated""" return int(id) in self.outdated() def outdated(self): - ''' Returns the list of installed, but outdated apps ''' + """Returns the list of installed, but outdated apps""" # Populate cache if not already done if self._outdated is None: - self._outdated = self.get_current_state('outdated') + self._outdated = self.get_current_state("outdated") return self._outdated def run(self, cmd): - ''' Runs a command of the `mas` tool ''' + """Runs a command of the `mas` tool""" cmd.insert(0, self.mas_path) return self.module.run_command(cmd, False) def upgrade_all(self): - ''' Upgrades all installed apps and sets the correct result data ''' + """Upgrades all installed apps and sets the correct result data""" outdated = self.outdated() if not self.module.check_mode: self.check_signin() - rc, out, err = self.run(['upgrade']) + rc, out, err = self.run(["upgrade"]) if rc != 0: self.module.fail_json(msg=f"Could not upgrade all apps: {out.rstrip()}") @@ -258,41 +254,41 @@ def upgrade_all(self): def main(): module = AnsibleModule( argument_spec=dict( - id=dict(type='list', elements='int'), - state=dict(type='str', default='present', choices=['absent', 'latest', 'present']), - upgrade_all=dict(type='bool', default=False, aliases=['upgrade']), + id=dict(type="list", elements="int"), + state=dict(type="str", default="present", choices=["absent", "latest", "present"]), + upgrade_all=dict(type="bool", default=False, aliases=["upgrade"]), ), - supports_check_mode=True + supports_check_mode=True, ) mas = Mas(module) - if module.params['id']: - apps = module.params['id'] + if module.params["id"]: + apps = module.params["id"] else: apps = [] - state = module.params['state'] - upgrade = module.params['upgrade_all'] + state = module.params["state"] + upgrade = module.params["upgrade_all"] # Run operations on the given app IDs for app in sorted(set(apps)): - if state == 'present': + if state == "present": if not mas.is_installed(app): - mas.app_command('install', app) + mas.app_command("install", app) - elif state == 'absent': + elif state == "absent": if mas.is_installed(app): # Ensure we are root if os.getuid() != 0: module.fail_json(msg="Uninstalling apps requires root permissions ('become: true')") - mas.app_command('uninstall', app) + mas.app_command("uninstall", app) - elif state == 'latest': + elif state == "latest": if not mas.is_installed(app): - mas.app_command('install', app) + mas.app_command("install", app) elif mas.is_outdated(app): - mas.app_command('upgrade', app) + mas.app_command("upgrade", app) # Upgrade all apps if requested mas._outdated = None # Clear cache @@ -303,5 +299,5 @@ def main(): mas.exit() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/matrix.py 
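The row parsing in get_current_state above works because every `mas list` / `mas outdated` row starts with the numeric app id; a sketch with hypothetical command output:

    raw_apps = "497799835 Xcode (14.2)\n409183694 Keynote (12.2)"  # hypothetical
    apps = []
    for r in raw_apps.split("\n"):
        r = r.split(" ", 1)  # "123456789 App Name" -> ["123456789", "App Name"]
        if len(r) == 2:
            apps.append(int(r[0]))
    print(apps)  # [497799835, 409183694]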
b/plugins/modules/matrix.py index 5b643357f5b..97051b49ae5 100644 --- a/plugins/modules/matrix.py +++ b/plugins/modules/matrix.py @@ -94,45 +94,42 @@ def run_module(): module_args = dict( - msg_plain=dict(type='str', required=True), - msg_html=dict(type='str', required=True), - room_id=dict(type='str', required=True), - hs_url=dict(type='str', required=True), - token=dict(type='str', no_log=True), - user_id=dict(type='str'), - password=dict(type='str', no_log=True), + msg_plain=dict(type="str", required=True), + msg_html=dict(type="str", required=True), + room_id=dict(type="str", required=True), + hs_url=dict(type="str", required=True), + token=dict(type="str", no_log=True), + user_id=dict(type="str"), + password=dict(type="str", no_log=True), ) - result = dict( - changed=False, - message='' - ) + result = dict(changed=False, message="") module = AnsibleModule( argument_spec=module_args, - mutually_exclusive=[['password', 'token']], - required_one_of=[['password', 'token']], - required_together=[['user_id', 'password']], - supports_check_mode=True + mutually_exclusive=[["password", "token"]], + required_one_of=[["password", "token"]], + required_together=[["user_id", "password"]], + supports_check_mode=True, ) if not matrix_found: - module.fail_json(msg=missing_required_lib('matrix-client'), exception=MATRIX_IMP_ERR) + module.fail_json(msg=missing_required_lib("matrix-client"), exception=MATRIX_IMP_ERR) if module.check_mode: return result # create a client object - client = MatrixClient(module.params['hs_url']) - if module.params['token'] is not None: - client.api.token = module.params['token'] + client = MatrixClient(module.params["hs_url"]) + if module.params["token"] is not None: + client.api.token = module.params["token"] else: - client.login(module.params['user_id'], module.params['password'], sync=False) + client.login(module.params["user_id"], module.params["password"], sync=False) # make sure we are in a given room and return a room object for it - room = client.join_room(module.params['room_id']) + room = client.join_room(module.params["room_id"]) # send an html formatted messages - room.send_html(module.params['msg_html'], module.params['msg_plain']) + room.send_html(module.params["msg_html"], module.params["msg_plain"]) module.exit_json(**result) @@ -141,5 +138,5 @@ def main(): run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/mattermost.py b/plugins/modules/mattermost.py index c828ce61f90..5c8635ff2e3 100644 --- a/plugins/modules/mattermost.py +++ b/plugins/modules/mattermost.py @@ -132,18 +132,18 @@ def main(): module = AnsibleModule( supports_check_mode=True, argument_spec=dict( - url=dict(type='str', required=True), - api_key=dict(type='str', required=True, no_log=True), - text=dict(type='str'), - channel=dict(type='str'), - username=dict(type='str', default='Ansible'), - icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'), - priority=dict(type='str', choices=['important', 'urgent']), - validate_certs=dict(default=True, type='bool'), - attachments=dict(type='list', elements='dict'), + url=dict(type="str", required=True), + api_key=dict(type="str", required=True, no_log=True), + text=dict(type="str"), + channel=dict(type="str"), + username=dict(type="str", default="Ansible"), + icon_url=dict(type="str", default="https://docs.ansible.com/favicon.ico"), + priority=dict(type="str", choices=["important", "urgent"]), + validate_certs=dict(default=True, type="bool"), + attachments=dict(type="list", 
elements="dict"), ), required_one_of=[ - ('text', 'attachments'), + ("text", "attachments"), ], ) # init return dict @@ -151,23 +151,23 @@ def main(): # define webhook webhook_url = f"{module.params['url']}/hooks/{module.params['api_key']}" - result['webhook_url'] = webhook_url + result["webhook_url"] = webhook_url # define payload payload = {} - for param in ['text', 'channel', 'username', 'icon_url', 'attachments']: + for param in ["text", "channel", "username", "icon_url", "attachments"]: if module.params[param] is not None: payload[param] = module.params[param] - if module.params['priority'] is not None: - payload['priority'] = {'priority': module.params['priority']} + if module.params["priority"] is not None: + payload["priority"] = {"priority": module.params["priority"]} payload = module.jsonify(payload) - result['payload'] = payload + result["payload"] = payload # http headers headers = { - 'Content-Type': 'application/json', - 'Accept': 'application/json', + "Content-Type": "application/json", + "Accept": "application/json", } # notes: @@ -177,17 +177,17 @@ def main(): # send request if not in test mode if module.check_mode is False: - response, info = fetch_url(module=module, url=webhook_url, headers=headers, method='POST', data=payload) + response, info = fetch_url(module=module, url=webhook_url, headers=headers, method="POST", data=payload) # something's wrong - if info['status'] != 200: + if info["status"] != 200: # some problem - result['msg'] = f"Failed to send mattermost message, the error was: {info['msg']}" + result["msg"] = f"Failed to send mattermost message, the error was: {info['msg']}" module.fail_json(**result) # Looks good module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/maven_artifact.py b/plugins/modules/maven_artifact.py index 9e24e070175..b9ea99e0863 100644 --- a/plugins/modules/maven_artifact.py +++ b/plugins/modules/maven_artifact.py @@ -249,6 +249,7 @@ LXML_ETREE_IMP_ERR = None try: from lxml import etree + HAS_LXML_ETREE = True except ImportError: LXML_ETREE_IMP_ERR = traceback.format_exc() @@ -257,6 +258,7 @@ BOTO_IMP_ERR = None try: import boto3 + HAS_BOTO = True except ImportError: BOTO_IMP_ERR = traceback.format_exc() @@ -265,6 +267,7 @@ SEMANTIC_VERSION_IMP_ERR = None try: from semantic_version import Version, Spec + HAS_SEMANTIC_VERSION = True except ImportError: SEMANTIC_VERSION_IMP_ERR = traceback.format_exc() @@ -277,11 +280,11 @@ def split_pre_existing_dir(dirname): - ''' + """ Return the first pre-existing directory and a list of the new directories that will be created. 
- ''' + """ head, tail = os.path.split(dirname) - b_head = to_bytes(head, errors='surrogate_or_strict') + b_head = to_bytes(head, errors="surrogate_or_strict") if not os.path.exists(b_head): if head == dirname: return None, [head] @@ -294,23 +297,25 @@ def split_pre_existing_dir(dirname): def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed): - ''' + """ Walk the new directories list and make sure that permissions are as we would expect - ''' + """ if new_directory_list: first_sub_dir = new_directory_list.pop(0) if not pre_existing_dir: working_dir = first_sub_dir else: working_dir = os.path.join(pre_existing_dir, first_sub_dir) - directory_args['path'] = working_dir + directory_args["path"] = working_dir changed = module.set_fs_attributes_if_different(directory_args, changed) - changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed) + changed = adjust_recursive_directory_permissions( + working_dir, new_directory_list, module, directory_args, changed + ) return changed class Artifact: - def __init__(self, group_id, artifact_id, version, version_by_spec, classifier='', extension='jar'): + def __init__(self, group_id, artifact_id, version, version_by_spec, classifier="", extension="jar"): if not group_id: raise ValueError("group_id must be set") if not artifact_id: @@ -464,16 +469,18 @@ def find_uri_for_artifact(self, artifact): for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"): classifier = snapshotArtifact.xpath("classifier/text()") - artifact_classifier = classifier[0] if classifier else '' + artifact_classifier = classifier[0] if classifier else "" extension = snapshotArtifact.xpath("extension/text()") - artifact_extension = extension[0] if extension else '' + artifact_extension = extension[0] if extension else "" if artifact_classifier == artifact.classifier and artifact_extension == artifact.extension: return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0]) timestamp_xmlpath = xml.xpath("/metadata/versioning/snapshot/timestamp/text()") if timestamp_xmlpath: timestamp = timestamp_xmlpath[0] build_number = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0] - return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", f"{timestamp}-{build_number}")) + return self._uri_for_artifact( + artifact, artifact.version.replace("SNAPSHOT", f"{timestamp}-{build_number}") + ) return self._uri_for_artifact(artifact, artifact.version) @@ -483,7 +490,11 @@ def _uri_for_artifact(self, artifact, version=None): elif not artifact.is_snapshot(): version = artifact.version if artifact.classifier: - return posixpath.join(self.base, artifact.path(), f"{artifact.artifact_id}-{version}-{artifact.classifier}.{artifact.extension}") + return posixpath.join( + self.base, + artifact.path(), + f"{artifact.artifact_id}-{version}-{artifact.classifier}.{artifact.extension}", + ) return posixpath.join(self.base, artifact.path(), f"{artifact.artifact_id}-{version}.{artifact.extension}") @@ -492,7 +503,7 @@ def _getContent(self, url, failmsg, force=True): if self.local: parsed_url = urlparse(url) if os.path.isfile(parsed_url.path): - with io.open(parsed_url.path, 'rb') as f: + with io.open(parsed_url.path, "rb") as f: return f.read() if force: raise ValueError(f"{failmsg} because can not find file: {url}") @@ -507,42 +518,48 @@ def _request(self, url, failmsg, force=True): url_to_use = url parsed_url = 
urlparse(url) - if parsed_url.scheme == 's3': + if parsed_url.scheme == "s3": parsed_url = urlparse(url) bucket_name = parsed_url.netloc key_name = parsed_url.path[1:] - client = boto3.client('s3', aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', '')) - url_to_use = client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': key_name}, ExpiresIn=10) + client = boto3.client( + "s3", + aws_access_key_id=self.module.params.get("username", ""), + aws_secret_access_key=self.module.params.get("password", ""), + ) + url_to_use = client.generate_presigned_url( + "get_object", Params={"Bucket": bucket_name, "Key": key_name}, ExpiresIn=10 + ) - req_timeout = self.module.params.get('timeout') + req_timeout = self.module.params.get("timeout") # Hack to add parameters in the way that fetch_url expects - self.module.params['url_username'] = self.module.params.get('username', '') - self.module.params['url_password'] = self.module.params.get('password', '') - self.module.params['http_agent'] = self.user_agent + self.module.params["url_username"] = self.module.params.get("username", "") + self.module.params["url_password"] = self.module.params.get("password", "") + self.module.params["http_agent"] = self.user_agent kwargs = {} - if self.module.params['unredirected_headers']: - kwargs['unredirected_headers'] = self.module.params['unredirected_headers'] - - response, info = fetch_url( - self.module, - url_to_use, - timeout=req_timeout, - headers=self.headers, - **kwargs - ) + if self.module.params["unredirected_headers"]: + kwargs["unredirected_headers"] = self.module.params["unredirected_headers"] + + response, info = fetch_url(self.module, url_to_use, timeout=req_timeout, headers=self.headers, **kwargs) - if info['status'] == 200: + if info["status"] == 200: return response if force: raise ValueError(f"{failmsg} because of {info['msg']} for URL {url_to_use}") return None - def download(self, tmpdir, artifact, verify_download, filename=None, checksum_alg='md5'): + def download(self, tmpdir, artifact, verify_download, filename=None, checksum_alg="md5"): if (not artifact.version and not artifact.version_by_spec) or artifact.version == "latest": - artifact = Artifact(artifact.group_id, artifact.artifact_id, self.find_latest_version_available(artifact), None, - artifact.classifier, artifact.extension) + artifact = Artifact( + artifact.group_id, + artifact.artifact_id, + self.find_latest_version_available(artifact), + None, + artifact.classifier, + artifact.extension, + ) url = self.find_uri_for_artifact(artifact) tempfd, tempname = tempfile.mkstemp(dir=tmpdir) @@ -556,7 +573,7 @@ def download(self, tmpdir, artifact, verify_download, filename=None, checksum_al return f"Can not find local file: {parsed_url.path}" else: response = self._request(url, f"Failed to download artifact {artifact}") - with os.fdopen(tempfd, 'wb') as f: + with os.fdopen(tempfd, "wb") as f: shutil.copyfileobj(response, f) if verify_download: @@ -573,7 +590,7 @@ def download(self, tmpdir, artifact, verify_download, filename=None, checksum_al shutil.move(tempname, artifact.get_filename(filename)) return None - def is_invalid_checksum(self, file, remote_url, checksum_alg='md5'): + def is_invalid_checksum(self, file, remote_url, checksum_alg="md5"): if os.path.exists(file): local_checksum = self._local_checksum(checksum_alg, file) if self.local: @@ -581,7 +598,10 @@ def is_invalid_checksum(self, file, remote_url, checksum_alg='md5'): remote_checksum = 
self._local_checksum(checksum_alg, parsed_url.path) else: try: - remote_checksum = to_text(self._getContent(f"{remote_url}.{checksum_alg}", "Failed to retrieve checksum", False), errors='strict') + remote_checksum = to_text( + self._getContent(f"{remote_url}.{checksum_alg}", "Failed to retrieve checksum", False), + errors="strict", + ) except UnicodeError as e: return f"Cannot retrieve a valid {checksum_alg} checksum from {remote_url}: {to_native(e)}" if not remote_checksum: @@ -597,19 +617,21 @@ def is_invalid_checksum(self, file, remote_url, checksum_alg='md5'): if local_checksum.lower() == remote_checksum.lower(): return None else: - return f"Checksum does not match: we computed {local_checksum} but the repository states {remote_checksum}" + return ( + f"Checksum does not match: we computed {local_checksum} but the repository states {remote_checksum}" + ) return f"Path does not exist: {file}" def _local_checksum(self, checksum_alg, file): - if checksum_alg.lower() == 'md5': + if checksum_alg.lower() == "md5": hash = hashlib.md5() - elif checksum_alg.lower() == 'sha1': + elif checksum_alg.lower() == "sha1": hash = hashlib.sha1() else: raise ValueError(f"Unknown checksum_alg {checksum_alg}") - with io.open(file, 'rb') as f: - for chunk in iter(lambda: f.read(8192), b''): + with io.open(file, "rb") as f: + for chunk in iter(lambda: f.read(8192), b""): hash.update(chunk) return hash.hexdigest() @@ -621,38 +643,38 @@ def main(): artifact_id=dict(required=True), version=dict(), version_by_spec=dict(), - classifier=dict(default=''), - extension=dict(default='jar'), - repository_url=dict(default='https://repo1.maven.org/maven2'), - username=dict(aliases=['aws_secret_key']), - password=dict(no_log=True, aliases=['aws_secret_access_key']), - headers=dict(type='dict'), - force_basic_auth=dict(default=False, type='bool'), + classifier=dict(default=""), + extension=dict(default="jar"), + repository_url=dict(default="https://repo1.maven.org/maven2"), + username=dict(aliases=["aws_secret_key"]), + password=dict(no_log=True, aliases=["aws_secret_access_key"]), + headers=dict(type="dict"), + force_basic_auth=dict(default=False, type="bool"), state=dict(default="present", choices=["present", "absent"]), - timeout=dict(default=10, type='int'), + timeout=dict(default=10, type="int"), dest=dict(type="path", required=True), - validate_certs=dict(default=True, type='bool'), + validate_certs=dict(default=True, type="bool"), client_cert=dict(type="path"), client_key=dict(type="path"), - keep_name=dict(default=False, type='bool'), - verify_checksum=dict(default='download', choices=['never', 'download', 'change', 'always']), - checksum_alg=dict(default='md5', choices=['md5', 'sha1']), - unredirected_headers=dict(type='list', elements='str'), - directory_mode=dict(type='str'), + keep_name=dict(default=False, type="bool"), + verify_checksum=dict(default="download", choices=["never", "download", "change", "always"]), + checksum_alg=dict(default="md5", choices=["md5", "sha1"]), + unredirected_headers=dict(type="list", elements="str"), + directory_mode=dict(type="str"), ), add_file_common_args=True, - mutually_exclusive=([('version', 'version_by_spec')]) + mutually_exclusive=([("version", "version_by_spec")]), ) - if module.params['unredirected_headers'] is None: + if module.params["unredirected_headers"] is None: # if the user did not supply unredirected params, we use the default - module.params['unredirected_headers'] = ['Authorization', 'Cookie'] + module.params["unredirected_headers"] = ["Authorization", 
"Cookie"] if not HAS_LXML_ETREE: - module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR) + module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_ETREE_IMP_ERR) - if module.params['version_by_spec'] and not HAS_SEMANTIC_VERSION: - module.fail_json(msg=missing_required_lib('semantic_version'), exception=SEMANTIC_VERSION_IMP_ERR) + if module.params["version_by_spec"] and not HAS_SEMANTIC_VERSION: + module.fail_json(msg=missing_required_lib("semantic_version"), exception=SEMANTIC_VERSION_IMP_ERR) repository_url = module.params["repository_url"] if not repository_url: @@ -660,13 +682,14 @@ def main(): try: parsed_url = urlparse(repository_url) except AttributeError as e: - module.fail_json(msg=f'url parsing went wrong {e}') + module.fail_json(msg=f"url parsing went wrong {e}") local = parsed_url.scheme == "file" - if parsed_url.scheme == 's3' and not HAS_BOTO: - module.fail_json(msg=missing_required_lib('boto3', reason='when using s3:// repository URLs'), - exception=BOTO_IMP_ERR) + if parsed_url.scheme == "s3" and not HAS_BOTO: + module.fail_json( + msg=missing_required_lib("boto3", reason="when using s3:// repository URLs"), exception=BOTO_IMP_ERR + ) group_id = module.params["group_id"] artifact_id = module.params["artifact_id"] @@ -674,14 +697,14 @@ def main(): version_by_spec = module.params["version_by_spec"] classifier = module.params["classifier"] extension = module.params["extension"] - headers = module.params['headers'] + headers = module.params["headers"] state = module.params["state"] dest = module.params["dest"] - b_dest = to_bytes(dest, errors='surrogate_or_strict') + b_dest = to_bytes(dest, errors="surrogate_or_strict") keep_name = module.params["keep_name"] verify_checksum = module.params["verify_checksum"] - verify_download = verify_checksum in ['download', 'always'] - verify_change = verify_checksum in ['change', 'always'] + verify_download = verify_checksum in ["download", "always"] + verify_change = verify_checksum in ["change", "always"] checksum_alg = module.params["checksum_alg"] downloader = MavenDownloader(module, repository_url, local, headers) @@ -698,21 +721,23 @@ def main(): prev_state = "absent" if dest.endswith(os.sep): - b_dest = to_bytes(dest, errors='surrogate_or_strict') + b_dest = to_bytes(dest, errors="surrogate_or_strict") if not os.path.exists(b_dest): (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dest) os.makedirs(b_dest) directory_args = module.load_file_common_arguments(module.params) directory_mode = module.params["directory_mode"] if directory_mode is not None: - directory_args['mode'] = directory_mode + directory_args["mode"] = directory_mode else: - directory_args['mode'] = None - changed = adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed) + directory_args["mode"] = None + changed = adjust_recursive_directory_permissions( + pre_existing_dir, new_directory_list, module, directory_args, changed + ) if os.path.isdir(b_dest): version_part = version - if version == 'latest': + if version == "latest": version_part = downloader.find_latest_version_available(artifact) elif version_by_spec: version_part = downloader.find_version_by_spec(artifact) @@ -720,9 +745,12 @@ def main(): filename = f"{artifact_id}{(f'-{version_part}' if keep_name else '')}{(f'-{classifier}' if classifier else '')}.{extension}" dest = posixpath.join(dest, filename) - b_dest = to_bytes(dest, errors='surrogate_or_strict') + b_dest = to_bytes(dest, 
errors="surrogate_or_strict") - if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_checksum(dest, downloader.find_uri_for_artifact(artifact), checksum_alg)): + if os.path.lexists(b_dest) and ( + (not verify_change) + or not downloader.is_invalid_checksum(dest, downloader.find_uri_for_artifact(artifact), checksum_alg) + ): prev_state = "present" if prev_state == "absent": @@ -738,11 +766,20 @@ def main(): file_args = module.load_file_common_arguments(module.params, path=dest) changed = module.set_fs_attributes_if_different(file_args, changed) if changed: - module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, - extension=extension, repository_url=repository_url, changed=changed) + module.exit_json( + state=state, + dest=dest, + group_id=group_id, + artifact_id=artifact_id, + version=version, + classifier=classifier, + extension=extension, + repository_url=repository_url, + changed=changed, + ) else: module.exit_json(state=state, dest=dest, changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/memset_dns_reload.py b/plugins/modules/memset_dns_reload.py index e7c9c70ea40..9c90fb521ee 100644 --- a/plugins/modules/memset_dns_reload.py +++ b/plugins/modules/memset_dns_reload.py @@ -87,25 +87,25 @@ def poll_reload_status(api_key=None, job_id=None, payload=None): - ''' + """ We poll the `job.status` endpoint every 5 seconds up to a maximum of 6 times. This is a relatively arbitrary choice of timeout, however requests rarely take longer than 15 seconds to complete. - ''' + """ memset_api, stderr, msg = None, None, None - payload['id'] = job_id + payload["id"] = job_id - api_method = 'job.status' + api_method = "job.status" _has_failed, _msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload) - while not response.json()['finished']: + while not response.json()["finished"]: counter = 0 while counter < 6: sleep(5) _has_failed, msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload) counter += 1 - if response.json()['error']: + if response.json()["error"]: # the reload job was submitted but polling failed. Don't return this as an overall task failure. stderr = "Reload submitted successfully, but the Memset API returned a job error when attempting to poll the reload status." else: @@ -116,27 +116,27 @@ def poll_reload_status(api_key=None, job_id=None, payload=None): def reload_dns(args=None): - ''' + """ DNS reloads are a single API call and therefore there's not much which can go wrong outside of auth errors. - ''' + """ retvals, payload = dict(), dict() has_changed, has_failed = False, False memset_api, msg, stderr = None, None, None - api_method = 'dns.reload' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + api_method = "dns.reload" + has_failed, msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method) if has_failed: # this is the first time the API is called; incorrect credentials will # manifest themselves at this point so we need to ensure the user is # informed of the reason. 
- retvals['failed'] = has_failed + retvals["failed"] = has_failed if response.status_code is not None: - retvals['memset_api'] = response.json() + retvals["memset_api"] = response.json() else: - retvals['stderr'] = response.stderr - retvals['msg'] = msg + retvals["stderr"] = response.stderr + retvals["msg"] = msg return retvals # set changed to true if the reload request was accepted. @@ -145,15 +145,15 @@ def reload_dns(args=None): # empty msg var as we don't want to return the API's json response twice. msg = None - if args['poll']: + if args["poll"]: # hand off to the poll function. - job_id = response.json()['id'] - memset_api, msg, stderr = poll_reload_status(api_key=args['api_key'], job_id=job_id, payload=payload) + job_id = response.json()["id"] + memset_api, msg, stderr = poll_reload_status(api_key=args["api_key"], job_id=job_id, payload=payload) # assemble return variables. - retvals['failed'] = has_failed - retvals['changed'] = has_changed - for val in ['msg', 'stderr', 'memset_api']: + retvals["failed"] = has_failed + retvals["changed"] = has_changed + for val in ["msg", "stderr", "memset_api"]: if val is not None: retvals[val] = eval(val) @@ -163,11 +163,8 @@ def reload_dns(args=None): def main(): global module module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, type='str', no_log=True), - poll=dict(default=False, type='bool') - ), - supports_check_mode=False + argument_spec=dict(api_key=dict(required=True, type="str", no_log=True), poll=dict(default=False, type="bool")), + supports_check_mode=False, ) # populate the dict with the user-provided vars. @@ -175,11 +172,11 @@ def main(): retvals = reload_dns(args) - if retvals['failed']: + if retvals["failed"]: module.fail_json(**retvals) else: module.exit_json(**retvals) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/memset_memstore_info.py b/plugins/modules/memset_memstore_info.py index fafe89c67bb..2129e1a4cf7 100644 --- a/plugins/modules/memset_memstore_info.py +++ b/plugins/modules/memset_memstore_info.py @@ -110,37 +110,37 @@ def get_facts(args=None): - ''' + """ Performs a simple API call and returns a JSON blob. - ''' + """ retvals, payload = dict(), dict() has_changed, has_failed = False, False msg, stderr, memset_api = None, None, None - payload['name'] = args['name'] + payload["name"] = args["name"] - api_method = 'memstore.usage' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + api_method = "memstore.usage" + has_failed, msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method, payload=payload) if has_failed: # this is the first time the API is called; incorrect credentials will # manifest themselves at this point so we need to ensure the user is # informed of the reason. 
- retvals['failed'] = has_failed - retvals['msg'] = msg + retvals["failed"] = has_failed + retvals["msg"] = msg if response.status_code is not None: - retvals['stderr'] = f"API returned an error: {response.status_code}" + retvals["stderr"] = f"API returned an error: {response.status_code}" else: - retvals['stderr'] = f"{response.stderr}" + retvals["stderr"] = f"{response.stderr}" return retvals # we don't want to return the same thing twice msg = None memset_api = response.json() - retvals['changed'] = has_changed - retvals['failed'] = has_failed - for val in ['msg', 'memset_api']: + retvals["changed"] = has_changed + retvals["failed"] = has_failed + for val in ["msg", "memset_api"]: if val is not None: retvals[val] = eval(val) @@ -150,10 +150,7 @@ def get_facts(args=None): def main(): global module module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, type='str', no_log=True), - name=dict(required=True, type='str') - ), + argument_spec=dict(api_key=dict(required=True, type="str", no_log=True), name=dict(required=True, type="str")), supports_check_mode=True, ) @@ -162,11 +159,11 @@ def main(): retvals = get_facts(args) - if retvals['failed']: + if retvals["failed"]: module.fail_json(**retvals) else: module.exit_json(**retvals) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/memset_server_info.py b/plugins/modules/memset_server_info.py index cfccaf28852..b3b6e9dcef2 100644 --- a/plugins/modules/memset_server_info.py +++ b/plugins/modules/memset_server_info.py @@ -241,37 +241,37 @@ def get_facts(args=None): - ''' + """ Performs a simple API call and returns a JSON blob. - ''' + """ retvals, payload = dict(), dict() has_changed, has_failed = False, False msg, stderr, memset_api = None, None, None - payload['name'] = args['name'] + payload["name"] = args["name"] - api_method = 'server.info' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + api_method = "server.info" + has_failed, msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method, payload=payload) if has_failed: # this is the first time the API is called; incorrect credentials will # manifest themselves at this point so we need to ensure the user is # informed of the reason. 
- retvals['failed'] = has_failed - retvals['msg'] = msg + retvals["failed"] = has_failed + retvals["msg"] = msg if response.status_code is not None: - retvals['stderr'] = f"API returned an error: {response.status_code}" + retvals["stderr"] = f"API returned an error: {response.status_code}" else: - retvals['stderr'] = f"{response.stderr}" + retvals["stderr"] = f"{response.stderr}" return retvals # we don't want to return the same thing twice msg = None memset_api = response.json() - retvals['changed'] = has_changed - retvals['failed'] = has_failed - for val in ['msg', 'memset_api']: + retvals["changed"] = has_changed + retvals["failed"] = has_failed + for val in ["msg", "memset_api"]: if val is not None: retvals[val] = eval(val) @@ -281,10 +281,7 @@ def get_facts(args=None): def main(): global module module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True, type='str', no_log=True), - name=dict(required=True, type='str') - ), + argument_spec=dict(api_key=dict(required=True, type="str", no_log=True), name=dict(required=True, type="str")), supports_check_mode=True, ) @@ -293,11 +290,11 @@ def main(): retvals = get_facts(args) - if retvals['failed']: + if retvals["failed"]: module.fail_json(**retvals) else: module.exit_json(**retvals) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/memset_zone.py b/plugins/modules/memset_zone.py index 3255e07a61a..4397304b23b 100644 --- a/plugins/modules/memset_zone.py +++ b/plugins/modules/memset_zone.py @@ -115,113 +115,113 @@ def api_validation(args=None): - ''' + """ Perform some validation which will be enforced by Memset's API (see: https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create) - ''' + """ # zone domain length must be less than 250 chars. - if len(args['name']) > 250: - stderr = 'Zone name must be less than 250 characters in length.' + if len(args["name"]) > 250: + stderr = "Zone name must be less than 250 characters in length." module.fail_json(failed=True, msg=stderr, stderr=stderr) def check(args=None): - ''' + """ Support for running with check mode. - ''' + """ retvals = dict() - api_method = 'dns.zone_list' - has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + api_method = "dns.zone_list" + has_failed, _msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method) - zone_exists, counter = check_zone(data=response, name=args['name']) + zone_exists, counter = check_zone(data=response, name=args["name"]) # set changed to true if the operation would cause a change. - has_changed = ((zone_exists and args['state'] == 'absent') or (not zone_exists and args['state'] == 'present')) + has_changed = (zone_exists and args["state"] == "absent") or (not zone_exists and args["state"] == "present") - retvals['changed'] = has_changed - retvals['failed'] = has_failed + retvals["changed"] = has_changed + retvals["failed"] = has_failed return retvals def create_zone(args=None, zone_exists=None, payload=None): - ''' + """ At this point we already know whether the zone exists, so we just need to make the API reflect the desired state. 
- ''' + """ has_changed, has_failed = False, False msg, memset_api = None, None if not zone_exists: - payload['ttl'] = args['ttl'] - payload['nickname'] = args['name'] - api_method = 'dns.zone_create' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + payload["ttl"] = args["ttl"] + payload["nickname"] = args["name"] + api_method = "dns.zone_create" + has_failed, msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method, payload=payload) if not has_failed: has_changed = True else: - api_method = 'dns.zone_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + api_method = "dns.zone_list" + _has_failed, _msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method) for zone in response.json(): - if zone['nickname'] == args['name']: + if zone["nickname"] == args["name"]: break - if zone['ttl'] != args['ttl']: + if zone["ttl"] != args["ttl"]: # update the zone if the desired TTL is different. - payload['id'] = zone['id'] - payload['ttl'] = args['ttl'] - api_method = 'dns.zone_update' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + payload["id"] = zone["id"] + payload["ttl"] = args["ttl"] + api_method = "dns.zone_update" + has_failed, msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method, payload=payload) if not has_failed: has_changed = True # populate return var with zone info. - api_method = 'dns.zone_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + api_method = "dns.zone_list" + _has_failed, _msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method) - zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json()) + zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args["name"], current_zones=response.json()) if zone_exists: payload = dict() - payload['id'] = zone_id - api_method = 'dns.zone_info' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + payload["id"] = zone_id + api_method = "dns.zone_info" + _has_failed, _msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method, payload=payload) memset_api = response.json() return has_failed, has_changed, memset_api, msg def delete_zone(args=None, zone_exists=None, payload=None): - ''' + """ Deletion requires extra sanity checking as the zone cannot be deleted if it contains domains or records. Setting force=true will override this behaviour. 
- ''' + """ has_changed, has_failed = False, False msg, memset_api = None, None if zone_exists: - api_method = 'dns.zone_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + api_method = "dns.zone_list" + _has_failed, _msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method, payload=payload) counter = 0 for zone in response.json(): - if zone['nickname'] == args['name']: + if zone["nickname"] == args["name"]: counter += 1 if counter == 1: for zone in response.json(): - if zone['nickname'] == args['name']: - zone_id = zone['id'] - domain_count = len(zone['domains']) - record_count = len(zone['records']) - if (domain_count > 0 or record_count > 0) and args['force'] is False: + if zone["nickname"] == args["name"]: + zone_id = zone["id"] + domain_count = len(zone["domains"]) + record_count = len(zone["records"]) + if (domain_count > 0 or record_count > 0) and args["force"] is False: # we need to fail out if force was not explicitly set. - stderr = 'Zone contains domains or records and force was not used.' + stderr = "Zone contains domains or records and force was not used." has_failed = True has_changed = False module.fail_json(failed=has_failed, changed=has_changed, msg=msg, stderr=stderr, rc=1) - api_method = 'dns.zone_delete' - payload['id'] = zone_id - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + api_method = "dns.zone_delete" + payload["id"] = zone_id + has_failed, msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method, payload=payload) if not has_failed: has_changed = True # return raw JSON from API in named var and then unset msg var so we aren't returning the same thing twice. @@ -232,7 +232,7 @@ def delete_zone(args=None, zone_exists=None, payload=None): # zone at this time. has_failed = True has_changed = False - msg = 'Unable to delete zone as multiple zones with the same name exist.' + msg = "Unable to delete zone as multiple zones with the same name exist." else: has_failed, has_changed = False, False @@ -240,40 +240,40 @@ def delete_zone(args=None, zone_exists=None, payload=None): def create_or_delete(args=None): - ''' + """ We need to perform some initial sanity checking and also look up required info before handing it off to create or delete. - ''' + """ retvals, payload = dict(), dict() has_failed, has_changed = False, False msg, memset_api, stderr = None, None, None # get the zones and check if the relevant zone exists. - api_method = 'dns.zone_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + api_method = "dns.zone_list" + _has_failed, _msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method) if _has_failed: # this is the first time the API is called; incorrect credentials will # manifest themselves at this point so we need to ensure the user is # informed of the reason. 
- retvals['failed'] = _has_failed - retvals['msg'] = _msg + retvals["failed"] = _has_failed + retvals["msg"] = _msg if response.stderr is not None: - retvals['stderr'] = response.stderr + retvals["stderr"] = response.stderr return retvals - zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json()) + zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args["name"], current_zones=response.json()) - if args['state'] == 'present': + if args["state"] == "present": has_failed, has_changed, memset_api, msg = create_zone(args=args, zone_exists=zone_exists, payload=payload) - elif args['state'] == 'absent': + elif args["state"] == "absent": has_failed, has_changed, memset_api, msg = delete_zone(args=args, zone_exists=zone_exists, payload=payload) - retvals['failed'] = has_failed - retvals['changed'] = has_changed - for val in ['msg', 'stderr', 'memset_api']: + retvals["failed"] = has_failed + retvals["changed"] = has_changed + for val in ["msg", "stderr", "memset_api"]: if val is not None: retvals[val] = eval(val) @@ -284,18 +284,18 @@ def main(): global module module = AnsibleModule( argument_spec=dict( - state=dict(required=True, choices=['present', 'absent'], type='str'), - api_key=dict(required=True, type='str', no_log=True), - name=dict(required=True, aliases=['nickname'], type='str'), - ttl=dict(default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), - force=dict(default=False, type='bool') + state=dict(required=True, choices=["present", "absent"], type="str"), + api_key=dict(required=True, type="str", no_log=True), + name=dict(required=True, aliases=["nickname"], type="str"), + ttl=dict(default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type="int"), + force=dict(default=False, type="bool"), ), - supports_check_mode=True + supports_check_mode=True, ) # populate the dict with the user-provided vars. args = dict(module.params) - args['check_mode'] = module.check_mode + args["check_mode"] = module.check_mode # validate some API-specific limitations. api_validation(args=args) @@ -305,11 +305,11 @@ def main(): else: retvals = create_or_delete(args) - if retvals['failed']: + if retvals["failed"]: module.fail_json(**retvals) else: module.exit_json(**retvals) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/memset_zone_domain.py b/plugins/modules/memset_zone_domain.py index efa4a1b9946..c39a79a5eda 100644 --- a/plugins/modules/memset_zone_domain.py +++ b/plugins/modules/memset_zone_domain.py @@ -85,59 +85,59 @@ def api_validation(args=None): - ''' + """ Perform some validation which will be enforced by Memset's API (see: https://www.memset.com/apidocs/methods_dns.html#dns.zone_domain_create) - ''' + """ # zone domain length must be less than 250 chars - if len(args['domain']) > 250: - stderr = 'Zone domain must be less than 250 characters in length.' + if len(args["domain"]) > 250: + stderr = "Zone domain must be less than 250 characters in length." module.fail_json(failed=True, msg=stderr) def check(args=None): - ''' + """ Support for running with check mode. 
- ''' + """ retvals = dict() has_changed = False - api_method = 'dns.zone_domain_list' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + api_method = "dns.zone_domain_list" + has_failed, msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method) - domain_exists = check_zone_domain(data=response, domain=args['domain']) + domain_exists = check_zone_domain(data=response, domain=args["domain"]) # set changed to true if the operation would cause a change. - has_changed = ((domain_exists and args['state'] == 'absent') or (not domain_exists and args['state'] == 'present')) + has_changed = (domain_exists and args["state"] == "absent") or (not domain_exists and args["state"] == "present") - retvals['changed'] = has_changed - retvals['failed'] = has_failed + retvals["changed"] = has_changed + retvals["failed"] = has_failed return retvals def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None): - ''' + """ At this point we already know whether the containing zone exists, so we just need to create the domain (or exit if it already exists). - ''' + """ has_changed, has_failed = False, False msg = None - api_method = 'dns.zone_domain_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + api_method = "dns.zone_domain_list" + _has_failed, _msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method) for zone_domain in response.json(): - if zone_domain['domain'] == args['domain']: + if zone_domain["domain"] == args["domain"]: # zone domain already exists, nothing to change. has_changed = False break else: # we need to create the domain - api_method = 'dns.zone_domain_create' - payload['domain'] = args['domain'] - payload['zone_id'] = zone_id - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + api_method = "dns.zone_domain_create" + payload["domain"] = args["domain"] + payload["zone_id"] = zone_id + has_failed, msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method, payload=payload) if not has_failed: has_changed = True @@ -145,23 +145,23 @@ def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None): def delete_zone_domain(args=None, payload=None): - ''' + """ Deletion is pretty simple, domains are always unique so we don't need to do any sanity checking to avoid deleting the wrong thing. 
- ''' + """ has_changed, has_failed = False, False msg, memset_api = None, None - api_method = 'dns.zone_domain_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + api_method = "dns.zone_domain_list" + _has_failed, _msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method) - domain_exists = check_zone_domain(data=response, domain=args['domain']) + domain_exists = check_zone_domain(data=response, domain=args["domain"]) if domain_exists: - api_method = 'dns.zone_domain_delete' - payload['domain'] = args['domain'] - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + api_method = "dns.zone_domain_delete" + payload["domain"] = args["domain"] + has_failed, msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method, payload=payload) if not has_failed: has_changed = True memset_api = response.json() @@ -172,31 +172,31 @@ def delete_zone_domain(args=None, payload=None): def create_or_delete_domain(args=None): - ''' + """ We need to perform some initial sanity checking and also look up required info before handing it off to create or delete. - ''' + """ retvals, payload = dict(), dict() has_changed, has_failed = False, False msg, stderr, memset_api = None, None, None # get the zones and check if the relevant zone exists. - api_method = 'dns.zone_list' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + api_method = "dns.zone_list" + has_failed, msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method) if has_failed: # this is the first time the API is called; incorrect credentials will # manifest themselves at this point so we need to ensure the user is # informed of the reason. - retvals['failed'] = has_failed - retvals['msg'] = msg + retvals["failed"] = has_failed + retvals["msg"] = msg if response.status_code is not None: - retvals['stderr'] = f"API returned an error: {response.status_code}" + retvals["stderr"] = f"API returned an error: {response.status_code}" else: - retvals['stderr'] = response.stderr + retvals["stderr"] = response.stderr return retvals - zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json()) + zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args["zone"], current_zones=response.json()) if not zone_exists: # the zone needs to be unique - this isn't a requirement of Memset's API but it @@ -207,19 +207,21 @@ def create_or_delete_domain(args=None): elif counter > 1: stderr = f"{args['zone']} matches multiple zones, cannot create domain." 
- retvals['failed'] = has_failed - retvals['msg'] = stderr + retvals["failed"] = has_failed + retvals["msg"] = stderr return retvals - if args['state'] == 'present': - has_failed, has_changed, msg = create_zone_domain(args=args, zone_exists=zone_exists, zone_id=zone_id, payload=payload) + if args["state"] == "present": + has_failed, has_changed, msg = create_zone_domain( + args=args, zone_exists=zone_exists, zone_id=zone_id, payload=payload + ) - if args['state'] == 'absent': + if args["state"] == "absent": has_failed, has_changed, memset_api, msg = delete_zone_domain(args=args, payload=payload) - retvals['changed'] = has_changed - retvals['failed'] = has_failed - for val in ['msg', 'stderr', 'memset_api']: + retvals["changed"] = has_changed + retvals["failed"] = has_failed + for val in ["msg", "stderr", "memset_api"]: if val is not None: retvals[val] = eval(val) @@ -230,17 +232,17 @@ def main(): global module module = AnsibleModule( argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], type='str'), - api_key=dict(required=True, type='str', no_log=True), - domain=dict(required=True, aliases=['name'], type='str'), - zone=dict(required=True, type='str') + state=dict(default="present", choices=["present", "absent"], type="str"), + api_key=dict(required=True, type="str", no_log=True), + domain=dict(required=True, aliases=["name"], type="str"), + zone=dict(required=True, type="str"), ), - supports_check_mode=True + supports_check_mode=True, ) # populate the dict with the user-provided vars. args = dict(module.params) - args['check_mode'] = module.check_mode + args["check_mode"] = module.check_mode # validate some API-specific limitations. api_validation(args=args) @@ -252,19 +254,21 @@ def main(): # we would need to populate the return values with the API's response # in several places so it is easier to do it at the end instead. - if not retvals['failed']: - if args['state'] == 'present' and not module.check_mode: + if not retvals["failed"]: + if args["state"] == "present" and not module.check_mode: payload = dict() - payload['domain'] = args['domain'] - api_method = 'dns.zone_domain_info' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) - retvals['memset_api'] = response.json() - - if retvals['failed']: + payload["domain"] = args["domain"] + api_method = "dns.zone_domain_info" + _has_failed, _msg, response = memset_api_call( + api_key=args["api_key"], api_method=api_method, payload=payload + ) + retvals["memset_api"] = response.json() + + if retvals["failed"]: module.fail_json(**retvals) else: module.exit_json(**retvals) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/memset_zone_record.py b/plugins/modules/memset_zone_record.py index 30dc0ec5a63..e728093c615 100644 --- a/plugins/modules/memset_zone_record.py +++ b/plugins/modules/memset_zone_record.py @@ -170,29 +170,29 @@ def api_validation(args=None): - ''' + """ Perform some validation which will be enforced by Memset's API (see: https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create) - ''' + """ failed_validation = False error = None # priority can only be integer 0 > 999 - if not 0 <= args['priority'] <= 999: + if not 0 <= args["priority"] <= 999: failed_validation = True - error = 'Priority must be in the range 0 > 999 (inclusive).' + error = "Priority must be in the range 0 > 999 (inclusive)." 
# data value must be max 250 chars - if len(args['address']) > 250: + if len(args["address"]) > 250: failed_validation = True error = "Address must be less than 250 characters in length." # record value must be max 63 chars - if args['record']: - if len(args['record']) > 63: + if args["record"]: + if len(args["record"]) > 63: failed_validation = True error = "Record must be less than 63 characters in length." # relative isn't used for all record types - if args['relative']: - if args['type'] not in ['CNAME', 'MX', 'NS', 'SRV']: + if args["relative"]: + if args["type"] not in ["CNAME", "MX", "NS", "SRV"]: failed_validation = True error = "Relative is only valid for CNAME, MX, NS and SRV record types." # if any of the above failed then fail early @@ -201,27 +201,27 @@ def create_zone_record(args=None, zone_id=None, records=None, payload=None): - ''' + """ Sanity checking has already occurred prior to this function being called, so we can go ahead and either create or update the record. As defaults are defined for all values in the argument_spec, this may cause some changes to occur as the defaults are enforced (if the user has only configured required variables). - ''' + """ has_changed, has_failed = False, False msg, memset_api = None, None # assemble the new record. new_record = dict() - new_record['zone_id'] = zone_id - for arg in ['priority', 'address', 'relative', 'record', 'ttl', 'type']: + new_record["zone_id"] = zone_id + for arg in ["priority", "address", "relative", "record", "ttl", "type"]: new_record[arg] = args[arg] # if we have any matches, update them. if records: for zone_record in records: # record exists, add ID to payload. - new_record['id'] = zone_record['id'] + new_record["id"] = zone_record["id"] if zone_record == new_record: # nothing to do; record is already correct so we populate # the return var with the existing record's details. @@ -231,13 +231,15 @@ def create_zone_record(args=None, zone_id=None, records=None, payload=None): # merge dicts ensuring we change any updated values payload = zone_record.copy() payload.update(new_record) - api_method = 'dns.zone_record_update' - if args['check_mode']: + api_method = "dns.zone_record_update" + if args["check_mode"]: has_changed = True # return the new record to the user in the returned var. memset_api = new_record return has_changed, has_failed, memset_api, msg - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + has_failed, msg, response = memset_api_call( + api_key=args["api_key"], api_method=api_method, payload=payload + ) if not has_failed: has_changed = True memset_api = new_record @@ -245,14 +247,14 @@ def create_zone_record(args=None, zone_id=None, records=None, payload=None): msg = None else: # no record found, so we need to create it - api_method = 'dns.zone_record_create' + api_method = "dns.zone_record_create" payload = new_record - if args['check_mode']: + if args["check_mode"]: has_changed = True # populate the return var with the new record's details. 
memset_api = new_record return has_changed, has_failed, memset_api, msg - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + has_failed, msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method, payload=payload) if not has_failed: has_changed = True memset_api = new_record @@ -263,22 +265,22 @@ def create_zone_record(args=None, zone_id=None, records=None, payload=None): def delete_zone_record(args=None, records=None, payload=None): - ''' + """ Matching records can be cleanly deleted without affecting other resource types, so this is pretty simple to achieve. - ''' + """ has_changed, has_failed = False, False msg, memset_api = None, None # if we have any matches, delete them. if records: for zone_record in records: - if args['check_mode']: + if args["check_mode"]: has_changed = True return has_changed, has_failed, memset_api, msg - payload['id'] = zone_record['id'] - api_method = 'dns.zone_record_delete' - has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + payload["id"] = zone_record["id"] + api_method = "dns.zone_record_delete" + has_failed, msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method, payload=payload) if not has_failed: has_changed = True memset_api = zone_record @@ -289,32 +291,32 @@ def delete_zone_record(args=None, records=None, payload=None): def create_or_delete(args=None): - ''' + """ We need to perform some initial sanity checking and also look up required info before handing it off to create or delete functions. Check mode is integrated into the create or delete functions. - ''' + """ has_failed, has_changed = False, False msg, memset_api, stderr = None, None, None retvals, payload = dict(), dict() # get the zones and check if the relevant zone exists. - api_method = 'dns.zone_list' - _has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + api_method = "dns.zone_list" + _has_failed, msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method) if _has_failed: # this is the first time the API is called; incorrect credentials will # manifest themselves at this point so we need to ensure the user is # informed of the reason. - retvals['failed'] = _has_failed - retvals['msg'] = msg + retvals["failed"] = _has_failed + retvals["msg"] = msg if response.status_code is not None: - retvals['stderr'] = f"API returned an error: {response.status_code}" + retvals["stderr"] = f"API returned an error: {response.status_code}" else: - retvals['stderr'] = response.stderr + retvals["stderr"] = response.stderr return retvals - zone_exists, _msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json()) + zone_exists, _msg, counter, zone_id = get_zone_id(zone_name=args["zone"], current_zones=response.json()) if not zone_exists: has_failed = True @@ -322,28 +324,33 @@ def create_or_delete(args=None): stderr = f"DNS zone {args['zone']} does not exist." elif counter > 1: stderr = f"{args['zone']} matches multiple zones." 
- retvals['failed'] = has_failed - retvals['msg'] = stderr - retvals['stderr'] = stderr + retvals["failed"] = has_failed + retvals["msg"] = stderr + retvals["stderr"] = stderr return retvals # get a list of all records (as we can't limit records by zone) - api_method = 'dns.zone_record_list' - _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + api_method = "dns.zone_record_list" + _has_failed, _msg, response = memset_api_call(api_key=args["api_key"], api_method=api_method) # find any matching records - records = [record for record in response.json() if record['zone_id'] == zone_id - and record['record'] == args['record'] and record['type'] == args['type']] - - if args['state'] == 'present': - has_changed, has_failed, memset_api, msg = create_zone_record(args=args, zone_id=zone_id, records=records, payload=payload) - - if args['state'] == 'absent': + records = [ + record + for record in response.json() + if record["zone_id"] == zone_id and record["record"] == args["record"] and record["type"] == args["type"] + ] + + if args["state"] == "present": + has_changed, has_failed, memset_api, msg = create_zone_record( + args=args, zone_id=zone_id, records=records, payload=payload + ) + + if args["state"] == "absent": has_changed, has_failed, memset_api, msg = delete_zone_record(args=args, records=records, payload=payload) - retvals['changed'] = has_changed - retvals['failed'] = has_failed - for val in ['msg', 'stderr', 'memset_api']: + retvals["changed"] = has_changed + retvals["failed"] = has_failed + for val in ["msg", "stderr", "memset_api"]: if val is not None: retvals[val] = eval(val) @@ -354,33 +361,33 @@ def main(): global module module = AnsibleModule( argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], type='str'), - api_key=dict(required=True, type='str', no_log=True), - zone=dict(required=True, type='str'), - type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'), - address=dict(required=True, aliases=['ip', 'data'], type='str'), - record=dict(default='', type='str'), - ttl=dict(default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), - priority=dict(default=0, type='int'), - relative=dict(default=False, type='bool') + state=dict(default="present", choices=["present", "absent"], type="str"), + api_key=dict(required=True, type="str", no_log=True), + zone=dict(required=True, type="str"), + type=dict(required=True, choices=["A", "AAAA", "CNAME", "MX", "NS", "SRV", "TXT"], type="str"), + address=dict(required=True, aliases=["ip", "data"], type="str"), + record=dict(default="", type="str"), + ttl=dict(default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type="int"), + priority=dict(default=0, type="int"), + relative=dict(default=False, type="bool"), ), - supports_check_mode=True + supports_check_mode=True, ) # populate the dict with the user-provided vars. 
args = dict(module.params) - args['check_mode'] = module.check_mode + args["check_mode"] = module.check_mode # perform some Memset API-specific validation api_validation(args=args) retvals = create_or_delete(args) - if retvals['failed']: + if retvals["failed"]: module.fail_json(**retvals) else: module.exit_json(**retvals) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/mksysb.py b/plugins/modules/mksysb.py index 4f15762f748..e240e6a4455 100644 --- a/plugins/modules/mksysb.py +++ b/plugins/modules/mksysb.py @@ -109,17 +109,17 @@ class MkSysB(ModuleHelper): module = dict( argument_spec=dict( - backup_crypt_files=dict(type='bool', default=True), - backup_dmapi_fs=dict(type='bool', default=True), - create_map_files=dict(type='bool', default=False), - exclude_files=dict(type='bool', default=False), - exclude_wpar_files=dict(type='bool', default=False), - extended_attrs=dict(type='bool', default=True), - name=dict(type='str', required=True), - new_image_data=dict(type='bool', default=True), - software_packing=dict(type='bool', default=False), - storage_path=dict(type='str', required=True), - use_snapshot=dict(type='bool', default=False) + backup_crypt_files=dict(type="bool", default=True), + backup_dmapi_fs=dict(type="bool", default=True), + create_map_files=dict(type="bool", default=False), + exclude_files=dict(type="bool", default=False), + exclude_wpar_files=dict(type="bool", default=False), + extended_attrs=dict(type="bool", default=True), + name=dict(type="str", required=True), + new_image_data=dict(type="bool", default=True), + software_packing=dict(type="bool", default=False), + storage_path=dict(type="str", required=True), + use_snapshot=dict(type="bool", default=False), ), supports_check_mode=True, ) @@ -147,12 +147,25 @@ def process(rc, out, err): runner = CmdRunner( self.module, - ['mksysb', '-X'], + ["mksysb", "-X"], self.command_args_formats, ) - with runner(['create_map_files', 'use_snapshot', 'exclude_files', 'exclude_wpar_files', 'software_packing', - 'extended_attrs', 'backup_crypt_files', 'backup_dmapi_fs', 'new_image_data', 'combined_path'], - output_process=process, check_mode_skip=True) as ctx: + with runner( + [ + "create_map_files", + "use_snapshot", + "exclude_files", + "exclude_wpar_files", + "software_packing", + "extended_attrs", + "backup_crypt_files", + "backup_dmapi_fs", + "new_image_data", + "combined_path", + ], + output_process=process, + check_mode_skip=True, + ) as ctx: ctx.run(combined_path=[self.vars.storage_path, self.vars.name]) if self.verbosity >= 4: self.vars.run_info = ctx.run_info @@ -164,5 +177,5 @@ def main(): MkSysB.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/modprobe.py b/plugins/modules/modprobe.py index d8f192b0fa2..cbdfb509dda 100644 --- a/plugins/modules/modprobe.py +++ b/plugins/modules/modprobe.py @@ -89,32 +89,31 @@ from ansible.module_utils.common.text.converters import to_native RELEASE_VER = platform.release() -MODULES_LOAD_LOCATION = '/etc/modules-load.d' -PARAMETERS_FILES_LOCATION = '/etc/modprobe.d' +MODULES_LOAD_LOCATION = "/etc/modules-load.d" +PARAMETERS_FILES_LOCATION = "/etc/modprobe.d" class Modprobe: - def __init__(self, module): self.module = module - self.modprobe_bin = module.get_bin_path('modprobe', True) + self.modprobe_bin = module.get_bin_path("modprobe", True) self.check_mode = module.check_mode - self.desired_state = module.params['state'] - self.name = module.params['name'] - self.params = module.params['params'] - 
self.persistent = module.params['persistent'] + self.desired_state = module.params["state"] + self.name = module.params["name"] + self.params = module.params["params"] + self.persistent = module.params["persistent"] self.changed = False - self.re_find_module = re.compile(rf'^ *{self.name} *(?:[#;].*)?\n?\Z') - self.re_find_params = re.compile(rf'^options {self.name} \w+=\S+ *(?:[#;].*)?\n?\Z') - self.re_get_params_and_values = re.compile(rf'^options {self.name} (\w+=\S+) *(?:[#;].*)?\n?\Z') + self.re_find_module = re.compile(rf"^ *{self.name} *(?:[#;].*)?\n?\Z") + self.re_find_params = re.compile(rf"^options {self.name} \w+=\S+ *(?:[#;].*)?\n?\Z") + self.re_get_params_and_values = re.compile(rf"^options {self.name} (\w+=\S+) *(?:[#;].*)?\n?\Z") def load_module(self): command = [self.modprobe_bin] if self.check_mode: - command.append('-n') + command.append("-n") command.extend([self.name] + shlex.split(self.params)) rc, out, err = self.module.run_command(command) @@ -126,7 +125,7 @@ def load_module(self): self.changed = True else: rc, stdout, stderr = self.module.run_command( - [self.modprobe_bin, '-n', '--first-time', self.name] + shlex.split(self.params) + [self.modprobe_bin, "-n", "--first-time", self.name] + shlex.split(self.params) ) if rc != 0: self.module.warn(stderr) @@ -161,26 +160,23 @@ def permanent_params(self): return params def create_module_file(self): - file_path = os.path.join(MODULES_LOAD_LOCATION, - f"{self.name}.conf") + file_path = os.path.join(MODULES_LOAD_LOCATION, f"{self.name}.conf") if not self.check_mode: - with open(file_path, 'w') as file: + with open(file_path, "w") as file: file.write(f"{self.name}\n") @property def module_options_file_content(self): - file_content = '\n'.join([f'options {self.name} {param}' for param in self.params.split()]) + file_content = "\n".join([f"options {self.name} {param}" for param in self.params.split()]) return f"{file_content}\n" def create_module_options_file(self): - new_file_path = os.path.join(PARAMETERS_FILES_LOCATION, - f"{self.name}.conf") + new_file_path = os.path.join(PARAMETERS_FILES_LOCATION, f"{self.name}.conf") if not self.check_mode: - with open(new_file_path, 'w') as file: + with open(new_file_path, "w") as file: file.write(self.module_options_file_content) def disable_old_params(self): - for modprobe_file in self.modprobe_files: with open(modprobe_file) as file: file_content = file.readlines() @@ -192,11 +188,10 @@ def disable_old_params(self): content_changed = True if not self.check_mode and content_changed: - with open(modprobe_file, 'w') as file: - file.write('\n'.join(file_content)) + with open(modprobe_file, "w") as file: + file.write("\n".join(file_content)) def disable_module_permanent(self): - for module_file in self.modules_files: with open(module_file) as file: file_content = file.readlines() @@ -208,11 +203,10 @@ def disable_module_permanent(self): content_changed = True if not self.check_mode and content_changed: - with open(module_file, 'w') as file: - file.write('\n'.join(file_content)) + with open(module_file, "w") as file: + file.write("\n".join(file_content)) def load_module_permanent(self): - if not self.module_is_loaded_persistently: self.create_module_file() self.changed = True @@ -235,22 +229,22 @@ def unload_module_permanent(self): def modules_files(self): if not os.path.isdir(MODULES_LOAD_LOCATION): return [] - modules_paths = [os.path.join(MODULES_LOAD_LOCATION, path) - for path in os.listdir(MODULES_LOAD_LOCATION)] + modules_paths = [os.path.join(MODULES_LOAD_LOCATION, path) for path in 
os.listdir(MODULES_LOAD_LOCATION)] return [path for path in modules_paths if os.path.isfile(path)] @property def modprobe_files(self): if not os.path.isdir(PARAMETERS_FILES_LOCATION): return [] - modules_paths = [os.path.join(PARAMETERS_FILES_LOCATION, path) - for path in os.listdir(PARAMETERS_FILES_LOCATION)] + modules_paths = [ + os.path.join(PARAMETERS_FILES_LOCATION, path) for path in os.listdir(PARAMETERS_FILES_LOCATION) + ] return [path for path in modules_paths if os.path.isfile(path)] def module_loaded(self): is_loaded = False try: - with open('/proc/modules') as modules: + with open("/proc/modules") as modules: module_name = f"{self.name.replace('-', '_')} " for line in modules: if line.startswith(module_name): @@ -259,7 +253,7 @@ def module_loaded(self): if not is_loaded: module_file = f"/{self.name}.ko" - builtin_path = os.path.join('/lib/modules/', RELEASE_VER, 'modules.builtin') + builtin_path = os.path.join("/lib/modules/", RELEASE_VER, "modules.builtin") with open(builtin_path) as builtins: for line in builtins: if line.rstrip().endswith(module_file): @@ -271,9 +265,9 @@ def module_loaded(self): return is_loaded def unload_module(self): - command = [self.modprobe_bin, '-r', self.name] + command = [self.modprobe_bin, "-r", self.name] if self.check_mode: - command.append('-n') + command.append("-n") rc, out, err = self.module.run_command(command) if rc != 0: @@ -284,20 +278,20 @@ def unload_module(self): @property def result(self): return { - 'changed': self.changed, - 'name': self.name, - 'params': self.params, - 'state': self.desired_state, + "changed": self.changed, + "name": self.name, + "params": self.params, + "state": self.desired_state, } def build_module(): return AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), - params=dict(type='str', default=''), - persistent=dict(type='str', default='disabled', choices=['disabled', 'present', 'absent']), + name=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["absent", "present"]), + params=dict(type="str", default=""), + persistent=dict(type="str", default="disabled", choices=["disabled", "present", "absent"]), ), supports_check_mode=True, ) @@ -308,18 +302,18 @@ def main(): modprobe = Modprobe(module) - if modprobe.desired_state == 'present' and not modprobe.module_loaded(): + if modprobe.desired_state == "present" and not modprobe.module_loaded(): modprobe.load_module() - elif modprobe.desired_state == 'absent' and modprobe.module_loaded(): + elif modprobe.desired_state == "absent" and modprobe.module_loaded(): modprobe.unload_module() - if modprobe.persistent == 'present' and not (modprobe.module_is_loaded_persistently and modprobe.params_is_set): + if modprobe.persistent == "present" and not (modprobe.module_is_loaded_persistently and modprobe.params_is_set): modprobe.load_module_permanent() - elif modprobe.persistent == 'absent' and (modprobe.module_is_loaded_persistently or modprobe.permanent_params): + elif modprobe.persistent == "absent" and (modprobe.module_is_loaded_persistently or modprobe.permanent_params): modprobe.unload_module_permanent() module.exit_json(**modprobe.result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/monit.py b/plugins/modules/monit.py index ff420224657..75fe9b24252 100644 --- a/plugins/modules/monit.py +++ b/plugins/modules/monit.py @@ -58,27 +58,24 @@ STATE_COMMAND_MAP = { - 'stopped': 'stop', - 'started': 
'start', - 'monitored': 'monitor', - 'unmonitored': 'unmonitor', - 'restarted': 'restart' + "stopped": "stop", + "started": "start", + "monitored": "monitor", + "unmonitored": "unmonitor", + "restarted": "restart", } -MONIT_SERVICES = ['Process', 'File', 'Fifo', 'Filesystem', 'Directory', 'Remote host', 'System', 'Program', - 'Network'] +MONIT_SERVICES = ["Process", "File", "Fifo", "Filesystem", "Directory", "Remote host", "System", "Program", "Network"] class StatusValue(namedtuple("Status", "value, is_pending")): - MISSING = 'missing' - OK = 'ok' - NOT_MONITORED = 'not_monitored' - INITIALIZING = 'initializing' - DOES_NOT_EXIST = 'does_not_exist' - EXECUTION_FAILED = 'execution_failed' - ALL_STATUS = [ - MISSING, OK, NOT_MONITORED, INITIALIZING, DOES_NOT_EXIST, EXECUTION_FAILED - ] + MISSING = "missing" + OK = "ok" + NOT_MONITORED = "not_monitored" + INITIALIZING = "initializing" + DOES_NOT_EXIST = "does_not_exist" + EXECUTION_FAILED = "execution_failed" + ALL_STATUS = [MISSING, OK, NOT_MONITORED, INITIALIZING, DOES_NOT_EXIST, EXECUTION_FAILED] def __new__(cls, value, is_pending=False): return super().__new__(cls, value, is_pending) @@ -87,7 +84,7 @@ def pending(self): return StatusValue(self.value, True) def __getattr__(self, item): - if item in (f'is_{status}' for status in self.ALL_STATUS): + if item in (f"is_{status}" for status in self.ALL_STATUS): return self.value == getattr(self, item[3:].upper()) raise AttributeError(item) @@ -124,17 +121,19 @@ def monit_version(self): return self._monit_version def _get_monit_version(self): - rc, out, err = self.module.run_command([self.monit_bin_path, '-V'], check_rc=True) - version_line = out.split('\n')[0] + rc, out, err = self.module.run_command([self.monit_bin_path, "-V"], check_rc=True) + version_line = out.split("\n")[0] raw_version = re.search(r"([0-9]+\.){1,2}([0-9]+)?", version_line).group() - return raw_version, tuple(map(int, raw_version.split('.'))) + return raw_version, tuple(map(int, raw_version.split("."))) def exit_fail(self, msg, status=None, **kwargs): - kwargs.update({ - 'msg': msg, - 'monit_version': self._raw_version, - 'process_status': str(status) if status else None, - }) + kwargs.update( + { + "msg": msg, + "monit_version": self._raw_version, + "process_status": str(status) if status else None, + } + ) self.module.fail_json(**kwargs) def exit_success(self, state): @@ -156,7 +155,7 @@ def get_status(self, validate=False): return self._parse_status(out, err) def _parse_status(self, output, err): - escaped_monit_services = '|'.join([re.escape(x) for x in MONIT_SERVICES]) + escaped_monit_services = "|".join([re.escape(x) for x in MONIT_SERVICES]) pattern = f"({escaped_monit_services}) '{re.escape(self.process_name)}'" if not re.search(pattern, output, re.IGNORECASE): return Status.MISSING @@ -166,31 +165,31 @@ def _parse_status(self, output, err): self.exit_fail("Unable to find process status", stdout=output, stderr=err) status_val = status_val[0].strip().upper() - if ' | ' in status_val: - status_val = status_val.split(' | ')[0] - if ' - ' not in status_val: - status_val = status_val.replace(' ', '_') + if " | " in status_val: + status_val = status_val.split(" | ")[0] + if " - " not in status_val: + status_val = status_val.replace(" ", "_") try: return getattr(Status, status_val) except AttributeError: self.module.warn(f"Unknown monit status '{status_val}', treating as execution failed") return Status.EXECUTION_FAILED else: - status_val, substatus = status_val.split(' - ') + status_val, substatus = status_val.split(" - ") 
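# --- Editor's note: hedged sketch, not part of the diff above. It mirrors the
# two monit status forms _parse_status() distinguishes: a plain value, and a
# "value - action state" pending form split on " - ". The sample strings are
# assumptions about monit output, not captured values.
def split_status(raw):
    value = raw.strip().upper()
    if " | " in value:
        value = value.split(" | ")[0]
    if " - " not in value:
        # plain form, e.g. "Not monitored" -> "NOT_MONITORED"
        return value.replace(" ", "_"), None
    # pending form, e.g. "Not monitored - start pending"
    value, substatus = value.split(" - ")
    action, state = substatus.split()
    return value, (action, state)

assert split_status("Not monitored") == ("NOT_MONITORED", None)
assert split_status("Not monitored - start pending") == ("NOT MONITORED", ("START", "PENDING"))
# --- end editor's note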
action, state = substatus.split() - if action in ['START', 'INITIALIZING', 'RESTART', 'MONITOR']: + if action in ["START", "INITIALIZING", "RESTART", "MONITOR"]: status = Status.OK else: status = Status.NOT_MONITORED - if state == 'pending': + if state == "pending": status = status.pending() return status def is_process_present(self): - command = [self.monit_bin_path, 'summary'] + self.command_args + command = [self.monit_bin_path, "summary"] + self.command_args rc, out, err = self.module.run_command(command, check_rc=True) - return bool(re.findall(rf'\b{self.process_name}\b', out)) + return bool(re.findall(rf"\b{self.process_name}\b", out)) def is_process_running(self): return self.get_status().is_ok @@ -207,7 +206,7 @@ def wait_for_status_change(self, current_status): loop_count = 0 while running_status.value == current_status.value: if loop_count >= self._status_change_retry_count: - self.exit_fail('waited too long for monit to change state', running_status) + self.exit_fail("waited too long for monit to change state", running_status) loop_count += 1 time.sleep(0.5) @@ -235,13 +234,13 @@ def wait_for_monit_to_stop_pending(self, current_status=None): return current_status def reload(self): - rc, out, err = self.module.run_command([self.monit_bin_path, 'reload']) + rc, out, err = self.module.run_command([self.monit_bin_path, "reload"]) if rc != 0: - self.exit_fail('monit reload failed', stdout=out, stderr=err) - self.exit_success(state='reloaded') + self.exit_fail("monit reload failed", stdout=out, stderr=err) + self.exit_success(state="reloaded") def present(self): - self.run_command('reload') + self.run_command("reload") timeout_time = time.time() + self.timeout while not self.is_process_present(): @@ -250,7 +249,7 @@ def present(self): time.sleep(5) - self.exit_success(state='present') + self.exit_success(state="present") def change_state(self, state, expected_status, invert_expected=None): current_status = self.get_status() @@ -262,53 +261,56 @@ def change_state(self, state, expected_status, invert_expected=None): status_match = not status_match if status_match: self.exit_success(state=state) - self.exit_fail(f'{self.process_name} process not {state}', status) + self.exit_fail(f"{self.process_name} process not {state}", status) def stop(self): - self.change_state('stopped', Status.NOT_MONITORED) + self.change_state("stopped", Status.NOT_MONITORED) def unmonitor(self): - self.change_state('unmonitored', Status.NOT_MONITORED) + self.change_state("unmonitored", Status.NOT_MONITORED) def restart(self): - self.change_state('restarted', Status.OK) + self.change_state("restarted", Status.OK) def start(self): - self.change_state('started', Status.OK) + self.change_state("started", Status.OK) def monitor(self): - self.change_state('monitored', Status.NOT_MONITORED, invert_expected=True) + self.change_state("monitored", Status.NOT_MONITORED, invert_expected=True) def main(): arg_spec = dict( name=dict(required=True), - timeout=dict(default=300, type='int'), - state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded']) + timeout=dict(default=300, type="int"), + state=dict( + required=True, + choices=["present", "started", "restarted", "stopped", "monitored", "unmonitored", "reloaded"], + ), ) module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) - name = module.params['name'] - state = module.params['state'] - timeout = module.params['timeout'] + name = module.params["name"] + state = module.params["state"] + 
timeout = module.params["timeout"] - monit = Monit(module, module.get_bin_path('monit', True), name, timeout) + monit = Monit(module, module.get_bin_path("monit", True), name, timeout) def exit_if_check_mode(): if module.check_mode: module.exit_json(changed=True) - if state == 'reloaded': + if state == "reloaded": exit_if_check_mode() monit.reload() present = monit.is_process_present() - if not present and not state == 'present': - module.fail_json(msg=f'{name} process not presently configured with monit', name=name) + if not present and not state == "present": + module.fail_json(msg=f"{name} process not presently configured with monit", name=name) - if state == 'present': + if state == "present": if present: module.exit_json(changed=False, name=name, state=state) exit_if_check_mode() @@ -317,31 +319,31 @@ def exit_if_check_mode(): monit.wait_for_monit_to_stop_pending() running = monit.is_process_running() - if running and state in ['started', 'monitored']: + if running and state in ["started", "monitored"]: module.exit_json(changed=False, name=name, state=state) - if running and state == 'stopped': + if running and state == "stopped": exit_if_check_mode() monit.stop() - if running and state == 'unmonitored': + if running and state == "unmonitored": exit_if_check_mode() monit.unmonitor() - elif state == 'restarted': + elif state == "restarted": exit_if_check_mode() monit.restart() - elif not running and state == 'started': + elif not running and state == "started": exit_if_check_mode() monit.start() - elif not running and state == 'monitored': + elif not running and state == "monitored": exit_if_check_mode() monit.monitor() module.exit_json(changed=False, name=name, state=state) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/mqtt.py b/plugins/modules/mqtt.py index ecf9b293c5d..ffc8e38a2f2 100644 --- a/plugins/modules/mqtt.py +++ b/plugins/modules/mqtt.py @@ -137,6 +137,7 @@ # Main # + def main(): # From https://docs.python.org/3/library/ssl.html#constants, this: # @@ -145,37 +146,37 @@ def main(): # # @TODO: update the use of `ssl` constants tls_map = { - 'tlsv1.2': ssl.PROTOCOL_TLSv1_2, - 'tlsv1.1': ssl.PROTOCOL_TLSv1_1, + "tlsv1.2": ssl.PROTOCOL_TLSv1_2, + "tlsv1.1": ssl.PROTOCOL_TLSv1_1, } module = AnsibleModule( argument_spec=dict( - server=dict(default='localhost'), - port=dict(default=1883, type='int'), + server=dict(default="localhost"), + port=dict(default=1883, type="int"), topic=dict(required=True), payload=dict(required=True), client_id=dict(), qos=dict(default="0", choices=["0", "1", "2"]), - retain=dict(default=False, type='bool'), + retain=dict(default=False, type="bool"), username=dict(), password=dict(no_log=True), - ca_cert=dict(type='path', aliases=['ca_certs']), - client_cert=dict(type='path', aliases=['certfile']), - client_key=dict(type='path', aliases=['keyfile']), - tls_version=dict(choices=['tlsv1.1', 'tlsv1.2']) + ca_cert=dict(type="path", aliases=["ca_certs"]), + client_cert=dict(type="path", aliases=["certfile"]), + client_key=dict(type="path", aliases=["keyfile"]), + tls_version=dict(choices=["tlsv1.1", "tlsv1.2"]), ), - supports_check_mode=True + supports_check_mode=True, ) if not HAS_PAHOMQTT: - module.fail_json(msg=missing_required_lib('paho-mqtt'), exception=PAHOMQTT_IMP_ERR) + module.fail_json(msg=missing_required_lib("paho-mqtt"), exception=PAHOMQTT_IMP_ERR) - server = module.params.get("server", 'localhost') + server = module.params.get("server", "localhost") port = module.params.get("port", 1883) topic = 
module.params.get("topic") payload = module.params.get("payload") - client_id = module.params.get("client_id", '') + client_id = module.params.get("client_id", "") qos = int(module.params.get("qos", 0)) retain = module.params.get("retain") username = module.params.get("username", None) @@ -188,12 +189,12 @@ def main(): if client_id is None: client_id = f"{socket.getfqdn()}_{os.getpid()}" - if payload and payload == 'None': + if payload and payload == "None": payload = None auth = None if username is not None: - auth = {'username': username, 'password': password} + auth = {"username": username, "password": password} tls = None if ca_certs is not None: @@ -201,32 +202,21 @@ def main(): tls_version = tls_map.get(tls_version, ssl.PROTOCOL_TLS) tls = { - 'ca_certs': ca_certs, - 'certfile': certfile, - 'keyfile': keyfile, - 'tls_version': tls_version, + "ca_certs": ca_certs, + "certfile": certfile, + "keyfile": keyfile, + "tls_version": tls_version, } try: mqtt.single( - topic, - payload, - qos=qos, - retain=retain, - client_id=client_id, - hostname=server, - port=port, - auth=auth, - tls=tls + topic, payload, qos=qos, retain=retain, client_id=client_id, hostname=server, port=port, auth=auth, tls=tls ) except Exception as e: - module.fail_json( - msg=f"unable to publish to MQTT broker {e}", - exception=traceback.format_exc() - ) + module.fail_json(msg=f"unable to publish to MQTT broker {e}", exception=traceback.format_exc()) module.exit_json(changed=False, topic=topic) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/mssql_db.py b/plugins/modules/mssql_db.py index f8e4ac0194c..4fbef15856e 100644 --- a/plugins/modules/mssql_db.py +++ b/plugins/modules/mssql_db.py @@ -131,12 +131,12 @@ def db_delete(conn, cursor, db): def db_import(conn, cursor, module, db, target): if os.path.isfile(target): - with open(target, 'r') as backup: + with open(target, "r") as backup: sqlQuery = f"USE [{db}]\n" for line in backup: if line is None: break - elif line.startswith('GO'): + elif line.startswith("GO"): cursor.execute(sqlQuery) sqlQuery = f"USE [{db}]\n" else: @@ -151,30 +151,29 @@ def db_import(conn, cursor, module, db, target): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(required=True, aliases=['db']), - login_user=dict(default=''), - login_password=dict(default='', no_log=True), + name=dict(required=True, aliases=["db"]), + login_user=dict(default=""), + login_password=dict(default="", no_log=True), login_host=dict(required=True), - login_port=dict(default='1433'), + login_port=dict(default="1433"), target=dict(), - autocommit=dict(type='bool', default=False), - state=dict( - default='present', choices=['present', 'absent', 'import']) + autocommit=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent", "import"]), ) ) if not mssql_found: - module.fail_json(msg=missing_required_lib('pymssql'), exception=PYMSSQL_IMP_ERR) + module.fail_json(msg=missing_required_lib("pymssql"), exception=PYMSSQL_IMP_ERR) - db = module.params['name'] - state = module.params['state'] - autocommit = module.params['autocommit'] + db = module.params["name"] + state = module.params["state"] + autocommit = module.params["autocommit"] target = module.params["target"] - login_user = module.params['login_user'] - login_password = module.params['login_password'] - login_host = module.params['login_host'] - login_port = module.params['login_port'] + login_user = module.params["login_user"] + login_password = 
module.params["login_password"] + login_host = module.params["login_host"] + login_port = module.params["login_port"] login_querystring = login_host if login_port != "1433": @@ -184,15 +183,17 @@ def main(): module.fail_json(msg="when supplying login_user arguments login_password must be provided") try: - conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master') + conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database="master") cursor = conn.cursor() except Exception as e: if "Unknown database" in str(e): errno, errstr = e.args module.fail_json(msg=f"ERROR: {errno} {errstr}") else: - module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your " - "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf") + module.fail_json( + msg="unable to connect, check login_user and login_password are correct, or alternatively check your " + "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf" + ) conn.autocommit(True) changed = False @@ -234,5 +235,5 @@ def main(): module.exit_json(changed=changed, db=db) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/mssql_script.py b/plugins/modules/mssql_script.py index a7f8a4622a2..9adf835267d 100644 --- a/plugins/modules/mssql_script.py +++ b/plugins/modules/mssql_script.py @@ -265,6 +265,7 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib import traceback import json + PYMSSQL_IMP_ERR = None try: import pymssql @@ -281,87 +282,83 @@ def clean_output(o): def run_module(): module_args = dict( - name=dict(aliases=['db'], default=''), + name=dict(aliases=["db"], default=""), login_user=dict(), login_password=dict(no_log=True), login_host=dict(required=True), - login_port=dict(type='int', default=1433), + login_port=dict(type="int", default=1433), script=dict(required=True), - output=dict(default='default', choices=['dict', 'default']), - params=dict(type='dict'), - transaction=dict(type='bool', default=False), + output=dict(default="default", choices=["dict", "default"]), + params=dict(type="dict"), + transaction=dict(type="bool", default=False), ) result = dict( changed=False, ) - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) if not MSSQL_FOUND: - module.fail_json(msg=missing_required_lib( - 'pymssql'), exception=PYMSSQL_IMP_ERR) - - db = module.params['name'] - login_user = module.params['login_user'] - login_password = module.params['login_password'] - login_host = module.params['login_host'] - login_port = module.params['login_port'] - script = module.params['script'] - output = module.params['output'] - sql_params = module.params['params'] + module.fail_json(msg=missing_required_lib("pymssql"), exception=PYMSSQL_IMP_ERR) + + db = module.params["name"] + login_user = module.params["login_user"] + login_password = module.params["login_password"] + login_host = module.params["login_host"] + login_port = module.params["login_port"] + script = module.params["script"] + output = module.params["output"] + sql_params = module.params["params"] # Added param to set the transactional mode (true/false) - transaction = module.params['transaction'] + transaction = module.params["transaction"] login_querystring = login_host if login_port != 1433: login_querystring = f"{login_host}:{login_port}" if login_user is not None and login_password is None: - 
module.fail_json( - msg="when supplying login_user argument, login_password must also be provided") + module.fail_json(msg="when supplying login_user argument, login_password must also be provided") try: - conn = pymssql.connect( - user=login_user, password=login_password, host=login_querystring, database=db) + conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database=db) cursor = conn.cursor() except Exception as e: if "Unknown database" in str(e): errno, errstr = e.args module.fail_json(msg=f"ERROR: {errno} {errstr}") else: - module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your " - "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf") + module.fail_json( + msg="unable to connect, check login_user and login_password are correct, or alternatively check your " + "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf" + ) # If transactional mode is requested, start a transaction conn.autocommit(not transaction) - query_results_key = 'query_results' - if output == 'dict': + query_results_key = "query_results" + if output == "dict": cursor = conn.cursor(as_dict=True) - query_results_key = 'query_results_dict' + query_results_key = "query_results_dict" # Process the script into batches queries = [] current_batch = [] for statement in script.splitlines(True): # Ignore the Byte Order Mark, if found - if statement.strip() == '\uFEFF': + if statement.strip() == "\ufeff": continue # Assume each 'GO' is on its own line but may have leading/trailing whitespace # and be of mixed-case - if statement.strip().upper() != 'GO': + if statement.strip().upper() != "GO": current_batch.append(statement) else: - queries.append(''.join(current_batch)) + queries.append("".join(current_batch)) current_batch = [] if len(current_batch) > 0: - queries.append(''.join(current_batch)) + queries.append("".join(current_batch)) - result['changed'] = True + result["changed"] = True if module.check_mode: module.exit_json(**result) @@ -380,15 +377,15 @@ def run_module(): # We know we executed the statement so this error just means we have no resultset # which is ok (eg UPDATE/INSERT) if ( - type(e).__name__ == 'OperationalError' and - str(e) == 'Statement not executed or executed statement has no resultset' + type(e).__name__ == "OperationalError" + and str(e) == "Statement not executed or executed statement has no resultset" ): query_results.append([]) else: # Rollback transaction before failing the module in case of error if transaction: conn.rollback() - error_msg = f'{type(e).__name__}: {e}' + error_msg = f"{type(e).__name__}: {e}" module.fail_json(msg="query failed", query=query, error=error_msg, **result) # Commit transaction before exiting the module in case of no error @@ -406,5 +403,5 @@ def main(): run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/nagios.py b/plugins/modules/nagios.py index bb83afcac7c..0aeaa23ef31 100644 --- a/plugins/modules/nagios.py +++ b/plugins/modules/nagios.py @@ -276,84 +276,84 @@ def which_cmdfile(): locations = [ # rhel - '/etc/nagios/nagios.cfg', + "/etc/nagios/nagios.cfg", # debian - '/etc/nagios3/nagios.cfg', + "/etc/nagios3/nagios.cfg", # older debian - '/etc/nagios2/nagios.cfg', + "/etc/nagios2/nagios.cfg", # bsd, solaris - '/usr/local/etc/nagios/nagios.cfg', + "/usr/local/etc/nagios/nagios.cfg", # groundwork it monitoring - '/usr/local/groundwork/nagios/etc/nagios.cfg', + "/usr/local/groundwork/nagios/etc/nagios.cfg", # open 
monitoring distribution - '/omd/sites/oppy/tmp/nagios/nagios.cfg', + "/omd/sites/oppy/tmp/nagios/nagios.cfg", # ??? - '/usr/local/nagios/etc/nagios.cfg', - '/usr/local/nagios/nagios.cfg', - '/opt/nagios/etc/nagios.cfg', - '/opt/nagios/nagios.cfg', + "/usr/local/nagios/etc/nagios.cfg", + "/usr/local/nagios/nagios.cfg", + "/opt/nagios/etc/nagios.cfg", + "/opt/nagios/nagios.cfg", # icinga on debian/ubuntu - '/etc/icinga/icinga.cfg', + "/etc/icinga/icinga.cfg", # icinga installed from source (default location) - '/usr/local/icinga/etc/icinga.cfg', + "/usr/local/icinga/etc/icinga.cfg", ] for path in locations: if os.path.exists(path): for line in open(path): - if line.startswith('command_file'): - return line.split('=')[1].strip() + if line.startswith("command_file"): + return line.split("=")[1].strip() return None def main(): ACTION_CHOICES = [ - 'downtime', - 'delete_downtime', - 'silence', - 'unsilence', - 'enable_alerts', - 'disable_alerts', - 'silence_nagios', - 'unsilence_nagios', - 'command', - 'servicegroup_host_downtime', - 'servicegroup_service_downtime', - 'acknowledge', - 'forced_check', + "downtime", + "delete_downtime", + "silence", + "unsilence", + "enable_alerts", + "disable_alerts", + "silence_nagios", + "unsilence_nagios", + "command", + "servicegroup_host_downtime", + "servicegroup_service_downtime", + "acknowledge", + "forced_check", ] module = AnsibleModule( argument_spec=dict( - action=dict(type='str', required=True, choices=ACTION_CHOICES), - author=dict(type='str', default='Ansible'), - comment=dict(type='str', default='Scheduling downtime'), - host=dict(type='str'), - servicegroup=dict(type='str'), - start=dict(type='str'), - minutes=dict(type='int', default=30), - cmdfile=dict(type='str', default=which_cmdfile()), - services=dict(type='list', elements='str', aliases=['service']), - command=dict(type='str'), + action=dict(type="str", required=True, choices=ACTION_CHOICES), + author=dict(type="str", default="Ansible"), + comment=dict(type="str", default="Scheduling downtime"), + host=dict(type="str"), + servicegroup=dict(type="str"), + start=dict(type="str"), + minutes=dict(type="int", default=30), + cmdfile=dict(type="str", default=which_cmdfile()), + services=dict(type="list", elements="str", aliases=["service"]), + command=dict(type="str"), ), required_if=[ - ('action', 'downtime', ['host', 'services']), - ('action', 'delete_downtime', ['host', 'services']), - ('action', 'silence', ['host']), - ('action', 'unsilence', ['host']), - ('action', 'enable_alerts', ['host', 'services']), - ('action', 'disable_alerts', ['host', 'services']), - ('action', 'command', ['command']), - ('action', 'servicegroup_host_downtime', ['host', 'servicegroup']), - ('action', 'servicegroup_service_downtime', ['host', 'servicegroup']), - ('action', 'acknowledge', ['host', 'services']), - ('action', 'forced_check', ['host', 'services']), + ("action", "downtime", ["host", "services"]), + ("action", "delete_downtime", ["host", "services"]), + ("action", "silence", ["host"]), + ("action", "unsilence", ["host"]), + ("action", "enable_alerts", ["host", "services"]), + ("action", "disable_alerts", ["host", "services"]), + ("action", "command", ["command"]), + ("action", "servicegroup_host_downtime", ["host", "servicegroup"]), + ("action", "servicegroup_service_downtime", ["host", "servicegroup"]), + ("action", "acknowledge", ["host", "services"]), + ("action", "forced_check", ["host", "services"]), ], ) - if not module.params['cmdfile']: - module.fail_json(msg='unable to locate nagios.cfg') + if not 
module.params["cmdfile"]: + module.fail_json(msg="unable to locate nagios.cfg") ansible_nagios = Nagios(module, **module.params) if module.check_mode: @@ -379,25 +379,25 @@ class Nagios: def __init__(self, module, **kwargs): self.module = module - self.action = kwargs['action'] - self.author = kwargs['author'] - self.comment = kwargs['comment'] - self.host = kwargs['host'] - self.servicegroup = kwargs['servicegroup'] - if kwargs['start'] is not None: - self.start = int(kwargs['start']) + self.action = kwargs["action"] + self.author = kwargs["author"] + self.comment = kwargs["comment"] + self.host = kwargs["host"] + self.servicegroup = kwargs["servicegroup"] + if kwargs["start"] is not None: + self.start = int(kwargs["start"]) else: self.start = None - self.minutes = kwargs['minutes'] - self.cmdfile = kwargs['cmdfile'] - self.command = kwargs['command'] - - if kwargs['services'] is None : - self.services = kwargs['services'] - elif len(kwargs['services']) == 1 and kwargs['services'][0] in ['host', 'all']: - self.services = kwargs['services'][0] + self.minutes = kwargs["minutes"] + self.cmdfile = kwargs["cmdfile"] + self.command = kwargs["command"] + + if kwargs["services"] is None: + self.services = kwargs["services"] + elif len(kwargs["services"]) == 1 and kwargs["services"][0] in ["host", "all"]: + self.services = kwargs["services"][0] else: - self.services = kwargs['services'] + self.services = kwargs["services"] self.command_results = [] @@ -414,23 +414,18 @@ def _write_command(self, cmd): """ if not os.path.exists(self.cmdfile): - self.module.fail_json(msg='nagios command file does not exist', - cmdfile=self.cmdfile) + self.module.fail_json(msg="nagios command file does not exist", cmdfile=self.cmdfile) if not stat.S_ISFIFO(os.stat(self.cmdfile).st_mode): - self.module.fail_json(msg='nagios command file is not a fifo file', - cmdfile=self.cmdfile) + self.module.fail_json(msg="nagios command file is not a fifo file", cmdfile=self.cmdfile) try: - with open(self.cmdfile, 'w') as fp: + with open(self.cmdfile, "w") as fp: fp.write(cmd) fp.flush() self.command_results.append(cmd.strip()) except IOError: - self.module.fail_json(msg='unable to write to nagios command file', - cmdfile=self.cmdfile) + self.module.fail_json(msg="unable to write to nagios command file", cmdfile=self.cmdfile) - def _fmt_dt_str(self, cmd, host, duration, author=None, - comment=None, start=None, - svc=None, fixed=1, trigger=0): + def _fmt_dt_str(self, cmd, host, duration, author=None, comment=None, start=None, svc=None, fixed=1, trigger=0): """ Format an external-command downtime string. 
@@ -456,7 +451,7 @@ def _fmt_dt_str(self, cmd, host, duration, author=None, start = entry_time hdr = f"[{entry_time}] {cmd};{host};" - duration_s = (duration * 60) + duration_s = duration * 60 end = start + duration_s if not author: @@ -466,20 +461,17 @@ def _fmt_dt_str(self, cmd, host, duration, author=None, comment = self.comment if svc is not None: - dt_args = [svc, str(start), str(end), str(fixed), str(trigger), - str(duration_s), author, comment] + dt_args = [svc, str(start), str(end), str(fixed), str(trigger), str(duration_s), author, comment] else: # Downtime for a host if no svc specified - dt_args = [str(start), str(end), str(fixed), str(trigger), - str(duration_s), author, comment] + dt_args = [str(start), str(end), str(fixed), str(trigger), str(duration_s), author, comment] dt_arg_str = ";".join(dt_args) dt_str = f"{hdr}{dt_arg_str}\n" return dt_str - def _fmt_ack_str(self, cmd, host, author=None, - comment=None, svc=None, sticky=0, notify=1, persistent=0): + def _fmt_ack_str(self, cmd, host, author=None, comment=None, svc=None, sticky=0, notify=1, persistent=0): """ Format an external-command acknowledge string. @@ -540,17 +532,17 @@ def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None): if svc is not None: dt_del_args.append(svc) else: - dt_del_args.append('') + dt_del_args.append("") if start is not None: dt_del_args.append(str(start)) else: - dt_del_args.append('') + dt_del_args.append("") if comment is not None: dt_del_args.append(comment) else: - dt_del_args.append('') + dt_del_args.append("") dt_del_arg_str = ";".join(dt_del_args) dt_del_str = f"{hdr}{dt_del_arg_str}\n" @@ -1097,10 +1089,7 @@ def silence_host(self, host): Syntax: DISABLE_HOST_NOTIFICATIONS; """ - cmd = [ - "DISABLE_HOST_SVC_NOTIFICATIONS", - "DISABLE_HOST_NOTIFICATIONS" - ] + cmd = ["DISABLE_HOST_SVC_NOTIFICATIONS", "DISABLE_HOST_NOTIFICATIONS"] nagios_return = True return_str_list = [] for c in cmd: @@ -1125,10 +1114,7 @@ def unsilence_host(self, host): Syntax: ENABLE_HOST_NOTIFICATIONS; """ - cmd = [ - "ENABLE_HOST_SVC_NOTIFICATIONS", - "ENABLE_HOST_NOTIFICATIONS" - ] + cmd = ["ENABLE_HOST_SVC_NOTIFICATIONS", "ENABLE_HOST_NOTIFICATIONS"] nagios_return = True return_str_list = [] for c in cmd: @@ -1148,7 +1134,7 @@ def silence_nagios(self): This is a 'SHUT UP, NAGIOS' command """ - cmd = 'DISABLE_NOTIFICATIONS' + cmd = "DISABLE_NOTIFICATIONS" self._write_command(self._fmt_notif_str(cmd)) def unsilence_nagios(self): @@ -1158,7 +1144,7 @@ def unsilence_nagios(self): This is a 'OK, NAGIOS, GO'' command """ - cmd = 'ENABLE_NOTIFICATIONS' + cmd = "ENABLE_NOTIFICATIONS" self._write_command(self._fmt_notif_str(cmd)) def nagios_cmd(self, cmd): @@ -1170,10 +1156,10 @@ def nagios_cmd(self, cmd): You just have to provide the properly formatted command """ - pre = f'[{int(time.time())}]' + pre = f"[{int(time.time())}]" - post = '\n' - cmdstr = f'{pre} {cmd}{post}' + post = "\n" + cmdstr = f"{pre} {cmd}{post}" self._write_command(cmdstr) def act(self): @@ -1182,85 +1168,81 @@ def act(self): needful (at the earliest). """ # host or service downtime? 
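# --- Editor's note: hedged sketch of the services convention the act()
# dispatch below relies on (set up in __init__ above): a one-element list of
# "host" or "all" collapses to that bare string so the branches can compare
# self.services against it, while anything else stays a list of service
# names. The sample names are assumptions for illustration.
def normalize_services(services):
    if services is not None and len(services) == 1 and services[0] in ("host", "all"):
        return services[0]
    return services

assert normalize_services(["host"]) == "host"
assert normalize_services(["http", "ssh"]) == ["http", "ssh"]
assert normalize_services(None) is None
# --- end editor's note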
- if self.action == 'downtime': - if self.services == 'host': - self.schedule_host_downtime(self.host, minutes=self.minutes, - start=self.start) - elif self.services == 'all': - self.schedule_host_svc_downtime(self.host, minutes=self.minutes, - start=self.start) + if self.action == "downtime": + if self.services == "host": + self.schedule_host_downtime(self.host, minutes=self.minutes, start=self.start) + elif self.services == "all": + self.schedule_host_svc_downtime(self.host, minutes=self.minutes, start=self.start) else: - self.schedule_svc_downtime(self.host, - services=self.services, - minutes=self.minutes, - start=self.start) + self.schedule_svc_downtime(self.host, services=self.services, minutes=self.minutes, start=self.start) - elif self.action == 'acknowledge': - if self.services == 'host': + elif self.action == "acknowledge": + if self.services == "host": self.acknowledge_host_problem(self.host) else: self.acknowledge_svc_problem(self.host, services=self.services) - elif self.action == 'delete_downtime': - if self.services == 'host': + elif self.action == "delete_downtime": + if self.services == "host": self.delete_host_downtime(self.host) - elif self.services == 'all': - self.delete_host_downtime(self.host, comment='') + elif self.services == "all": + self.delete_host_downtime(self.host, comment="") else: self.delete_host_downtime(self.host, services=self.services) - elif self.action == 'forced_check': - if self.services == 'host': + elif self.action == "forced_check": + if self.services == "host": self.schedule_forced_host_check(self.host) - elif self.services == 'all': + elif self.services == "all": self.schedule_forced_host_svc_check(self.host) else: self.schedule_forced_svc_check(self.host, services=self.services) elif self.action == "servicegroup_host_downtime": if self.servicegroup: - self.schedule_servicegroup_host_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start) + self.schedule_servicegroup_host_downtime( + servicegroup=self.servicegroup, minutes=self.minutes, start=self.start + ) elif self.action == "servicegroup_service_downtime": if self.servicegroup: - self.schedule_servicegroup_svc_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start) + self.schedule_servicegroup_svc_downtime( + servicegroup=self.servicegroup, minutes=self.minutes, start=self.start + ) # toggle the host AND service alerts - elif self.action == 'silence': + elif self.action == "silence": self.silence_host(self.host) - elif self.action == 'unsilence': + elif self.action == "unsilence": self.unsilence_host(self.host) # toggle host/svc alerts - elif self.action == 'enable_alerts': - if self.services == 'host': + elif self.action == "enable_alerts": + if self.services == "host": self.enable_host_notifications(self.host) - elif self.services == 'all': + elif self.services == "all": self.enable_host_svc_notifications(self.host) else: - self.enable_svc_notifications(self.host, - services=self.services) + self.enable_svc_notifications(self.host, services=self.services) - elif self.action == 'disable_alerts': - if self.services == 'host': + elif self.action == "disable_alerts": + if self.services == "host": self.disable_host_notifications(self.host) - elif self.services == 'all': + elif self.services == "all": self.disable_host_svc_notifications(self.host) else: - self.disable_svc_notifications(self.host, - services=self.services) - elif self.action == 'silence_nagios': + self.disable_svc_notifications(self.host, services=self.services) + elif 
self.action == "silence_nagios": self.silence_nagios() - elif self.action == 'unsilence_nagios': + elif self.action == "unsilence_nagios": self.unsilence_nagios() - else: # self.action == 'command' + else: # self.action == 'command' self.nagios_cmd(self.command) - self.module.exit_json(nagios_commands=self.command_results, - changed=True) + self.module.exit_json(nagios_commands=self.command_results, changed=True) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/netcup_dns.py b/plugins/modules/netcup_dns.py index 52ec6c1915a..72b75976b46 100644 --- a/plugins/modules/netcup_dns.py +++ b/plugins/modules/netcup_dns.py @@ -208,39 +208,52 @@ def main(): argument_spec=dict( api_key=dict(required=True, no_log=True), api_password=dict(required=True, no_log=True), - customer_id=dict(required=True, type='int'), - + customer_id=dict(required=True, type="int"), domain=dict(required=True), - record=dict(default='@', aliases=['name']), - type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', - 'TLSA', 'NS', 'DS', 'OPENPGPKEY', 'SMIMEA', - 'SSHFP']), + record=dict(default="@", aliases=["name"]), + type=dict( + required=True, + choices=[ + "A", + "AAAA", + "MX", + "CNAME", + "CAA", + "SRV", + "TXT", + "TLSA", + "NS", + "DS", + "OPENPGPKEY", + "SMIMEA", + "SSHFP", + ], + ), value=dict(required=True), - priority=dict(type='int'), - solo=dict(type='bool', default=False), - state=dict(choices=['present', 'absent'], default='present'), - timeout=dict(type='int', default=5), - + priority=dict(type="int"), + solo=dict(type="bool", default=False), + state=dict(choices=["present", "absent"], default="present"), + timeout=dict(type="int", default=5), ), - supports_check_mode=True + supports_check_mode=True, ) if not HAS_NCDNSAPI: - module.fail_json(msg=missing_required_lib('nc-dnsapi'), exception=NCDNSAPI_IMP_ERR) - - api_key = module.params.get('api_key') - api_password = module.params.get('api_password') - customer_id = module.params.get('customer_id') - domain = module.params.get('domain') - record_type = module.params.get('type') - record = module.params.get('record') - value = module.params.get('value') - priority = module.params.get('priority') - solo = module.params.get('solo') - state = module.params.get('state') - timeout = module.params.get('timeout') - - if record_type == 'MX' and not priority: + module.fail_json(msg=missing_required_lib("nc-dnsapi"), exception=NCDNSAPI_IMP_ERR) + + api_key = module.params.get("api_key") + api_password = module.params.get("api_password") + customer_id = module.params.get("customer_id") + domain = module.params.get("domain") + record_type = module.params.get("type") + record = module.params.get("record") + value = module.params.get("value") + priority = module.params.get("priority") + solo = module.params.get("solo") + state = module.params.get("state") + timeout = module.params.get("timeout") + + if record_type == "MX" and not priority: module.fail_json(msg="record type MX required the 'priority' argument") has_changed = False @@ -259,12 +272,15 @@ def main(): break - if state == 'present': + if state == "present": if solo: - obsolete_records = [r for r in all_records if - r.hostname == record.hostname - and r.type == record.type - and not r.destination == record.destination] + obsolete_records = [ + r + for r in all_records + if r.hostname == record.hostname + and r.type == record.type + and not r.destination == record.destination + ] if obsolete_records: if not module.check_mode: @@ -277,7 +293,7 @@ 
def main(): all_records = api.add_dns_record(domain, record) has_changed = True - elif state == 'absent' and record_exists: + elif state == "absent" and record_exists: if not module.check_mode: all_records = api.delete_dns_record(domain, record) @@ -293,5 +309,5 @@ def record_data(r): return {"name": r.hostname, "type": r.type, "value": r.destination, "priority": r.priority, "id": r.id} -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/newrelic_deployment.py b/plugins/modules/newrelic_deployment.py index 73e49649db8..0e1cd881b90 100644 --- a/plugins/modules/newrelic_deployment.py +++ b/plugins/modules/newrelic_deployment.py @@ -96,7 +96,6 @@ def main(): - module = AnsibleModule( argument_spec=dict( token=dict(required=True, no_log=True), @@ -106,12 +105,12 @@ def main(): description=dict(), revision=dict(required=True), user=dict(), - validate_certs=dict(default=True, type='bool'), - app_name_exact_match=dict(type='bool', default=False), + validate_certs=dict(default=True, type="bool"), + app_name_exact_match=dict(type="bool", default=False), ), - required_one_of=[['app_name', 'application_id']], - required_if=[('app_name_exact_match', True, ['app_name'])], - supports_check_mode=True + required_one_of=[["app_name", "application_id"]], + required_if=[("app_name_exact_match", True, ["app_name"])], + supports_check_mode=True, ) # build list of params @@ -139,15 +138,13 @@ def main(): # Send the data to New Relic url = f"https://api.newrelic.com/v2/applications/{quote(str(app_id), safe='')}/deployments.json" - data = { - 'deployment': params - } + data = {"deployment": params} headers = { - 'Api-Key': module.params["token"], - 'Content-Type': 'application/json', + "Api-Key": module.params["token"], + "Content-Type": "application/json", } response, info = fetch_url(module, url, data=module.jsonify(data), headers=headers, method="POST") - if info['status'] in (200, 201): + if info["status"] in (200, 201): module.exit_json(changed=True) else: module.fail_json(msg=f"Unable to insert deployment marker: {info['msg']}") @@ -158,15 +155,15 @@ def get_application_id(module): data = f"filter[name]={module.params['app_name']}" application_id = None headers = { - 'Api-Key': module.params["token"], + "Api-Key": module.params["token"], } response, info = fetch_url(module, url, data=data, headers=headers) - if info['status'] not in (200, 201): + if info["status"] not in (200, 201): module.fail_json(msg=f"Unable to get application: {info['msg']}") result = json.loads(response.read()) if result is None or len(result.get("applications", "")) == 0: - module.fail_json(msg=f"No application found with name \"{module.params['app_name']}\"") + module.fail_json(msg=f'No application found with name "{module.params["app_name"]}"') if module.params["app_name_exact_match"]: for item in result["applications"]: @@ -174,12 +171,12 @@ def get_application_id(module): application_id = item["id"] break if application_id is None: - module.fail_json(msg=f"No application found with exact name \"{module.params['app_name']}\"") + module.fail_json(msg=f'No application found with exact name "{module.params["app_name"]}"') else: application_id = result["applications"][0]["id"] return application_id -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/nexmo.py b/plugins/modules/nexmo.py index 80774ca39af..dc0009ecf2a 100644 --- a/plugins/modules/nexmo.py +++ b/plugins/modules/nexmo.py @@ -75,25 +75,25 @@ from ansible.module_utils.urls import fetch_url, 
url_argument_spec -NEXMO_API = 'https://rest.nexmo.com/sms/json' +NEXMO_API = "https://rest.nexmo.com/sms/json" def send_msg(module): failed = list() responses = dict() msg = { - 'api_key': module.params.get('api_key'), - 'api_secret': module.params.get('api_secret'), - 'from': module.params.get('src'), - 'text': module.params.get('msg') + "api_key": module.params.get("api_key"), + "api_secret": module.params.get("api_secret"), + "from": module.params.get("src"), + "text": module.params.get("msg"), } - for number in module.params.get('dest'): - msg['to'] = number + for number in module.params.get("dest"): + msg["to"] = number url = f"{NEXMO_API}?{urlencode(msg)}" - headers = dict(Accept='application/json') + headers = dict(Accept="application/json") response, info = fetch_url(module, url, headers=headers) - if info['status'] != 200: + if info["status"] != 200: failed.append(number) responses[number] = dict(failed=True) @@ -103,18 +103,17 @@ def send_msg(module): failed.append(number) responses[number] = dict(failed=True) else: - for message in responses[number]['messages']: - if int(message['status']) != 0: + for message in responses[number]["messages"]: + if int(message["status"]) != 0: failed.append(number) responses[number] = dict(failed=True, **responses[number]) if failed: - msg = 'One or messages failed to send' + msg = "One or messages failed to send" else: - msg = '' + msg = "" - module.exit_json(failed=bool(failed), msg=msg, changed=False, - responses=responses) + module.exit_json(failed=bool(failed), msg=msg, changed=False, responses=responses) def main(): @@ -123,18 +122,16 @@ def main(): dict( api_key=dict(required=True, no_log=True), api_secret=dict(required=True, no_log=True), - src=dict(required=True, type='int'), - dest=dict(required=True, type='list', elements='int'), + src=dict(required=True, type="int"), + dest=dict(required=True, type="list", elements="int"), msg=dict(required=True), ), ) - module = AnsibleModule( - argument_spec=argument_spec - ) + module = AnsibleModule(argument_spec=argument_spec) send_msg(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/nginx_status_info.py b/plugins/modules/nginx_status_info.py index 50a5506fc91..707deebc567 100644 --- a/plugins/modules/nginx_status_info.py +++ b/plugins/modules/nginx_status_info.py @@ -99,42 +99,43 @@ class NginxStatusInfo: - def __init__(self): - self.url = module.params.get('url') - self.timeout = module.params.get('timeout') + self.url = module.params.get("url") + self.timeout = module.params.get("timeout") def run(self): result = { - 'active_connections': None, - 'accepts': None, - 'handled': None, - 'requests': None, - 'reading': None, - 'writing': None, - 'waiting': None, - 'data': None, + "active_connections": None, + "accepts": None, + "handled": None, + "requests": None, + "reading": None, + "writing": None, + "waiting": None, + "data": None, } (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout) if not response: module.fail_json(msg=f"No valid or no response from url {self.url} within {self.timeout} seconds (timeout)") - data = to_text(response.read(), errors='surrogate_or_strict') + data = to_text(response.read(), errors="surrogate_or_strict") if not data: return result - result['data'] = data - expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \ - r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)' + result["data"] = data + expr = ( + r"Active 
connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n" + r"Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)" + ) match = re.match(expr, data, re.S) if match: - result['active_connections'] = int(match.group(1)) - result['accepts'] = int(match.group(2)) - result['handled'] = int(match.group(3)) - result['requests'] = int(match.group(4)) - result['reading'] = int(match.group(5)) - result['writing'] = int(match.group(6)) - result['waiting'] = int(match.group(7)) + result["active_connections"] = int(match.group(1)) + result["accepts"] = int(match.group(2)) + result["handled"] = int(match.group(3)) + result["requests"] = int(match.group(4)) + result["reading"] = int(match.group(5)) + result["writing"] = int(match.group(6)) + result["waiting"] = int(match.group(7)) return result @@ -142,8 +143,8 @@ def main(): global module module = AnsibleModule( argument_spec=dict( - url=dict(type='str', required=True), - timeout=dict(type='int', default=10), + url=dict(type="str", required=True), + timeout=dict(type="int", default=10), ), supports_check_mode=True, ) @@ -152,5 +153,5 @@ def main(): module.exit_json(changed=False, **nginx_status_info) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/nictagadm.py b/plugins/modules/nictagadm.py index 1b8a03f5593..772a1d81efc 100644 --- a/plugins/modules/nictagadm.py +++ b/plugins/modules/nictagadm.py @@ -107,40 +107,39 @@ class NicTag: - def __init__(self, module): self.module = module - self.name = module.params['name'] - self.mac = module.params['mac'] - self.etherstub = module.params['etherstub'] - self.mtu = module.params['mtu'] - self.force = module.params['force'] - self.state = module.params['state'] + self.name = module.params["name"] + self.mac = module.params["mac"] + self.etherstub = module.params["etherstub"] + self.mtu = module.params["mtu"] + self.force = module.params["force"] + self.state = module.params["state"] - self.nictagadm_bin = self.module.get_bin_path('nictagadm', True) + self.nictagadm_bin = self.module.get_bin_path("nictagadm", True) def is_valid_mac(self): return is_mac(self.mac.lower()) def nictag_exists(self): - cmd = [self.nictagadm_bin, 'exists', self.name] + cmd = [self.nictagadm_bin, "exists", self.name] (rc, dummy, dummy) = self.module.run_command(cmd) return rc == 0 def add_nictag(self): - cmd = [self.nictagadm_bin, '-v', 'add'] + cmd = [self.nictagadm_bin, "-v", "add"] if self.etherstub: - cmd.append('-l') + cmd.append("-l") if self.mtu: - cmd.append('-p') + cmd.append("-p") cmd.append(f"mtu={self.mtu}") if self.mac: - cmd.append('-p') + cmd.append("-p") cmd.append(f"mac={self.mac}") cmd.append(self.name) @@ -148,10 +147,10 @@ def add_nictag(self): return self.module.run_command(cmd) def delete_nictag(self): - cmd = [self.nictagadm_bin, '-v', 'delete'] + cmd = [self.nictagadm_bin, "-v", "delete"] if self.force: - cmd.append('-f') + cmd.append("-f") cmd.append(self.name) @@ -161,29 +160,29 @@ def delete_nictag(self): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), - mac=dict(type='str'), - etherstub=dict(type='bool', default=False), - mtu=dict(type='int'), - force=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['absent', 'present']), + name=dict(type="str", required=True), + mac=dict(type="str"), + etherstub=dict(type="bool", default=False), + mtu=dict(type="int"), + force=dict(type="bool", default=False), + state=dict(type="str", default="present", 
choices=["absent", "present"]), ), mutually_exclusive=[ - ['etherstub', 'mac'], - ['etherstub', 'mtu'], + ["etherstub", "mac"], + ["etherstub", "mtu"], ], required_if=[ - ['etherstub', False, ['name', 'mac']], - ['state', 'absent', ['name', 'force']], + ["etherstub", False, ["name", "mac"]], + ["state", "absent", ["name", "force"]], ], - supports_check_mode=True + supports_check_mode=True, ) nictag = NicTag(module) rc = None - out = '' - err = '' + out = "" + err = "" result = dict( changed=False, etherstub=nictag.etherstub, @@ -195,19 +194,16 @@ def main(): ) if not nictag.is_valid_mac(): - module.fail_json(msg='Invalid MAC Address Value', - name=nictag.name, - mac=nictag.mac, - etherstub=nictag.etherstub) + module.fail_json(msg="Invalid MAC Address Value", name=nictag.name, mac=nictag.mac, etherstub=nictag.etherstub) - if nictag.state == 'absent': + if nictag.state == "absent": if nictag.nictag_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = nictag.delete_nictag() if rc != 0: module.fail_json(name=nictag.name, msg=err, rc=rc) - elif nictag.state == 'present': + elif nictag.state == "present": if not nictag.nictag_exists(): if module.check_mode: module.exit_json(changed=True) @@ -216,14 +212,14 @@ def main(): module.fail_json(name=nictag.name, msg=err, rc=rc) if rc is not None: - result['changed'] = True + result["changed"] = True if out: - result['stdout'] = out + result["stdout"] = out if err: - result['stderr'] = err + result["stderr"] = err module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/nmcli.py b/plugins/modules/nmcli.py index 2ad51616050..e98c8b0ad70 100644 --- a/plugins/modules/nmcli.py +++ b/plugins/modules/nmcli.py @@ -1704,135 +1704,135 @@ class Nmcli: All subclasses MUST define platform and distribution (which may be None). 
""" - platform = 'Generic' + platform = "Generic" distribution = None SECRET_OPTIONS = ( - '802-11-wireless-security.leap-password', - '802-11-wireless-security.psk', - '802-11-wireless-security.wep-key0', - '802-11-wireless-security.wep-key1', - '802-11-wireless-security.wep-key2', - '802-11-wireless-security.wep-key3' + "802-11-wireless-security.leap-password", + "802-11-wireless-security.psk", + "802-11-wireless-security.wep-key0", + "802-11-wireless-security.wep-key1", + "802-11-wireless-security.wep-key2", + "802-11-wireless-security.wep-key3", ) def __init__(self, module): self.module = module - self.state = module.params['state'] - self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions'] - self.autoconnect = module.params['autoconnect'] - self.autoconnect_priority = module.params['autoconnect_priority'] - self.autoconnect_retries = module.params['autoconnect_retries'] - self.conn_name = module.params['conn_name'] - self.conn_reload = module.params['conn_reload'] - self.slave_type = module.params['slave_type'] - self.master = module.params['master'] - self.ifname = module.params['ifname'] - self.type = module.params['type'] - self.ip4 = module.params['ip4'] - self.gw4 = module.params['gw4'] - self.gw4_ignore_auto = module.params['gw4_ignore_auto'] - self.routes4 = module.params['routes4'] - self.routes4_extended = module.params['routes4_extended'] - self.route_metric4 = module.params['route_metric4'] - self.routing_rules4 = module.params['routing_rules4'] - self.never_default4 = module.params['never_default4'] - self.dns4 = module.params['dns4'] - self.dns4_search = module.params['dns4_search'] - self.dns4_options = module.params['dns4_options'] - self.dns4_ignore_auto = module.params['dns4_ignore_auto'] - self.method4 = module.params['method4'] - self.may_fail4 = module.params['may_fail4'] - self.ip6 = module.params['ip6'] - self.gw6 = module.params['gw6'] - self.gw6_ignore_auto = module.params['gw6_ignore_auto'] - self.routes6 = module.params['routes6'] - self.routes6_extended = module.params['routes6_extended'] - self.route_metric6 = module.params['route_metric6'] - self.dns6 = module.params['dns6'] - self.dns6_search = module.params['dns6_search'] - self.dns6_options = module.params['dns6_options'] - self.dns6_ignore_auto = module.params['dns6_ignore_auto'] - self.method6 = module.params['method6'] - self.ip_privacy6 = module.params['ip_privacy6'] - self.addr_gen_mode6 = module.params['addr_gen_mode6'] - self.mtu = module.params['mtu'] - self.stp = module.params['stp'] - self.priority = module.params['priority'] - self.mode = module.params['mode'] - self.miimon = module.params['miimon'] - self.primary = module.params['primary'] - self.downdelay = module.params['downdelay'] - self.updelay = module.params['updelay'] - self.xmit_hash_policy = module.params['xmit_hash_policy'] - self.fail_over_mac = module.params['fail_over_mac'] - self.arp_interval = module.params['arp_interval'] - self.arp_ip_target = module.params['arp_ip_target'] - self.slavepriority = module.params['slavepriority'] - self.forwarddelay = module.params['forwarddelay'] - self.hellotime = module.params['hellotime'] - self.maxage = module.params['maxage'] - self.ageingtime = module.params['ageingtime'] - self.hairpin = module.params['hairpin'] - self.path_cost = module.params['path_cost'] - self.mac = module.params['mac'] - self.runner = module.params['runner'] - self.runner_hwaddr_policy = module.params['runner_hwaddr_policy'] - self.runner_fast_rate = module.params['runner_fast_rate'] - 
self.vlanid = module.params['vlanid'] - self.vlandev = module.params['vlandev'] - self.flags = module.params['flags'] - self.ingress = module.params['ingress'] - self.egress = module.params['egress'] - self.vxlan_id = module.params['vxlan_id'] - self.vxlan_local = module.params['vxlan_local'] - self.vxlan_remote = module.params['vxlan_remote'] - self.ip_tunnel_dev = module.params['ip_tunnel_dev'] - self.ip_tunnel_local = module.params['ip_tunnel_local'] - self.ip_tunnel_remote = module.params['ip_tunnel_remote'] - self.ip_tunnel_input_key = module.params['ip_tunnel_input_key'] - self.ip_tunnel_output_key = module.params['ip_tunnel_output_key'] - self.nmcli_bin = self.module.get_bin_path('nmcli', True) - self.dhcp_client_id = module.params['dhcp_client_id'] - self.zone = module.params['zone'] - self.ssid = module.params['ssid'] - self.wifi = module.params['wifi'] - self.wifi_sec = module.params['wifi_sec'] - self.gsm = module.params['gsm'] - self.macvlan = module.params['macvlan'] - self.wireguard = module.params['wireguard'] - self.vpn = module.params['vpn'] - self.transport_mode = module.params['transport_mode'] - self.infiniband_mac = module.params['infiniband_mac'] - self.sriov = module.params['sriov'] + self.state = module.params["state"] + self.ignore_unsupported_suboptions = module.params["ignore_unsupported_suboptions"] + self.autoconnect = module.params["autoconnect"] + self.autoconnect_priority = module.params["autoconnect_priority"] + self.autoconnect_retries = module.params["autoconnect_retries"] + self.conn_name = module.params["conn_name"] + self.conn_reload = module.params["conn_reload"] + self.slave_type = module.params["slave_type"] + self.master = module.params["master"] + self.ifname = module.params["ifname"] + self.type = module.params["type"] + self.ip4 = module.params["ip4"] + self.gw4 = module.params["gw4"] + self.gw4_ignore_auto = module.params["gw4_ignore_auto"] + self.routes4 = module.params["routes4"] + self.routes4_extended = module.params["routes4_extended"] + self.route_metric4 = module.params["route_metric4"] + self.routing_rules4 = module.params["routing_rules4"] + self.never_default4 = module.params["never_default4"] + self.dns4 = module.params["dns4"] + self.dns4_search = module.params["dns4_search"] + self.dns4_options = module.params["dns4_options"] + self.dns4_ignore_auto = module.params["dns4_ignore_auto"] + self.method4 = module.params["method4"] + self.may_fail4 = module.params["may_fail4"] + self.ip6 = module.params["ip6"] + self.gw6 = module.params["gw6"] + self.gw6_ignore_auto = module.params["gw6_ignore_auto"] + self.routes6 = module.params["routes6"] + self.routes6_extended = module.params["routes6_extended"] + self.route_metric6 = module.params["route_metric6"] + self.dns6 = module.params["dns6"] + self.dns6_search = module.params["dns6_search"] + self.dns6_options = module.params["dns6_options"] + self.dns6_ignore_auto = module.params["dns6_ignore_auto"] + self.method6 = module.params["method6"] + self.ip_privacy6 = module.params["ip_privacy6"] + self.addr_gen_mode6 = module.params["addr_gen_mode6"] + self.mtu = module.params["mtu"] + self.stp = module.params["stp"] + self.priority = module.params["priority"] + self.mode = module.params["mode"] + self.miimon = module.params["miimon"] + self.primary = module.params["primary"] + self.downdelay = module.params["downdelay"] + self.updelay = module.params["updelay"] + self.xmit_hash_policy = module.params["xmit_hash_policy"] + self.fail_over_mac = module.params["fail_over_mac"] + self.arp_interval = 
module.params["arp_interval"] + self.arp_ip_target = module.params["arp_ip_target"] + self.slavepriority = module.params["slavepriority"] + self.forwarddelay = module.params["forwarddelay"] + self.hellotime = module.params["hellotime"] + self.maxage = module.params["maxage"] + self.ageingtime = module.params["ageingtime"] + self.hairpin = module.params["hairpin"] + self.path_cost = module.params["path_cost"] + self.mac = module.params["mac"] + self.runner = module.params["runner"] + self.runner_hwaddr_policy = module.params["runner_hwaddr_policy"] + self.runner_fast_rate = module.params["runner_fast_rate"] + self.vlanid = module.params["vlanid"] + self.vlandev = module.params["vlandev"] + self.flags = module.params["flags"] + self.ingress = module.params["ingress"] + self.egress = module.params["egress"] + self.vxlan_id = module.params["vxlan_id"] + self.vxlan_local = module.params["vxlan_local"] + self.vxlan_remote = module.params["vxlan_remote"] + self.ip_tunnel_dev = module.params["ip_tunnel_dev"] + self.ip_tunnel_local = module.params["ip_tunnel_local"] + self.ip_tunnel_remote = module.params["ip_tunnel_remote"] + self.ip_tunnel_input_key = module.params["ip_tunnel_input_key"] + self.ip_tunnel_output_key = module.params["ip_tunnel_output_key"] + self.nmcli_bin = self.module.get_bin_path("nmcli", True) + self.dhcp_client_id = module.params["dhcp_client_id"] + self.zone = module.params["zone"] + self.ssid = module.params["ssid"] + self.wifi = module.params["wifi"] + self.wifi_sec = module.params["wifi_sec"] + self.gsm = module.params["gsm"] + self.macvlan = module.params["macvlan"] + self.wireguard = module.params["wireguard"] + self.vpn = module.params["vpn"] + self.transport_mode = module.params["transport_mode"] + self.infiniband_mac = module.params["infiniband_mac"] + self.sriov = module.params["sriov"] if self.method4: self.ipv4_method = self.method4 - elif self.type in ('dummy', 'macvlan', 'wireguard') and not self.ip4: - self.ipv4_method = 'disabled' + elif self.type in ("dummy", "macvlan", "wireguard") and not self.ip4: + self.ipv4_method = "disabled" elif self.ip4: - self.ipv4_method = 'manual' + self.ipv4_method = "manual" else: self.ipv4_method = None if self.method6: self.ipv6_method = self.method6 - elif self.type in ('dummy', 'macvlan', 'wireguard') and not self.ip6: - self.ipv6_method = 'disabled' + elif self.type in ("dummy", "macvlan", "wireguard") and not self.ip6: + self.ipv6_method = "disabled" elif self.ip6: - self.ipv6_method = 'manual' + self.ipv6_method = "manual" else: self.ipv6_method = None if self.type == "vrf": - self.table = module.params['table'] + self.table = module.params["table"] self.edit_commands = [] self.extra_options_validation() def extra_options_validation(self): - """ Additional validation of options set passed to module that cannot be implemented in module's argspecs. 
""" + """Additional validation of options set passed to module that cannot be implemented in module's argspecs.""" if self.type not in ("bridge-slave", "team-slave", "bond-slave"): if self.master is None and self.slave_type is not None: self.module.fail_json(msg="'master' option is required when 'slave_type' is specified.") @@ -1843,55 +1843,57 @@ def execute_command(self, cmd, use_unsafe_shell=False, data=None): def execute_edit_commands(self, commands, arguments): arguments = arguments or [] - cmd = [self.nmcli_bin, 'con', 'edit'] + arguments + cmd = [self.nmcli_bin, "con", "edit"] + arguments data = "\n".join(commands) return self.execute_command(cmd, data=data) def connection_options(self, detect_change=False): # Options common to multiple connection types. options = { - 'connection.autoconnect': self.autoconnect, - 'connection.autoconnect-priority': self.autoconnect_priority, - 'connection.autoconnect-retries': self.autoconnect_retries, - 'connection.zone': self.zone, + "connection.autoconnect": self.autoconnect, + "connection.autoconnect-priority": self.autoconnect_priority, + "connection.autoconnect-retries": self.autoconnect_retries, + "connection.zone": self.zone, } # IP address options. # The ovs-interface type can be both ip_conn_type and have a master # An interface that has a master but is of slave type vrf can have an IP address if (self.ip_conn_type and (not self.master or self.slave_type == "vrf")) or self.type == "ovs-interface": - options.update({ - 'ipv4.addresses': self.enforce_ipv4_cidr_notation(self.ip4), - 'ipv4.dhcp-client-id': self.dhcp_client_id, - 'ipv4.dns': self.dns4, - 'ipv4.dns-search': self.dns4_search, - 'ipv4.dns-options': self.dns4_options, - 'ipv4.ignore-auto-dns': self.dns4_ignore_auto, - 'ipv4.gateway': self.gw4, - 'ipv4.ignore-auto-routes': self.gw4_ignore_auto, - 'ipv4.routes': self.enforce_routes_format(self.routes4, self.routes4_extended), - 'ipv4.route-metric': self.route_metric4, - 'ipv4.routing-rules': self.routing_rules4, - 'ipv4.never-default': self.never_default4, - 'ipv4.method': self.ipv4_method, - 'ipv4.may-fail': self.may_fail4, - 'ipv6.addresses': self.enforce_ipv6_cidr_notation(self.ip6), - 'ipv6.dns': self.dns6, - 'ipv6.dns-search': self.dns6_search, - 'ipv6.dns-options': self.dns6_options, - 'ipv6.ignore-auto-dns': self.dns6_ignore_auto, - 'ipv6.gateway': self.gw6, - 'ipv6.ignore-auto-routes': self.gw6_ignore_auto, - 'ipv6.routes': self.enforce_routes_format(self.routes6, self.routes6_extended), - 'ipv6.route-metric': self.route_metric6, - 'ipv6.method': self.ipv6_method, - 'ipv6.ip6-privacy': self.ip_privacy6, - 'ipv6.addr-gen-mode': self.addr_gen_mode6 - }) + options.update( + { + "ipv4.addresses": self.enforce_ipv4_cidr_notation(self.ip4), + "ipv4.dhcp-client-id": self.dhcp_client_id, + "ipv4.dns": self.dns4, + "ipv4.dns-search": self.dns4_search, + "ipv4.dns-options": self.dns4_options, + "ipv4.ignore-auto-dns": self.dns4_ignore_auto, + "ipv4.gateway": self.gw4, + "ipv4.ignore-auto-routes": self.gw4_ignore_auto, + "ipv4.routes": self.enforce_routes_format(self.routes4, self.routes4_extended), + "ipv4.route-metric": self.route_metric4, + "ipv4.routing-rules": self.routing_rules4, + "ipv4.never-default": self.never_default4, + "ipv4.method": self.ipv4_method, + "ipv4.may-fail": self.may_fail4, + "ipv6.addresses": self.enforce_ipv6_cidr_notation(self.ip6), + "ipv6.dns": self.dns6, + "ipv6.dns-search": self.dns6_search, + "ipv6.dns-options": self.dns6_options, + "ipv6.ignore-auto-dns": self.dns6_ignore_auto, + "ipv6.gateway": 
self.gw6, + "ipv6.ignore-auto-routes": self.gw6_ignore_auto, + "ipv6.routes": self.enforce_routes_format(self.routes6, self.routes6_extended), + "ipv6.route-metric": self.route_metric6, + "ipv6.method": self.ipv6_method, + "ipv6.ip6-privacy": self.ip_privacy6, + "ipv6.addr-gen-mode": self.addr_gen_mode6, + } + ) # when 'method' is disabled the 'may_fail' no make sense but accepted by nmcli with keeping 'yes' # force ignoring to save idempotency - if self.ipv4_method and self.ipv4_method != 'disabled': - options.update({'ipv4.may-fail': self.may_fail4}) + if self.ipv4_method and self.ipv4_method != "disabled": + options.update({"ipv4.may-fail": self.may_fail4}) # Layer 2 options. if self.mac: @@ -1902,184 +1904,241 @@ def connection_options(self, detect_change=False): # Connections that can have a master. if self.slave_conn_type: - options.update({ - 'connection.master': self.master, - 'connection.slave-type': self.slave_type, - }) + options.update( + { + "connection.master": self.master, + "connection.slave-type": self.slave_type, + } + ) # Options specific to a connection type. - if self.type == 'bond': - options.update({ - 'arp-interval': self.arp_interval, - 'arp-ip-target': self.arp_ip_target, - 'downdelay': self.downdelay, - 'miimon': self.miimon, - 'mode': self.mode, - 'primary': self.primary, - 'updelay': self.updelay, - 'xmit_hash_policy': self.xmit_hash_policy, - 'fail_over_mac': self.fail_over_mac, - }) - elif self.type == 'bond-slave': - if self.slave_type and self.slave_type != 'bond': - self.module.fail_json(msg=(f"Connection type '{self.type}' cannot be combined with '{self.slave_type}' slave-type. " - f"Allowed slave-type for '{self.type}' is 'bond'.")) + if self.type == "bond": + options.update( + { + "arp-interval": self.arp_interval, + "arp-ip-target": self.arp_ip_target, + "downdelay": self.downdelay, + "miimon": self.miimon, + "mode": self.mode, + "primary": self.primary, + "updelay": self.updelay, + "xmit_hash_policy": self.xmit_hash_policy, + "fail_over_mac": self.fail_over_mac, + } + ) + elif self.type == "bond-slave": + if self.slave_type and self.slave_type != "bond": + self.module.fail_json( + msg=( + f"Connection type '{self.type}' cannot be combined with '{self.slave_type}' slave-type. " + f"Allowed slave-type for '{self.type}' is 'bond'." + ) + ) if not self.slave_type: - self.module.warn("Connection 'slave-type' property automatically set to 'bond' " - "because of using 'bond-slave' connection type.") - options.update({ - 'connection.slave-type': 'bond', - }) - elif self.type == 'bridge': - options.update({ - 'bridge.ageing-time': self.ageingtime, - 'bridge.forward-delay': self.forwarddelay, - 'bridge.hello-time': self.hellotime, - 'bridge.max-age': self.maxage, - 'bridge.priority': self.priority, - 'bridge.stp': self.stp, - }) + self.module.warn( + "Connection 'slave-type' property automatically set to 'bond' " + "because of using 'bond-slave' connection type." + ) + options.update( + { + "connection.slave-type": "bond", + } + ) + elif self.type == "bridge": + options.update( + { + "bridge.ageing-time": self.ageingtime, + "bridge.forward-delay": self.forwarddelay, + "bridge.hello-time": self.hellotime, + "bridge.max-age": self.maxage, + "bridge.priority": self.priority, + "bridge.stp": self.stp, + } + ) # priority make sense when stp enabled, otherwise nmcli keeps bridge-priority to 32768 regrdless of input. 
# force ignoring to save idempotency if self.stp: - options.update({'bridge.priority': self.priority}) - elif self.type == 'team': - options.update({ - 'team.runner': self.runner, - 'team.runner-hwaddr-policy': self.runner_hwaddr_policy, - }) + options.update({"bridge.priority": self.priority}) + elif self.type == "team": + options.update( + { + "team.runner": self.runner, + "team.runner-hwaddr-policy": self.runner_hwaddr_policy, + } + ) if self.runner_fast_rate is not None: - options.update({ - 'team.runner-fast-rate': self.runner_fast_rate, - }) - elif self.type == 'bridge-slave': - if self.slave_type and self.slave_type != 'bridge': - self.module.fail_json(msg=(f"Connection type '{self.type}' cannot be combined with '{self.slave_type}' slave-type. " - f"Allowed slave-type for '{self.type}' is 'bridge'.")) + options.update( + { + "team.runner-fast-rate": self.runner_fast_rate, + } + ) + elif self.type == "bridge-slave": + if self.slave_type and self.slave_type != "bridge": + self.module.fail_json( + msg=( + f"Connection type '{self.type}' cannot be combined with '{self.slave_type}' slave-type. " + f"Allowed slave-type for '{self.type}' is 'bridge'." + ) + ) if not self.slave_type: - self.module.warn("Connection 'slave-type' property automatically set to 'bridge' " - "because of using 'bridge-slave' connection type.") - options.update({'connection.slave-type': 'bridge'}) + self.module.warn( + "Connection 'slave-type' property automatically set to 'bridge' " + "because of using 'bridge-slave' connection type." + ) + options.update({"connection.slave-type": "bridge"}) self.module.warn( "Connection type as 'bridge-slave' implies 'ethernet' connection with 'bridge' slave-type. " "Consider using slave_type='bridge' with necessary type." ) - options.update({ - 'bridge-port.path-cost': self.path_cost, - 'bridge-port.hairpin-mode': self.hairpin, - 'bridge-port.priority': self.slavepriority, - }) - elif self.type == 'team-slave': - if self.slave_type and self.slave_type != 'team': - self.module.fail_json(msg=(f"Connection type '{self.type}' cannot be combined with '{self.slave_type}' slave-type. " - f"Allowed slave-type for '{self.type}' is 'team'.")) + options.update( + { + "bridge-port.path-cost": self.path_cost, + "bridge-port.hairpin-mode": self.hairpin, + "bridge-port.priority": self.slavepriority, + } + ) + elif self.type == "team-slave": + if self.slave_type and self.slave_type != "team": + self.module.fail_json( + msg=( + f"Connection type '{self.type}' cannot be combined with '{self.slave_type}' slave-type. " + f"Allowed slave-type for '{self.type}' is 'team'." + ) + ) if not self.slave_type: - self.module.warn("Connection 'slave-type' property automatically set to 'team' " - "because of using 'team-slave' connection type.") - options.update({ - 'connection.slave-type': 'team', - }) + self.module.warn( + "Connection 'slave-type' property automatically set to 'team' " + "because of using 'team-slave' connection type." 
+ ) + options.update( + { + "connection.slave-type": "team", + } + ) elif self.tunnel_conn_type: - options.update({ - 'ip-tunnel.local': self.ip_tunnel_local, - 'ip-tunnel.mode': self.type, - 'ip-tunnel.parent': self.ip_tunnel_dev, - 'ip-tunnel.remote': self.ip_tunnel_remote, - }) - if self.type == 'gre': - options.update({ - 'ip-tunnel.input-key': self.ip_tunnel_input_key, - 'ip-tunnel.output-key': self.ip_tunnel_output_key - }) - elif self.type == 'vlan': - options.update({ - 'vlan.id': self.vlanid, - 'vlan.parent': self.vlandev, - 'vlan.flags': self.flags, - 'vlan.ingress': self.ingress, - 'vlan.egress': self.egress, - }) - elif self.type == 'vxlan': - options.update({ - 'vxlan.id': self.vxlan_id, - 'vxlan.local': self.vxlan_local, - 'vxlan.remote': self.vxlan_remote, - }) - elif self.type == 'wifi': - options.update({ - '802-11-wireless.ssid': self.ssid, - 'connection.slave-type': ('bond' if self.slave_type is None else self.slave_type) if self.master else None, - }) + options.update( + { + "ip-tunnel.local": self.ip_tunnel_local, + "ip-tunnel.mode": self.type, + "ip-tunnel.parent": self.ip_tunnel_dev, + "ip-tunnel.remote": self.ip_tunnel_remote, + } + ) + if self.type == "gre": + options.update( + {"ip-tunnel.input-key": self.ip_tunnel_input_key, "ip-tunnel.output-key": self.ip_tunnel_output_key} + ) + elif self.type == "vlan": + options.update( + { + "vlan.id": self.vlanid, + "vlan.parent": self.vlandev, + "vlan.flags": self.flags, + "vlan.ingress": self.ingress, + "vlan.egress": self.egress, + } + ) + elif self.type == "vxlan": + options.update( + { + "vxlan.id": self.vxlan_id, + "vxlan.local": self.vxlan_local, + "vxlan.remote": self.vxlan_remote, + } + ) + elif self.type == "wifi": + options.update( + { + "802-11-wireless.ssid": self.ssid, + "connection.slave-type": ("bond" if self.slave_type is None else self.slave_type) + if self.master + else None, + } + ) if self.wifi: for name, value in self.wifi.items(): - options.update({ - f'802-11-wireless.{name}': value - }) + options.update({f"802-11-wireless.{name}": value}) if self.wifi_sec: for name, value in self.wifi_sec.items(): - options.update({ - f'802-11-wireless-security.{name}': value - }) - elif self.type == 'gsm': + options.update({f"802-11-wireless-security.{name}": value}) + elif self.type == "gsm": if self.gsm: for name, value in self.gsm.items(): - options.update({ - f'gsm.{name}': value, - }) - elif self.type == 'macvlan': + options.update( + { + f"gsm.{name}": value, + } + ) + elif self.type == "macvlan": if self.macvlan: for name, value in self.macvlan.items(): - options.update({ - f'macvlan.{name}': value, - }) - elif self.state == 'present': - raise NmcliModuleError('type is macvlan but all of the following are missing: macvlan') - elif self.type == 'wireguard': + options.update( + { + f"macvlan.{name}": value, + } + ) + elif self.state == "present": + raise NmcliModuleError("type is macvlan but all of the following are missing: macvlan") + elif self.type == "wireguard": if self.wireguard: for name, value in self.wireguard.items(): - options.update({ - f'wireguard.{name}': value, - }) - elif self.type == 'vpn': + options.update( + { + f"wireguard.{name}": value, + } + ) + elif self.type == "vpn": if self.vpn: - vpn_data_values = '' + vpn_data_values = "" for name, value in self.vpn.items(): - if name == 'service-type': - options.update({ - 'vpn.service-type': value, - }) - elif name == 'permissions': - options.update({ - 'connection.permissions': value, - }) + if name == "service-type": + options.update( + { + 
"vpn.service-type": value, + } + ) + elif name == "permissions": + options.update( + { + "connection.permissions": value, + } + ) else: - if vpn_data_values != '': - vpn_data_values += ', ' + if vpn_data_values != "": + vpn_data_values += ", " if isinstance(value, bool): value = self.bool_to_string(value) - vpn_data_values += f'{name}={value}' - options.update({ - 'vpn.data': vpn_data_values, - }) - elif self.type == 'infiniband': - options.update({ - 'infiniband.transport-mode': self.transport_mode, - }) + vpn_data_values += f"{name}={value}" + options.update( + { + "vpn.data": vpn_data_values, + } + ) + elif self.type == "infiniband": + options.update( + { + "infiniband.transport-mode": self.transport_mode, + } + ) if self.infiniband_mac: - options['infiniband.mac-address'] = self.infiniband_mac - elif self.type == 'vrf': - options.update({ - 'table': self.table, - }) + options["infiniband.mac-address"] = self.infiniband_mac + elif self.type == "vrf": + options.update( + { + "table": self.table, + } + ) - if self.type == 'ethernet': + if self.type == "ethernet": if self.sriov: for name, value in self.sriov.items(): - options.update({ - f'sriov.{name}': value, - }) + options.update( + { + f"sriov.{name}": value, + } + ) # Convert settings values based on the situation. for setting, value in options.items(): @@ -2089,13 +2148,13 @@ def connection_options(self, detect_change=False): # Convert all bool options to yes/no. convert_func = self.bool_to_string if detect_change: - if setting in ('vlan.id', 'vxlan.id'): + if setting in ("vlan.id", "vxlan.id"): # Convert VLAN/VXLAN IDs to text when detecting changes. convert_func = to_text elif setting == self.mtu_setting: # MTU is 'auto' by default when detecting changes. convert_func = self.mtu_to_string - elif setting == 'ipv6.ip6-privacy': + elif setting == "ipv6.ip6-privacy": convert_func = self.ip6_privacy_to_num elif setting_type is list: # Convert lists to strings for nmcli create/modify commands. 
@@ -2109,116 +2168,116 @@ def connection_options(self, detect_change=False): @property def ip_conn_type(self): return self.type in ( - 'bond', - 'bridge', - 'dummy', - 'ethernet', - '802-3-ethernet', - 'generic', - 'gre', - 'infiniband', - 'ipip', - 'sit', - 'team', - 'vlan', - 'wifi', - '802-11-wireless', - 'gsm', - 'macvlan', - 'wireguard', - 'vpn', - 'loopback', - 'ovs-interface', - 'vrf' + "bond", + "bridge", + "dummy", + "ethernet", + "802-3-ethernet", + "generic", + "gre", + "infiniband", + "ipip", + "sit", + "team", + "vlan", + "wifi", + "802-11-wireless", + "gsm", + "macvlan", + "wireguard", + "vpn", + "loopback", + "ovs-interface", + "vrf", ) @property def mac_setting(self): - if self.type == 'bridge': - return 'bridge.mac-address' + if self.type == "bridge": + return "bridge.mac-address" else: - return '802-3-ethernet.cloned-mac-address' + return "802-3-ethernet.cloned-mac-address" @property def mtu_conn_type(self): return self.type in ( - 'bond', - 'bond-slave', - 'dummy', - 'ethernet', - 'infiniband', - 'team-slave', - 'vlan', + "bond", + "bond-slave", + "dummy", + "ethernet", + "infiniband", + "team-slave", + "vlan", ) @property def mtu_setting(self): - if self.type == 'infiniband': - return 'infiniband.mtu' + if self.type == "infiniband": + return "infiniband.mtu" else: - return '802-3-ethernet.mtu' + return "802-3-ethernet.mtu" @staticmethod def mtu_to_string(mtu): if not mtu: - return 'auto' + return "auto" else: return to_text(mtu) @staticmethod def ip6_privacy_to_num(privacy): ip6_privacy_values = { - 'disabled': '0', - 'prefer-public-addr': '1 (enabled, prefer public IP)', - 'prefer-temp-addr': '2 (enabled, prefer temporary IP)', - 'unknown': '-1', + "disabled": "0", + "prefer-public-addr": "1 (enabled, prefer public IP)", + "prefer-temp-addr": "2 (enabled, prefer temporary IP)", + "unknown": "-1", } if privacy is None: return None if privacy not in ip6_privacy_values: - raise AssertionError(f'{privacy} is invalid ip_privacy6 option') + raise AssertionError(f"{privacy} is invalid ip_privacy6 option") return ip6_privacy_values[privacy] @property def slave_conn_type(self): return self.type in ( - 'ethernet', - 'bridge', - 'bond', - 'vlan', - 'team', - 'wifi', - 'bond-slave', - 'bridge-slave', - 'team-slave', - 'wifi', - 'infiniband', - 'ovs-port', - 'ovs-interface', + "ethernet", + "bridge", + "bond", + "vlan", + "team", + "wifi", + "bond-slave", + "bridge-slave", + "team-slave", + "wifi", + "infiniband", + "ovs-port", + "ovs-interface", ) @property def tunnel_conn_type(self): return self.type in ( - 'gre', - 'ipip', - 'sit', + "gre", + "ipip", + "sit", ) @staticmethod def enforce_ipv4_cidr_notation(ip4_addresses): if ip4_addresses is None: return None - return [address if '/' in address else f"{address}/32" for address in ip4_addresses] + return [address if "/" in address else f"{address}/32" for address in ip4_addresses] @staticmethod def enforce_ipv6_cidr_notation(ip6_addresses): if ip6_addresses is None: return None - return [address if '/' in address else f"{address}/128" for address in ip6_addresses] + return [address if "/" in address else f"{address}/128" for address in ip6_addresses] def enforce_routes_format(self, routes, routes_extended): if routes is not None: @@ -2230,16 +2289,16 @@ def enforce_routes_format(self, routes, routes_extended): @staticmethod def route_to_string(route): - result_str = '' - result_str += route['ip'] - if route.get('next_hop') is not None: + result_str = "" + result_str += route["ip"] + if route.get("next_hop") is not None: result_str 
+= f" {route['next_hop']}" - if route.get('metric') is not None: + if route.get("metric") is not None: result_str += f" {route['metric']!s}" for attribute, value in sorted(route.items()): - if attribute not in ('ip', 'next_hop', 'metric') and value is not None: - result_str += f' {attribute}={str(value).lower()}' + if attribute not in ("ip", "next_hop", "metric") and value is not None: + result_str += f" {attribute}={str(value).lower()}" return result_str @@ -2259,39 +2318,42 @@ def list_to_string(lst): @staticmethod def settings_type(setting): - if setting in {'bridge.stp', - 'bridge-port.hairpin-mode', - 'connection.autoconnect', - 'ipv4.never-default', - 'ipv4.ignore-auto-dns', - 'ipv4.ignore-auto-routes', - 'ipv4.may-fail', - 'ipv6.ignore-auto-dns', - 'ipv6.ignore-auto-routes', - '802-11-wireless.hidden', - 'team.runner-fast-rate'}: + if setting in { + "bridge.stp", + "bridge-port.hairpin-mode", + "connection.autoconnect", + "ipv4.never-default", + "ipv4.ignore-auto-dns", + "ipv4.ignore-auto-routes", + "ipv4.may-fail", + "ipv6.ignore-auto-dns", + "ipv6.ignore-auto-routes", + "802-11-wireless.hidden", + "team.runner-fast-rate", + }: return bool - elif setting in {'ipv4.addresses', - 'ipv6.addresses', - 'ipv4.dns', - 'ipv4.dns-search', - 'ipv4.dns-options', - 'ipv4.routes', - 'ipv4.routing-rules', - 'ipv6.dns', - 'ipv6.dns-search', - 'ipv6.dns-options', - 'ipv6.routes', - '802-11-wireless-security.group', - '802-11-wireless-security.leap-password-flags', - '802-11-wireless-security.pairwise', - '802-11-wireless-security.proto', - '802-11-wireless-security.psk-flags', - '802-11-wireless-security.wep-key-flags', - '802-11-wireless.mac-address-blacklist'}: + elif setting in { + "ipv4.addresses", + "ipv6.addresses", + "ipv4.dns", + "ipv4.dns-search", + "ipv4.dns-options", + "ipv4.routes", + "ipv4.routing-rules", + "ipv6.dns", + "ipv6.dns-search", + "ipv6.dns-options", + "ipv6.routes", + "802-11-wireless-security.group", + "802-11-wireless-security.leap-password-flags", + "802-11-wireless-security.pairwise", + "802-11-wireless-security.proto", + "802-11-wireless-security.psk-flags", + "802-11-wireless-security.wep-key-flags", + "802-11-wireless.mac-address-blacklist", + }: return list - elif setting in {'connection.autoconnect-priority', - 'connection.autoconnect-retries'}: + elif setting in {"connection.autoconnect-priority", "connection.autoconnect-retries"}: return int return str @@ -2299,18 +2361,18 @@ def get_route_params(self, raw_values): routes_params = [] for raw_value in raw_values: route_params = {} - for parameter, value in re.findall(r'([\w-]*)\s?=\s?([^\s,}]*)', raw_value): - if parameter == 'nh': - route_params['next_hop'] = value - elif parameter == 'mt': - route_params['metric'] = value + for parameter, value in re.findall(r"([\w-]*)\s?=\s?([^\s,}]*)", raw_value): + if parameter == "nh": + route_params["next_hop"] = value + elif parameter == "mt": + route_params["metric"] = value else: route_params[parameter] = value routes_params.append(route_params) return [self.route_to_string(route_params) for route_params in routes_params] def list_connection_info(self): - cmd = [self.nmcli_bin, '--fields', 'name', '--terse', 'con', 'show'] + cmd = [self.nmcli_bin, "--fields", "name", "--terse", "con", "show"] (rc, out, err) = self.execute_command(cmd) if rc != 0: raise NmcliModuleError(err) @@ -2320,44 +2382,44 @@ def connection_exists(self): return self.conn_name in self.list_connection_info() def down_connection(self): - cmd = [self.nmcli_bin, 'con', 'down', self.conn_name] + cmd = 
[self.nmcli_bin, "con", "down", self.conn_name] return self.execute_command(cmd) def up_connection(self): - cmd = [self.nmcli_bin, 'con', 'up', self.conn_name] + cmd = [self.nmcli_bin, "con", "up", self.conn_name] return self.execute_command(cmd) def reload_connection(self): - cmd = [self.nmcli_bin, 'con', 'reload'] + cmd = [self.nmcli_bin, "con", "reload"] return self.execute_command(cmd) def connection_update(self, nmcli_command): - if nmcli_command == 'create': - cmd = [self.nmcli_bin, 'con', 'add', 'type'] + if nmcli_command == "create": + cmd = [self.nmcli_bin, "con", "add", "type"] if self.tunnel_conn_type: - cmd.append('ip-tunnel') + cmd.append("ip-tunnel") else: cmd.append(self.type) - cmd.append('con-name') - elif nmcli_command == 'modify': - cmd = [self.nmcli_bin, 'con', 'modify'] + cmd.append("con-name") + elif nmcli_command == "modify": + cmd = [self.nmcli_bin, "con", "modify"] else: self.module.fail_json(msg="Invalid nmcli command.") cmd.append(self.conn_name) # Use connection name as default for interface name on creation. - if nmcli_command == 'create' and self.ifname is None: + if nmcli_command == "create" and self.ifname is None: ifname = self.conn_name else: ifname = self.ifname options = { - 'connection.interface-name': ifname, + "connection.interface-name": ifname, } # VPN doesn't need an interface but if sended it must be a valid interface. - if self.type == 'vpn' and self.ifname is None: - del options['connection.interface-name'] + if self.type == "vpn" and self.ifname is None: + del options["connection.interface-name"] options.update(self.connection_options()) @@ -2365,20 +2427,20 @@ def connection_update(self, nmcli_command): for key, value in options.items(): if value is not None: if key in self.SECRET_OPTIONS: - self.edit_commands += [f'set {key} {value}'] + self.edit_commands += [f"set {key} {value}"] continue - if key == 'xmit_hash_policy': - cmd.extend(['+bond.options', f'xmit_hash_policy={value}']) + if key == "xmit_hash_policy": + cmd.extend(["+bond.options", f"xmit_hash_policy={value}"]) continue - if key == 'fail_over_mac': - cmd.extend(['+bond.options', f'fail_over_mac={value}']) + if key == "fail_over_mac": + cmd.extend(["+bond.options", f"fail_over_mac={value}"]) continue cmd.extend([key, value]) return self.execute_command(cmd) def create_connection(self): - status = self.connection_update('create') + status = self.connection_update("create") if status[0] == 0 and self.edit_commands: status = self.edit_connection() if self.create_connection_up: @@ -2387,64 +2449,64 @@ def create_connection(self): @property def create_connection_up(self): - if self.type in ('bond', 'dummy', 'ethernet', 'infiniband', 'wifi'): + if self.type in ("bond", "dummy", "ethernet", "infiniband", "wifi"): if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): return True - elif self.type == 'team': + elif self.type == "team": if (self.dns4 is not None) or (self.dns6 is not None): return True return False def remove_connection(self): # self.down_connection() - cmd = [self.nmcli_bin, 'con', 'del', self.conn_name] + cmd = [self.nmcli_bin, "con", "del", self.conn_name] return self.execute_command(cmd) def modify_connection(self): - status = self.connection_update('modify') + status = self.connection_update("modify") if status[0] == 0 and self.edit_commands: status = self.edit_connection() return status def edit_connection(self): - commands = self.edit_commands + ['save', 'quit'] + commands = self.edit_commands + ["save", "quit"] return 
self.execute_edit_commands(commands, arguments=[self.conn_name]) def show_connection(self): - cmd = [self.nmcli_bin, '--show-secrets', 'con', 'show', self.conn_name] + cmd = [self.nmcli_bin, "--show-secrets", "con", "show", self.conn_name] (rc, out, err) = self.execute_command(cmd) if rc != 0: raise NmcliModuleError(err) - p_enum_value = re.compile(r'^([-]?\d+) \((\w+)\)$') + p_enum_value = re.compile(r"^([-]?\d+) \((\w+)\)$") conn_info = dict() for line in out.splitlines(): - pair = line.split(':', 1) + pair = line.split(":", 1) key = pair[0].strip() key_type = self.settings_type(key) if key and len(pair) > 1: raw_value = pair[1].lstrip() - if raw_value == '--': + if raw_value == "--": if key_type == list: conn_info[key] = [] else: conn_info[key] = None - elif key == 'bond.options': + elif key == "bond.options": # Aliases such as 'miimon', 'downdelay' are equivalent to the +bond.options 'option=value' syntax. - opts = raw_value.split(',') + opts = raw_value.split(",") for opt in opts: - alias_pair = opt.split('=', 1) + alias_pair = opt.split("=", 1) if len(alias_pair) > 1: alias_key = alias_pair[0] alias_value = alias_pair[1] conn_info[alias_key] = alias_value - elif key in ('ipv4.routes', 'ipv6.routes'): - conn_info[key] = [s.strip() for s in raw_value.split(';')] + elif key in ("ipv4.routes", "ipv6.routes"): + conn_info[key] = [s.strip() for s in raw_value.split(";")] elif key_type == list: - conn_info[key] = [s.strip() for s in raw_value.split(',')] + conn_info[key] = [s.strip() for s in raw_value.split(",")] else: m_enum = p_enum_value.match(raw_value) if m_enum is not None: @@ -2458,34 +2520,34 @@ def show_connection(self): def get_supported_properties(self, setting): properties = [] - if setting == '802-11-wireless-security': - set_property = 'psk' - set_value = 'FAKEVALUE' - commands = [f'set {setting}.{set_property} {set_value}'] + if setting == "802-11-wireless-security": + set_property = "psk" + set_value = "FAKEVALUE" + commands = [f"set {setting}.{set_property} {set_value}"] else: commands = [] - commands += [f'print {setting}', 'quit', 'yes'] + commands += [f"print {setting}", "quit", "yes"] - (rc, out, err) = self.execute_edit_commands(commands, arguments=['type', self.type]) + (rc, out, err) = self.execute_edit_commands(commands, arguments=["type", self.type]) if rc != 0: raise NmcliModuleError(err) for line in out.splitlines(): - prefix = f'{setting}.' + prefix = f"{setting}." 
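# keep only output lines belonging to this setting; the property name is the text between the prefix and the first ':'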
if line.startswith(prefix): - pair = line.split(':', 1) - property = pair[0].strip().replace(prefix, '') + pair = line.split(":", 1) + property = pair[0].strip().replace(prefix, "") properties.append(property) return properties def check_for_unsupported_properties(self, setting): - if setting == '802-11-wireless': - setting_key = 'wifi' - elif setting == '802-11-wireless-security': - setting_key = 'wifi_sec' + if setting == "802-11-wireless": + setting_key = "wifi" + elif setting == "802-11-wireless-security": + setting_key = "wifi_sec" else: setting_key = setting @@ -2499,7 +2561,7 @@ def check_for_unsupported_properties(self, setting): if unsupported_properties: msg_options = [] for property in unsupported_properties: - msg_options.append(f'{setting_key}.{property}') + msg_options.append(f"{setting_key}.{property}") msg = 'Invalid or unsupported option(s): "%s"' % '", "'.join(msg_options) if self.ignore_unsupported_suboptions: @@ -2523,11 +2585,11 @@ def _compare_conn_params(self, conn_info, options): if key in conn_info: current_value = conn_info[key] - if key == '802-11-wireless.wake-on-wlan' and current_value is not None: - match = re.match('0x([0-9A-Fa-f]+)', current_value) + if key == "802-11-wireless.wake-on-wlan" and current_value is not None: + match = re.match("0x([0-9A-Fa-f]+)", current_value) if match: current_value = str(int(match.group(1), 16)) - if key in ('ipv4.routes', 'ipv6.routes') and current_value is not None: + if key in ("ipv4.routes", "ipv6.routes") and current_value is not None: current_value = self.get_route_params(current_value) if key == self.mac_setting: # MAC addresses are case insensitive, nmcli always reports them in uppercase @@ -2535,24 +2597,33 @@ def _compare_conn_params(self, conn_info, options): # ensure current_value is also converted to uppercase in case nmcli changes behaviour if current_value: current_value = current_value.upper() - if key == 'gsm.apn': + if key == "gsm.apn": # Depending on version nmcli adds double-qoutes to gsm.apn # Need to strip them in order to compare both if current_value: current_value = current_value.strip('"') if key == self.mtu_setting and self.mtu is None: self.mtu = 0 - if key == 'vpn.data': + if key == "vpn.data": if current_value: - current_value = sorted(re.sub(r'\s*=\s*', '=', part.strip(), count=1) for part in current_value.split(',')) - value = sorted(part.strip() for part in value.split(',')) + current_value = sorted( + re.sub(r"\s*=\s*", "=", part.strip(), count=1) for part in current_value.split(",") + ) + value = sorted(part.strip() for part in value.split(",")) else: # parameter does not exist current_value = None if isinstance(current_value, list) and isinstance(value, list): # compare values between two lists - if key in ('ipv4.addresses', 'ipv6.addresses', 'ipv4.dns', 'ipv6.dns', 'ipv4.dns-search', 'ipv6.dns-search'): + if key in ( + "ipv4.addresses", + "ipv6.addresses", + "ipv4.dns", + "ipv6.dns", + "ipv4.dns-search", + "ipv6.dns-search", + ): # The order of IP addresses matters because the first one # is the default source address for outbound connections. 
# Similarly, the order of DNS nameservers and search @@ -2560,7 +2631,15 @@ def _compare_conn_params(self, conn_info, options): changed |= current_value != value else: changed |= sorted(current_value) != sorted(value) - elif all([key == self.mtu_setting, self.type == 'dummy', current_value is None, value == 'auto', self.mtu is None]): + elif all( + [ + key == self.mtu_setting, + self.type == "dummy", + current_value is None, + value == "auto", + self.mtu is None, + ] + ): value = None else: value = to_text(value) @@ -2571,25 +2650,25 @@ def _compare_conn_params(self, conn_info, options): diff_after[key] = value diff = { - 'before': diff_before, - 'after': diff_after, + "before": diff_before, + "after": diff_after, } return (changed, diff) def is_connection_changed(self): options = { - 'connection.interface-name': self.ifname, + "connection.interface-name": self.ifname, } # VPN doesn't need an interface but if sended it must be a valid interface. - if self.type == 'vpn' and self.ifname is None: - del options['connection.interface-name'] + if self.type == "vpn" and self.ifname is None: + del options["connection.interface-name"] if not self.type: - current_con_type = self.show_connection().get('connection.type') + current_con_type = self.show_connection().get("connection.type") if current_con_type: - if current_con_type == '802-11-wireless': - current_con_type = 'wifi' + if current_con_type == "802-11-wireless": + current_con_type = "wifi" self.type = current_con_type options.update(self.connection_options(detect_change=True)) @@ -2600,164 +2679,189 @@ def main(): # Parsing argument file module = AnsibleModule( argument_spec=dict( - ignore_unsupported_suboptions=dict(type='bool', default=False), - autoconnect=dict(type='bool', default=True), - autoconnect_priority=dict(type='int'), - autoconnect_retries=dict(type='int'), - state=dict(type='str', required=True, choices=['absent', 'present', 'up', 'down']), - conn_name=dict(type='str', required=True), - conn_reload=dict(type='bool', default=False), - master=dict(type='str'), - slave_type=dict(type='str', choices=['bond', 'bridge', 'team', 'ovs-port', 'vrf']), - ifname=dict(type='str'), - type=dict(type='str', - choices=[ - 'bond', - 'bond-slave', - 'bridge', - 'bridge-slave', - 'dummy', - 'ethernet', - 'generic', - 'gre', - 'infiniband', - 'ipip', - 'sit', - 'team', - 'team-slave', - 'vlan', - 'vxlan', - 'wifi', - 'gsm', - 'macvlan', - 'wireguard', - 'vpn', - 'loopback', - 'ovs-interface', - 'ovs-bridge', - 'ovs-port', - 'vrf', - ]), - ip4=dict(type='list', elements='str'), - gw4=dict(type='str'), - gw4_ignore_auto=dict(type='bool', default=False), - routes4=dict(type='list', elements='str'), - routes4_extended=dict(type='list', - elements='dict', - options=dict( - ip=dict(type='str', required=True), - next_hop=dict(type='str'), - metric=dict(type='int'), - table=dict(type='int'), - tos=dict(type='int'), - cwnd=dict(type='int'), - mtu=dict(type='int'), - onlink=dict(type='bool') - )), - route_metric4=dict(type='int'), - routing_rules4=dict(type='list', elements='str'), - never_default4=dict(type='bool', default=False), - dns4=dict(type='list', elements='str'), - dns4_search=dict(type='list', elements='str'), - dns4_options=dict(type='list', elements='str'), - dns4_ignore_auto=dict(type='bool', default=False), - method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']), - may_fail4=dict(type='bool', default=True), - dhcp_client_id=dict(type='str'), - ip6=dict(type='list', elements='str'), - gw6=dict(type='str'), - 
gw6_ignore_auto=dict(type='bool', default=False), - dns6=dict(type='list', elements='str'), - dns6_search=dict(type='list', elements='str'), - dns6_options=dict(type='list', elements='str'), - dns6_ignore_auto=dict(type='bool', default=False), - routes6=dict(type='list', elements='str'), - routes6_extended=dict(type='list', - elements='dict', - options=dict( - ip=dict(type='str', required=True), - next_hop=dict(type='str'), - metric=dict(type='int'), - table=dict(type='int'), - cwnd=dict(type='int'), - mtu=dict(type='int'), - onlink=dict(type='bool') - )), - route_metric6=dict(type='int'), - method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared', 'disabled']), - ip_privacy6=dict(type='str', choices=['disabled', 'prefer-public-addr', 'prefer-temp-addr', 'unknown']), - addr_gen_mode6=dict(type='str', choices=['default', 'default-or-eui64', 'eui64', 'stable-privacy']), + ignore_unsupported_suboptions=dict(type="bool", default=False), + autoconnect=dict(type="bool", default=True), + autoconnect_priority=dict(type="int"), + autoconnect_retries=dict(type="int"), + state=dict(type="str", required=True, choices=["absent", "present", "up", "down"]), + conn_name=dict(type="str", required=True), + conn_reload=dict(type="bool", default=False), + master=dict(type="str"), + slave_type=dict(type="str", choices=["bond", "bridge", "team", "ovs-port", "vrf"]), + ifname=dict(type="str"), + type=dict( + type="str", + choices=[ + "bond", + "bond-slave", + "bridge", + "bridge-slave", + "dummy", + "ethernet", + "generic", + "gre", + "infiniband", + "ipip", + "sit", + "team", + "team-slave", + "vlan", + "vxlan", + "wifi", + "gsm", + "macvlan", + "wireguard", + "vpn", + "loopback", + "ovs-interface", + "ovs-bridge", + "ovs-port", + "vrf", + ], + ), + ip4=dict(type="list", elements="str"), + gw4=dict(type="str"), + gw4_ignore_auto=dict(type="bool", default=False), + routes4=dict(type="list", elements="str"), + routes4_extended=dict( + type="list", + elements="dict", + options=dict( + ip=dict(type="str", required=True), + next_hop=dict(type="str"), + metric=dict(type="int"), + table=dict(type="int"), + tos=dict(type="int"), + cwnd=dict(type="int"), + mtu=dict(type="int"), + onlink=dict(type="bool"), + ), + ), + route_metric4=dict(type="int"), + routing_rules4=dict(type="list", elements="str"), + never_default4=dict(type="bool", default=False), + dns4=dict(type="list", elements="str"), + dns4_search=dict(type="list", elements="str"), + dns4_options=dict(type="list", elements="str"), + dns4_ignore_auto=dict(type="bool", default=False), + method4=dict(type="str", choices=["auto", "link-local", "manual", "shared", "disabled"]), + may_fail4=dict(type="bool", default=True), + dhcp_client_id=dict(type="str"), + ip6=dict(type="list", elements="str"), + gw6=dict(type="str"), + gw6_ignore_auto=dict(type="bool", default=False), + dns6=dict(type="list", elements="str"), + dns6_search=dict(type="list", elements="str"), + dns6_options=dict(type="list", elements="str"), + dns6_ignore_auto=dict(type="bool", default=False), + routes6=dict(type="list", elements="str"), + routes6_extended=dict( + type="list", + elements="dict", + options=dict( + ip=dict(type="str", required=True), + next_hop=dict(type="str"), + metric=dict(type="int"), + table=dict(type="int"), + cwnd=dict(type="int"), + mtu=dict(type="int"), + onlink=dict(type="bool"), + ), + ), + route_metric6=dict(type="int"), + method6=dict(type="str", choices=["ignore", "auto", "dhcp", "link-local", "manual", "shared", "disabled"]), + 
ip_privacy6=dict(type="str", choices=["disabled", "prefer-public-addr", "prefer-temp-addr", "unknown"]), + addr_gen_mode6=dict(type="str", choices=["default", "default-or-eui64", "eui64", "stable-privacy"]), # Bond Specific vars - mode=dict(type='str', default='balance-rr', - choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']), - miimon=dict(type='int'), - downdelay=dict(type='int'), - updelay=dict(type='int'), - xmit_hash_policy=dict(type='str'), - fail_over_mac=dict(type='str', choices=['none', 'active', 'follow']), - arp_interval=dict(type='int'), - arp_ip_target=dict(type='str'), - primary=dict(type='str'), + mode=dict( + type="str", + default="balance-rr", + choices=[ + "802.3ad", + "active-backup", + "balance-alb", + "balance-rr", + "balance-tlb", + "balance-xor", + "broadcast", + ], + ), + miimon=dict(type="int"), + downdelay=dict(type="int"), + updelay=dict(type="int"), + xmit_hash_policy=dict(type="str"), + fail_over_mac=dict(type="str", choices=["none", "active", "follow"]), + arp_interval=dict(type="int"), + arp_ip_target=dict(type="str"), + primary=dict(type="str"), # general usage - mtu=dict(type='int'), - mac=dict(type='str'), - zone=dict(type='str'), + mtu=dict(type="int"), + mac=dict(type="str"), + zone=dict(type="str"), # bridge specific vars - stp=dict(type='bool', default=True), - priority=dict(type='int', default=128), - slavepriority=dict(type='int', default=32), - forwarddelay=dict(type='int', default=15), - hellotime=dict(type='int', default=2), - maxage=dict(type='int', default=20), - ageingtime=dict(type='int', default=300), - hairpin=dict(type='bool', default=False), - path_cost=dict(type='int', default=100), + stp=dict(type="bool", default=True), + priority=dict(type="int", default=128), + slavepriority=dict(type="int", default=32), + forwarddelay=dict(type="int", default=15), + hellotime=dict(type="int", default=2), + maxage=dict(type="int", default=20), + ageingtime=dict(type="int", default=300), + hairpin=dict(type="bool", default=False), + path_cost=dict(type="int", default=100), # team specific vars - runner=dict(type='str', default='roundrobin', - choices=['broadcast', 'roundrobin', 'activebackup', 'loadbalance', 'lacp']), + runner=dict( + type="str", + default="roundrobin", + choices=["broadcast", "roundrobin", "activebackup", "loadbalance", "lacp"], + ), # team active-backup runner specific options - runner_hwaddr_policy=dict(type='str', choices=['same_all', 'by_active', 'only_active']), + runner_hwaddr_policy=dict(type="str", choices=["same_all", "by_active", "only_active"]), # team lacp runner specific options - runner_fast_rate=dict(type='bool'), + runner_fast_rate=dict(type="bool"), # vlan specific vars - vlanid=dict(type='int'), - vlandev=dict(type='str'), - flags=dict(type='str'), - ingress=dict(type='str'), - egress=dict(type='str'), + vlanid=dict(type="int"), + vlandev=dict(type="str"), + flags=dict(type="str"), + ingress=dict(type="str"), + egress=dict(type="str"), # vxlan specific vars - vxlan_id=dict(type='int'), - vxlan_local=dict(type='str'), - vxlan_remote=dict(type='str'), + vxlan_id=dict(type="int"), + vxlan_local=dict(type="str"), + vxlan_remote=dict(type="str"), # ip-tunnel specific vars - ip_tunnel_dev=dict(type='str'), - ip_tunnel_local=dict(type='str'), - ip_tunnel_remote=dict(type='str'), + ip_tunnel_dev=dict(type="str"), + ip_tunnel_local=dict(type="str"), + ip_tunnel_remote=dict(type="str"), # ip-tunnel type gre specific vars - ip_tunnel_input_key=dict(type='str', 
no_log=True), - ip_tunnel_output_key=dict(type='str', no_log=True), + ip_tunnel_input_key=dict(type="str", no_log=True), + ip_tunnel_output_key=dict(type="str", no_log=True), # 802-11-wireless* specific vars - ssid=dict(type='str'), - wifi=dict(type='dict'), - wifi_sec=dict(type='dict', no_log=True), - gsm=dict(type='dict'), - macvlan=dict(type='dict', options=dict( - mode=dict(type='int', choices=[1, 2, 3, 4, 5], required=True), - parent=dict(type='str', required=True), - promiscuous=dict(type='bool'), - tap=dict(type='bool'))), - wireguard=dict(type='dict'), - vpn=dict(type='dict'), - sriov=dict(type='dict'), - table=dict(type='int'), + ssid=dict(type="str"), + wifi=dict(type="dict"), + wifi_sec=dict(type="dict", no_log=True), + gsm=dict(type="dict"), + macvlan=dict( + type="dict", + options=dict( + mode=dict(type="int", choices=[1, 2, 3, 4, 5], required=True), + parent=dict(type="str", required=True), + promiscuous=dict(type="bool"), + tap=dict(type="bool"), + ), + ), + wireguard=dict(type="dict"), + vpn=dict(type="dict"), + sriov=dict(type="dict"), + table=dict(type="int"), # infiniband specific vars - transport_mode=dict(type='str', choices=['datagram', 'connected']), - infiniband_mac=dict(type='str'), - + transport_mode=dict(type="str", choices=["datagram", "connected"]), + infiniband_mac=dict(type="str"), ), - mutually_exclusive=[['never_default4', 'gw4'], - ['routes4_extended', 'routes4'], - ['routes6_extended', 'routes6']], + mutually_exclusive=[ + ["never_default4", "gw4"], + ["routes4_extended", "routes4"], + ["routes6_extended", "routes6"], + ], required_if=[ ("type", "wifi", ["ssid"]), ("type", "team-slave", ["master", "ifname"]), @@ -2765,12 +2869,12 @@ def main(): ], supports_check_mode=True, ) - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C") nmcli = Nmcli(module) - (rc, out, err) = (None, '', '') - result = {'conn_name': nmcli.conn_name, 'state': nmcli.state} + (rc, out, err) = (None, "", "") + result = {"conn_name": nmcli.conn_name, "state": nmcli.state} # team checks if nmcli.type == "team": @@ -2778,58 +2882,58 @@ def main(): nmcli.module.fail_json(msg="Runner-hwaddr-policy is only allowed for runner activebackup") if nmcli.runner_fast_rate is not None and nmcli.runner != "lacp": nmcli.module.fail_json(msg="runner-fast-rate is only allowed for runner lacp") - if nmcli.type == 'wifi': + if nmcli.type == "wifi": unsupported_properties = {} if nmcli.wifi: - if 'ssid' in nmcli.wifi: + if "ssid" in nmcli.wifi: module.warn("Ignoring option 'wifi.ssid', it must be specified with option 'ssid'") - del nmcli.wifi['ssid'] - unsupported_properties['wifi'] = nmcli.check_for_unsupported_properties('802-11-wireless') + del nmcli.wifi["ssid"] + unsupported_properties["wifi"] = nmcli.check_for_unsupported_properties("802-11-wireless") if nmcli.wifi_sec: - unsupported_properties['wifi_sec'] = nmcli.check_for_unsupported_properties('802-11-wireless-security') + unsupported_properties["wifi_sec"] = nmcli.check_for_unsupported_properties("802-11-wireless-security") if nmcli.ignore_unsupported_suboptions and unsupported_properties: for setting_key, properties in unsupported_properties.items(): for property in properties: del getattr(nmcli, setting_key)[property] try: - if nmcli.state == 'absent': + if nmcli.state == "absent": if nmcli.connection_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = nmcli.down_connection() 
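# the rc from the down step is overwritten on the next line; only the removal's rc is checked below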
(rc, out, err) = nmcli.remove_connection() if rc != 0: - module.fail_json(name=f'Error removing connection named {nmcli.conn_name}', msg=err, rc=rc) + module.fail_json(name=f"Error removing connection named {nmcli.conn_name}", msg=err, rc=rc) - elif nmcli.state == 'present': + elif nmcli.state == "present": if nmcli.connection_exists(): changed, diff = nmcli.is_connection_changed() if module._diff: - result['diff'] = diff + result["diff"] = diff if changed: # modify connection (note: this function is check mode aware) # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type)) - result['Exists'] = 'Connections do exist so we are modifying them' + result["Exists"] = "Connections do exist so we are modifying them" if module.check_mode: module.exit_json(changed=True, **result) (rc, out, err) = nmcli.modify_connection() if nmcli.conn_reload: (rc, out, err) = nmcli.reload_connection() else: - result['Exists'] = 'Connections already exist and no changes made' + result["Exists"] = "Connections already exist and no changes made" if module.check_mode: module.exit_json(changed=False, **result) if not nmcli.connection_exists(): - result['Connection'] = f'Connection {nmcli.conn_name} of Type {nmcli.type} is being added' + result["Connection"] = f"Connection {nmcli.conn_name} of Type {nmcli.type} is being added" if module.check_mode: module.exit_json(changed=True, **result) (rc, out, err) = nmcli.create_connection() if rc is not None and rc != 0: module.fail_json(name=nmcli.conn_name, msg=err, rc=rc) - elif nmcli.state == 'up': + elif nmcli.state == "up": if nmcli.connection_exists(): if module.check_mode: module.exit_json(changed=True) @@ -2837,9 +2941,9 @@ def main(): (rc, out, err) = nmcli.reload_connection() (rc, out, err) = nmcli.up_connection() if rc != 0: - module.fail_json(name=f'Error bringing up connection named {nmcli.conn_name}', msg=err, rc=rc) + module.fail_json(name=f"Error bringing up connection named {nmcli.conn_name}", msg=err, rc=rc) - elif nmcli.state == 'down': + elif nmcli.state == "down": if nmcli.connection_exists(): if module.check_mode: module.exit_json(changed=True) @@ -2847,22 +2951,22 @@ def main(): (rc, out, err) = nmcli.reload_connection() (rc, out, err) = nmcli.down_connection() if rc != 0: - module.fail_json(name=f'Error bringing down connection named {nmcli.conn_name}', msg=err, rc=rc) + module.fail_json(name=f"Error bringing down connection named {nmcli.conn_name}", msg=err, rc=rc) except NmcliModuleError as e: module.fail_json(name=nmcli.conn_name, msg=str(e)) if rc is None: - result['changed'] = False + result["changed"] = False else: - result['changed'] = True + result["changed"] = True if out: - result['stdout'] = out + result["stdout"] = out if err: - result['stderr'] = err + result["stderr"] = err module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/nomad_job.py b/plugins/modules/nomad_job.py index 4016a2845bd..6841c95d61d 100644 --- a/plugins/modules/nomad_job.py +++ b/plugins/modules/nomad_job.py @@ -99,6 +99,7 @@ import_nomad = None try: import nomad + import_nomad = True except ImportError: import_nomad = False @@ -107,69 +108,62 @@ def run(): module = AnsibleModule( argument_spec=dict( - host=dict(required=True, type='str'), - port=dict(type='int', default=4646), - state=dict(required=True, choices=['present', 'absent']), - use_ssl=dict(type='bool', default=True), - timeout=dict(type='int', default=5), - validate_certs=dict(type='bool', 
default=True), - client_cert=dict(type='path'), - client_key=dict(type='path'), - namespace=dict(type='str'), - name=dict(type='str'), - content_format=dict(choices=['hcl', 'json'], default='hcl'), - content=dict(type='str'), - force_start=dict(type='bool', default=False), - token=dict(type='str', no_log=True) + host=dict(required=True, type="str"), + port=dict(type="int", default=4646), + state=dict(required=True, choices=["present", "absent"]), + use_ssl=dict(type="bool", default=True), + timeout=dict(type="int", default=5), + validate_certs=dict(type="bool", default=True), + client_cert=dict(type="path"), + client_key=dict(type="path"), + namespace=dict(type="str"), + name=dict(type="str"), + content_format=dict(choices=["hcl", "json"], default="hcl"), + content=dict(type="str"), + force_start=dict(type="bool", default=False), + token=dict(type="str", no_log=True), ), supports_check_mode=True, - mutually_exclusive=[ - ["name", "content"] - ], - required_one_of=[ - ['name', 'content'] - ] + mutually_exclusive=[["name", "content"]], + required_one_of=[["name", "content"]], ) if not import_nomad: module.fail_json(msg=missing_required_lib("python-nomad")) - certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key')) + certificate_ssl = (module.params.get("client_cert"), module.params.get("client_key")) nomad_client = nomad.Nomad( - host=module.params.get('host'), - port=module.params.get('port'), - secure=module.params.get('use_ssl'), - timeout=module.params.get('timeout'), - verify=module.params.get('validate_certs'), + host=module.params.get("host"), + port=module.params.get("port"), + secure=module.params.get("use_ssl"), + timeout=module.params.get("timeout"), + verify=module.params.get("validate_certs"), cert=certificate_ssl, - namespace=module.params.get('namespace'), - token=module.params.get('token') + namespace=module.params.get("namespace"), + token=module.params.get("token"), ) - if module.params.get('state') == "present": - - if module.params.get('name') and not module.params.get('force_start'): - module.fail_json(msg='For start job with name, force_start is needed') + if module.params.get("state") == "present": + if module.params.get("name") and not module.params.get("force_start"): + module.fail_json(msg="For start job with name, force_start is needed") changed = False - if module.params.get('content'): - - if module.params.get('content_format') == 'json': - - job_json = module.params.get('content') + if module.params.get("content"): + if module.params.get("content_format") == "json": + job_json = module.params.get("content") try: job_json = json.loads(job_json) except ValueError as e: module.fail_json(msg=to_native(e)) job = dict() - job['job'] = job_json + job["job"] = job_json try: - job_id = job_json.get('ID') + job_id = job_json.get("ID") if job_id is None: module.fail_json(msg="Cannot retrieve job with ID None") plan = nomad_client.job.plan_job(job_id, job, diff=True) - if not plan['Diff'].get('Type') == "None": + if not plan["Diff"].get("Type") == "None": changed = True if not module.check_mode: result = nomad_client.jobs.register_job(job) @@ -180,20 +174,19 @@ def run(): except Exception as e: module.fail_json(msg=to_native(e)) - if module.params.get('content_format') == 'hcl': - + if module.params.get("content_format") == "hcl": try: - job_hcl = module.params.get('content') + job_hcl = module.params.get("content") job_json = nomad_client.jobs.parse(job_hcl) job = dict() - job['job'] = job_json + job["job"] = job_json except 
nomad.api.exceptions.BadRequestNomadException as err: msg = f"{err.nomad_resp.reason} {err.nomad_resp.text}" module.fail_json(msg=to_native(msg)) try: - job_id = job_json.get('ID') + job_id = job_json.get("ID") plan = nomad_client.job.plan_job(job_id, job, diff=True) - if not plan['Diff'].get('Type') == "None": + if not plan["Diff"].get("Type") == "None": changed = True if not module.check_mode: result = nomad_client.jobs.register_job(job) @@ -204,21 +197,20 @@ def run(): except Exception as e: module.fail_json(msg=to_native(e)) - if module.params.get('force_start'): - + if module.params.get("force_start"): try: job = dict() - if module.params.get('name'): - job_name = module.params.get('name') + if module.params.get("name"): + job_name = module.params.get("name") else: - job_name = job_json['Name'] + job_name = job_json["Name"] job_json = nomad_client.job.get_job(job_name) - if job_json['Status'] == 'running': + if job_json["Status"] == "running": result = job_json else: - job_json['Status'] = 'running' - job_json['Stop'] = False - job['job'] = job_json + job_json["Status"] = "running" + job_json["Stop"] = False + job["job"] = job_json if not module.check_mode: result = nomad_client.jobs.register_job(job) else: @@ -230,20 +222,19 @@ def run(): except Exception as e: module.fail_json(msg=to_native(e)) - if module.params.get('state') == "absent": - + if module.params.get("state") == "absent": try: - if not module.params.get('name') is None: - job_name = module.params.get('name') + if not module.params.get("name") is None: + job_name = module.params.get("name") else: - if module.params.get('content_format') == 'hcl': - job_json = nomad_client.jobs.parse(module.params.get('content')) - job_name = job_json['Name'] - if module.params.get('content_format') == 'json': - job_json = module.params.get('content') - job_name = job_json['Name'] + if module.params.get("content_format") == "hcl": + job_json = nomad_client.jobs.parse(module.params.get("content")) + job_name = job_json["Name"] + if module.params.get("content_format") == "json": + job_json = module.params.get("content") + job_name = job_json["Name"] job = nomad_client.job.get_job(job_name) - if job['Status'] == 'dead': + if job["Status"] == "dead": changed = False result = job else: @@ -259,7 +250,6 @@ def run(): def main(): - run() diff --git a/plugins/modules/nomad_job_info.py b/plugins/modules/nomad_job_info.py index 7591171ac39..5bc9cc9f799 100644 --- a/plugins/modules/nomad_job_info.py +++ b/plugins/modules/nomad_job_info.py @@ -268,6 +268,7 @@ import_nomad = None try: import nomad + import_nomad = True except ImportError: import_nomad = False @@ -276,34 +277,34 @@ def run(): module = AnsibleModule( argument_spec=dict( - host=dict(required=True, type='str'), - port=dict(type='int', default=4646), - use_ssl=dict(type='bool', default=True), - timeout=dict(type='int', default=5), - validate_certs=dict(type='bool', default=True), - client_cert=dict(type='path'), - client_key=dict(type='path'), - namespace=dict(type='str'), - name=dict(type='str'), - token=dict(type='str', no_log=True) + host=dict(required=True, type="str"), + port=dict(type="int", default=4646), + use_ssl=dict(type="bool", default=True), + timeout=dict(type="int", default=5), + validate_certs=dict(type="bool", default=True), + client_cert=dict(type="path"), + client_key=dict(type="path"), + namespace=dict(type="str"), + name=dict(type="str"), + token=dict(type="str", no_log=True), ), - supports_check_mode=True + supports_check_mode=True, ) if not import_nomad: 
module.fail_json(msg=missing_required_lib("python-nomad")) - certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key')) + certificate_ssl = (module.params.get("client_cert"), module.params.get("client_key")) nomad_client = nomad.Nomad( - host=module.params.get('host'), - port=module.params.get('port'), - secure=module.params.get('use_ssl'), - timeout=module.params.get('timeout'), - verify=module.params.get('validate_certs'), + host=module.params.get("host"), + port=module.params.get("port"), + secure=module.params.get("use_ssl"), + timeout=module.params.get("timeout"), + verify=module.params.get("validate_certs"), cert=certificate_ssl, - namespace=module.params.get('namespace'), - token=module.params.get('token') + namespace=module.params.get("namespace"), + token=module.params.get("token"), ) changed = False @@ -311,15 +312,15 @@ def run(): try: job_list = nomad_client.jobs.get_jobs() for job in job_list: - result.append(nomad_client.job.get_job(job.get('ID'))) + result.append(nomad_client.job.get_job(job.get("ID"))) except Exception as e: module.fail_json(msg=to_native(e)) - if module.params.get('name'): + if module.params.get("name"): filter = list() try: for job in result: - if job.get('ID') == module.params.get('name'): + if job.get("ID") == module.params.get("name"): filter.append(job) result = filter if not filter: @@ -331,7 +332,6 @@ def run(): def main(): - run() diff --git a/plugins/modules/nomad_token.py b/plugins/modules/nomad_token.py index 660ceeb074e..0c18d1e1151 100644 --- a/plugins/modules/nomad_token.py +++ b/plugins/modules/nomad_token.py @@ -134,46 +134,45 @@ def get_token(name, nomad_client): tokens = nomad_client.acl.get_tokens() - token = next((token for token in tokens - if token.get('Name') == name), None) + token = next((token for token in tokens if token.get("Name") == name), None) return token def transform_response(nomad_response): transformed_response = { - "accessor_id": nomad_response['AccessorID'], - "create_index": nomad_response['CreateIndex'], - "create_time": nomad_response['CreateTime'], - "expiration_ttl": nomad_response['ExpirationTTL'], - "expiration_time": nomad_response['ExpirationTime'], - "global": nomad_response['Global'], - "hash": nomad_response['Hash'], - "modify_index": nomad_response['ModifyIndex'], - "name": nomad_response['Name'], - "policies": nomad_response['Policies'], - "roles": nomad_response['Roles'], - "secret_id": nomad_response['SecretID'], - "type": nomad_response['Type'] + "accessor_id": nomad_response["AccessorID"], + "create_index": nomad_response["CreateIndex"], + "create_time": nomad_response["CreateTime"], + "expiration_ttl": nomad_response["ExpirationTTL"], + "expiration_time": nomad_response["ExpirationTime"], + "global": nomad_response["Global"], + "hash": nomad_response["Hash"], + "modify_index": nomad_response["ModifyIndex"], + "name": nomad_response["Name"], + "policies": nomad_response["Policies"], + "roles": nomad_response["Roles"], + "secret_id": nomad_response["SecretID"], + "type": nomad_response["Type"], } return transformed_response argument_spec = dict( - host=dict(required=True, type='str'), - port=dict(type='int', default=4646), - state=dict(required=True, choices=['present', 'absent']), - use_ssl=dict(type='bool', default=True), - timeout=dict(type='int', default=5), - validate_certs=dict(type='bool', default=True), - client_cert=dict(type='path'), - client_key=dict(type='path'), - namespace=dict(type='str'), - token=dict(type='str', no_log=True), - name=dict(type='str'), - 
token_type=dict(choices=['client', 'management', 'bootstrap'], default='client'), - policies=dict(type='list', elements='str', default=[]), - global_replicated=dict(type='bool', default=False), + host=dict(required=True, type="str"), + port=dict(type="int", default=4646), + state=dict(required=True, choices=["present", "absent"]), + use_ssl=dict(type="bool", default=True), + timeout=dict(type="int", default=5), + validate_certs=dict(type="bool", default=True), + client_cert=dict(type="path"), + client_key=dict(type="path"), + namespace=dict(type="str"), + token=dict(type="str", no_log=True), + name=dict(type="str"), + token_type=dict(choices=["client", "management", "bootstrap"], default="client"), + policies=dict(type="list", elements="str", default=[]), + global_replicated=dict(type="bool", default=False), ) @@ -181,12 +180,10 @@ def setup_module_object(): module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=False, - required_one_of=[ - ['name', 'token_type'] - ], + required_one_of=[["name", "token_type"]], required_if=[ - ('token_type', 'client', ('name',)), - ('token_type', 'management', ('name',)), + ("token_type", "client", ("name",)), + ("token_type", "management", ("name",)), ], ) return module @@ -196,17 +193,17 @@ def setup_nomad_client(module): if not import_nomad: module.fail_json(msg=missing_required_lib("python-nomad")) - certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key')) + certificate_ssl = (module.params.get("client_cert"), module.params.get("client_key")) nomad_client = nomad.Nomad( - host=module.params.get('host'), - port=module.params.get('port'), - secure=module.params.get('use_ssl'), - timeout=module.params.get('timeout'), - verify=module.params.get('validate_certs'), + host=module.params.get("host"), + port=module.params.get("port"), + secure=module.params.get("use_ssl"), + timeout=module.params.get("timeout"), + verify=module.params.get("validate_certs"), cert=certificate_ssl, - namespace=module.params.get('namespace'), - token=module.params.get('token') + namespace=module.params.get("namespace"), + token=module.params.get("token"), ) return nomad_client @@ -218,11 +215,10 @@ def run(module): msg = "" result = {} changed = False - if module.params.get('state') == "present": - - if module.params.get('token_type') == 'bootstrap': + if module.params.get("state") == "present": + if module.params.get("token_type") == "bootstrap": try: - current_token = get_token('Bootstrap Token', nomad_client) + current_token = get_token("Bootstrap Token", nomad_client) if current_token: msg = "ACL bootstrap already exist." else: @@ -243,17 +239,17 @@ def run(module): else: try: token_info = { - "Name": module.params.get('name'), - "Type": module.params.get('token_type'), - "Policies": module.params.get('policies'), - "Global": module.params.get('global_replicated') + "Name": module.params.get("name"), + "Type": module.params.get("token_type"), + "Policies": module.params.get("policies"), + "Global": module.params.get("global_replicated"), } - current_token = get_token(token_info['Name'], nomad_client) + current_token = get_token(token_info["Name"], nomad_client) if current_token: - token_info['AccessorID'] = current_token['AccessorID'] - nomad_result = nomad_client.acl.update_token(current_token['AccessorID'], token_info) + token_info["AccessorID"] = current_token["AccessorID"] + nomad_result = nomad_client.acl.update_token(current_token["AccessorID"], token_info) msg = "ACL token updated." 
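# Aside (not part of this patch): a minimal sketch of the upsert pattern the
# nomad_token code above follows. get_tokens() and update_token() appear in
# this module; create_token() for the "no existing token" branch is assumed
# to exist on the same python-nomad ACL endpoint.
def upsert_acl_token(nomad_client, name, token_type, policies, global_replicated):
    # Nomad's ACL API has no lookup-by-name, so scan the full listing.
    current = next((t for t in nomad_client.acl.get_tokens() if t.get("Name") == name), None)
    token_info = {
        "Name": name,
        "Type": token_type,
        "Policies": policies,
        "Global": global_replicated,
    }
    if current:
        # Updates are keyed by AccessorID, never by name.
        return nomad_client.acl.update_token(current["AccessorID"], token_info)
    return nomad_client.acl.create_token(token_info)  # assumed API, see note above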
result = transform_response(nomad_result) changed = True @@ -267,19 +263,18 @@ def run(module): except Exception as e: module.fail_json(msg=to_native(e)) - if module.params.get('state') == "absent": - - if not module.params.get('name'): + if module.params.get("state") == "absent": + if not module.params.get("name"): module.fail_json(msg="name is needed to delete token.") - if module.params.get('token_type') == 'bootstrap' or module.params.get('name') == 'Bootstrap Token': + if module.params.get("token_type") == "bootstrap" or module.params.get("name") == "Bootstrap Token": module.fail_json(msg="Delete ACL bootstrap token is not allowed.") try: - token = get_token(module.params.get('name'), nomad_client) + token = get_token(module.params.get("name"), nomad_client) if token: - nomad_client.acl.delete_token(token.get('AccessorID')) - msg = 'ACL token deleted.' + nomad_client.acl.delete_token(token.get("AccessorID")) + msg = "ACL token deleted." changed = True else: msg = f"No token with name '{module.params['name']}' found" diff --git a/plugins/modules/nosh.py b/plugins/modules/nosh.py index 6db2ece7f46..67c86709a04 100644 --- a/plugins/modules/nosh.py +++ b/plugins/modules/nosh.py @@ -334,38 +334,38 @@ def run_sys_ctl(module, args): - sys_ctl = [module.get_bin_path('system-control', required=True)] - if module.params['user']: - sys_ctl = sys_ctl + ['--user'] + sys_ctl = [module.get_bin_path("system-control", required=True)] + if module.params["user"]: + sys_ctl = sys_ctl + ["--user"] return module.run_command(sys_ctl + args) def get_service_path(module, service): - (rc, out, err) = run_sys_ctl(module, ['find', service]) + (rc, out, err) = run_sys_ctl(module, ["find", service]) # fail if service not found if rc != 0: - fail_if_missing(module, False, service, msg='host') + fail_if_missing(module, False, service, msg="host") else: return to_native(out).strip() def service_is_enabled(module, service_path): - (rc, out, err) = run_sys_ctl(module, ['is-enabled', service_path]) + (rc, out, err) = run_sys_ctl(module, ["is-enabled", service_path]) return rc == 0 def service_is_preset_enabled(module, service_path): - (rc, out, err) = run_sys_ctl(module, ['preset', '--dry-run', service_path]) + (rc, out, err) = run_sys_ctl(module, ["preset", "--dry-run", service_path]) return to_native(out).strip().startswith("enable") def service_is_loaded(module, service_path): - (rc, out, err) = run_sys_ctl(module, ['is-loaded', service_path]) + (rc, out, err) = run_sys_ctl(module, ["is-loaded", service_path]) return rc == 0 def get_service_status(module, service_path): - (rc, out, err) = run_sys_ctl(module, ['show-json', service_path]) + (rc, out, err) = run_sys_ctl(module, ["show-json", service_path]) # will fail if not service is not loaded if err is not None and err: module.fail_json(msg=err) @@ -376,7 +376,7 @@ def get_service_status(module, service_path): def service_is_running(service_status): - return service_status['DaemontoolsEncoreState'] in set(['starting', 'started', 'running']) + return service_status["DaemontoolsEncoreState"] in set(["starting", "started", "running"]) def handle_enabled(module, result, service_path): @@ -390,39 +390,39 @@ def handle_enabled(module, result, service_path): """ # computed prior in control flow - preset = result['preset'] - enabled = result['enabled'] + preset = result["preset"] + enabled = result["enabled"] # preset, effect only if option set to true (no reverse preset) - if module.params['preset']: - action = 'preset' + if module.params["preset"]: + action = "preset" # 
run preset if needed - if preset != module.params['preset']: - result['changed'] = True + if preset != module.params["preset"]: + result["changed"] = True if not module.check_mode: (rc, out, err) = run_sys_ctl(module, [action, service_path]) if rc != 0: module.fail_json(msg=f"Unable to {action} service {service_path}: {out + err}") - result['preset'] = not preset - result['enabled'] = not enabled + result["preset"] = not preset + result["enabled"] = not enabled # enabled/disabled state - if module.params['enabled'] is not None: - if module.params['enabled']: - action = 'enable' + if module.params["enabled"] is not None: + if module.params["enabled"]: + action = "enable" else: - action = 'disable' + action = "disable" # change enable/disable if needed - if enabled != module.params['enabled']: - result['changed'] = True + if enabled != module.params["enabled"]: + result["changed"] = True if not module.check_mode: (rc, out, err) = run_sys_ctl(module, [action, service_path]) if rc != 0: module.fail_json(msg=f"Unable to {action} service {service_path}: {out + err}") - result['enabled'] = not enabled - result['preset'] = not preset + result["enabled"] = not enabled + result["preset"] = not preset def handle_state(module, result, service_path): @@ -433,71 +433,72 @@ def handle_state(module, result, service_path): can be obtained and the service can only be 'started'. """ # default to desired state, no action - result['state'] = module.params['state'] - state = module.params['state'] + result["state"] = module.params["state"] + state = module.params["state"] action = None # computed prior in control flow, possibly modified by handle_enabled() - enabled = result['enabled'] + enabled = result["enabled"] # service not loaded -> not started by manager, no status information if not service_is_loaded(module, service_path): - if state in ['started', 'restarted', 'reloaded']: - action = 'start' - result['state'] = 'started' - elif state == 'reset': + if state in ["started", "restarted", "reloaded"]: + action = "start" + result["state"] = "started" + elif state == "reset": if enabled: - action = 'start' - result['state'] = 'started' + action = "start" + result["state"] = "started" else: - result['state'] = None + result["state"] = None else: - result['state'] = None + result["state"] = None # service is loaded else: # get status information - result['status'] = get_service_status(module, service_path) - running = service_is_running(result['status']) + result["status"] = get_service_status(module, service_path) + running = service_is_running(result["status"]) - if state == 'started': + if state == "started": if not running: - action = 'start' - elif state == 'stopped': + action = "start" + elif state == "stopped": if running: - action = 'stop' + action = "stop" # reset = start/stop according to enabled status - elif state == 'reset': + elif state == "reset": if enabled is not running: if running: - action = 'stop' - result['state'] = 'stopped' + action = "stop" + result["state"] = "stopped" else: - action = 'start' - result['state'] = 'started' + action = "start" + result["state"] = "started" # start if not running, 'service' module constraint - elif state == 'restarted': + elif state == "restarted": if not running: - action = 'start' - result['state'] = 'started' + action = "start" + result["state"] = "started" else: - action = 'condrestart' + action = "condrestart" # start if not running, 'service' module constraint - elif state == 'reloaded': + elif state == "reloaded": if not running: - action = 'start' 
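# Aside (not part of this patch): for a loaded service, the branch ladder
# above reduces to a small desired-state -> system-control action map.
# A table-driven restatement, as a hypothetical helper rather than module code:
ACTION_WHEN_RUNNING = {
    "started": None,  # already running, nothing to do
    "stopped": "stop",
    "restarted": "condrestart",  # 'service' module constraint
    "reloaded": "hangup",
}

def pick_action(state, running, enabled):
    if state == "reset":
        # reset converges the run state onto the enabled/disabled state
        return None if enabled is running else ("stop" if running else "start")
    if not running:
        # everything except 'stopped' starts a loaded-but-stopped service
        return None if state == "stopped" else "start"
    return ACTION_WHEN_RUNNING[state]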
- result['state'] = 'started' + action = "start" + result["state"] = "started" else: - action = 'hangup' + action = "hangup" # change state as needed if action: - result['changed'] = True + result["changed"] = True if not module.check_mode: (rc, out, err) = run_sys_ctl(module, [action, service_path]) if rc != 0: module.fail_json(msg=f"Unable to {action} service {service_path}: {err}") + # =========================================== # Main control flow @@ -505,48 +506,48 @@ def handle_state(module, result, service_path): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', choices=['started', 'stopped', 'reset', 'restarted', 'reloaded']), - enabled=dict(type='bool'), - preset=dict(type='bool'), - user=dict(type='bool', default=False), + name=dict(type="str", required=True), + state=dict(type="str", choices=["started", "stopped", "reset", "restarted", "reloaded"]), + enabled=dict(type="bool"), + preset=dict(type="bool"), + user=dict(type="bool", default=False), ), supports_check_mode=True, - mutually_exclusive=[['enabled', 'preset']], + mutually_exclusive=[["enabled", "preset"]], ) - service = module.params['name'] + service = module.params["name"] rc = 0 - out = err = '' + out = err = "" result = { - 'name': service, - 'changed': False, - 'status': None, + "name": service, + "changed": False, + "status": None, } # check service can be found (or fail) and get path service_path = get_service_path(module, service) # get preliminary service facts - result['service_path'] = service_path - result['user'] = module.params['user'] - result['enabled'] = service_is_enabled(module, service_path) - result['preset'] = result['enabled'] is service_is_preset_enabled(module, service_path) + result["service_path"] = service_path + result["user"] = module.params["user"] + result["enabled"] = service_is_enabled(module, service_path) + result["preset"] = result["enabled"] is service_is_preset_enabled(module, service_path) # set enabled state, service need not be loaded - if module.params['enabled'] is not None or module.params['preset']: + if module.params["enabled"] is not None or module.params["preset"]: handle_enabled(module, result, service_path) # set service running state - if module.params['state'] is not None: + if module.params["state"] is not None: handle_state(module, result, service_path) # get final service status if possible if service_is_loaded(module, service_path): - result['status'] = get_service_status(module, service_path) + result["status"] = get_service_status(module, service_path) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/npm.py b/plugins/modules/npm.py index 130daa02c03..2e471d2f74d 100644 --- a/plugins/modules/npm.py +++ b/plugins/modules/npm.py @@ -165,25 +165,25 @@ class Npm: def __init__(self, module, **kwargs): self.module = module - self.glbl = kwargs['glbl'] - self.name = kwargs['name'] - self.version = kwargs['version'] - self.path = kwargs['path'] - self.registry = kwargs['registry'] - self.production = kwargs['production'] - self.ignore_scripts = kwargs['ignore_scripts'] - self.unsafe_perm = kwargs['unsafe_perm'] - self.state = kwargs['state'] - self.no_optional = kwargs['no_optional'] - self.no_bin_links = kwargs['no_bin_links'] - self.force = kwargs['force'] - - if kwargs['executable']: - self.executable = kwargs['executable'].split(' ') + self.glbl = kwargs["glbl"] + self.name = kwargs["name"] + self.version = kwargs["version"] + 
self.path = kwargs["path"] + self.registry = kwargs["registry"] + self.production = kwargs["production"] + self.ignore_scripts = kwargs["ignore_scripts"] + self.unsafe_perm = kwargs["unsafe_perm"] + self.state = kwargs["state"] + self.no_optional = kwargs["no_optional"] + self.no_bin_links = kwargs["no_bin_links"] + self.force = kwargs["force"] + + if kwargs["executable"]: + self.executable = kwargs["executable"].split(" ") else: - self.executable = [module.get_bin_path('npm', True)] + self.executable = [module.get_bin_path("npm", True)] - if kwargs['version'] and kwargs['state'] != 'absent': + if kwargs["version"] and kwargs["state"] != "absent": self.name_version = f"{self.name}@{kwargs['version']}" else: self.name_version = self.name @@ -193,16 +193,16 @@ def __init__(self, module, **kwargs): command=self.executable, arg_formats=dict( exec_args=cmd_runner_fmt.as_list(), - global_=cmd_runner_fmt.as_bool('--global'), - production=cmd_runner_fmt.as_bool('--production'), - ignore_scripts=cmd_runner_fmt.as_bool('--ignore-scripts'), - unsafe_perm=cmd_runner_fmt.as_bool('--unsafe-perm'), + global_=cmd_runner_fmt.as_bool("--global"), + production=cmd_runner_fmt.as_bool("--production"), + ignore_scripts=cmd_runner_fmt.as_bool("--ignore-scripts"), + unsafe_perm=cmd_runner_fmt.as_bool("--unsafe-perm"), name_version=cmd_runner_fmt.as_list(), - registry=cmd_runner_fmt.as_opt_val('--registry'), - no_optional=cmd_runner_fmt.as_bool('--no-optional'), - no_bin_links=cmd_runner_fmt.as_bool('--no-bin-links'), - force=cmd_runner_fmt.as_bool('--force'), - ) + registry=cmd_runner_fmt.as_opt_val("--registry"), + no_optional=cmd_runner_fmt.as_bool("--no-optional"), + no_bin_links=cmd_runner_fmt.as_bool("--no-bin-links"), + force=cmd_runner_fmt.as_bool("--force"), + ), ) def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True): @@ -217,40 +217,40 @@ def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=T cwd = self.path params = dict(self.module.params) - params['exec_args'] = args - params['global_'] = self.glbl - params['production'] = self.production and ('install' in args or 'update' in args or 'ci' in args) - params['name_version'] = self.name_version if add_package_name else None + params["exec_args"] = args + params["global_"] = self.glbl + params["production"] = self.production and ("install" in args or "update" in args or "ci" in args) + params["name_version"] = self.name_version if add_package_name else None with self.runner( "exec_args global_ production ignore_scripts unsafe_perm name_version registry no_optional no_bin_links force", - check_rc=check_rc, cwd=cwd + check_rc=check_rc, + cwd=cwd, ) as ctx: rc, out, err = ctx.run(**params) return out - return '' + return "" def list(self): - cmd = ['list', '--json', '--long'] + cmd = ["list", "--json", "--long"] installed = list() missing = list() data = {} try: - data = json.loads(self._exec(cmd, True, False, False) or '{}') - except (getattr(json, 'JSONDecodeError', ValueError)) as e: + data = json.loads(self._exec(cmd, True, False, False) or "{}") + except getattr(json, "JSONDecodeError", ValueError) as e: self.module.fail_json(msg=f"Failed to parse NPM output with error {e}") - if 'dependencies' in data: - for dep, props in data['dependencies'].items(): - - if 'missing' in props and props['missing']: + if "dependencies" in data: + for dep, props in data["dependencies"].items(): + if "missing" in props and props["missing"]: missing.append(dep) - elif 'invalid' in props and props['invalid']: + elif 
"invalid" in props and props["invalid"]: missing.append(dep) else: installed.append(dep) - if 'version' in props and props['version']: + if "version" in props and props["version"]: dep_version = f"{dep}@{props['version']}" installed.append(dep_version) if self.name_version and self.name_version not in installed: @@ -262,25 +262,25 @@ def list(self): return installed, missing def install(self): - return self._exec(['install']) + return self._exec(["install"]) def ci_install(self): - return self._exec(['ci']) + return self._exec(["ci"]) def update(self): - return self._exec(['update']) + return self._exec(["update"]) def uninstall(self): - return self._exec(['uninstall']) + return self._exec(["uninstall"]) def list_outdated(self): outdated = list() - data = self._exec(['outdated'], True, False) + data = self._exec(["outdated"], True, False) for dep in data.splitlines(): if dep: # node.js v0.10.22 changed the `npm outdated` module separator # from "@" to " ". Split on both for backwards compatibility. - pkg, other = re.split(r'\s|@', dep, 1) + pkg, other = re.split(r"\s|@", dep, 1) outdated.append(pkg) return outdated @@ -288,61 +288,63 @@ def list_outdated(self): def main(): arg_spec = dict( - name=dict(type='str'), - path=dict(type='path'), - version=dict(type='str'), - production=dict(default=False, type='bool'), - executable=dict(type='path'), - registry=dict(type='str'), - state=dict(default='present', choices=['present', 'absent', 'latest']), - ignore_scripts=dict(default=False, type='bool'), - unsafe_perm=dict(default=False, type='bool'), - ci=dict(default=False, type='bool'), - no_optional=dict(default=False, type='bool'), - no_bin_links=dict(default=False, type='bool'), - force=dict(default=False, type='bool'), + name=dict(type="str"), + path=dict(type="path"), + version=dict(type="str"), + production=dict(default=False, type="bool"), + executable=dict(type="path"), + registry=dict(type="str"), + state=dict(default="present", choices=["present", "absent", "latest"]), + ignore_scripts=dict(default=False, type="bool"), + unsafe_perm=dict(default=False, type="bool"), + ci=dict(default=False, type="bool"), + no_optional=dict(default=False, type="bool"), + no_bin_links=dict(default=False, type="bool"), + force=dict(default=False, type="bool"), ) - arg_spec['global'] = dict(default=False, type='bool') + arg_spec["global"] = dict(default=False, type="bool") module = AnsibleModule( argument_spec=arg_spec, required_if=[ - ('state', 'absent', ['name']), + ("state", "absent", ["name"]), ("global", False, ["path"]), ], supports_check_mode=True, ) - name = module.params['name'] - path = module.params['path'] - version = module.params['version'] - glbl = module.params['global'] - state = module.params['state'] - - npm = Npm(module, - name=name, - path=path, - version=version, - glbl=glbl, - production=module.params['production'], - executable=module.params['executable'], - registry=module.params['registry'], - ignore_scripts=module.params['ignore_scripts'], - unsafe_perm=module.params['unsafe_perm'], - state=state, - no_optional=module.params['no_optional'], - no_bin_links=module.params['no_bin_links'], - force=module.params['force']) + name = module.params["name"] + path = module.params["path"] + version = module.params["version"] + glbl = module.params["global"] + state = module.params["state"] + + npm = Npm( + module, + name=name, + path=path, + version=version, + glbl=glbl, + production=module.params["production"], + executable=module.params["executable"], + registry=module.params["registry"], 
+ ignore_scripts=module.params["ignore_scripts"], + unsafe_perm=module.params["unsafe_perm"], + state=state, + no_optional=module.params["no_optional"], + no_bin_links=module.params["no_bin_links"], + force=module.params["force"], + ) changed = False - if module.params['ci']: + if module.params["ci"]: npm.ci_install() changed = True - elif state == 'present': + elif state == "present": installed, missing = npm.list() if missing: changed = True npm.install() - elif state == 'latest': + elif state == "latest": installed, missing = npm.list() outdated = npm.list_outdated() if missing: @@ -360,5 +362,5 @@ def main(): module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/nsupdate.py b/plugins/modules/nsupdate.py index dcf56be09a9..c38ad025c50 100644 --- a/plugins/modules/nsupdate.py +++ b/plugins/modules/nsupdate.py @@ -204,42 +204,40 @@ class RecordManager: def __init__(self, module): self.module = module - if module.params['key_name']: + if module.params["key_name"]: try: - self.keyring = dns.tsigkeyring.from_text({ - module.params['key_name']: module.params['key_secret'] - }) + self.keyring = dns.tsigkeyring.from_text({module.params["key_name"]: module.params["key_secret"]}) except TypeError: - module.fail_json(msg='Missing key_secret') + module.fail_json(msg="Missing key_secret") except binascii_error as e: - module.fail_json(msg=f'TSIG key error: {e}') + module.fail_json(msg=f"TSIG key error: {e}") else: self.keyring = None - if module.params['key_algorithm'] == 'hmac-md5': - self.algorithm = 'HMAC-MD5.SIG-ALG.REG.INT' + if module.params["key_algorithm"] == "hmac-md5": + self.algorithm = "HMAC-MD5.SIG-ALG.REG.INT" else: - self.algorithm = module.params['key_algorithm'] + self.algorithm = module.params["key_algorithm"] - if module.params['zone'] is None: - if module.params['record'][-1] != '.': - self.module.fail_json(msg='record must be absolute when omitting zone parameter') + if module.params["zone"] is None: + if module.params["record"][-1] != ".": + self.module.fail_json(msg="record must be absolute when omitting zone parameter") self.zone = self.lookup_zone() else: - self.zone = module.params['zone'] + self.zone = module.params["zone"] - if self.zone[-1] != '.': - self.zone += '.' + if self.zone[-1] != ".": + self.zone += "." 
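# Aside (not part of this patch): the smallest dnspython TSIG round trip
# behind RecordManager, using placeholder key material and RFC 5737
# addresses; every call mirrors one used in this module.
import dns.query
import dns.tsigkeyring
import dns.update

keyring = dns.tsigkeyring.from_text({"ansible-key": "c2VjcmV0c2VjcmV0"})  # placeholder base64 secret
update = dns.update.Update("example.com.", keyring=keyring, keyalgorithm="hmac-sha256")
update.add("www", 3600, "A", "192.0.2.10")  # record name is relative to the zone
response = dns.query.tcp(update, "203.0.113.53", timeout=10, port=53)
print(response.rcode())  # 0 (dns.rcode.NOERROR) on success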
- if module.params['record'][-1] != '.': + if module.params["record"][-1] != ".": self.fqdn = f"{module.params['record']}.{self.zone}" else: - self.fqdn = module.params['record'] + self.fqdn = module.params["record"] - if self.module.params['type'].lower() == 'txt' and self.module.params['value'] is not None: - self.value = list(map(self.txt_helper, self.module.params['value'])) + if self.module.params["type"].lower() == "txt" and self.module.params["value"] is not None: + self.value = list(map(self.txt_helper, self.module.params["value"])) else: - self.value = self.module.params['value'] + self.value = self.module.params["value"] self.dns_rc = 0 @@ -249,23 +247,29 @@ def txt_helper(self, entry): return f'"{entry}"' def lookup_zone(self): - name = dns.name.from_text(self.module.params['record']) + name = dns.name.from_text(self.module.params["record"]) while True: query = dns.message.make_query(name, dns.rdatatype.SOA) if self.keyring: query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) try: - if self.module.params['protocol'] == 'tcp': - lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + if self.module.params["protocol"] == "tcp": + lookup = dns.query.tcp( + query, self.module.params["server"], timeout=10, port=self.module.params["port"] + ) else: - lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + lookup = dns.query.udp( + query, self.module.params["server"], timeout=10, port=self.module.params["port"] + ) except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: - self.module.fail_json(msg=f'TSIG update error ({e.__class__.__name__}): {e}') + self.module.fail_json(msg=f"TSIG update error ({e.__class__.__name__}): {e}") except (socket_error, dns.exception.Timeout) as e: - self.module.fail_json(msg=f'DNS server error: ({e.__class__.__name__}): {e}') + self.module.fail_json(msg=f"DNS server error: ({e.__class__.__name__}): {e}") if lookup.rcode() in [dns.rcode.SERVFAIL, dns.rcode.REFUSED]: - self.module.fail_json(msg=f"Zone lookup failure: '{self.module.params['server']}' will not " - f"respond to queries regarding '{self.module.params['record']}'.") + self.module.fail_json( + msg=f"Zone lookup failure: '{self.module.params['server']}' will not " + f"respond to queries regarding '{self.module.params['record']}'." + ) # If the response contains an Answer SOA RR whose name matches the queried name, # this is the name of the zone in which the record needs to be inserted. 
for rr in lookup.answer: @@ -284,18 +288,22 @@ def lookup_zone(self): def __do_update(self, update): response = None try: - if self.module.params['protocol'] == 'tcp': - response = dns.query.tcp(update, self.module.params['server'], timeout=10, port=self.module.params['port']) + if self.module.params["protocol"] == "tcp": + response = dns.query.tcp( + update, self.module.params["server"], timeout=10, port=self.module.params["port"] + ) else: - response = dns.query.udp(update, self.module.params['server'], timeout=10, port=self.module.params['port']) + response = dns.query.udp( + update, self.module.params["server"], timeout=10, port=self.module.params["port"] + ) except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: - self.module.fail_json(msg=f'TSIG update error ({e.__class__.__name__}): {e}') + self.module.fail_json(msg=f"TSIG update error ({e.__class__.__name__}): {e}") except (socket_error, dns.exception.Timeout) as e: - self.module.fail_json(msg=f'DNS server error: ({e.__class__.__name__}): {e}') + self.module.fail_json(msg=f"DNS server error: ({e.__class__.__name__}): {e}") return response def create_or_update_record(self): - result = {'changed': False, 'failed': False} + result = {"changed": False, "failed": False} exists = self.record_exists() if exists in [0, 2]: @@ -305,20 +313,20 @@ def create_or_update_record(self): if exists == 0: self.dns_rc = self.create_record() if self.dns_rc != 0: - result['msg'] = f"Failed to create DNS record (rc: {int(self.dns_rc)})" + result["msg"] = f"Failed to create DNS record (rc: {int(self.dns_rc)})" elif exists == 2: self.dns_rc = self.modify_record() if self.dns_rc != 0: - result['msg'] = f"Failed to update DNS record (rc: {int(self.dns_rc)})" + result["msg"] = f"Failed to update DNS record (rc: {int(self.dns_rc)})" if self.dns_rc != 0: - result['failed'] = True + result["failed"] = True else: - result['changed'] = True + result["changed"] = True else: - result['changed'] = False + result["changed"] = False return result @@ -326,14 +334,11 @@ def create_record(self): update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) for entry in self.value: try: - update.add(self.module.params['record'], - self.module.params['ttl'], - self.module.params['type'], - entry) + update.add(self.module.params["record"], self.module.params["ttl"], self.module.params["type"], entry) except AttributeError: - self.module.fail_json(msg='value needed when state=present') + self.module.fail_json(msg="value needed when state=present") except dns.exception.SyntaxError: - self.module.fail_json(msg='Invalid/malformed value') + self.module.fail_json(msg="Invalid/malformed value") response = self.__do_update(update) return dns.message.Message.rcode(response) @@ -341,52 +346,53 @@ def create_record(self): def modify_record(self): update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) - if self.module.params['type'].upper() == 'NS': + if self.module.params["type"].upper() == "NS": # When modifying a NS record, Bind9 silently refuses to delete all the NS entries for a zone: # > 09-May-2022 18:00:50.352 client @0x7fe7dd1f9568 192.168.1.3#45458/key rndc_ddns_ansible: # > updating zone 'lab/IN': attempt to delete all SOA or NS records ignored # https://gitlab.isc.org/isc-projects/bind9/-/blob/v9_18/lib/ns/update.c#L3304 # Let's perform dns inserts and updates first, deletes after. 
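# Aside (not part of this patch): the NS branch below queries the live
# entries first so that deletes can be queued per entry *after* the adds;
# the zone then never passes through a state with zero NS records, which
# BIND rejects. 'have' stands in for the lookup result, 'want' for self.value:
have = ["ns1.example.com.", "ns2.example.com."]
want = ["ns2.example.com.", "ns3.example.com."]
entries_to_remove = [n for n in have if n not in want]  # ['ns1.example.com.']
# queued order: update.add(record, ttl, 'NS', 'ns3.example.com.') first,
# update.delete(record, 'NS', 'ns1.example.com.') only afterwards.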
- query = dns.message.make_query(self.module.params['record'], self.module.params['type']) + query = dns.message.make_query(self.module.params["record"], self.module.params["type"]) if self.keyring: query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) try: - if self.module.params['protocol'] == 'tcp': - lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + if self.module.params["protocol"] == "tcp": + lookup = dns.query.tcp( + query, self.module.params["server"], timeout=10, port=self.module.params["port"] + ) else: - lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + lookup = dns.query.udp( + query, self.module.params["server"], timeout=10, port=self.module.params["port"] + ) except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: - self.module.fail_json(msg=f'TSIG update error ({e.__class__.__name__}): {e}') + self.module.fail_json(msg=f"TSIG update error ({e.__class__.__name__}): {e}") except (socket_error, dns.exception.Timeout) as e: - self.module.fail_json(msg=f'DNS server error: ({e.__class__.__name__}): {e}') + self.module.fail_json(msg=f"DNS server error: ({e.__class__.__name__}): {e}") lookup_result = lookup.answer[0] if lookup.answer else lookup.authority[0] entries_to_remove = [n.to_text() for n in lookup_result.items if n.to_text() not in self.value] else: - update.delete(self.module.params['record'], self.module.params['type']) + update.delete(self.module.params["record"], self.module.params["type"]) for entry in self.value: try: - update.add(self.module.params['record'], - self.module.params['ttl'], - self.module.params['type'], - entry) + update.add(self.module.params["record"], self.module.params["ttl"], self.module.params["type"], entry) except AttributeError: - self.module.fail_json(msg='value needed when state=present') + self.module.fail_json(msg="value needed when state=present") except dns.exception.SyntaxError: - self.module.fail_json(msg='Invalid/malformed value') + self.module.fail_json(msg="Invalid/malformed value") - if self.module.params['type'].upper() == 'NS': + if self.module.params["type"].upper() == "NS": for entry in entries_to_remove: - update.delete(self.module.params['record'], self.module.params['type'], entry) + update.delete(self.module.params["record"], self.module.params["type"], entry) response = self.__do_update(update) return dns.message.Message.rcode(response) def remove_record(self): - result = {'changed': False, 'failed': False} + result = {"changed": False, "failed": False} if self.record_exists() == 0: return result @@ -396,38 +402,38 @@ def remove_record(self): self.module.exit_json(changed=True) update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) - update.delete(self.module.params['record'], self.module.params['type']) + update.delete(self.module.params["record"], self.module.params["type"]) response = self.__do_update(update) self.dns_rc = dns.message.Message.rcode(response) if self.dns_rc != 0: - result['failed'] = True - result['msg'] = f"Failed to delete record (rc: {int(self.dns_rc)})" + result["failed"] = True + result["msg"] = f"Failed to delete record (rc: {int(self.dns_rc)})" else: - result['changed'] = True + result["changed"] = True return result def record_exists(self): update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) try: - update.present(self.module.params['record'], self.module.params['type']) + 
update.present(self.module.params["record"], self.module.params["type"]) except dns.rdatatype.UnknownRdatatype as e: - self.module.fail_json(msg=f'Record error: {e}') + self.module.fail_json(msg=f"Record error: {e}") response = self.__do_update(update) self.dns_rc = dns.message.Message.rcode(response) if self.dns_rc == 0: - if self.module.params['state'] == 'absent': + if self.module.params["state"] == "absent": return 1 for entry in self.value: try: - update.present(self.module.params['record'], self.module.params['type'], entry) + update.present(self.module.params["record"], self.module.params["type"], entry) except AttributeError: - self.module.fail_json(msg='value needed when state=present') + self.module.fail_json(msg="value needed when state=present") except dns.exception.SyntaxError: - self.module.fail_json(msg='Invalid/malformed value') + self.module.fail_json(msg="Invalid/malformed value") response = self.__do_update(update) self.dns_rc = dns.message.Message.rcode(response) if self.dns_rc == 0: @@ -441,76 +447,85 @@ def record_exists(self): return 0 def ttl_changed(self): - query = dns.message.make_query(self.fqdn, self.module.params['type']) + query = dns.message.make_query(self.fqdn, self.module.params["type"]) if self.keyring: query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) try: - if self.module.params['protocol'] == 'tcp': - lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + if self.module.params["protocol"] == "tcp": + lookup = dns.query.tcp(query, self.module.params["server"], timeout=10, port=self.module.params["port"]) else: - lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + lookup = dns.query.udp(query, self.module.params["server"], timeout=10, port=self.module.params["port"]) except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: - self.module.fail_json(msg=f'TSIG update error ({e.__class__.__name__}): {e}') + self.module.fail_json(msg=f"TSIG update error ({e.__class__.__name__}): {e}") except (socket_error, dns.exception.Timeout) as e: - self.module.fail_json(msg=f'DNS server error: ({e.__class__.__name__}): {e}') + self.module.fail_json(msg=f"DNS server error: ({e.__class__.__name__}): {e}") if lookup.rcode() != dns.rcode.NOERROR: - self.module.fail_json(msg='Failed to lookup TTL of existing matching record.') + self.module.fail_json(msg="Failed to lookup TTL of existing matching record.") current_ttl = lookup.answer[0].ttl if lookup.answer else lookup.authority[0].ttl - return current_ttl != self.module.params['ttl'] + return current_ttl != self.module.params["ttl"] def main(): - tsig_algs = ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', - 'hmac-sha256', 'hmac-sha384', 'hmac-sha512'] + tsig_algs = [ + "HMAC-MD5.SIG-ALG.REG.INT", + "hmac-md5", + "hmac-sha1", + "hmac-sha224", + "hmac-sha256", + "hmac-sha384", + "hmac-sha512", + ] module = AnsibleModule( argument_spec=dict( - state=dict(default='present', choices=['present', 'absent'], type='str'), - server=dict(required=True, type='str'), - port=dict(default=53, type='int'), - key_name=dict(type='str'), - key_secret=dict(type='str', no_log=True), - key_algorithm=dict(default='hmac-md5', choices=tsig_algs, type='str'), - zone=dict(type='str'), - record=dict(required=True, type='str'), - type=dict(default='A', type='str'), - ttl=dict(default=3600, type='int'), - value=dict(type='list', elements='str'), - protocol=dict(default='tcp', choices=['tcp', 'udp'], 
type='str') + state=dict(default="present", choices=["present", "absent"], type="str"), + server=dict(required=True, type="str"), + port=dict(default=53, type="int"), + key_name=dict(type="str"), + key_secret=dict(type="str", no_log=True), + key_algorithm=dict(default="hmac-md5", choices=tsig_algs, type="str"), + zone=dict(type="str"), + record=dict(required=True, type="str"), + type=dict(default="A", type="str"), + ttl=dict(default=3600, type="int"), + value=dict(type="list", elements="str"), + protocol=dict(default="tcp", choices=["tcp", "udp"], type="str"), ), - supports_check_mode=True + supports_check_mode=True, ) if not HAVE_DNSPYTHON: - module.fail_json(msg=missing_required_lib('dnspython'), exception=DNSPYTHON_IMP_ERR) + module.fail_json(msg=missing_required_lib("dnspython"), exception=DNSPYTHON_IMP_ERR) if len(module.params["record"]) == 0: - module.fail_json(msg='record cannot be empty.') + module.fail_json(msg="record cannot be empty.") record = RecordManager(module) result = {} - if module.params["state"] == 'absent': + if module.params["state"] == "absent": result = record.remove_record() - elif module.params["state"] == 'present': + elif module.params["state"] == "present": result = record.create_or_update_record() - result['dns_rc'] = record.dns_rc - result['dns_rc_str'] = dns.rcode.to_text(record.dns_rc) - if result['failed']: + result["dns_rc"] = record.dns_rc + result["dns_rc_str"] = dns.rcode.to_text(record.dns_rc) + if result["failed"]: module.fail_json(**result) else: - result['record'] = dict(zone=record.zone, - record=module.params['record'], - type=module.params['type'], - ttl=module.params['ttl'], - value=record.value) + result["record"] = dict( + zone=record.zone, + record=module.params["record"], + type=module.params["type"], + ttl=module.params["ttl"], + value=record.value, + ) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ocapi_command.py b/plugins/modules/ocapi_command.py index 4759ffadfd2..8e78d611b38 100644 --- a/plugins/modules/ocapi_command.py +++ b/plugins/modules/ocapi_command.py @@ -175,7 +175,7 @@ "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "PowerModeLow", "PowerModeNormal"], "Systems": ["PowerGracefulRestart"], "Update": ["FWUpload", "FWUpdate", "FWActivate"], - "Jobs": ["DeleteJob"] + "Jobs": ["DeleteJob"], } @@ -184,29 +184,26 @@ def main(): module = AnsibleModule( argument_spec=dict( category=dict(required=True), - command=dict(required=True, type='str'), - job_name=dict(type='str'), - baseuri=dict(required=True, type='str'), - proxy_slot_number=dict(type='int'), - update_image_path=dict(type='str'), + command=dict(required=True, type="str"), + job_name=dict(type="str"), + baseuri=dict(required=True, type="str"), + proxy_slot_number=dict(type="int"), + update_image_path=dict(type="str"), username=dict(required=True), password=dict(required=True, no_log=True), - timeout=dict(type='int', default=10) + timeout=dict(type="int", default=10), ), - supports_check_mode=True + supports_check_mode=True, ) - category = module.params['category'] - command = module.params['command'] + category = module.params["category"] + command = module.params["command"] # admin credentials used for authentication - creds = { - 'user': module.params['username'], - 'pswd': module.params['password'] - } + creds = {"user": module.params["username"], "pswd": module.params["password"]} # timeout - timeout = module.params['timeout'] + timeout = module.params["timeout"] base_uri = 
f"https://{module.params['baseuri']}" proxy_slot_number = module.params.get("proxy_slot_number") @@ -214,11 +211,15 @@ def main(): # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}")) + module.fail_json( + msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}") + ) # Check that the command is valid if command not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native(f"Invalid Command '{command}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}")) + module.fail_json( + msg=to_native(f"Invalid Command '{command}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}") + ) # Organize by Categories / Commands if category == "Chassis": @@ -247,18 +248,18 @@ def main(): job_uri = urljoin(base_uri, f"Jobs/{job_name}") result = ocapi_utils.delete_job(job_uri) - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) else: - del result['ret'] - changed = result.get('changed', True) - session = result.get('session', dict()) + del result["ret"] + changed = result.get("changed", True) + session = result.get("session", dict()) kwargs = { "changed": changed, "session": session, - "msg": "Action was successful." if not module.check_mode else result.get( - "msg", "No action performed in check mode." - ) + "msg": "Action was successful." + if not module.check_mode + else result.get("msg", "No action performed in check mode."), } result_keys = [result_key for result_key in result if result_key not in kwargs] for result_key in result_keys: @@ -266,5 +267,5 @@ def main(): module.exit_json(**kwargs) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ocapi_info.py b/plugins/modules/ocapi_info.py index 726aedf010b..3a302df1dee 100644 --- a/plugins/modules/ocapi_info.py +++ b/plugins/modules/ocapi_info.py @@ -144,9 +144,7 @@ from ansible.module_utils.common.text.converters import to_native # More will be added as module features are expanded -CATEGORY_COMMANDS_ALL = { - "Jobs": ["JobStatus"] -} +CATEGORY_COMMANDS_ALL = {"Jobs": ["JobStatus"]} def main(): @@ -154,28 +152,25 @@ def main(): module = AnsibleModule( argument_spec=dict( category=dict(required=True), - command=dict(required=True, type='str'), - job_name=dict(type='str'), - baseuri=dict(required=True, type='str'), - proxy_slot_number=dict(type='int'), + command=dict(required=True, type="str"), + job_name=dict(type="str"), + baseuri=dict(required=True, type="str"), + proxy_slot_number=dict(type="int"), username=dict(required=True), password=dict(required=True, no_log=True), - timeout=dict(type='int', default=10) + timeout=dict(type="int", default=10), ), - supports_check_mode=True + supports_check_mode=True, ) - category = module.params['category'] - command = module.params['command'] + category = module.params["category"] + command = module.params["command"] # admin credentials used for authentication - creds = { - 'user': module.params['username'], - 'pswd': module.params['password'] - } + creds = {"user": module.params["username"], "pswd": module.params["password"]} # timeout - timeout = module.params['timeout'] + timeout = module.params["timeout"] base_uri = f"https://{module.params['baseuri']}" proxy_slot_number = module.params.get("proxy_slot_number") @@ -183,33 +178,36 @@ def main(): # Check that Category 
is valid if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}")) + module.fail_json( + msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}") + ) # Check that the command is valid if command not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native(f"Invalid Command '{command}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}")) + module.fail_json( + msg=to_native(f"Invalid Command '{command}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}") + ) # Organize by Categories / Commands if category == "Jobs": if command == "JobStatus": if module.params.get("job_name") is None: - module.fail_json(msg=to_native( - "job_name required for JobStatus command.")) + module.fail_json(msg=to_native("job_name required for JobStatus command.")) job_uri = urljoin(base_uri, f"Jobs/{module.params['job_name']}") result = ocapi_utils.get_job_status(job_uri) - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) else: - del result['ret'] + del result["ret"] changed = False - session = result.get('session', dict()) + session = result.get("session", dict()) kwargs = { "changed": changed, "session": session, - "msg": "Action was successful." if not module.check_mode else result.get( - "msg", "No action performed in check mode." - ) + "msg": "Action was successful." + if not module.check_mode + else result.get("msg", "No action performed in check mode."), } result_keys = [result_key for result_key in result if result_key not in kwargs] for result_key in result_keys: @@ -217,5 +215,5 @@ def main(): module.exit_json(**kwargs) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oci_vcn.py b/plugins/modules/oci_vcn.py index ef7d7c49947..e9e3cb14d11 100644 --- a/plugins/modules/oci_vcn.py +++ b/plugins/modules/oci_vcn.py @@ -159,9 +159,7 @@ def create_vcn(virtual_network_client, module): def main(): - module_args = oci_utils.get_taggable_arg_spec( - supports_create=True, supports_wait=True - ) + module_args = oci_utils.get_taggable_arg_spec(supports_create=True, supports_wait=True) module_args.update( dict( cidr_block=dict(type="str"), @@ -182,9 +180,7 @@ def main(): if not HAS_OCI_PY_SDK: module.fail_json(msg=missing_required_lib("oci")) - virtual_network_client = oci_utils.create_service_client( - module, VirtualNetworkClient - ) + virtual_network_client = oci_utils.create_service_client(module, VirtualNetworkClient) exclude_attributes = {"display_name": True, "dns_label": True} state = module.params["state"] @@ -194,9 +190,7 @@ def main(): if vcn_id is not None: result = delete_vcn(virtual_network_client, module) else: - module.fail_json( - msg="Specify vcn_id with state as 'absent' to delete a VCN." 
- ) + module.fail_json(msg="Specify vcn_id with state as 'absent' to delete a VCN.") else: if vcn_id is not None: diff --git a/plugins/modules/odbc.py b/plugins/modules/odbc.py index 669b052f028..d3558ccec79 100644 --- a/plugins/modules/odbc.py +++ b/plugins/modules/odbc.py @@ -87,6 +87,7 @@ HAS_PYODBC = None try: import pyodbc + HAS_PYODBC = True except ImportError as e: HAS_PYODBC = False @@ -95,27 +96,27 @@ def main(): module = AnsibleModule( argument_spec=dict( - dsn=dict(type='str', required=True, no_log=True), - query=dict(type='str', required=True), - params=dict(type='list', elements='str'), - commit=dict(type='bool', default=True), + dsn=dict(type="str", required=True, no_log=True), + query=dict(type="str", required=True), + params=dict(type="list", elements="str"), + commit=dict(type="bool", default=True), ), ) - dsn = module.params.get('dsn') - query = module.params.get('query') - params = module.params.get('params') - commit = module.params.get('commit') + dsn = module.params.get("dsn") + query = module.params.get("query") + params = module.params.get("params") + commit = module.params.get("commit") if not HAS_PYODBC: - module.fail_json(msg=missing_required_lib('pyodbc')) + module.fail_json(msg=missing_required_lib("pyodbc")) # Try to make a connection with the DSN connection = None try: connection = pyodbc.connect(dsn) except Exception as e: - module.fail_json(msg=f'Failed to connect to DSN: {e}') + module.fail_json(msg=f"Failed to connect to DSN: {e}") result = dict( changed=True, @@ -137,21 +138,21 @@ def main(): # Get the rows out into an 2d array for row in cursor.fetchall(): new_row = [f"{column}" for column in row] - result['results'].append(new_row) + result["results"].append(new_row) # Return additional information from the cursor for row_description in cursor.description: description = {} - description['name'] = row_description[0] - description['type'] = row_description[1].__name__ - description['display_size'] = row_description[2] - description['internal_size'] = row_description[3] - description['precision'] = row_description[4] - description['scale'] = row_description[5] - description['nullable'] = row_description[6] - result['description'].append(description) - - result['row_count'] = cursor.rowcount + description["name"] = row_description[0] + description["type"] = row_description[1].__name__ + description["display_size"] = row_description[2] + description["internal_size"] = row_description[3] + description["precision"] = row_description[4] + description["scale"] = row_description[5] + description["nullable"] = row_description[6] + result["description"].append(description) + + result["row_count"] = cursor.rowcount except pyodbc.ProgrammingError as pe: pass except Exception as e: @@ -166,5 +167,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/office_365_connector_card.py b/plugins/modules/office_365_connector_card.py index 27f85e531df..a08e91c5527 100644 --- a/plugins/modules/office_365_connector_card.py +++ b/plugins/modules/office_365_connector_card.py @@ -172,125 +172,110 @@ def build_sections(sections): def build_section(section): section_payload = dict() - if 'title' in section: - section_payload['title'] = section['title'] + if "title" in section: + section_payload["title"] = section["title"] - if 'start_group' in section: - section_payload['startGroup'] = section['start_group'] + if "start_group" in section: + section_payload["startGroup"] = section["start_group"] - if 
'activity_image' in section: - section_payload['activityImage'] = section['activity_image'] + if "activity_image" in section: + section_payload["activityImage"] = section["activity_image"] - if 'activity_title' in section: - section_payload['activityTitle'] = section['activity_title'] + if "activity_title" in section: + section_payload["activityTitle"] = section["activity_title"] - if 'activity_subtitle' in section: - section_payload['activitySubtitle'] = section['activity_subtitle'] + if "activity_subtitle" in section: + section_payload["activitySubtitle"] = section["activity_subtitle"] - if 'activity_text' in section: - section_payload['activityText'] = section['activity_text'] + if "activity_text" in section: + section_payload["activityText"] = section["activity_text"] - if 'hero_image' in section: - section_payload['heroImage'] = section['hero_image'] + if "hero_image" in section: + section_payload["heroImage"] = section["hero_image"] - if 'text' in section: - section_payload['text'] = section['text'] + if "text" in section: + section_payload["text"] = section["text"] - if 'facts' in section: - section_payload['facts'] = section['facts'] + if "facts" in section: + section_payload["facts"] = section["facts"] - if 'images' in section: - section_payload['images'] = section['images'] + if "images" in section: + section_payload["images"] = section["images"] - if 'actions' in section: - section_payload['potentialAction'] = build_actions(section['actions']) + if "actions" in section: + section_payload["potentialAction"] = build_actions(section["actions"]) return section_payload -def build_payload_for_connector_card(module, summary=None, color=None, title=None, text=None, actions=None, sections=None): +def build_payload_for_connector_card( + module, summary=None, color=None, title=None, text=None, actions=None, sections=None +): payload = dict() - payload['@context'] = OFFICE_365_CARD_CONTEXT - payload['@type'] = OFFICE_365_CARD_TYPE + payload["@context"] = OFFICE_365_CARD_CONTEXT + payload["@type"] = OFFICE_365_CARD_TYPE if summary is not None: - payload['summary'] = summary + payload["summary"] = summary if color is not None: - payload['themeColor'] = color + payload["themeColor"] = color if title is not None: - payload['title'] = title + payload["title"] = title if text is not None: - payload['text'] = text + payload["text"] = text if actions: - payload['potentialAction'] = build_actions(actions) + payload["potentialAction"] = build_actions(actions) if sections: - payload['sections'] = build_sections(sections) + payload["sections"] = build_sections(sections) payload = module.jsonify(payload) return payload def do_notify_connector_card_webhook(module, webhook, payload): - headers = { - 'Content-Type': 'application/json' - } - - response, info = fetch_url( - module=module, - url=webhook, - headers=headers, - method='POST', - data=payload - ) + headers = {"Content-Type": "application/json"} + + response, info = fetch_url(module=module, url=webhook, headers=headers, method="POST", data=payload) - if info['status'] == 200: + if info["status"] == 200: module.exit_json(changed=True) - elif info['status'] == 400 and module.check_mode: - if info['body'] == OFFICE_365_CARD_EMPTY_PAYLOAD_MSG: + elif info["status"] == 400 and module.check_mode: + if info["body"] == OFFICE_365_CARD_EMPTY_PAYLOAD_MSG: module.exit_json(changed=True) else: module.fail_json(msg=OFFICE_365_INVALID_WEBHOOK_MSG) else: - module.fail_json( - msg=f"failed to send {payload} as a connector card to Incoming Webhook: {info['msg']}" - 
) + module.fail_json(msg=f"failed to send {payload} as a connector card to Incoming Webhook: {info['msg']}") def main(): module = AnsibleModule( argument_spec=dict( webhook=dict(required=True, no_log=True), - summary=dict(type='str'), - color=dict(type='str'), - title=dict(type='str'), - text=dict(type='str'), - actions=dict(type='list', elements='dict'), - sections=dict(type='list', elements='dict') + summary=dict(type="str"), + color=dict(type="str"), + title=dict(type="str"), + text=dict(type="str"), + actions=dict(type="list", elements="dict"), + sections=dict(type="list", elements="dict"), ), - supports_check_mode=True + supports_check_mode=True, ) - webhook = module.params['webhook'] - summary = module.params['summary'] - color = module.params['color'] - title = module.params['title'] - text = module.params['text'] - actions = module.params['actions'] - sections = module.params['sections'] - - payload = build_payload_for_connector_card( - module, - summary, - color, - title, - text, - actions, - sections) + webhook = module.params["webhook"] + summary = module.params["summary"] + color = module.params["color"] + title = module.params["title"] + text = module.params["text"] + actions = module.params["actions"] + sections = module.params["sections"] + + payload = build_payload_for_connector_card(module, summary, color, title, text, actions, sections) if module.check_mode: # In check mode, send an empty payload to validate connection @@ -300,5 +285,5 @@ def main(): do_notify_connector_card_webhook(module, webhook, payload) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ohai.py b/plugins/modules/ohai.py index 6d30a062303..9e174aa45e9 100644 --- a/plugins/modules/ohai.py +++ b/plugins/modules/ohai.py @@ -38,13 +38,11 @@ def main(): - module = AnsibleModule( - argument_spec=dict() - ) + module = AnsibleModule(argument_spec=dict()) cmd = ["/usr/bin/env", "ohai"] rc, out, err = module.run_command(cmd, check_rc=True) module.exit_json(**json.loads(out)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/omapi_host.py b/plugins/modules/omapi_host.py index a03a25ec0dc..3ad9d0c2121 100644 --- a/plugins/modules/omapi_host.py +++ b/plugins/modules/omapi_host.py @@ -138,6 +138,7 @@ from pypureomapi import Omapi, OmapiMessage, OmapiError, OmapiErrorNotFound from pypureomapi import pack_ip, unpack_ip, pack_mac, unpack_mac from pypureomapi import OMAPI_OP_STATUS, OMAPI_OP_UPDATE + pureomapi_found = True except ImportError: PUREOMAPI_IMP_ERR = traceback.format_exc() @@ -155,19 +156,25 @@ def __init__(self, module): def connect(self): try: - self.omapi = Omapi(self.module.params['host'], self.module.params['port'], to_bytes(self.module.params['key_name']), - self.module.params['key']) + self.omapi = Omapi( + self.module.params["host"], + self.module.params["port"], + to_bytes(self.module.params["key_name"]), + self.module.params["key"], + ) except binascii.Error: self.module.fail_json(msg="Unable to open OMAPI connection. 'key' is not a valid base64 key.") except OmapiError as e: - self.module.fail_json(msg=f"Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' are valid. Exception was: {e}") + self.module.fail_json( + msg=f"Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' are valid. 
Exception was: {e}" + ) except socket.error as e: self.module.fail_json(msg=f"Unable to connect to OMAPI server: {e}") def get_host(self, macaddr): - msg = OmapiMessage.open(to_bytes("host", errors='surrogate_or_strict')) - msg.obj.append((to_bytes("hardware-address", errors='surrogate_or_strict'), pack_mac(macaddr))) - msg.obj.append((to_bytes("hardware-type", errors='surrogate_or_strict'), struct.pack("!I", 1))) + msg = OmapiMessage.open(to_bytes("host", errors="surrogate_or_strict")) + msg.obj.append((to_bytes("hardware-address", errors="surrogate_or_strict"), pack_mac(macaddr))) + msg.obj.append((to_bytes("hardware-type", errors="surrogate_or_strict"), struct.pack("!I", 1))) response = self.omapi.query_server(msg) if response.opcode != OMAPI_OP_UPDATE: return None @@ -179,53 +186,56 @@ def unpack_facts(obj): for k, v in dict(obj).items(): result[to_text(k)] = v - if 'hardware-address' in result: - result['hardware-address'] = to_native(unpack_mac(result['hardware-address'])) + if "hardware-address" in result: + result["hardware-address"] = to_native(unpack_mac(result["hardware-address"])) - if 'ip-address' in result: - result['ip-address'] = to_native(unpack_ip(result['ip-address'])) + if "ip-address" in result: + result["ip-address"] = to_native(unpack_ip(result["ip-address"])) - if 'hardware-type' in result: - result['hardware-type'] = struct.unpack("!I", result['hardware-type']) + if "hardware-type" in result: + result["hardware-type"] = struct.unpack("!I", result["hardware-type"]) return result def setup_host(self): - if self.module.params['hostname'] is None or len(self.module.params['hostname']) == 0: + if self.module.params["hostname"] is None or len(self.module.params["hostname"]) == 0: self.module.fail_json(msg="name attribute could not be empty when adding or modifying host.") msg = None - host_response = self.get_host(self.module.params['macaddr']) + host_response = self.get_host(self.module.params["macaddr"]) # If host was not found using macaddr, add create message if host_response is None: - msg = OmapiMessage.open(to_bytes('host', errors='surrogate_or_strict')) - msg.message.append((to_bytes('create'), struct.pack('!I', 1))) - msg.message.append((to_bytes('exclusive'), struct.pack('!I', 1))) - msg.obj.append((to_bytes('hardware-address'), pack_mac(self.module.params['macaddr']))) - msg.obj.append((to_bytes('hardware-type'), struct.pack('!I', 1))) - msg.obj.append((to_bytes('name'), to_bytes(self.module.params['hostname']))) - if self.module.params['ip'] is not None: - msg.obj.append((to_bytes("ip-address", errors='surrogate_or_strict'), pack_ip(self.module.params['ip']))) + msg = OmapiMessage.open(to_bytes("host", errors="surrogate_or_strict")) + msg.message.append((to_bytes("create"), struct.pack("!I", 1))) + msg.message.append((to_bytes("exclusive"), struct.pack("!I", 1))) + msg.obj.append((to_bytes("hardware-address"), pack_mac(self.module.params["macaddr"]))) + msg.obj.append((to_bytes("hardware-type"), struct.pack("!I", 1))) + msg.obj.append((to_bytes("name"), to_bytes(self.module.params["hostname"]))) + if self.module.params["ip"] is not None: + msg.obj.append( + (to_bytes("ip-address", errors="surrogate_or_strict"), pack_ip(self.module.params["ip"])) + ) stmt_join = "" - if self.module.params['ddns']: - stmt_join += f"ddns-hostname \"{self.module.params['hostname']}\"; " + if self.module.params["ddns"]: + stmt_join += f'ddns-hostname "{self.module.params["hostname"]}"; ' try: - if len(self.module.params['statements']) > 0: - stmt_join += "; 
".join(self.module.params['statements']) + if len(self.module.params["statements"]) > 0: + stmt_join += "; ".join(self.module.params["statements"]) stmt_join += "; " except TypeError as e: self.module.fail_json(msg=f"Invalid statements found: {e}") if len(stmt_join) > 0: - msg.obj.append((to_bytes('statements'), to_bytes(stmt_join))) + msg.obj.append((to_bytes("statements"), to_bytes(stmt_join))) try: response = self.omapi.query_server(msg) if response.opcode != OMAPI_OP_UPDATE: - self.module.fail_json(msg="Failed to add host, ensure authentication and host parameters " - "are valid.") + self.module.fail_json( + msg="Failed to add host, ensure authentication and host parameters are valid." + ) self.module.exit_json(changed=True, lease=self.unpack_facts(response.obj)) except OmapiError as e: self.module.fail_json(msg=f"OMAPI error: {e}") @@ -234,14 +244,17 @@ def setup_host(self): response_obj = self.unpack_facts(host_response.obj) fields_to_update = {} - if 'ip-address' not in response_obj or \ - response_obj['ip-address'] != self.module.params['ip']: - fields_to_update['ip-address'] = pack_ip(self.module.params['ip']) + if "ip-address" not in response_obj or response_obj["ip-address"] != self.module.params["ip"]: + fields_to_update["ip-address"] = pack_ip(self.module.params["ip"]) # Name cannot be changed - if 'name' not in response_obj or response_obj['name'] != self.module.params['hostname']: - self.module.fail_json(msg=(f"Changing hostname is not supported. Old was {response_obj['name']}, " - f"new is {self.module.params['hostname']}. Please delete host and add new.")) + if "name" not in response_obj or response_obj["name"] != self.module.params["hostname"]: + self.module.fail_json( + msg=( + f"Changing hostname is not supported. Old was {response_obj['name']}, " + f"new is {self.module.params['hostname']}. Please delete host and add new." + ) + ) """ # It seems statements are not returned by OMAPI, then we cannot modify them at this moment. @@ -260,15 +273,16 @@ def setup_host(self): try: response = self.omapi.query_server(msg) if response.opcode != OMAPI_OP_STATUS: - self.module.fail_json(msg="Failed to modify host, ensure authentication and host parameters " - "are valid.") + self.module.fail_json( + msg="Failed to modify host, ensure authentication and host parameters are valid." 
+ ) self.module.exit_json(changed=True) except OmapiError as e: self.module.fail_json(msg=f"OMAPI error: {e}") def remove_host(self): try: - self.omapi.del_host(self.module.params['macaddr']) + self.omapi.del_host(self.module.params["macaddr"]) self.module.exit_json(changed=True) except OmapiErrorNotFound: self.module.exit_json() @@ -279,38 +293,38 @@ def remove_host(self): def main(): module = AnsibleModule( argument_spec=dict( - state=dict(type='str', required=True, choices=['absent', 'present']), - host=dict(type='str', default="localhost"), - port=dict(type='int', default=7911), - key_name=dict(type='str', required=True), - key=dict(type='str', required=True, no_log=True), - macaddr=dict(type='str', required=True), - hostname=dict(type='str', aliases=['name']), - ip=dict(type='str'), - ddns=dict(type='bool', default=False), - statements=dict(type='list', elements='str', default=[]), + state=dict(type="str", required=True, choices=["absent", "present"]), + host=dict(type="str", default="localhost"), + port=dict(type="int", default=7911), + key_name=dict(type="str", required=True), + key=dict(type="str", required=True, no_log=True), + macaddr=dict(type="str", required=True), + hostname=dict(type="str", aliases=["name"]), + ip=dict(type="str"), + ddns=dict(type="bool", default=False), + statements=dict(type="list", elements="str", default=[]), ), supports_check_mode=False, ) if not pureomapi_found: - module.fail_json(msg=missing_required_lib('pypureomapi'), exception=PUREOMAPI_IMP_ERR) + module.fail_json(msg=missing_required_lib("pypureomapi"), exception=PUREOMAPI_IMP_ERR) - if module.params['key'] is None or len(module.params["key"]) == 0: + if module.params["key"] is None or len(module.params["key"]) == 0: module.fail_json(msg="'key' parameter cannot be empty.") - if module.params['key_name'] is None or len(module.params["key_name"]) == 0: + if module.params["key_name"] is None or len(module.params["key_name"]) == 0: module.fail_json(msg="'key_name' parameter cannot be empty.") host_manager = OmapiHostManager(module) try: - if module.params['state'] == 'present': + if module.params["state"] == "present": host_manager.setup_host() - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": host_manager.remove_host() except ValueError as e: module.fail_json(msg=f"OMAPI input value error: {e}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/one_host.py b/plugins/modules/one_host.py index d8ebfe6b662..a9682172f69 100644 --- a/plugins/modules/one_host.py +++ b/plugins/modules/one_host.py @@ -126,23 +126,19 @@ class HostModule(OpenNebulaModule): - def __init__(self): - argument_spec = dict( - name=dict(type='str', required=True), - state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'offline'], default='present'), - im_mad_name=dict(type='str', default="kvm"), - vmm_mad_name=dict(type='str', default="kvm"), - cluster_id=dict(type='int', default=0), - cluster_name=dict(type='str'), - labels=dict(type='list', elements='str'), - template=dict(type='dict', aliases=['attributes']), + name=dict(type="str", required=True), + state=dict(choices=["present", "absent", "enabled", "disabled", "offline"], default="present"), + im_mad_name=dict(type="str", default="kvm"), + vmm_mad_name=dict(type="str", default="kvm"), + cluster_id=dict(type="int", default=0), + cluster_name=dict(type="str"), + labels=dict(type="list", elements="str"), + template=dict(type="dict", aliases=["attributes"]), ) - mutually_exclusive = [ - 
['cluster_id', 'cluster_name'] - ] + mutually_exclusive = [["cluster_id", "cluster_name"]] OpenNebulaModule.__init__(self, argument_spec, mutually_exclusive=mutually_exclusive) @@ -154,11 +150,13 @@ def allocate_host(self): """ try: - self.one.host.allocate(self.get_parameter('name'), - self.get_parameter('vmm_mad_name'), - self.get_parameter('im_mad_name'), - self.get_parameter('cluster_id')) - self.result['changed'] = True + self.one.host.allocate( + self.get_parameter("name"), + self.get_parameter("vmm_mad_name"), + self.get_parameter("im_mad_name"), + self.get_parameter("cluster_id"), + ) + self.result["changed"] = True except Exception as e: self.fail(msg=f"Could not allocate host, ERROR: {e}") @@ -172,19 +170,21 @@ def wait_for_host_state(self, host, target_states): target_states: """ - return self.wait_for_state('host', - lambda: self.one.host.info(host.ID).STATE, - lambda s: HOST_STATES(s).name, target_states, - invalid_states=[HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]) + return self.wait_for_state( + "host", + lambda: self.one.host.info(host.ID).STATE, + lambda s: HOST_STATES(s).name, + target_states, + invalid_states=[HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR], + ) def run(self, one, module, result): - # Get the list of hosts host_name = self.get_parameter("name") host = self.get_host_by_name(host_name) # manage host state - desired_state = self.get_parameter('state') + desired_state = self.get_parameter("state") if bool(host): current_state = host.STATE current_state_name = HOST_STATES(host.STATE).name @@ -193,7 +193,7 @@ def run(self, one, module, result): current_state_name = "ABSENT" # apply properties - if desired_state == 'present': + if desired_state == "present": if current_state == HOST_ABSENT: self.allocate_host() host = self.get_host_by_name(host_name) @@ -201,7 +201,7 @@ def run(self, one, module, result): elif current_state in [HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]: self.fail(msg=f"invalid host state {current_state_name}") - elif desired_state == 'enabled': + elif desired_state == "enabled": if current_state == HOST_ABSENT: self.allocate_host() host = self.get_host_by_name(host_name) @@ -209,7 +209,7 @@ def run(self, one, module, result): elif current_state in [HOST_STATES.DISABLED, HOST_STATES.OFFLINE]: if one.host.status(host.ID, HOST_STATUS.ENABLED): self.wait_for_host_state(host, [HOST_STATES.MONITORED]) - result['changed'] = True + result["changed"] = True else: self.fail(msg="could not enable host") elif current_state in [HOST_STATES.MONITORED]: @@ -217,14 +217,14 @@ def run(self, one, module, result): else: self.fail(msg=f"unknown host state {current_state_name}, cowardly refusing to change state to enable") - elif desired_state == 'disabled': + elif desired_state == "disabled": if current_state == HOST_ABSENT: - self.fail(msg='absent host cannot be put in disabled state') + self.fail(msg="absent host cannot be put in disabled state") elif current_state in [HOST_STATES.MONITORED, HOST_STATES.OFFLINE]: # returns host ID integer try: one.host.status(host.ID, HOST_STATUS.DISABLED) - result['changed'] = True + result["changed"] = True except Exception as e: self.fail(msg=f"Could not disable host, ERROR: {e}") self.wait_for_host_state(host, [HOST_STATES.DISABLED]) @@ -233,14 +233,14 @@ def run(self, one, module, result): else: self.fail(msg=f"unknown host state {current_state_name}, cowardly refusing to change state to disable") - elif desired_state == 'offline': + elif desired_state == "offline": if current_state == HOST_ABSENT: - 
self.fail(msg='absent host cannot be placed in offline state') + self.fail(msg="absent host cannot be placed in offline state") elif current_state in [HOST_STATES.MONITORED, HOST_STATES.DISABLED]: # returns host ID integer try: one.host.status(host.ID, HOST_STATUS.OFFLINE) - result['changed'] = True + result["changed"] = True except Exception as e: self.fail(msg=f"Could not set host offline, ERROR: {e}") self.wait_for_host_state(host, [HOST_STATES.OFFLINE]) @@ -249,12 +249,12 @@ def run(self, one, module, result): else: self.fail(msg=f"unknown host state {current_state_name}, cowardly refusing to change state to offline") - elif desired_state == 'absent': + elif desired_state == "absent": if current_state != HOST_ABSENT: # returns host ID integer try: one.host.delete(host.ID) - result['changed'] = True + result["changed"] = True except Exception as e: self.fail(msg=f"Could not delete host from cluster, ERROR: {e}") @@ -262,14 +262,14 @@ def run(self, one, module, result): if desired_state != "absent": # manipulate or modify the template - desired_template_changes = self.get_parameter('template') + desired_template_changes = self.get_parameter("template") if desired_template_changes is None: desired_template_changes = dict() # complete the template with specific ansible parameters - if self.is_parameter('labels'): - desired_template_changes['LABELS'] = self.get_parameter('labels') + if self.is_parameter("labels"): + desired_template_changes["LABELS"] = self.get_parameter("labels") if self.requires_template_update(host.TEMPLATE, desired_template_changes): # setup the root element so that pyone will generate XML instead of attribute vector @@ -277,16 +277,16 @@ def run(self, one, module, result): # merge the template, returns host ID integer try: one.host.update(host.ID, desired_template_changes, 1) - result['changed'] = True + result["changed"] = True except Exception as e: self.fail(msg=f"Failed to update the host template, ERROR: {e}") # the cluster - if host.CLUSTER_ID != self.get_parameter('cluster_id'): + if host.CLUSTER_ID != self.get_parameter("cluster_id"): # returns cluster id in int try: - one.cluster.addhost(self.get_parameter('cluster_id'), host.ID) - result['changed'] = True + one.cluster.addhost(self.get_parameter("cluster_id"), host.ID) + result["changed"] = True except Exception as e: self.fail(msg=f"Failed to update the host cluster, ERROR: {e}") @@ -298,5 +298,5 @@ def main(): HostModule().run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/one_image.py b/plugins/modules/one_image.py index b9ec5699322..8c819a9d964 100644 --- a/plugins/modules/one_image.py +++ b/plugins/modules/one_image.py @@ -371,54 +371,68 @@ from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule -IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] +IMAGE_STATES = [ + "INIT", + "READY", + "USED", + "DISABLED", + "LOCKED", + "ERROR", + "CLONE", + "DELETE", + "USED_PERS", + "LOCKED_USED", + "LOCKED_USED_PERS", +] class ImageModule(OpenNebulaModule): def __init__(self): argument_spec = dict( - id=dict(type='int'), - name=dict(type='str'), - state=dict(type='str', choices=['present', 'absent', 'cloned', 'renamed'], default='present'), - enabled=dict(type='bool'), - new_name=dict(type='str'), - persistent=dict(type='bool'), - create=dict(type='bool'), - template=dict(type='str'), - datastore_id=dict(type='int'), - 
wait_timeout=dict(type='int', default=60), + id=dict(type="int"), + name=dict(type="str"), + state=dict(type="str", choices=["present", "absent", "cloned", "renamed"], default="present"), + enabled=dict(type="bool"), + new_name=dict(type="str"), + persistent=dict(type="bool"), + create=dict(type="bool"), + template=dict(type="str"), + datastore_id=dict(type="int"), + wait_timeout=dict(type="int", default=60), ) required_if = [ - ['state', 'renamed', ['id']], - ['create', True, ['template', 'datastore_id', 'name']], + ["state", "renamed", ["id"]], + ["create", True, ["template", "datastore_id", "name"]], ] mutually_exclusive = [ - ['id', 'name'], + ["id", "name"], ] - OpenNebulaModule.__init__(self, - argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive, - required_if=required_if) + OpenNebulaModule.__init__( + self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + ) def run(self, one, module, result): params = module.params - id = params.get('id') - name = params.get('name') - desired_state = params.get('state') - enabled = params.get('enabled') - new_name = params.get('new_name') - persistent = params.get('persistent') - create = params.get('create') - template = params.get('template') - datastore_id = params.get('datastore_id') - wait_timeout = params.get('wait_timeout') + id = params.get("id") + name = params.get("name") + desired_state = params.get("state") + enabled = params.get("enabled") + new_name = params.get("new_name") + persistent = params.get("persistent") + create = params.get("create") + template = params.get("template") + datastore_id = params.get("datastore_id") + wait_timeout = params.get("wait_timeout") self.result = {} image = self.get_image_instance(id, name) - if not image and desired_state != 'absent': + if not image and desired_state != "absent": if create: self.result = self.create_image(name, template, datastore_id, wait_timeout) # Using 'if id:' doesn't work properly when id=0 @@ -427,7 +441,7 @@ def run(self, one, module, result): elif name is not None: module.fail_json(msg=f"There is no image with name={name}") - if desired_state == 'absent': + if desired_state == "absent": self.result = self.delete_image(image, wait_timeout) else: if persistent is not None: @@ -471,21 +485,22 @@ def create_image(self, image_name, template, datastore_id, wait_timeout): image = self.get_image_by_id(image_id) result = self.get_image_info(image) - result['changed'] = True + result["changed"] = True return result def wait_for_ready(self, image_id, wait_timeout=60): import time + start_time = time.time() while (time.time() - start_time) < wait_timeout: image = self.one.image.info(image_id) state = image.STATE - if state in [IMAGE_STATES.index('ERROR')]: + if state in [IMAGE_STATES.index("ERROR")]: self.module.fail_json(msg=f"Got an ERROR state: {image.TEMPLATE['ERROR']}") - if state in [IMAGE_STATES.index('READY')]: + if state in [IMAGE_STATES.index("READY")]: return True time.sleep(1) @@ -493,6 +508,7 @@ def wait_for_ready(self, image_id, wait_timeout=60): def wait_for_delete(self, image_id, wait_timeout=60): import time + start_time = time.time() while (time.time() - start_time) < wait_timeout: @@ -506,7 +522,7 @@ def wait_for_delete(self, image_id, wait_timeout=60): state = image.STATE - if state in [IMAGE_STATES.index('DELETE')]: + if state in [IMAGE_STATES.index("DELETE")]: return True time.sleep(1) @@ -519,21 +535,22 @@ def enable_image(self, image, enable): state = image.STATE - if 
state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: + if state not in [IMAGE_STATES.index("READY"), IMAGE_STATES.index("DISABLED"), IMAGE_STATES.index("ERROR")]: if enable: self.module.fail_json(msg=f"Cannot enable {IMAGE_STATES[state]} image!") else: self.module.fail_json(msg=f"Cannot disable {IMAGE_STATES[state]} image!") - if ((enable and state != IMAGE_STATES.index('READY')) or - (not enable and state != IMAGE_STATES.index('DISABLED'))): + if (enable and state != IMAGE_STATES.index("READY")) or ( + not enable and state != IMAGE_STATES.index("DISABLED") + ): changed = True if changed and not self.module.check_mode: self.one.image.enable(image.ID, enable) result = self.get_image_info(image) - result['changed'] = changed + result["changed"] = changed return result @@ -543,21 +560,22 @@ def change_persistence(self, image, enable): state = image.STATE - if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: + if state not in [IMAGE_STATES.index("READY"), IMAGE_STATES.index("DISABLED"), IMAGE_STATES.index("ERROR")]: if enable: self.module.fail_json(msg=f"Cannot enable persistence for {IMAGE_STATES[state]} image!") else: self.module.fail_json(msg=f"Cannot disable persistence for {IMAGE_STATES[state]} image!") - if ((enable and state != IMAGE_STATES.index('READY')) or - (not enable and state != IMAGE_STATES.index('DISABLED'))): + if (enable and state != IMAGE_STATES.index("READY")) or ( + not enable and state != IMAGE_STATES.index("DISABLED") + ): changed = True if changed and not self.module.check_mode: self.one.image.persistent(image.ID, enable) result = self.get_image_info(image) - result['changed'] = changed + result["changed"] = changed return result @@ -568,10 +586,10 @@ def clone_image(self, image, new_name, wait_timeout): tmp_image = self.get_image_by_name(new_name) if tmp_image: result = self.get_image_info(image) - result['changed'] = False + result["changed"] = False return result - if image.STATE == IMAGE_STATES.index('DISABLED'): + if image.STATE == IMAGE_STATES.index("DISABLED"): self.module.fail_json(msg="Cannot clone DISABLED image") if not self.module.check_mode: @@ -580,7 +598,7 @@ def clone_image(self, image, new_name, wait_timeout): image = self.one.image.info(new_id) result = self.get_image_info(image) - result['changed'] = True + result["changed"] = True return result @@ -590,7 +608,7 @@ def rename_image(self, image, new_name): if new_name == image.NAME: result = self.get_image_info(image) - result['changed'] = False + result["changed"] = False return result tmp_image = self.get_image_by_name(new_name) @@ -601,12 +619,12 @@ def rename_image(self, image, new_name): self.one.image.rename(image.ID, new_name) result = self.get_image_info(image) - result['changed'] = True + result["changed"] = True return result def delete_image(self, image, wait_timeout): if not image: - return {'changed': False} + return {"changed": False} if image.RUNNING_VMS > 0: self.module.fail_json(msg=f"Cannot delete image. 
There are {image.RUNNING_VMS!s} VMs using it.") @@ -615,12 +633,12 @@ def delete_image(self, image, wait_timeout): self.one.image.delete(image.ID) self.wait_for_delete(image.ID, wait_timeout) - return {'changed': True} + return {"changed": True} def main(): ImageModule().run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/one_image_info.py b/plugins/modules/one_image_info.py index 81022d81f73..70dca77a63c 100644 --- a/plugins/modules/one_image_info.py +++ b/plugins/modules/one_image_info.py @@ -273,28 +273,37 @@ from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule -IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] +IMAGE_STATES = [ + "INIT", + "READY", + "USED", + "DISABLED", + "LOCKED", + "ERROR", + "CLONE", + "DELETE", + "USED_PERS", + "LOCKED_USED", + "LOCKED_USED_PERS", +] class ImageInfoModule(OpenNebulaModule): def __init__(self): argument_spec = dict( - ids=dict(type='list', aliases=['id'], elements='str'), - name=dict(type='str'), + ids=dict(type="list", aliases=["id"], elements="str"), + name=dict(type="str"), ) mutually_exclusive = [ - ['ids', 'name'], + ["ids", "name"], ] - OpenNebulaModule.__init__(self, - argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive) + OpenNebulaModule.__init__(self, argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive) def run(self, one, module, result): params = module.params - ids = params.get('ids') - name = params.get('name') + ids = params.get("ids") + name = params.get("name") if ids: images = self.get_images_by_ids(ids) @@ -303,9 +312,7 @@ def run(self, one, module, result): else: images = self.get_all_images().IMAGE - self.result = { - 'images': [self.get_image_info(image) for image in images] - } + self.result = {"images": [self.get_image_info(image) for image in images]} self.exit() @@ -337,9 +344,10 @@ def get_images_by_name(self, name_pattern): pool = self.get_all_images() - if name_pattern.startswith('~'): + if name_pattern.startswith("~"): import re - if name_pattern[1] == '*': + + if name_pattern[1] == "*": pattern = re.compile(name_pattern[2:], re.IGNORECASE) else: pattern = re.compile(name_pattern[1:]) @@ -363,5 +371,5 @@ def main(): ImageInfoModule().run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/one_service.py b/plugins/modules/one_service.py index aebd55b87b2..e52e10a9c34 100644 --- a/plugins/modules/one_service.py +++ b/plugins/modules/one_service.py @@ -234,13 +234,30 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import open_url -STATES = ("PENDING", "DEPLOYING", "RUNNING", "UNDEPLOYING", "WARNING", "DONE", - "FAILED_UNDEPLOYING", "FAILED_DEPLOYING", "SCALING", "FAILED_SCALING", "COOLDOWN") +STATES = ( + "PENDING", + "DEPLOYING", + "RUNNING", + "UNDEPLOYING", + "WARNING", + "DONE", + "FAILED_UNDEPLOYING", + "FAILED_DEPLOYING", + "SCALING", + "FAILED_SCALING", + "COOLDOWN", +) def get_all_templates(module, auth): try: - all_templates = open_url(url=f"{auth.url}/service_template", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password) + all_templates = open_url( + url=f"{auth.url}/service_template", + method="GET", + force_basic_auth=True, + url_username=auth.user, + url_password=auth.password, + ) except Exception as e: module.fail_json(msg=str(e)) @@ -252,7 
+269,7 @@ def get_template(module, auth, pred): found = 0 found_template = None - template_name = '' + template_name = "" if "DOCUMENT_POOL" in all_templates_dict and "DOCUMENT" in all_templates_dict["DOCUMENT_POOL"]: for template in all_templates_dict["DOCUMENT_POOL"]["DOCUMENT"]: @@ -271,7 +288,13 @@ def get_template(module, auth, pred): def get_all_services(module, auth): try: - response = open_url(f"{auth.url}/service", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password) + response = open_url( + f"{auth.url}/service", + method="GET", + force_basic_auth=True, + url_username=auth.user, + url_password=auth.password, + ) except Exception as e: module.fail_json(msg=str(e)) @@ -283,7 +306,7 @@ def get_service(module, auth, pred): found = 0 found_service = None - service_name = '' + service_name = "" if "DOCUMENT_POOL" in all_services_dict and "DOCUMENT" in all_services_dict["DOCUMENT_POOL"]: for service in all_services_dict["DOCUMENT_POOL"]["DOCUMENT"]: @@ -294,7 +317,9 @@ def get_service(module, auth, pred): # fail if there are more services with same name if found > 1: - module.fail_json(msg=f"There are multiple services with a name: '{service_name}'. You have to use a unique service name or use 'service_id' instead.") + module.fail_json( + msg=f"There are multiple services with a name: '{service_name}'. You have to use a unique service name or use 'service_id' instead." + ) elif found <= 0: return None else: @@ -310,7 +335,6 @@ def get_service_by_name(module, auth, service_name): def get_service_info(module, auth, service): - result = { "service_id": int(service["ID"]), "service_name": service["NAME"], @@ -318,7 +342,7 @@ def get_service_info(module, auth, service): "group_name": service["GNAME"], "owner_id": int(service["UID"]), "owner_name": service["UNAME"], - "state": STATES[service["TEMPLATE"]["BODY"]["state"]] + "state": STATES[service["TEMPLATE"]["BODY"]["state"]], } roles_status = service["TEMPLATE"]["BODY"]["roles"] @@ -328,7 +352,14 @@ def get_service_info(module, auth, service): if "nodes" in role: for node in role["nodes"]: nodes_ids.append(node["deploy_id"]) - roles.append({"name": role["name"], "cardinality": role["cardinality"], "state": STATES[int(role["state"])], "ids": nodes_ids}) + roles.append( + { + "name": role["name"], + "cardinality": role["cardinality"], + "state": STATES[int(role["state"])], + "ids": nodes_ids, + } + ) result["roles"] = roles result["mode"] = int(parse_service_permissions(service)) @@ -343,18 +374,19 @@ def create_service(module, auth, template_id, service_name, custom_attrs, unique data = { "action": { "perform": "instantiate", - "params": { - "merge_template": { - "custom_attrs_values": custom_attrs_with_str, - "name": service_name - } - } + "params": {"merge_template": {"custom_attrs_values": custom_attrs_with_str, "name": service_name}}, } } try: - response = open_url(f"{auth.url}/service_template/{template_id!s}/action", method="POST", - data=module.jsonify(data), force_basic_auth=True, url_username=auth.user, url_password=auth.password) + response = open_url( + f"{auth.url}/service_template/{template_id!s}/action", + method="POST", + data=module.jsonify(data), + force_basic_auth=True, + url_username=auth.user, + url_password=auth.password, + ) except Exception as e: module.fail_json(msg=str(e)) @@ -365,12 +397,18 @@ def create_service(module, auth, template_id, service_name, custom_attrs, unique def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout): import time + start_time = 
time.time() while (time.time() - start_time) < wait_timeout: try: - status_result = open_url(f"{auth.url}/service/{service_id!s}", method="GET", - force_basic_auth=True, url_username=auth.user, url_password=auth.password) + status_result = open_url( + f"{auth.url}/service/{service_id!s}", + method="GET", + force_basic_auth=True, + url_username=auth.user, + url_password=auth.password, + ) except Exception as e: module.fail_json(msg=f"Request for service status has failed. Error message: {e!s}") @@ -380,13 +418,15 @@ def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout): if service_state in [STATES.index("RUNNING"), STATES.index("COOLDOWN")]: return status_result["DOCUMENT"] elif service_state not in [STATES.index("PENDING"), STATES.index("DEPLOYING"), STATES.index("SCALING")]: - log_message = '' + log_message = "" for log_info in status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["log"]: if log_info["severity"] == "E": log_message = log_message + log_info["message"] break - module.fail_json(msg=f"Deploying is unsuccessful. Service state: {STATES[service_state]}. Error message: {log_message}") + module.fail_json( + msg=f"Deploying is unsuccessful. Service state: {STATES[service_state]}. Error message: {log_message}" + ) time.sleep(1) @@ -394,62 +434,65 @@ def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout): def change_service_permissions(module, auth, service_id, permissions): - - data = { - "action": { - "perform": "chmod", - "params": {"octet": permissions} - } - } + data = {"action": {"perform": "chmod", "params": {"octet": permissions}}} try: - status_result = open_url(f"{auth.url}/service/{service_id!s}/action", method="POST", force_basic_auth=True, - url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + status_result = open_url( + f"{auth.url}/service/{service_id!s}/action", + method="POST", + force_basic_auth=True, + url_username=auth.user, + url_password=auth.password, + data=module.jsonify(data), + ) except Exception as e: module.fail_json(msg=str(e)) def change_service_owner(module, auth, service_id, owner_id): - data = { - "action": { - "perform": "chown", - "params": {"owner_id": owner_id} - } - } + data = {"action": {"perform": "chown", "params": {"owner_id": owner_id}}} try: - status_result = open_url(f"{auth.url}/service/{service_id!s}/action", method="POST", force_basic_auth=True, - url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + status_result = open_url( + f"{auth.url}/service/{service_id!s}/action", + method="POST", + force_basic_auth=True, + url_username=auth.user, + url_password=auth.password, + data=module.jsonify(data), + ) except Exception as e: module.fail_json(msg=str(e)) def change_service_group(module, auth, service_id, group_id): - - data = { - "action": { - "perform": "chgrp", - "params": {"group_id": group_id} - } - } + data = {"action": {"perform": "chgrp", "params": {"group_id": group_id}}} try: - status_result = open_url(f"{auth.url}/service/{service_id!s}/action", method="POST", force_basic_auth=True, - url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + status_result = open_url( + f"{auth.url}/service/{service_id!s}/action", + method="POST", + force_basic_auth=True, + url_username=auth.user, + url_password=auth.password, + data=module.jsonify(data), + ) except Exception as e: module.fail_json(msg=str(e)) def change_role_cardinality(module, auth, service_id, role, cardinality, force): - - data = { - "cardinality": cardinality, - 
"force": force - } + data = {"cardinality": cardinality, "force": force} try: - status_result = open_url(f"{auth.url}/service/{service_id!s}/role/{role}", method="PUT", - force_basic_auth=True, url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + status_result = open_url( + f"{auth.url}/service/{service_id!s}/role/{role}", + method="PUT", + force_basic_auth=True, + url_username=auth.user, + url_password=auth.password, + data=module.jsonify(data), + ) except Exception as e: module.fail_json(msg=str(e)) @@ -471,7 +514,7 @@ def check_change_service_group(module, service, group_id): def parse_service_permissions(service): perm_dict = service["PERMISSIONS"] - ''' + """ This is the structure of the 'PERMISSIONS' dictionary: "PERMISSIONS": { @@ -485,7 +528,7 @@ def parse_service_permissions(service): "OTHER_M": "0", "OTHER_A": "0" } - ''' + """ owner_octal = int(perm_dict["OWNER_U"]) * 4 + int(perm_dict["OWNER_M"]) * 2 + int(perm_dict["OWNER_A"]) group_octal = int(perm_dict["GROUP_U"]) * 4 + int(perm_dict["GROUP_M"]) * 2 + int(perm_dict["GROUP_A"]) @@ -512,9 +555,11 @@ def check_change_role_cardinality(module, service, role_name, cardinality): module.fail_json(msg=f"There is no role with name: {role_name}") -def create_service_and_operation(module, auth, template_id, service_name, owner_id, group_id, permissions, custom_attrs, unique, wait, wait_timeout): +def create_service_and_operation( + module, auth, template_id, service_name, owner_id, group_id, permissions, custom_attrs, unique, wait, wait_timeout +): if not service_name: - service_name = '' + service_name = "" changed = False service = None @@ -530,8 +575,16 @@ def create_service_and_operation(module, auth, template_id, service_name, owner_ if module.check_mode and changed: return {"changed": True} - result = service_operation(module, auth, owner_id=owner_id, group_id=group_id, wait=wait, - wait_timeout=wait_timeout, permissions=permissions, service=service) + result = service_operation( + module, + auth, + owner_id=owner_id, + group_id=group_id, + wait=wait, + wait_timeout=wait_timeout, + permissions=permissions, + service=service, + ) if result["changed"]: changed = True @@ -541,9 +594,20 @@ def create_service_and_operation(module, auth, template_id, service_name, owner_ return result -def service_operation(module, auth, service_id=None, owner_id=None, group_id=None, permissions=None, - role=None, cardinality=None, force=None, wait=False, wait_timeout=None, service=None): - +def service_operation( + module, + auth, + service_id=None, + owner_id=None, + group_id=None, + permissions=None, + role=None, + cardinality=None, + force=None, + wait=False, + wait_timeout=None, + service=None, +): changed = False if not service: @@ -602,7 +666,13 @@ def delete_service(module, auth, service_id): return service_info try: - result = open_url(f"{auth.url}/service/{service_id!s}", method="DELETE", force_basic_auth=True, url_username=auth.user, url_password=auth.password) + result = open_url( + f"{auth.url}/service/{service_id!s}", + method="DELETE", + force_basic_auth=True, + url_username=auth.user, + url_password=auth.password, + ) except Exception as e: module.fail_json(msg=f"Service deletion has failed. 
Error message: {e}") @@ -614,11 +684,17 @@ def get_template_by_name(module, auth, template_name): def get_template_by_id(module, auth, template_id): - return get_template(module, auth, lambda template: (int(template["ID"]) == int(template_id))) if template_id else None + return ( + get_template(module, auth, lambda template: (int(template["ID"]) == int(template_id))) if template_id else None + ) def get_template_id(module, auth, requested_id, requested_name): - template = get_template_by_id(module, auth, requested_id) if requested_id else get_template_by_name(module, auth, requested_name) + template = ( + get_template_by_id(module, auth, requested_id) + if requested_id + else get_template_by_name(module, auth, requested_name) + ) if template: return template["ID"] @@ -636,24 +712,26 @@ def get_service_id_by_name(module, auth, service_name): def get_connection_info(module): - url = module.params.get('api_url') - username = module.params.get('api_username') - password = module.params.get('api_password') + url = module.params.get("api_url") + username = module.params.get("api_username") + password = module.params.get("api_password") if not url: - url = os.environ.get('ONEFLOW_URL') + url = os.environ.get("ONEFLOW_URL") if not username: - username = os.environ.get('ONEFLOW_USERNAME') + username = os.environ.get("ONEFLOW_USERNAME") if not password: - password = os.environ.get('ONEFLOW_PASSWORD') + password = os.environ.get("ONEFLOW_PASSWORD") if not (url and username and password): - module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") + module.fail_json( + msg="One or more connection parameters (api_url, api_username, api_password) were not specified" + ) from collections import namedtuple - auth_params = namedtuple('auth', ('url', 'user', 'password')) + auth_params = namedtuple("auth", ("url", "user", "password")) return auth_params(url=url, user=username, password=password) @@ -667,11 +745,7 @@ def main(): "service_id": {"required": False, "type": "int"}, "template_name": {"required": False, "type": "str"}, "template_id": {"required": False, "type": "int"}, - "state": { - "default": "present", - "choices": ['present', 'absent'], - "type": "str" - }, + "state": {"default": "present", "choices": ["present", "absent"], "type": "str"}, "mode": {"required": False, "type": "str"}, "owner_id": {"required": False, "type": "int"}, "group_id": {"required": False, "type": "int"}, @@ -681,38 +755,40 @@ def main(): "custom_attrs": {"default": {}, "type": "dict"}, "role": {"required": False, "type": "str"}, "cardinality": {"required": False, "type": "int"}, - "force": {"default": False, "type": "bool"} + "force": {"default": False, "type": "bool"}, } - module = AnsibleModule(argument_spec=fields, - mutually_exclusive=[ - ['template_id', 'template_name', 'service_id'], - ['service_id', 'service_name'], - ['template_id', 'template_name', 'role'], - ['template_id', 'template_name', 'cardinality'], - ['service_id', 'custom_attrs'] - ], - required_together=[['role', 'cardinality']], - supports_check_mode=True) + module = AnsibleModule( + argument_spec=fields, + mutually_exclusive=[ + ["template_id", "template_name", "service_id"], + ["service_id", "service_name"], + ["template_id", "template_name", "role"], + ["template_id", "template_name", "cardinality"], + ["service_id", "custom_attrs"], + ], + required_together=[["role", "cardinality"]], + supports_check_mode=True, + ) auth = get_connection_info(module) params = module.params - service_name = 
params.get('service_name') - service_id = params.get('service_id') - - requested_template_id = params.get('template_id') - requested_template_name = params.get('template_name') - state = params.get('state') - permissions = params.get('mode') - owner_id = params.get('owner_id') - group_id = params.get('group_id') - unique = params.get('unique') - wait = params.get('wait') - wait_timeout = params.get('wait_timeout') - custom_attrs = params.get('custom_attrs') - role = params.get('role') - cardinality = params.get('cardinality') - force = params.get('force') + service_name = params.get("service_name") + service_id = params.get("service_id") + + requested_template_id = params.get("template_id") + requested_template_name = params.get("template_name") + state = params.get("state") + permissions = params.get("mode") + owner_id = params.get("owner_id") + group_id = params.get("group_id") + unique = params.get("unique") + wait = params.get("wait") + wait_timeout = params.get("wait_timeout") + custom_attrs = params.get("custom_attrs") + role = params.get("role") + cardinality = params.get("cardinality") + force = params.get("force") template_id = None @@ -727,12 +803,23 @@ def main(): if unique and not service_name: module.fail_json(msg="You cannot use unique without passing service_name!") - if template_id and state == 'absent': + if template_id and state == "absent": module.fail_json(msg="State absent is not valid for template") - if template_id and state == 'present': # Instantiate a service - result = create_service_and_operation(module, auth, template_id, service_name, owner_id, - group_id, permissions, custom_attrs, unique, wait, wait_timeout) + if template_id and state == "present": # Instantiate a service + result = create_service_and_operation( + module, + auth, + template_id, + service_name, + owner_id, + group_id, + permissions, + custom_attrs, + unique, + wait, + wait_timeout, + ) else: if not (service_id or service_name): module.fail_json(msg="To manage the service at least the service id or service name should be specified!") @@ -742,16 +829,18 @@ def main(): if not service_id: service_id = get_service_id_by_name(module, auth, service_name) # The task should be failed when we want to manage a non-existent service identified by its name - if not service_id and state == 'present': + if not service_id and state == "present": module.fail_json(msg=f"There is no service with name: {service_name}") - if state == 'absent': + if state == "absent": result = delete_service(module, auth, service_id) else: - result = service_operation(module, auth, service_id, owner_id, group_id, permissions, role, cardinality, force, wait, wait_timeout) + result = service_operation( + module, auth, service_id, owner_id, group_id, permissions, role, cardinality, force, wait, wait_timeout + ) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/one_template.py b/plugins/modules/one_template.py index 7864a813863..df031cee90f 100644 --- a/plugins/modules/one_template.py +++ b/plugins/modules/one_template.py @@ -161,49 +161,47 @@ class TemplateModule(OpenNebulaModule): def __init__(self): argument_spec = dict( - id=dict(type='int'), - name=dict(type='str'), - state=dict(type='str', choices=['present', 'absent'], default='present'), - template=dict(type='str'), - filter=dict(type='str', choices=['user_primary_group', 'user', 'all', 'user_groups'], default='user'), + id=dict(type="int"), + name=dict(type="str"), + state=dict(type="str", choices=["present", 
"absent"], default="present"), + template=dict(type="str"), + filter=dict(type="str", choices=["user_primary_group", "user", "all", "user_groups"], default="user"), ) - mutually_exclusive = [ - ['id', 'name'] - ] + mutually_exclusive = [["id", "name"]] - required_one_of = [('id', 'name')] + required_one_of = [("id", "name")] - required_if = [ - ['state', 'present', ['template']] - ] + required_if = [["state", "present", ["template"]]] - OpenNebulaModule.__init__(self, - argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive, - required_one_of=required_one_of, - required_if=required_if) + OpenNebulaModule.__init__( + self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of, + required_if=required_if, + ) def run(self, one, module, result): params = module.params - id = params.get('id') - name = params.get('name') - desired_state = params.get('state') - template_data = params.get('template') - filter = params.get('filter') + id = params.get("id") + name = params.get("name") + desired_state = params.get("state") + template_data = params.get("template") + filter = params.get("filter") self.result = {} template = self.get_template_instance(id, name, filter) needs_creation = False - if not template and desired_state != 'absent': + if not template and desired_state != "absent": if id: module.fail_json(msg=f"There is no template with id={id}") else: needs_creation = True - if desired_state == 'absent': + if desired_state == "absent": self.result = self.delete_template(template) else: if needs_creation: @@ -218,7 +216,7 @@ def get_template(self, predicate, filter): # Issue: https://github.com/ansible-collections/community.general/issues/9278 # PR: https://github.com/ansible-collections/community.general/pull/9547 # the other two parameters are used for pagination, -1 for both essentially means "return all" - filter_values = {'user_primary_group': -4, 'user': -3, 'all': -2, 'user_groups': -1} + filter_values = {"user_primary_group": -4, "user": -3, "all": -2, "user_groups": -1} pool = self.one.templatepool.info(filter_values[filter], -1, -1) for template in pool.VMTEMPLATE: @@ -241,13 +239,13 @@ def get_template_instance(self, requested_id, requested_name, filter): def get_template_info(self, template): info = { - 'id': template.ID, - 'name': template.NAME, - 'template': template.TEMPLATE, - 'user_name': template.UNAME, - 'user_id': template.UID, - 'group_name': template.GNAME, - 'group_id': template.GID, + "id": template.ID, + "name": template.NAME, + "template": template.TEMPLATE, + "user_name": template.UNAME, + "user_id": template.UID, + "group_name": template.GNAME, + "group_id": template.GID, } return info @@ -257,7 +255,7 @@ def create_template(self, name, template_data, filter): self.one.template.allocate(f'NAME = "{name}"\n{template_data}') result = self.get_template_info(self.get_template_by_name(name, filter)) - result['changed'] = True + result["changed"] = True return result @@ -269,26 +267,26 @@ def update_template(self, template, template_data, filter): result = self.get_template_info(self.get_template_by_id(template.ID, filter)) if self.module.check_mode: # Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here. 
- result['changed'] = True + result["changed"] = True else: # if the previous parsed template data is not equal to the updated one, this has changed - result['changed'] = template.TEMPLATE != result['template'] + result["changed"] = template.TEMPLATE != result["template"] return result def delete_template(self, template): if not template: - return {'changed': False} + return {"changed": False} if not self.module.check_mode: self.one.template.delete(template.ID) - return {'changed': True} + return {"changed": True} def main(): TemplateModule().run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/one_vm.py b/plugins/modules/one_vm.py index d448a48d321..d76b54d01ef 100644 --- a/plugins/modules/one_vm.py +++ b/plugins/modules/one_vm.py @@ -677,6 +677,7 @@ try: import pyone + HAS_PYONE = True except ImportError: HAS_PYONE = False @@ -694,7 +695,17 @@ UPDATECONF_ATTRIBUTES = { "OS": ["ARCH", "MACHINE", "KERNEL", "INITRD", "BOOTLOADER", "BOOT", "SD_DISK_BUS", "UUID", "FIRMWARE"], "CPU_MODEL": ["MODEL", "FEATURES"], - "FEATURES": ["ACPI", "PAE", "APIC", "LOCALTIME", "HYPERV", "GUEST_AGENT", "VIRTIO_BLK_QUEUES", "VIRTIO_SCSI_QUEUES", "IOTHREADS"], + "FEATURES": [ + "ACPI", + "PAE", + "APIC", + "LOCALTIME", + "HYPERV", + "GUEST_AGENT", + "VIRTIO_BLK_QUEUES", + "VIRTIO_SCSI_QUEUES", + "IOTHREADS", + ], "INPUT": ["TYPE", "BUS"], "GRAPHICS": ["TYPE", "LISTEN", "PORT", "PASSWD", "KEYMAP", "COMMAND"], "VIDEO": ["ATS", "IOMMU", "RESOLUTION", "TYPE", "VRAM"], @@ -705,7 +716,7 @@ def check_updateconf(module, to_check): - '''Checks if attributes are compatible with one.vm.updateconf API call.''' + """Checks if attributes are compatible with one.vm.updateconf API call.""" for attr, subattributes in to_check.items(): if attr not in UPDATECONF_ATTRIBUTES: module.fail_json(msg=f"'{attr}' is not a valid VM attribute.") @@ -717,7 +728,7 @@ def check_updateconf(module, to_check): def parse_updateconf(vm_template): - '''Extracts 'updateconf' attributes from a VM template.''' + """Extracts 'updateconf' attributes from a VM template.""" updateconf = {} for attr, subattributes in vm_template.items(): if attr not in UPDATECONF_ATTRIBUTES: @@ -733,12 +744,11 @@ def parse_updateconf(vm_template): def get_template(module, client, predicate): - pool = client.templatepool.info(-2, -1, -1, -1) # Filter -2 means fetch all templates user can Use found = 0 found_template = None - template_name = '' + template_name = "" for template in pool.VMTEMPLATE: if predicate(template): @@ -762,7 +772,11 @@ def get_template_by_id(module, client, template_id): def get_template_id(module, client, requested_id, requested_name): - template = get_template_by_id(module, client, requested_id) if requested_id is not None else get_template_by_name(module, client, requested_name) + template = ( + get_template_by_id(module, client, requested_id) + if requested_id is not None + else get_template_by_name(module, client, requested_name) + ) if template: return template.ID else: @@ -773,7 +787,7 @@ def get_datastore(module, client, predicate): pool = client.datastorepool.info() found = 0 found_datastore = None - datastore_name = '' + datastore_name = "" for datastore in pool.DATASTORE: if predicate(datastore): @@ -797,7 +811,11 @@ def get_datastore_by_id(module, client, datastore_id): def get_datastore_id(module, client, requested_id, requested_name): - datastore = get_datastore_by_id(module, client, requested_id) if requested_id else get_datastore_by_name(module, client, requested_name) + datastore = ( 
+ get_datastore_by_id(module, client, requested_id) + if requested_id + else get_datastore_by_name(module, client, requested_name) + ) if datastore: return datastore.ID else: @@ -817,7 +835,7 @@ def get_vms_by_ids(module, client, state, ids): for vm_id in ids: vm = get_vm_by_id(client, vm_id) - if vm is None and state != 'absent': + if vm is None and state != "absent": module.fail_json(msg=f"There is no VM with id={vm_id}") vms.append(vm) @@ -825,49 +843,51 @@ def get_vms_by_ids(module, client, state, ids): def get_vm_info(client, vm): - vm = client.vm.info(vm.ID) networks_info = [] disk_size = [] - if 'DISK' in vm.TEMPLATE: - if isinstance(vm.TEMPLATE['DISK'], list): - for disk in vm.TEMPLATE['DISK']: + if "DISK" in vm.TEMPLATE: + if isinstance(vm.TEMPLATE["DISK"], list): + for disk in vm.TEMPLATE["DISK"]: disk_size.append(f"{disk['SIZE']} MB") else: disk_size.append(f"{vm.TEMPLATE['DISK']['SIZE']} MB") - if 'NIC' in vm.TEMPLATE: - if isinstance(vm.TEMPLATE['NIC'], list): - for nic in vm.TEMPLATE['NIC']: - networks_info.append({ - 'ip': nic.get('IP', ''), - 'mac': nic.get('MAC', ''), - 'name': nic.get('NETWORK', ''), - 'security_groups': nic.get('SECURITY_GROUPS', '') - }) + if "NIC" in vm.TEMPLATE: + if isinstance(vm.TEMPLATE["NIC"], list): + for nic in vm.TEMPLATE["NIC"]: + networks_info.append( + { + "ip": nic.get("IP", ""), + "mac": nic.get("MAC", ""), + "name": nic.get("NETWORK", ""), + "security_groups": nic.get("SECURITY_GROUPS", ""), + } + ) else: - networks_info.append({ - 'ip': vm.TEMPLATE['NIC'].get('IP', ''), - 'mac': vm.TEMPLATE['NIC'].get('MAC', ''), - 'name': vm.TEMPLATE['NIC'].get('NETWORK', ''), - 'security_groups': - vm.TEMPLATE['NIC'].get('SECURITY_GROUPS', '') - }) + networks_info.append( + { + "ip": vm.TEMPLATE["NIC"].get("IP", ""), + "mac": vm.TEMPLATE["NIC"].get("MAC", ""), + "name": vm.TEMPLATE["NIC"].get("NETWORK", ""), + "security_groups": vm.TEMPLATE["NIC"].get("SECURITY_GROUPS", ""), + } + ) import time current_time = time.localtime() vm_start_time = time.localtime(vm.STIME) vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time) - vm_uptime /= (60 * 60) + vm_uptime /= 60 * 60 permissions_str = parse_vm_permissions(client, vm) # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE vm_lcm_state = None - if vm.STATE == VM_STATES.index('ACTIVE'): + if vm.STATE == VM_STATES.index("ACTIVE"): vm_lcm_state = LCM_STATES[vm.LCM_STATE] vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID) @@ -875,25 +895,25 @@ def get_vm_info(client, vm): updateconf = parse_updateconf(vm.TEMPLATE) info = { - 'template_id': int(vm.TEMPLATE['TEMPLATE_ID']), - 'vm_id': vm.ID, - 'vm_name': vm.NAME, - 'state': VM_STATES[vm.STATE], - 'lcm_state': vm_lcm_state, - 'owner_name': vm.UNAME, - 'owner_id': vm.UID, - 'networks': networks_info, - 'disk_size': disk_size, - 'memory': f"{vm.TEMPLATE['MEMORY']} MB", - 'vcpu': vm.TEMPLATE['VCPU'], - 'cpu': vm.TEMPLATE['CPU'], - 'group_name': vm.GNAME, - 'group_id': vm.GID, - 'uptime_h': int(vm_uptime), - 'attributes': vm_attributes, - 'mode': permissions_str, - 'labels': vm_labels, - 'updateconf': updateconf, + "template_id": int(vm.TEMPLATE["TEMPLATE_ID"]), + "vm_id": vm.ID, + "vm_name": vm.NAME, + "state": VM_STATES[vm.STATE], + "lcm_state": vm_lcm_state, + "owner_name": vm.UNAME, + "owner_id": vm.UID, + "networks": networks_info, + "disk_size": disk_size, + "memory": f"{vm.TEMPLATE['MEMORY']} MB", + "vcpu": vm.TEMPLATE["VCPU"], + "cpu": vm.TEMPLATE["CPU"], + "group_name": vm.GNAME, + "group_id": vm.GID, 
+ "uptime_h": int(vm_uptime), + "attributes": vm_attributes, + "mode": permissions_str, + "labels": vm_labels, + "updateconf": updateconf, } return info @@ -924,9 +944,21 @@ def set_vm_permissions(module, client, vms, permissions): mode_bits = [int(d) for d in permissions_str] try: client.vm.chmod( - vm.ID, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3], mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8]) + vm.ID, + mode_bits[0], + mode_bits[1], + mode_bits[2], + mode_bits[3], + mode_bits[4], + mode_bits[5], + mode_bits[6], + mode_bits[7], + mode_bits[8], + ) except pyone.OneAuthorizationException: - module.fail_json(msg="Permissions changing is unsuccessful, but instances are present if you deployed them.") + module.fail_json( + msg="Permissions changing is unsuccessful, but instances are present if you deployed them." + ) return changed @@ -947,7 +979,9 @@ def set_vm_ownership(module, client, vms, owner_id, group_id): try: client.vm.chown(vm.ID, owner_id, group_id) except pyone.OneAuthorizationException: - module.fail_json(msg="Ownership changing is unsuccessful, but instances are present if you deployed them.") + module.fail_json( + msg="Ownership changing is unsuccessful, but instances are present if you deployed them." + ) return changed @@ -975,13 +1009,12 @@ def update_vms(module, client, vms, *args): def get_size_in_MB(module, size_str): - - SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB'] + SYMBOLS = ["B", "KB", "MB", "GB", "TB"] s = size_str init = size_str num = "" - while s and s[0:1].isdigit() or s[0:1] == '.': + while s and s[0:1].isdigit() or s[0:1] == ".": num += s[0] s = s[1:] num = float(num) @@ -990,7 +1023,7 @@ def get_size_in_MB(module, size_str): if symbol not in SYMBOLS: module.fail_json(msg=f"Cannot interpret {init!r} {symbol!r} {num}") - prefix = {'B': 1} + prefix = {"B": 1} for i, s in enumerate(SYMBOLS[1:]): prefix[s] = 1 << (i + 1) * 10 @@ -1001,13 +1034,24 @@ def get_size_in_MB(module, size_str): return size_in_MB -def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent, updateconf_dict): +def create_vm( + module, + client, + template_id, + attributes_dict, + labels_list, + disk_size, + network_attrs_list, + vm_start_on_hold, + vm_persistent, + updateconf_dict, +): if attributes_dict: - vm_name = attributes_dict.get('NAME', '') + vm_name = attributes_dict.get("NAME", "") template = client.template.info(template_id).TEMPLATE - disk_count = len(flatten(template.get('DISK', []))) + disk_count = len(flatten(template.get("DISK", []))) if disk_size: size_count = len(flatten(disk_size)) # check if the number of disks is correct @@ -1015,28 +1059,37 @@ def create_vm(module, client, template_id, attributes_dict, labels_list, disk_si module.fail_json(msg=f"This template has {disk_count} disks but you defined {size_count}") vm_extra_template = dict_merge(template or {}, attributes_dict or {}) - vm_extra_template = dict_merge(vm_extra_template, { - 'LABELS': ','.join(labels_list), - 'NIC': flatten(network_attrs_list, extract=True), - 'DISK': flatten([ - disk if not size else dict_merge(disk, { - 'SIZE': str(int(get_size_in_MB(module, size))), - }) - for disk, size in zip( - flatten(template.get('DISK', [])), - flatten(disk_size or [None] * disk_count), - ) - if disk is not None - ], extract=True) - }) + vm_extra_template = dict_merge( + vm_extra_template, + { + "LABELS": ",".join(labels_list), + "NIC": flatten(network_attrs_list, extract=True), + "DISK": flatten( + [ + disk + 
if not size + else dict_merge( + disk, + { + "SIZE": str(int(get_size_in_MB(module, size))), + }, + ) + for disk, size in zip( + flatten(template.get("DISK", [])), + flatten(disk_size or [None] * disk_count), + ) + if disk is not None + ], + extract=True, + ), + }, + ) vm_extra_template = dict_merge(vm_extra_template, updateconf_dict or {}) try: - vm_id = client.template.instantiate(template_id, - vm_name, - vm_start_on_hold, - render(vm_extra_template), - vm_persistent) + vm_id = client.template.instantiate( + template_id, vm_name, vm_start_on_hold, render(vm_extra_template), vm_persistent + ) except pyone.OneException as e: module.fail_json(msg=str(e)) @@ -1062,11 +1115,11 @@ def get_vm_labels_and_attributes_dict(client, vm_id): labels_list = [] for key, value in vm_USER_TEMPLATE.items(): - if key != 'LABELS': + if key != "LABELS": attrs_dict[key] = value else: if key is not None and value is not None: - labels_list = value.split(',') + labels_list = value.split(",") return labels_list, attrs_dict @@ -1074,18 +1127,18 @@ def get_vm_labels_and_attributes_dict(client, vm_id): def get_all_vms_by_attributes(client, attributes_dict, labels_list): pool = client.vmpool.info(-2, -1, -1, -1).VM vm_list = [] - name = '' + name = "" if attributes_dict: - name = attributes_dict.pop('NAME', '') + name = attributes_dict.pop("NAME", "") - if name != '': - base_name = name[:len(name) - name.count('#')] + if name != "": + base_name = name[: len(name) - name.count("#")] # Check does the name have indexed format - with_hash = name.endswith('#') + with_hash = name.endswith("#") for vm in pool: if vm.NAME.startswith(base_name): - if with_hash and vm.NAME[len(base_name):].isdigit(): + if with_hash and vm.NAME[len(base_name) :].isdigit(): # If the name has indexed format and after base_name it has only digits it'll be matched vm_list.append(vm) elif not with_hash and vm.NAME == name: @@ -1123,28 +1176,39 @@ def get_all_vms_by_attributes(client, attributes_dict, labels_list): return vm_list -def create_count_of_vms(module, client, - template_id, count, - attributes_dict, labels_list, disk_size, network_attrs_list, - wait, wait_timeout, vm_start_on_hold, vm_persistent, updateconf_dict): +def create_count_of_vms( + module, + client, + template_id, + count, + attributes_dict, + labels_list, + disk_size, + network_attrs_list, + wait, + wait_timeout, + vm_start_on_hold, + vm_persistent, + updateconf_dict, +): new_vms_list = [] - vm_name = '' + vm_name = "" if attributes_dict: - vm_name = attributes_dict.get('NAME', '') + vm_name = attributes_dict.get("NAME", "") if module.check_mode: return True, [], [] # Create list of used indexes vm_filled_indexes_list = None - num_sign_cnt = vm_name.count('#') - if vm_name != '' and num_sign_cnt > 0: - vm_list = get_all_vms_by_attributes(client, {'NAME': vm_name}, None) - base_name = vm_name[:len(vm_name) - num_sign_cnt] + num_sign_cnt = vm_name.count("#") + if vm_name != "" and num_sign_cnt > 0: + vm_list = get_all_vms_by_attributes(client, {"NAME": vm_name}, None) + base_name = vm_name[: len(vm_name) - num_sign_cnt] vm_name = base_name # Make list which contains used indexes in format ['000', '001',...] 
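# Editor's note -- a minimal runnable sketch (not part of this diff) of the
# '#'-suffix naming scheme handled just below; the name "web##" and the
# index values are illustrative assumptions, not taken from the module.
name = "web##"                                 # trailing '#'s request an index
num_sign_cnt = name.count("#")                 # 2 -> indexes zero-padded to 2 digits
base_name = name[: len(name) - num_sign_cnt]   # "web"
# VMs already named "web00" and "web01" yield used indexes ["00", "01"];
# a fresh index is rendered with the same zfill width before being appended:
print(base_name + str(2).zfill(num_sign_cnt))  # -> "web02"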
- vm_filled_indexes_list = [vm.NAME[len(base_name):].zfill(num_sign_cnt) for vm in vm_list] + vm_filled_indexes_list = [vm.NAME[len(base_name) :].zfill(num_sign_cnt) for vm in vm_list] while count > 0: new_vm_name = vm_name @@ -1154,11 +1218,20 @@ def create_count_of_vms(module, client, vm_filled_indexes_list.append(next_index) new_vm_name += next_index # Update NAME value in the attributes in case there is index - attributes_dict['NAME'] = new_vm_name - new_vm_dict = create_vm(module, client, - template_id, attributes_dict, labels_list, disk_size, network_attrs_list, - vm_start_on_hold, vm_persistent, updateconf_dict) - new_vm_id = new_vm_dict.get('vm_id') + attributes_dict["NAME"] = new_vm_name + new_vm_dict = create_vm( + module, + client, + template_id, + attributes_dict, + labels_list, + disk_size, + network_attrs_list, + vm_start_on_hold, + vm_persistent, + updateconf_dict, + ) + new_vm_id = new_vm_dict.get("vm_id") new_vm = get_vm_by_id(client, new_vm_id) new_vms_list.append(new_vm) count -= 1 @@ -1175,10 +1248,24 @@ def create_count_of_vms(module, client, return True, new_vms_list, [] -def create_exact_count_of_vms(module, client, - template_id, exact_count, attributes_dict, count_attributes_dict, - labels_list, count_labels_list, disk_size, network_attrs_list, - hard, wait, wait_timeout, vm_start_on_hold, vm_persistent, updateconf_dict): +def create_exact_count_of_vms( + module, + client, + template_id, + exact_count, + attributes_dict, + count_attributes_dict, + labels_list, + count_labels_list, + disk_size, + network_attrs_list, + hard, + wait, + wait_timeout, + vm_start_on_hold, + vm_persistent, + updateconf_dict, +): vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list) vm_count_diff = exact_count - len(vm_list) @@ -1193,9 +1280,21 @@ def create_exact_count_of_vms(module, client, if vm_count_diff > 0: # Add more VMs - changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict, - labels_list, disk_size, network_attrs_list, wait, wait_timeout, - vm_start_on_hold, vm_persistent, updateconf_dict) + changed, instances_list, tagged_instances = create_count_of_vms( + module, + client, + template_id, + vm_count_diff, + attributes_dict, + labels_list, + disk_size, + network_attrs_list, + wait, + wait_timeout, + vm_start_on_hold, + vm_persistent, + updateconf_dict, + ) tagged_instances_list += instances_list elif vm_count_diff < 0: @@ -1220,16 +1319,57 @@ def create_exact_count_of_vms(module, client, return changed, instances_list, tagged_instances_list -VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE'] -LCM_STATES = ['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP', - 'SAVE_SUSPEND', 'SAVE_MIGRATE', 'PROLOG_MIGRATE', 'PROLOG_RESUME', - 'EPILOG_STOP', 'EPILOG', 'SHUTDOWN', 'STATE13', 'STATE14', 'CLEANUP_RESUBMIT', 'UNKNOWN', 'HOTPLUG', 'SHUTDOWN_POWEROFF', - 'BOOT_UNKNOWN', 'BOOT_POWEROFF', 'BOOT_SUSPENDED', 'BOOT_STOPPED', 'CLEANUP_DELETE', 'HOTPLUG_SNAPSHOT', 'HOTPLUG_NIC', - 'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPULG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY'] +VM_STATES = [ + "INIT", + "PENDING", + "HOLD", + "ACTIVE", + "STOPPED", + "SUSPENDED", + "DONE", + "", + "POWEROFF", + "UNDEPLOYED", + "CLONING", + "CLONING_FAILURE", +] +LCM_STATES = [ + "LCM_INIT", + "PROLOG", + "BOOT", + "RUNNING", + "MIGRATE", + "SAVE_STOP", + "SAVE_SUSPEND", + "SAVE_MIGRATE", + "PROLOG_MIGRATE", + 
"PROLOG_RESUME", + "EPILOG_STOP", + "EPILOG", + "SHUTDOWN", + "STATE13", + "STATE14", + "CLEANUP_RESUBMIT", + "UNKNOWN", + "HOTPLUG", + "SHUTDOWN_POWEROFF", + "BOOT_UNKNOWN", + "BOOT_POWEROFF", + "BOOT_SUSPENDED", + "BOOT_STOPPED", + "CLEANUP_DELETE", + "HOTPLUG_SNAPSHOT", + "HOTPLUG_NIC", + "HOTPLUG_SAVEAS", + "HOTPLUG_SAVEAS_POWEROFF", + "HOTPULG_SAVEAS_SUSPENDED", + "SHUTDOWN_UNDEPLOY", +] def wait_for_state(module, client, vm, wait_timeout, state_predicate): import time + start_time = time.time() while (time.time() - start_time) < wait_timeout: @@ -1239,8 +1379,14 @@ def wait_for_state(module, client, vm, wait_timeout, state_predicate): if state_predicate(state, lcm_state): return vm - elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'), - VM_STATES.index('ACTIVE'), VM_STATES.index('CLONING'), VM_STATES.index('POWEROFF')]: + elif state not in [ + VM_STATES.index("INIT"), + VM_STATES.index("PENDING"), + VM_STATES.index("HOLD"), + VM_STATES.index("ACTIVE"), + VM_STATES.index("CLONING"), + VM_STATES.index("POWEROFF"), + ]: module.fail_json(msg=f"Action is unsuccessful. VM state: {VM_STATES[state]}") time.sleep(1) @@ -1249,20 +1395,31 @@ def wait_for_state(module, client, vm, wait_timeout, state_predicate): def wait_for_running(module, client, vm, wait_timeout): - return wait_for_state(module, client, vm, wait_timeout, lambda state, - lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')])) + return wait_for_state( + module, + client, + vm, + wait_timeout, + lambda state, lcm_state: (state in [VM_STATES.index("ACTIVE")] and lcm_state in [LCM_STATES.index("RUNNING")]), + ) def wait_for_done(module, client, vm, wait_timeout): - return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')])) + return wait_for_state( + module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index("DONE")]) + ) def wait_for_hold(module, client, vm, wait_timeout): - return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('HOLD')])) + return wait_for_state( + module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index("HOLD")]) + ) def wait_for_poweroff(module, client, vm, wait_timeout): - return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')])) + return wait_for_state( + module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index("POWEROFF")]) + ) def terminate_vm(module, client, vm, hard=False): @@ -1275,9 +1432,9 @@ def terminate_vm(module, client, vm, hard=False): if not module.check_mode: if hard: - client.vm.action('terminate-hard', vm.ID) + client.vm.action("terminate-hard", vm.ID) else: - client.vm.action('terminate', vm.ID) + client.vm.action("terminate", vm.ID) return changed @@ -1298,14 +1455,16 @@ def poweroff_vm(module, client, vm, hard): lcm_state = vm.LCM_STATE state = vm.STATE - if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]: + if lcm_state not in [LCM_STATES.index("SHUTDOWN"), LCM_STATES.index("SHUTDOWN_POWEROFF")] and state not in [ + VM_STATES.index("POWEROFF") + ]: changed = True if changed and not module.check_mode: if not hard: - client.vm.action('poweroff', vm.ID) + client.vm.action("poweroff", vm.ID) else: - client.vm.action('poweroff-hard', vm.ID) + 
client.vm.action("poweroff-hard", vm.ID) return changed @@ -1320,14 +1479,13 @@ def poweroff_vms(module, client, vms, hard): def reboot_vms(module, client, vms, wait_timeout, hard): - if not module.check_mode: # Firstly, power-off all instances for vm in vms: vm = client.vm.info(vm.ID) lcm_state = vm.LCM_STATE state = vm.STATE - if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]: + if lcm_state not in [LCM_STATES.index("SHUTDOWN_POWEROFF")] and state not in [VM_STATES.index("POWEROFF")]: poweroff_vm(module, client, vm, hard) # Wait for all to be power-off @@ -1345,19 +1503,21 @@ def resume_vm(module, client, vm): changed = False state = vm.STATE - if state in [VM_STATES.index('HOLD')]: + if state in [VM_STATES.index("HOLD")]: changed = release_vm(module, client, vm) return changed lcm_state = vm.LCM_STATE - if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'): - module.fail_json(msg="Cannot perform action 'resume' because this action is not available " - "for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shutdown properly") - if lcm_state not in [LCM_STATES.index('RUNNING')]: + if lcm_state == LCM_STATES.index("SHUTDOWN_POWEROFF"): + module.fail_json( + msg="Cannot perform action 'resume' because this action is not available " + "for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shutdown properly" + ) + if lcm_state not in [LCM_STATES.index("RUNNING")]: changed = True if changed and not module.check_mode: - client.vm.action('resume', vm.ID) + client.vm.action("resume", vm.ID) return changed @@ -1376,14 +1536,16 @@ def release_vm(module, client, vm): changed = False state = vm.STATE - if state != VM_STATES.index('HOLD'): - module.fail_json(msg="Cannot perform action 'release' because this action is not available " - "because VM is not in state 'HOLD'.") + if state != VM_STATES.index("HOLD"): + module.fail_json( + msg="Cannot perform action 'release' because this action is not available " + "because VM is not in state 'HOLD'." + ) else: changed = True if changed and not module.check_mode: - client.vm.action('release', vm.ID) + client.vm.action("release", vm.ID) return changed @@ -1391,14 +1553,35 @@ def release_vm(module, client, vm): def check_name_attribute(module, attributes): if attributes.get("NAME"): import re - if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None: - module.fail_json(msg=f"Illegal 'NAME' attribute: '{attributes.get('NAME')}" - "' .Signs '#' are allowed only at the end of the name and the name cannot contain only '#'.") + + if re.match(r"^[^#]+#*$", attributes.get("NAME")) is None: + module.fail_json( + msg=f"Illegal 'NAME' attribute: '{attributes.get('NAME')}" + "' .Signs '#' are allowed only at the end of the name and the name cannot contain only '#'." 
+ ) -TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DISK", "NIC", "INPUT", "GRAPHICS", - "CONTEXT", "CREATED_BY", "CPU_COST", "DISK_COST", "MEMORY_COST", - "TEMPLATE_ID", "VMID", "AUTOMATIC_DS_REQUIREMENTS", "DEPLOY_FOLDER", "LABELS"] +TEMPLATE_RESTRICTED_ATTRIBUTES = [ + "CPU", + "VCPU", + "OS", + "FEATURES", + "MEMORY", + "DISK", + "NIC", + "INPUT", + "GRAPHICS", + "CONTEXT", + "CREATED_BY", + "CPU_COST", + "DISK_COST", + "MEMORY_COST", + "TEMPLATE_ID", + "VMID", + "AUTOMATIC_DS_REQUIREMENTS", + "DEPLOY_FOLDER", + "LABELS", +] def check_attributes(module, attributes): @@ -1410,40 +1593,39 @@ def check_attributes(module, attributes): def disk_save_as(module, client, vm, disk_saveas, wait_timeout): - if not disk_saveas.get('name'): + if not disk_saveas.get("name"): module.fail_json(msg="Key 'name' is required for 'disk_saveas' option") - image_name = disk_saveas.get('name') - disk_id = disk_saveas.get('disk_id', 0) + image_name = disk_saveas.get("name") + disk_id = disk_saveas.get("disk_id", 0) if not module.check_mode: - if vm.STATE != VM_STATES.index('POWEROFF'): + if vm.STATE != VM_STATES.index("POWEROFF"): module.fail_json(msg="'disksaveas' option can be used only when the VM is in 'POWEROFF' state") try: - client.vm.disksaveas(vm.ID, disk_id, image_name, 'OS', -1) + client.vm.disksaveas(vm.ID, disk_id, image_name, "OS", -1) except pyone.OneException as e: module.fail_json(msg=str(e)) wait_for_poweroff(module, client, vm, wait_timeout) # wait for VM to leave the hotplug_saveas_poweroff state def get_connection_info(module): - - url = module.params.get('api_url') - username = module.params.get('api_username') - password = module.params.get('api_password') + url = module.params.get("api_url") + username = module.params.get("api_username") + password = module.params.get("api_password") if not url: - url = os.environ.get('ONE_URL') + url = os.environ.get("ONE_URL") if not username: - username = os.environ.get('ONE_USERNAME') + username = os.environ.get("ONE_USERNAME") if not password: - password = os.environ.get('ONE_PASSWORD') + password = os.environ.get("ONE_PASSWORD") if not username: if not password: - authfile = os.environ.get('ONE_AUTH') + authfile = os.environ.get("ONE_AUTH") if authfile is None: authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth") try: @@ -1459,7 +1641,7 @@ def get_connection_info(module): module.fail_json(msg="Opennebula API url (api_url) is not specified") from collections import namedtuple - auth_params = namedtuple('auth', ('url', 'username', 'password')) + auth_params = namedtuple("auth", ("url", "username", "password")) return auth_params(url=url, username=username, password=password) @@ -1469,14 +1651,14 @@ def main(): "api_url": {"required": False, "type": "str"}, "api_username": {"required": False, "type": "str"}, "api_password": {"required": False, "type": "str", "no_log": True}, - "instance_ids": {"required": False, "aliases": ['ids'], "type": "list", "elements": "int"}, + "instance_ids": {"required": False, "aliases": ["ids"], "type": "list", "elements": "int"}, "template_name": {"required": False, "type": "str"}, "template_id": {"required": False, "type": "int"}, "vm_start_on_hold": {"default": False, "type": "bool"}, "state": { "default": "present", - "choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'], - "type": "str" + "choices": ["present", "absent", "rebooted", "poweredoff", "running"], + "type": "str", }, "mode": {"required": False, "type": "str"}, "owner_id": {"required": False, 
"type": "int"}, @@ -1502,58 +1684,62 @@ def main(): "updateconf": {"type": "dict"}, } - module = AnsibleModule(argument_spec=fields, - mutually_exclusive=[ - ['template_id', 'template_name', 'instance_ids'], - ['template_id', 'template_name', 'disk_saveas'], - ['instance_ids', 'count_attributes', 'count'], - ['instance_ids', 'count_labels', 'count'], - ['instance_ids', 'exact_count'], - ['instance_ids', 'attributes'], - ['instance_ids', 'labels'], - ['disk_saveas', 'attributes'], - ['disk_saveas', 'labels'], - ['exact_count', 'count'], - ['count', 'hard'], - ['instance_ids', 'cpu'], ['instance_ids', 'vcpu'], - ['instance_ids', 'memory'], ['instance_ids', 'disk_size'], - ['instance_ids', 'networks'], - ['persistent', 'disk_size'] - ], - supports_check_mode=True) + module = AnsibleModule( + argument_spec=fields, + mutually_exclusive=[ + ["template_id", "template_name", "instance_ids"], + ["template_id", "template_name", "disk_saveas"], + ["instance_ids", "count_attributes", "count"], + ["instance_ids", "count_labels", "count"], + ["instance_ids", "exact_count"], + ["instance_ids", "attributes"], + ["instance_ids", "labels"], + ["disk_saveas", "attributes"], + ["disk_saveas", "labels"], + ["exact_count", "count"], + ["count", "hard"], + ["instance_ids", "cpu"], + ["instance_ids", "vcpu"], + ["instance_ids", "memory"], + ["instance_ids", "disk_size"], + ["instance_ids", "networks"], + ["persistent", "disk_size"], + ], + supports_check_mode=True, + ) if not HAS_PYONE: - module.fail_json(msg='This module requires pyone to work!') + module.fail_json(msg="This module requires pyone to work!") auth = get_connection_info(module) params = module.params - instance_ids = params.get('instance_ids') - requested_template_name = params.get('template_name') - requested_template_id = params.get('template_id') - put_vm_on_hold = params.get('vm_start_on_hold') - state = params.get('state') - permissions = params.get('mode') - owner_id = params.get('owner_id') - group_id = params.get('group_id') - wait = params.get('wait') - wait_timeout = params.get('wait_timeout') - hard = params.get('hard') - memory = params.get('memory') - cpu = params.get('cpu') - vcpu = params.get('vcpu') - disk_size = params.get('disk_size') - requested_datastore_id = params.get('datastore_id') - requested_datastore_name = params.get('datastore_name') - networks = params.get('networks') - count = params.get('count') - exact_count = params.get('exact_count') - attributes = params.get('attributes') - count_attributes = params.get('count_attributes') - labels = params.get('labels') - count_labels = params.get('count_labels') - disk_saveas = params.get('disk_saveas') - persistent = params.get('persistent') - updateconf = params.get('updateconf') + instance_ids = params.get("instance_ids") + requested_template_name = params.get("template_name") + requested_template_id = params.get("template_id") + put_vm_on_hold = params.get("vm_start_on_hold") + state = params.get("state") + permissions = params.get("mode") + owner_id = params.get("owner_id") + group_id = params.get("group_id") + wait = params.get("wait") + wait_timeout = params.get("wait_timeout") + hard = params.get("hard") + memory = params.get("memory") + cpu = params.get("cpu") + vcpu = params.get("vcpu") + disk_size = params.get("disk_size") + requested_datastore_id = params.get("datastore_id") + requested_datastore_name = params.get("datastore_name") + networks = params.get("networks") + count = params.get("count") + exact_count = params.get("exact_count") + attributes = 
params.get("attributes") + count_attributes = params.get("count_attributes") + labels = params.get("labels") + count_labels = params.get("count_labels") + disk_saveas = params.get("disk_saveas") + persistent = params.get("persistent") + updateconf = params.get("updateconf") if not (auth.username and auth.password): module.warn("Credentials missing") @@ -1568,7 +1754,10 @@ def main(): count_attributes = {key.upper(): value for key, value in count_attributes.items()} if not attributes: import copy - module.warn('When you pass `count_attributes` without `attributes` option when deploying, `attributes` option will have same values implicitly.') + + module.warn( + "When you pass `count_attributes` without `attributes` option when deploying, `attributes` option will have same values implicitly." + ) attributes = copy.copy(count_attributes) check_attributes(module, count_attributes) @@ -1576,7 +1765,9 @@ def main(): check_updateconf(module, updateconf) if count_labels and not labels: - module.warn('When you pass `count_labels` without `labels` option when deploying, `labels` option will have same values implicitly.') + module.warn( + "When you pass `count_labels` without `labels` option when deploying, `labels` option will have same values implicitly." + ) labels = count_labels # Fetch template @@ -1599,48 +1790,80 @@ def main(): elif requested_datastore_name: module.fail_json(msg=f"There is no datastore with name: {requested_datastore_name}") else: - attributes['SCHED_DS_REQUIREMENTS'] = f"ID={datastore_id}" + attributes["SCHED_DS_REQUIREMENTS"] = f"ID={datastore_id}" if exact_count and template_id is None: - module.fail_json(msg='Option `exact_count` needs template_id or template_name') + module.fail_json(msg="Option `exact_count` needs template_id or template_name") if exact_count is not None and not (count_attributes or count_labels): - module.fail_json(msg='Either `count_attributes` or `count_labels` has to be specified with option `exact_count`.') + module.fail_json( + msg="Either `count_attributes` or `count_labels` has to be specified with option `exact_count`." + ) if (count_attributes or count_labels) and exact_count is None: - module.fail_json(msg='Option `exact_count` has to be specified when either `count_attributes` or `count_labels` is used.') - if template_id is not None and state != 'present': + module.fail_json( + msg="Option `exact_count` has to be specified when either `count_attributes` or `count_labels` is used." 
+ ) + if template_id is not None and state != "present": module.fail_json(msg="Only state 'present' is valid for the template") if memory: - attributes['MEMORY'] = str(int(get_size_in_MB(module, memory))) + attributes["MEMORY"] = str(int(get_size_in_MB(module, memory))) if cpu: - attributes['CPU'] = str(cpu) + attributes["CPU"] = str(cpu) if vcpu: - attributes['VCPU'] = str(vcpu) + attributes["VCPU"] = str(vcpu) - if exact_count is not None and state != 'present': - module.fail_json(msg='The `exact_count` option is valid only for the `present` state') + if exact_count is not None and state != "present": + module.fail_json(msg="The `exact_count` option is valid only for the `present` state") if exact_count is not None and exact_count < 0: - module.fail_json(msg='`exact_count` cannot be less than 0') + module.fail_json(msg="`exact_count` cannot be less than 0") if count <= 0: - module.fail_json(msg='`count` has to be greater than 0') + module.fail_json(msg="`count` has to be greater than 0") if permissions is not None: import re + if re.match("^[0-7]{3}$", permissions) is None: module.fail_json(msg="Option `mode` has to have exactly 3 digits and be in the octet format e.g. 600") if exact_count is not None: # Deploy an exact count of VMs - changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes, - count_attributes, labels, count_labels, disk_size, - networks, hard, wait, wait_timeout, put_vm_on_hold, persistent, updateconf) + changed, instances_list, tagged_instances_list = create_exact_count_of_vms( + module, + one_client, + template_id, + exact_count, + attributes, + count_attributes, + labels, + count_labels, + disk_size, + networks, + hard, + wait, + wait_timeout, + put_vm_on_hold, + persistent, + updateconf, + ) vms = tagged_instances_list - elif template_id is not None and state == 'present': + elif template_id is not None and state == "present": # Deploy count VMs - changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count, - attributes, labels, disk_size, networks, wait, wait_timeout, - put_vm_on_hold, persistent, updateconf) + changed, instances_list, tagged_instances_list = create_count_of_vms( + module, + one_client, + template_id, + count, + attributes, + labels, + disk_size, + networks, + wait, + wait_timeout, + put_vm_on_hold, + persistent, + updateconf, + ) # instances_list - new instances # tagged_instances_list - all instances with specified `count_attributes` and `count_labels` vms = instances_list @@ -1650,10 +1873,14 @@ def main(): module.fail_json(msg="At least one of `instance_ids`,`attributes`,`labels` must be passed!") if memory or cpu or vcpu or disk_size or networks: - module.fail_json(msg="Parameters as `memory`, `cpu`, `vcpu`, `disk_size` and `networks` you can only set when deploying a VM!") + module.fail_json( + msg="Parameters as `memory`, `cpu`, `vcpu`, `disk_size` and `networks` you can only set when deploying a VM!" 
+ ) - if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']: - module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'") + if hard and state not in ["rebooted", "poweredoff", "absent", "present"]: + module.fail_json( + msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'" + ) vms = [] tagged = False @@ -1665,22 +1892,22 @@ def main(): tagged = True vms = get_all_vms_by_attributes(one_client, attributes, labels) - if len(vms) == 0 and state != 'absent' and state != 'present': - module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`') + if len(vms) == 0 and state != "absent" and state != "present": + module.fail_json(msg="There are no instances with specified `instance_ids`, `attributes` and/or `labels`") - if len(vms) == 0 and state == 'present' and not tagged: - module.fail_json(msg='There are no instances with specified `instance_ids`.') + if len(vms) == 0 and state == "present" and not tagged: + module.fail_json(msg="There are no instances with specified `instance_ids`.") - if tagged and state == 'absent': - module.fail_json(msg='Option `instance_ids` is required when state is `absent`.') + if tagged and state == "absent": + module.fail_json(msg="Option `instance_ids` is required when state is `absent`.") - if state == 'absent': + if state == "absent": changed = terminate_vms(module, one_client, vms, hard) - elif state == 'rebooted': + elif state == "rebooted": changed = reboot_vms(module, one_client, vms, wait_timeout, hard) - elif state == 'poweredoff': + elif state == "poweredoff": changed = poweroff_vms(module, one_client, vms, hard) - elif state == 'running': + elif state == "running": changed = resume_vms(module, one_client, vms) instances_list = vms @@ -1695,12 +1922,12 @@ def main(): if template_id is None and updateconf is not None: changed = update_vms(module, one_client, vms, updateconf) or changed - if wait and not module.check_mode and state != 'present': + if wait and not module.check_mode and state != "present": wait_for = { - 'absent': wait_for_done, - 'rebooted': wait_for_running, - 'poweredoff': wait_for_poweroff, - 'running': wait_for_running + "absent": wait_for_done, + "rebooted": wait_for_running, + "poweredoff": wait_for_poweroff, + "running": wait_for_running, } for vm in vms: if vm is not None: @@ -1718,10 +1945,15 @@ def main(): # tagged_instances - A list of instances info based on a specific attributes and/or labels that are specified with C(count_attributes) and C(count_labels) tagged_instances = list(get_vm_info(one_client, vm) for vm in tagged_instances_list if vm is not None) - result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances} + result = { + "changed": changed, + "instances": instances, + "instances_ids": instances_ids, + "tagged_instances": tagged_instances, + } module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/one_vnet.py b/plugins/modules/one_vnet.py index 53a394bc9df..db0a01b58b5 100644 --- a/plugins/modules/one_vnet.py +++ b/plugins/modules/one_vnet.py @@ -258,50 +258,47 @@ class NetworksModule(OpenNebulaModule): - def __init__(self): argument_spec = dict( - id=dict(type='int'), - name=dict(type='str'), - state=dict(type='str', choices=['present', 'absent'], default='present'), - template=dict(type='str'), + 
id=dict(type="int"), + name=dict(type="str"), + state=dict(type="str", choices=["present", "absent"], default="present"), + template=dict(type="str"), ) - mutually_exclusive = [ - ['id', 'name'] - ] + mutually_exclusive = [["id", "name"]] - required_one_of = [('id', 'name')] + required_one_of = [("id", "name")] - required_if = [ - ['state', 'present', ['template']] - ] + required_if = [["state", "present", ["template"]]] - OpenNebulaModule.__init__(self, - argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive, - required_one_of=required_one_of, - required_if=required_if) + OpenNebulaModule.__init__( + self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of, + required_if=required_if, + ) def run(self, one, module, result): params = module.params - id = params.get('id') - name = params.get('name') - desired_state = params.get('state') - template_data = params.get('template') + id = params.get("id") + name = params.get("name") + desired_state = params.get("state") + template_data = params.get("template") self.result = {} template = self.get_template_instance(id, name) needs_creation = False - if not template and desired_state != 'absent': + if not template and desired_state != "absent": if id: module.fail_json(msg=f"There is no template with id={id}") else: needs_creation = True - if desired_state == 'absent': + if desired_state == "absent": self.result = self.delete_template(template) else: if needs_creation: @@ -339,54 +336,56 @@ def get_networks_ar_pool(self, template): template_pool = template.AR_POOL.AR for ar in range(len(template_pool)): template_param = template_pool[ar] - ar_pool.append({ - # These params will always be present - 'ar_id': template_param.AR_ID, - 'mac': template_param.MAC, - 'size': template_param.SIZE, - 'type': template_param.TYPE, - # These are optional so firstly check for presence - # and if not present set value to Null - 'allocated': getattr(template_param, 'ALLOCATED', 'Null'), - 'ip': getattr(template_param, 'IP', 'Null'), - 'global_prefix': getattr(template_param, 'GLOBAL_PREFIX', 'Null'), - 'parent_network_ar_id': getattr(template_param, 'PARENT_NETWORK_AR_ID', 'Null'), - 'ula_prefix': getattr(template_param, 'ULA_PREFIX', 'Null'), - 'vn_mad': getattr(template_param, 'VN_MAD', 'Null'), - }) + ar_pool.append( + { + # These params will always be present + "ar_id": template_param.AR_ID, + "mac": template_param.MAC, + "size": template_param.SIZE, + "type": template_param.TYPE, + # These are optional so firstly check for presence + # and if not present set value to Null + "allocated": getattr(template_param, "ALLOCATED", "Null"), + "ip": getattr(template_param, "IP", "Null"), + "global_prefix": getattr(template_param, "GLOBAL_PREFIX", "Null"), + "parent_network_ar_id": getattr(template_param, "PARENT_NETWORK_AR_ID", "Null"), + "ula_prefix": getattr(template_param, "ULA_PREFIX", "Null"), + "vn_mad": getattr(template_param, "VN_MAD", "Null"), + } + ) return ar_pool def get_template_info(self, template): info = { - 'id': template.ID, - 'name': template.NAME, - 'template': template.TEMPLATE, - 'user_name': template.UNAME, - 'user_id': template.UID, - 'group_name': template.GNAME, - 'group_id': template.GID, - 'permissions': { - 'owner_u': template.PERMISSIONS.OWNER_U, - 'owner_m': template.PERMISSIONS.OWNER_M, - 'owner_a': template.PERMISSIONS.OWNER_A, - 'group_u': template.PERMISSIONS.GROUP_U, - 'group_m': template.PERMISSIONS.GROUP_M, - 'group_a': 
template.PERMISSIONS.GROUP_A, - 'other_u': template.PERMISSIONS.OTHER_U, - 'other_m': template.PERMISSIONS.OTHER_M, - 'other_a': template.PERMISSIONS.OTHER_A + "id": template.ID, + "name": template.NAME, + "template": template.TEMPLATE, + "user_name": template.UNAME, + "user_id": template.UID, + "group_name": template.GNAME, + "group_id": template.GID, + "permissions": { + "owner_u": template.PERMISSIONS.OWNER_U, + "owner_m": template.PERMISSIONS.OWNER_M, + "owner_a": template.PERMISSIONS.OWNER_A, + "group_u": template.PERMISSIONS.GROUP_U, + "group_m": template.PERMISSIONS.GROUP_M, + "group_a": template.PERMISSIONS.GROUP_A, + "other_u": template.PERMISSIONS.OTHER_U, + "other_m": template.PERMISSIONS.OTHER_M, + "other_a": template.PERMISSIONS.OTHER_A, }, - 'clusters': template.CLUSTERS.ID, - 'bridge': template.BRIDGE, - 'bride_type': template.BRIDGE_TYPE, - 'parent_network_id': template.PARENT_NETWORK_ID, - 'vn_mad': template.VN_MAD, - 'phydev': template.PHYDEV, - 'vlan_id': template.VLAN_ID, - 'outer_vlan_id': template.OUTER_VLAN_ID, - 'used_leases': template.USED_LEASES, - 'vrouters': template.VROUTERS.ID, - 'ar_pool': self.get_networks_ar_pool(template) + "clusters": template.CLUSTERS.ID, + "bridge": template.BRIDGE, + "bride_type": template.BRIDGE_TYPE, + "parent_network_id": template.PARENT_NETWORK_ID, + "vn_mad": template.VN_MAD, + "phydev": template.PHYDEV, + "vlan_id": template.VLAN_ID, + "outer_vlan_id": template.OUTER_VLAN_ID, + "used_leases": template.USED_LEASES, + "vrouters": template.VROUTERS.ID, + "ar_pool": self.get_networks_ar_pool(template), } return info @@ -397,7 +396,7 @@ def create_template(self, name, template_data): self.one.vn.allocate(f'NAME = "{name}"\n{template_data}', -1) result = self.get_template_info(self.get_template_by_name(name)) - result['changed'] = True + result["changed"] = True return result @@ -409,26 +408,26 @@ def update_template(self, template, template_data): result = self.get_template_info(self.get_template_by_id(template.ID)) if self.module.check_mode: # Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here. 
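# Editor's note -- a small sketch (not part of this diff) of the change
# detection used just below: outside check mode the previously parsed
# template dict is compared with the freshly fetched one, so equality is
# checked on parsed values rather than on raw template text. The dicts
# here are illustrative assumptions only.
before = {"BRIDGE": "br0", "VLAN_ID": "10"}    # template.TEMPLATE before the update
after = {"BRIDGE": "br0", "VLAN_ID": "20"}     # result["template"] after the update
print(before != after)                         # True -> changed; equal dicts -> False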
- result['changed'] = True + result["changed"] = True else: # if the previous parsed template data is not equal to the updated one, this has changed - result['changed'] = template.TEMPLATE != result['template'] + result["changed"] = template.TEMPLATE != result["template"] return result def delete_template(self, template): if not template: - return {'changed': False} + return {"changed": False} if not self.module.check_mode: self.one.vn.delete(template.ID) - return {'changed': True} + return {"changed": True} def main(): NetworksModule().run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneandone_firewall_policy.py b/plugins/modules/oneandone_firewall_policy.py index 8eff568b7ec..eeee60e7a99 100644 --- a/plugins/modules/oneandone_firewall_policy.py +++ b/plugins/modules/oneandone_firewall_policy.py @@ -206,7 +206,7 @@ get_firewall_policy, get_server, OneAndOneResources, - wait_for_resource_creation_completion + wait_for_resource_creation_completion, ) HAS_ONEANDONE_SDK = True @@ -219,9 +219,7 @@ def _check_mode(module, result): if module.check_mode: - module.exit_json( - changed=result - ) + module.exit_json(changed=result) def _add_server_ips(module, oneandone_conn, firewall_id, server_ids): @@ -234,8 +232,7 @@ def _add_server_ips(module, oneandone_conn, firewall_id, server_ids): for _server_id in server_ids: server = get_server(oneandone_conn, _server_id, True) attach_server = oneandone.client.AttachServer( - server_id=server['id'], - server_ip_id=next(iter(server['ips'] or []), None)['id'] + server_id=server["id"], server_ip_id=next(iter(server["ips"] or []), None)["id"] ) attach_servers.append(attach_server) @@ -245,8 +242,8 @@ def _add_server_ips(module, oneandone_conn, firewall_id, server_ids): return False firewall_policy = oneandone_conn.attach_server_firewall_policy( - firewall_id=firewall_id, - server_ips=attach_servers) + firewall_id=firewall_id, server_ips=attach_servers + ) return firewall_policy except Exception as e: module.fail_json(msg=str(e)) @@ -258,16 +255,12 @@ def _remove_firewall_server(module, oneandone_conn, firewall_id, server_ip_id): """ try: if module.check_mode: - firewall_server = oneandone_conn.get_firewall_server( - firewall_id=firewall_id, - server_ip_id=server_ip_id) + firewall_server = oneandone_conn.get_firewall_server(firewall_id=firewall_id, server_ip_id=server_ip_id) if firewall_server: return True return False - firewall_policy = oneandone_conn.remove_firewall_server( - firewall_id=firewall_id, - server_ip_id=server_ip_id) + firewall_policy = oneandone_conn.remove_firewall_server(firewall_id=firewall_id, server_ip_id=server_ip_id) return firewall_policy except Exception as e: module.fail_json(msg=str(e)) @@ -282,10 +275,8 @@ def _add_firewall_rules(module, oneandone_conn, firewall_id, rules): for rule in rules: firewall_rule = oneandone.client.FirewallPolicyRule( - protocol=rule['protocol'], - port_from=rule['port_from'], - port_to=rule['port_to'], - source=rule['source']) + protocol=rule["protocol"], port_from=rule["port_from"], port_to=rule["port_to"], source=rule["source"] + ) firewall_rules.append(firewall_rule) if module.check_mode: @@ -295,8 +286,7 @@ def _add_firewall_rules(module, oneandone_conn, firewall_id, rules): return False firewall_policy = oneandone_conn.add_firewall_policy_rule( - firewall_id=firewall_id, - firewall_policy_rules=firewall_rules + firewall_id=firewall_id, firewall_policy_rules=firewall_rules ) return firewall_policy except Exception as e: @@ -309,17 +299,12 @@ def 
_remove_firewall_rule(module, oneandone_conn, firewall_id, rule_id): """ try: if module.check_mode: - rule = oneandone_conn.get_firewall_policy_rule( - firewall_id=firewall_id, - rule_id=rule_id) + rule = oneandone_conn.get_firewall_policy_rule(firewall_id=firewall_id, rule_id=rule_id) if rule: return True return False - firewall_policy = oneandone_conn.remove_firewall_rule( - firewall_id=firewall_id, - rule_id=rule_id - ) + firewall_policy = oneandone_conn.remove_firewall_rule(firewall_id=firewall_id, rule_id=rule_id) return firewall_policy except Exception as e: module.fail_json(msg=str(e)) @@ -336,13 +321,13 @@ def update_firewall_policy(module, oneandone_conn): oneandone_conn: authenticated oneandone object """ try: - firewall_policy_id = module.params.get('firewall_policy') - name = module.params.get('name') - description = module.params.get('description') - add_server_ips = module.params.get('add_server_ips') - remove_server_ips = module.params.get('remove_server_ips') - add_rules = module.params.get('add_rules') - remove_rules = module.params.get('remove_rules') + firewall_policy_id = module.params.get("firewall_policy") + name = module.params.get("name") + description = module.params.get("description") + add_server_ips = module.params.get("add_server_ips") + remove_server_ips = module.params.get("remove_server_ips") + add_rules = module.params.get("add_rules") + remove_rules = module.params.get("remove_rules") changed = False @@ -353,43 +338,30 @@ def update_firewall_policy(module, oneandone_conn): if name or description: _check_mode(module, True) firewall_policy = oneandone_conn.modify_firewall( - firewall_id=firewall_policy['id'], - name=name, - description=description) + firewall_id=firewall_policy["id"], name=name, description=description + ) changed = True if add_server_ips: if module.check_mode: - _check_mode(module, _add_server_ips(module, - oneandone_conn, - firewall_policy['id'], - add_server_ips)) + _check_mode(module, _add_server_ips(module, oneandone_conn, firewall_policy["id"], add_server_ips)) - firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy['id'], add_server_ips) + firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy["id"], add_server_ips) changed = True if remove_server_ips: chk_changed = False for server_ip_id in remove_server_ips: if module.check_mode: - chk_changed |= _remove_firewall_server(module, - oneandone_conn, - firewall_policy['id'], - server_ip_id) - - _remove_firewall_server(module, - oneandone_conn, - firewall_policy['id'], - server_ip_id) + chk_changed |= _remove_firewall_server(module, oneandone_conn, firewall_policy["id"], server_ip_id) + + _remove_firewall_server(module, oneandone_conn, firewall_policy["id"], server_ip_id) _check_mode(module, chk_changed) - firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) + firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy["id"], True) changed = True if add_rules: - firewall_policy = _add_firewall_rules(module, - oneandone_conn, - firewall_policy['id'], - add_rules) + firewall_policy = _add_firewall_rules(module, oneandone_conn, firewall_policy["id"], add_rules) _check_mode(module, firewall_policy) changed = True @@ -397,17 +369,11 @@ def update_firewall_policy(module, oneandone_conn): chk_changed = False for rule_id in remove_rules: if module.check_mode: - chk_changed |= _remove_firewall_rule(module, - oneandone_conn, - firewall_policy['id'], - rule_id) - - _remove_firewall_rule(module, - oneandone_conn, - 
firewall_policy['id'], - rule_id) + chk_changed |= _remove_firewall_rule(module, oneandone_conn, firewall_policy["id"], rule_id) + + _remove_firewall_rule(module, oneandone_conn, firewall_policy["id"], rule_id) _check_mode(module, chk_changed) - firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) + firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy["id"], True) changed = True return (changed, firewall_policy) @@ -423,43 +389,34 @@ def create_firewall_policy(module, oneandone_conn): oneandone_conn: authenticated oneandone object """ try: - name = module.params.get('name') - description = module.params.get('description') - rules = module.params.get('rules') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') + name = module.params.get("name") + description = module.params.get("description") + rules = module.params.get("rules") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + wait_interval = module.params.get("wait_interval") firewall_rules = [] for rule in rules: firewall_rule = oneandone.client.FirewallPolicyRule( - protocol=rule['protocol'], - port_from=rule['port_from'], - port_to=rule['port_to'], - source=rule['source']) + protocol=rule["protocol"], port_from=rule["port_from"], port_to=rule["port_to"], source=rule["source"] + ) firewall_rules.append(firewall_rule) - firewall_policy_obj = oneandone.client.FirewallPolicy( - name=name, - description=description - ) + firewall_policy_obj = oneandone.client.FirewallPolicy(name=name, description=description) _check_mode(module, True) firewall_policy = oneandone_conn.create_firewall_policy( - firewall_policy=firewall_policy_obj, - firewall_policy_rules=firewall_rules + firewall_policy=firewall_policy_obj, firewall_policy_rules=firewall_rules ) if wait: wait_for_resource_creation_completion( - oneandone_conn, - OneAndOneResources.firewall_policy, - firewall_policy['id'], - wait_timeout, - wait_interval) + oneandone_conn, OneAndOneResources.firewall_policy, firewall_policy["id"], wait_timeout, wait_interval + ) - firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) # refresh + firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy["id"], True) # refresh changed = True if firewall_policy else False _check_mode(module, False) @@ -477,7 +434,7 @@ def remove_firewall_policy(module, oneandone_conn): oneandone_conn: authenticated oneandone object """ try: - fp_id = module.params.get('name') + fp_id = module.params.get("name") firewall_policy_id = get_firewall_policy(oneandone_conn, fp_id) if module.check_mode: if firewall_policy_id is None: @@ -487,10 +444,7 @@ def remove_firewall_policy(module, oneandone_conn): changed = True if firewall_policy else False - return (changed, { - 'id': firewall_policy['id'], - 'name': firewall_policy['name'] - }) + return (changed, {"id": firewall_policy["id"], "name": firewall_policy["name"]}) except Exception as e: module.fail_json(msg=str(e)) @@ -498,67 +452,59 @@ def remove_firewall_policy(module, oneandone_conn): def main(): module = AnsibleModule( argument_spec=dict( - auth_token=dict( - type='str', no_log=True, - default=os.environ.get('ONEANDONE_AUTH_TOKEN')), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - name=dict(type='str'), - firewall_policy=dict(type='str'), - description=dict(type='str'), - rules=dict(type='list', elements="dict", default=[]), - 
add_server_ips=dict(type='list', elements="str", default=[]), - remove_server_ips=dict(type='list', elements="str", default=[]), - add_rules=dict(type='list', elements="dict", default=[]), - remove_rules=dict(type='list', elements="str", default=[]), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + auth_token=dict(type="str", no_log=True, default=os.environ.get("ONEANDONE_AUTH_TOKEN")), + api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")), + name=dict(type="str"), + firewall_policy=dict(type="str"), + description=dict(type="str"), + rules=dict(type="list", elements="dict", default=[]), + add_server_ips=dict(type="list", elements="str", default=[]), + remove_server_ips=dict(type="list", elements="str", default=[]), + add_rules=dict(type="list", elements="dict", default=[]), + remove_rules=dict(type="list", elements="str", default=[]), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=600), + wait_interval=dict(type="int", default=5), + state=dict(type="str", default="present", choices=["present", "absent", "update"]), ), - supports_check_mode=True + supports_check_mode=True, ) if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') + module.fail_json(msg="1and1 required for this module") - if not module.params.get('auth_token'): - module.fail_json( - msg='The "auth_token" parameter or ONEANDONE_AUTH_TOKEN environment variable is required.') + if not module.params.get("auth_token"): + module.fail_json(msg='The "auth_token" parameter or ONEANDONE_AUTH_TOKEN environment variable is required.') - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) + if not module.params.get("api_url"): + oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token")) else: oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + api_token=module.params.get("auth_token"), api_url=module.params.get("api_url") + ) - state = module.params.get('state') + state = module.params.get("state") - if state == 'absent': - if not module.params.get('name'): - module.fail_json( - msg="'name' parameter is required to delete a firewall policy.") + if state == "absent": + if not module.params.get("name"): + module.fail_json(msg="'name' parameter is required to delete a firewall policy.") try: (changed, firewall_policy) = remove_firewall_policy(module, oneandone_conn) except Exception as e: module.fail_json(msg=str(e)) - elif state == 'update': - if not module.params.get('firewall_policy'): - module.fail_json( - msg="'firewall_policy' parameter is required to update a firewall policy.") + elif state == "update": + if not module.params.get("firewall_policy"): + module.fail_json(msg="'firewall_policy' parameter is required to update a firewall policy.") try: (changed, firewall_policy) = update_firewall_policy(module, oneandone_conn) except Exception as e: module.fail_json(msg=str(e)) - elif state == 'present': - for param in ('name', 'rules'): + elif state == "present": + for param in ("name", "rules"): if not module.params.get(param): - module.fail_json( - msg=f"{param} parameter is required for new firewall policies.") + module.fail_json(msg=f"{param} parameter is required for new firewall 
policies.") try: (changed, firewall_policy) = create_firewall_policy(module, oneandone_conn) except Exception as e: @@ -567,5 +513,5 @@ def main(): module.exit_json(changed=changed, firewall_policy=firewall_policy) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneandone_load_balancer.py b/plugins/modules/oneandone_load_balancer.py index 8bbe8646dd1..48f94686940 100644 --- a/plugins/modules/oneandone_load_balancer.py +++ b/plugins/modules/oneandone_load_balancer.py @@ -258,7 +258,7 @@ get_server, get_datacenter, OneAndOneResources, - wait_for_resource_creation_completion + wait_for_resource_creation_completion, ) HAS_ONEANDONE_SDK = True @@ -268,16 +268,14 @@ except ImportError: HAS_ONEANDONE_SDK = False -DATACENTERS = ['US', 'ES', 'DE', 'GB'] -HEALTH_CHECK_TESTS = ['NONE', 'TCP', 'HTTP', 'ICMP'] -METHODS = ['ROUND_ROBIN', 'LEAST_CONNECTIONS'] +DATACENTERS = ["US", "ES", "DE", "GB"] +HEALTH_CHECK_TESTS = ["NONE", "TCP", "HTTP", "ICMP"] +METHODS = ["ROUND_ROBIN", "LEAST_CONNECTIONS"] def _check_mode(module, result): if module.check_mode: - module.exit_json( - changed=result - ) + module.exit_json(changed=result) def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids): @@ -290,8 +288,7 @@ def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids): for server_id in server_ids: server = get_server(oneandone_conn, server_id, True) attach_server = oneandone.client.AttachServer( - server_id=server['id'], - server_ip_id=next(iter(server['ips'] or []), None)['id'] + server_id=server["id"], server_ip_id=next(iter(server["ips"] or []), None)["id"] ) attach_servers.append(attach_server) @@ -301,8 +298,8 @@ def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids): return False load_balancer = oneandone_conn.attach_load_balancer_server( - load_balancer_id=load_balancer_id, - server_ips=attach_servers) + load_balancer_id=load_balancer_id, server_ips=attach_servers + ) return load_balancer except Exception as ex: module.fail_json(msg=str(ex)) @@ -315,15 +312,15 @@ def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, serve try: if module.check_mode: lb_server = oneandone_conn.get_load_balancer_server( - load_balancer_id=load_balancer_id, - server_ip_id=server_ip_id) + load_balancer_id=load_balancer_id, server_ip_id=server_ip_id + ) if lb_server: return True return False load_balancer = oneandone_conn.remove_load_balancer_server( - load_balancer_id=load_balancer_id, - server_ip_id=server_ip_id) + load_balancer_id=load_balancer_id, server_ip_id=server_ip_id + ) return load_balancer except Exception as ex: module.fail_json(msg=str(ex)) @@ -338,10 +335,11 @@ def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules): for rule in rules: load_balancer_rule = oneandone.client.LoadBalancerRule( - protocol=rule['protocol'], - port_balancer=rule['port_balancer'], - port_server=rule['port_server'], - source=rule['source']) + protocol=rule["protocol"], + port_balancer=rule["port_balancer"], + port_server=rule["port_server"], + source=rule["source"], + ) load_balancer_rules.append(load_balancer_rule) if module.check_mode: @@ -351,8 +349,7 @@ def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules): return False load_balancer = oneandone_conn.add_load_balancer_rule( - load_balancer_id=load_balancer_id, - load_balancer_rules=load_balancer_rules + load_balancer_id=load_balancer_id, load_balancer_rules=load_balancer_rules ) return load_balancer @@ -366,17 +363,12 @@ 
def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id """ try: if module.check_mode: - rule = oneandone_conn.get_load_balancer_rule( - load_balancer_id=load_balancer_id, - rule_id=rule_id) + rule = oneandone_conn.get_load_balancer_rule(load_balancer_id=load_balancer_id, rule_id=rule_id) if rule: return True return False - load_balancer = oneandone_conn.remove_load_balancer_rule( - load_balancer_id=load_balancer_id, - rule_id=rule_id - ) + load_balancer = oneandone_conn.remove_load_balancer_rule(load_balancer_id=load_balancer_id, rule_id=rule_id) return load_balancer except Exception as ex: module.fail_json(msg=str(ex)) @@ -393,20 +385,20 @@ def update_load_balancer(module, oneandone_conn): module : AnsibleModule object oneandone_conn: authenticated oneandone object """ - load_balancer_id = module.params.get('load_balancer') - name = module.params.get('name') - description = module.params.get('description') - health_check_test = module.params.get('health_check_test') - health_check_interval = module.params.get('health_check_interval') - health_check_path = module.params.get('health_check_path') - health_check_parse = module.params.get('health_check_parse') - persistence = module.params.get('persistence') - persistence_time = module.params.get('persistence_time') - method = module.params.get('method') - add_server_ips = module.params.get('add_server_ips') - remove_server_ips = module.params.get('remove_server_ips') - add_rules = module.params.get('add_rules') - remove_rules = module.params.get('remove_rules') + load_balancer_id = module.params.get("load_balancer") + name = module.params.get("name") + description = module.params.get("description") + health_check_test = module.params.get("health_check_test") + health_check_interval = module.params.get("health_check_interval") + health_check_path = module.params.get("health_check_path") + health_check_parse = module.params.get("health_check_parse") + persistence = module.params.get("persistence") + persistence_time = module.params.get("persistence_time") + method = module.params.get("method") + add_server_ips = module.params.get("add_server_ips") + remove_server_ips = module.params.get("remove_server_ips") + add_rules = module.params.get("add_rules") + remove_rules = module.params.get("remove_rules") changed = False @@ -414,11 +406,20 @@ def update_load_balancer(module, oneandone_conn): if load_balancer is None: _check_mode(module, False) - if (name or description or health_check_test or health_check_interval or health_check_path or - health_check_parse or persistence or persistence_time or method): + if ( + name + or description + or health_check_test + or health_check_interval + or health_check_path + or health_check_parse + or persistence + or persistence_time + or method + ): _check_mode(module, True) load_balancer = oneandone_conn.modify_load_balancer( - load_balancer_id=load_balancer['id'], + load_balancer_id=load_balancer["id"], name=name, description=description, health_check_test=health_check_test, @@ -427,41 +428,30 @@ def update_load_balancer(module, oneandone_conn): health_check_parse=health_check_parse, persistence=persistence, persistence_time=persistence_time, - method=method) + method=method, + ) changed = True if add_server_ips: if module.check_mode: - _check_mode(module, _add_server_ips(module, - oneandone_conn, - load_balancer['id'], - add_server_ips)) + _check_mode(module, _add_server_ips(module, oneandone_conn, load_balancer["id"], add_server_ips)) - load_balancer = _add_server_ips(module, 
oneandone_conn, load_balancer['id'], add_server_ips) + load_balancer = _add_server_ips(module, oneandone_conn, load_balancer["id"], add_server_ips) changed = True if remove_server_ips: chk_changed = False for server_ip_id in remove_server_ips: if module.check_mode: - chk_changed |= _remove_load_balancer_server(module, - oneandone_conn, - load_balancer['id'], - server_ip_id) - - _remove_load_balancer_server(module, - oneandone_conn, - load_balancer['id'], - server_ip_id) + chk_changed |= _remove_load_balancer_server(module, oneandone_conn, load_balancer["id"], server_ip_id) + + _remove_load_balancer_server(module, oneandone_conn, load_balancer["id"], server_ip_id) _check_mode(module, chk_changed) - load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) + load_balancer = get_load_balancer(oneandone_conn, load_balancer["id"], True) changed = True if add_rules: - load_balancer = _add_load_balancer_rules(module, - oneandone_conn, - load_balancer['id'], - add_rules) + load_balancer = _add_load_balancer_rules(module, oneandone_conn, load_balancer["id"], add_rules) _check_mode(module, load_balancer) changed = True @@ -469,17 +459,11 @@ def update_load_balancer(module, oneandone_conn): chk_changed = False for rule_id in remove_rules: if module.check_mode: - chk_changed |= _remove_load_balancer_rule(module, - oneandone_conn, - load_balancer['id'], - rule_id) - - _remove_load_balancer_rule(module, - oneandone_conn, - load_balancer['id'], - rule_id) + chk_changed |= _remove_load_balancer_rule(module, oneandone_conn, load_balancer["id"], rule_id) + + _remove_load_balancer_rule(module, oneandone_conn, load_balancer["id"], rule_id) _check_mode(module, chk_changed) - load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) + load_balancer = get_load_balancer(oneandone_conn, load_balancer["id"], True) changed = True try: @@ -496,20 +480,20 @@ def create_load_balancer(module, oneandone_conn): oneandone_conn: authenticated oneandone object """ try: - name = module.params.get('name') - description = module.params.get('description') - health_check_test = module.params.get('health_check_test') - health_check_interval = module.params.get('health_check_interval') - health_check_path = module.params.get('health_check_path') - health_check_parse = module.params.get('health_check_parse') - persistence = module.params.get('persistence') - persistence_time = module.params.get('persistence_time') - method = module.params.get('method') - datacenter = module.params.get('datacenter') - rules = module.params.get('rules') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') + name = module.params.get("name") + description = module.params.get("description") + health_check_test = module.params.get("health_check_test") + health_check_interval = module.params.get("health_check_interval") + health_check_path = module.params.get("health_check_path") + health_check_parse = module.params.get("health_check_parse") + persistence = module.params.get("persistence") + persistence_time = module.params.get("persistence_time") + method = module.params.get("method") + datacenter = module.params.get("datacenter") + rules = module.params.get("rules") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + wait_interval = module.params.get("wait_interval") load_balancer_rules = [] @@ -517,15 +501,15 @@ def create_load_balancer(module, oneandone_conn): if datacenter is not None: 
datacenter_id = get_datacenter(oneandone_conn, datacenter) if datacenter_id is None: - module.fail_json( - msg=f'datacenter {datacenter} not found.') + module.fail_json(msg=f"datacenter {datacenter} not found.") for rule in rules: load_balancer_rule = oneandone.client.LoadBalancerRule( - protocol=rule['protocol'], - port_balancer=rule['port_balancer'], - port_server=rule['port_server'], - source=rule['source']) + protocol=rule["protocol"], + port_balancer=rule["port_balancer"], + port_server=rule["port_server"], + source=rule["source"], + ) load_balancer_rules.append(load_balancer_rule) _check_mode(module, True) @@ -539,22 +523,19 @@ def create_load_balancer(module, oneandone_conn): persistence=persistence, persistence_time=persistence_time, method=method, - datacenter_id=datacenter_id + datacenter_id=datacenter_id, ) load_balancer = oneandone_conn.create_load_balancer( - load_balancer=load_balancer_obj, - load_balancer_rules=load_balancer_rules + load_balancer=load_balancer_obj, load_balancer_rules=load_balancer_rules ) if wait: - wait_for_resource_creation_completion(oneandone_conn, - OneAndOneResources.load_balancer, - load_balancer['id'], - wait_timeout, - wait_interval) + wait_for_resource_creation_completion( + oneandone_conn, OneAndOneResources.load_balancer, load_balancer["id"], wait_timeout, wait_interval + ) - load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) # refresh + load_balancer = get_load_balancer(oneandone_conn, load_balancer["id"], True) # refresh changed = True if load_balancer else False _check_mode(module, False) @@ -572,7 +553,7 @@ def remove_load_balancer(module, oneandone_conn): oneandone_conn: authenticated oneandone object """ try: - lb_id = module.params.get('name') + lb_id = module.params.get("name") load_balancer_id = get_load_balancer(oneandone_conn, lb_id) if module.check_mode: if load_balancer_id is None: @@ -582,10 +563,7 @@ def remove_load_balancer(module, oneandone_conn): changed = True if load_balancer else False - return (changed, { - 'id': load_balancer['id'], - 'name': load_balancer['name'] - }) + return (changed, {"id": load_balancer["id"], "name": load_balancer["name"]}) except Exception as ex: module.fail_json(msg=str(ex)) @@ -593,78 +571,74 @@ def remove_load_balancer(module, oneandone_conn): def main(): module = AnsibleModule( argument_spec=dict( - auth_token=dict( - type='str', no_log=True, - default=os.environ.get('ONEANDONE_AUTH_TOKEN')), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - load_balancer=dict(type='str'), - name=dict(type='str'), - description=dict(type='str'), - health_check_test=dict( - choices=HEALTH_CHECK_TESTS), - health_check_interval=dict(type='str'), - health_check_path=dict(type='str'), - health_check_parse=dict(type='str'), - persistence=dict(type='bool'), - persistence_time=dict(type='str'), - method=dict( - choices=METHODS), - datacenter=dict( - choices=DATACENTERS), - rules=dict(type='list', elements="dict", default=[]), - add_server_ips=dict(type='list', elements="str", default=[]), - remove_server_ips=dict(type='list', elements="str", default=[]), - add_rules=dict(type='list', elements="dict", default=[]), - remove_rules=dict(type='list', elements="str", default=[]), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + auth_token=dict(type="str", no_log=True, 
default=os.environ.get("ONEANDONE_AUTH_TOKEN")), + api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")), + load_balancer=dict(type="str"), + name=dict(type="str"), + description=dict(type="str"), + health_check_test=dict(choices=HEALTH_CHECK_TESTS), + health_check_interval=dict(type="str"), + health_check_path=dict(type="str"), + health_check_parse=dict(type="str"), + persistence=dict(type="bool"), + persistence_time=dict(type="str"), + method=dict(choices=METHODS), + datacenter=dict(choices=DATACENTERS), + rules=dict(type="list", elements="dict", default=[]), + add_server_ips=dict(type="list", elements="str", default=[]), + remove_server_ips=dict(type="list", elements="str", default=[]), + add_rules=dict(type="list", elements="dict", default=[]), + remove_rules=dict(type="list", elements="str", default=[]), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=600), + wait_interval=dict(type="int", default=5), + state=dict(type="str", default="present", choices=["present", "absent", "update"]), ), - supports_check_mode=True + supports_check_mode=True, ) if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') + module.fail_json(msg="1and1 required for this module") - if not module.params.get('auth_token'): - module.fail_json( - msg='auth_token parameter is required.') + if not module.params.get("auth_token"): + module.fail_json(msg="auth_token parameter is required.") - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) + if not module.params.get("api_url"): + oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token")) else: oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + api_token=module.params.get("auth_token"), api_url=module.params.get("api_url") + ) - state = module.params.get('state') + state = module.params.get("state") - if state == 'absent': - if not module.params.get('name'): - module.fail_json( - msg="'name' parameter is required for deleting a load balancer.") + if state == "absent": + if not module.params.get("name"): + module.fail_json(msg="'name' parameter is required for deleting a load balancer.") try: (changed, load_balancer) = remove_load_balancer(module, oneandone_conn) except Exception as ex: module.fail_json(msg=str(ex)) - elif state == 'update': - if not module.params.get('load_balancer'): - module.fail_json( - msg="'load_balancer' parameter is required for updating a load balancer.") + elif state == "update": + if not module.params.get("load_balancer"): + module.fail_json(msg="'load_balancer' parameter is required for updating a load balancer.") try: (changed, load_balancer) = update_load_balancer(module, oneandone_conn) except Exception as ex: module.fail_json(msg=str(ex)) - elif state == 'present': - for param in ('name', 'health_check_test', 'health_check_interval', 'persistence', - 'persistence_time', 'method', 'rules'): + elif state == "present": + for param in ( + "name", + "health_check_test", + "health_check_interval", + "persistence", + "persistence_time", + "method", + "rules", + ): if not module.params.get(param): - module.fail_json( - msg=f"{param} parameter is required for new load balancers.") + module.fail_json(msg=f"{param} parameter is required for new load balancers.") try: (changed, load_balancer) = create_load_balancer(module, oneandone_conn) except Exception as ex: @@ 
-673,5 +647,5 @@ def main(): module.exit_json(changed=changed, load_balancer=load_balancer) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneandone_monitoring_policy.py b/plugins/modules/oneandone_monitoring_policy.py index fac044549b4..5a2622ace96 100644 --- a/plugins/modules/oneandone_monitoring_policy.py +++ b/plugins/modules/oneandone_monitoring_policy.py @@ -425,7 +425,7 @@ get_monitoring_policy, get_server, OneAndOneResources, - wait_for_resource_creation_completion + wait_for_resource_creation_completion, ) HAS_ONEANDONE_SDK = True @@ -438,9 +438,7 @@ def _check_mode(module, result): if module.check_mode: - module.exit_json( - changed=result - ) + module.exit_json(changed=result) def _add_ports(module, oneandone_conn, monitoring_policy_id, ports): @@ -452,10 +450,10 @@ def _add_ports(module, oneandone_conn, monitoring_policy_id, ports): for _port in ports: monitoring_policy_port = oneandone.client.Port( - protocol=_port['protocol'], - port=_port['port'], - alert_if=_port['alert_if'], - email_notification=_port['email_notification'] + protocol=_port["protocol"], + port=_port["port"], + alert_if=_port["alert_if"], + email_notification=_port["email_notification"], ) monitoring_policy_ports.append(monitoring_policy_port) @@ -465,8 +463,8 @@ def _add_ports(module, oneandone_conn, monitoring_policy_id, ports): return False monitoring_policy = oneandone_conn.add_port( - monitoring_policy_id=monitoring_policy_id, - ports=monitoring_policy_ports) + monitoring_policy_id=monitoring_policy_id, ports=monitoring_policy_ports + ) return monitoring_policy except Exception as ex: module.fail_json(msg=str(ex)) @@ -479,15 +477,15 @@ def _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy_id, try: if module.check_mode: monitoring_policy = oneandone_conn.delete_monitoring_policy_port( - monitoring_policy_id=monitoring_policy_id, - port_id=port_id) + monitoring_policy_id=monitoring_policy_id, port_id=port_id + ) if monitoring_policy: return True return False monitoring_policy = oneandone_conn.delete_monitoring_policy_port( - monitoring_policy_id=monitoring_policy_id, - port_id=port_id) + monitoring_policy_id=monitoring_policy_id, port_id=port_id + ) return monitoring_policy except Exception as ex: module.fail_json(msg=str(ex)) @@ -500,23 +498,22 @@ def _modify_port(module, oneandone_conn, monitoring_policy_id, port_id, port): try: if module.check_mode: cm_port = oneandone_conn.get_monitoring_policy_port( - monitoring_policy_id=monitoring_policy_id, - port_id=port_id) + monitoring_policy_id=monitoring_policy_id, port_id=port_id + ) if cm_port: return True return False monitoring_policy_port = oneandone.client.Port( - protocol=port['protocol'], - port=port['port'], - alert_if=port['alert_if'], - email_notification=port['email_notification'] + protocol=port["protocol"], + port=port["port"], + alert_if=port["alert_if"], + email_notification=port["email_notification"], ) monitoring_policy = oneandone_conn.modify_port( - monitoring_policy_id=monitoring_policy_id, - port_id=port_id, - port=monitoring_policy_port) + monitoring_policy_id=monitoring_policy_id, port_id=port_id, port=monitoring_policy_port + ) return monitoring_policy except Exception as ex: module.fail_json(msg=str(ex)) @@ -531,9 +528,9 @@ def _add_processes(module, oneandone_conn, monitoring_policy_id, processes): for _process in processes: monitoring_policy_process = oneandone.client.Process( - process=_process['process'], - alert_if=_process['alert_if'], - 
email_notification=_process['email_notification'] + process=_process["process"], + alert_if=_process["alert_if"], + email_notification=_process["email_notification"], ) monitoring_policy_processes.append(monitoring_policy_process) @@ -544,8 +541,8 @@ def _add_processes(module, oneandone_conn, monitoring_policy_id, processes): return False monitoring_policy = oneandone_conn.add_process( - monitoring_policy_id=monitoring_policy_id, - processes=monitoring_policy_processes) + monitoring_policy_id=monitoring_policy_id, processes=monitoring_policy_processes + ) return monitoring_policy except Exception as ex: module.fail_json(msg=str(ex)) @@ -558,16 +555,15 @@ def _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy_ try: if module.check_mode: process = oneandone_conn.get_monitoring_policy_process( - monitoring_policy_id=monitoring_policy_id, - process_id=process_id + monitoring_policy_id=monitoring_policy_id, process_id=process_id ) if process: return True return False monitoring_policy = oneandone_conn.delete_monitoring_policy_process( - monitoring_policy_id=monitoring_policy_id, - process_id=process_id) + monitoring_policy_id=monitoring_policy_id, process_id=process_id + ) return monitoring_policy except Exception as ex: module.fail_json(msg=str(ex)) @@ -580,22 +576,19 @@ def _modify_process(module, oneandone_conn, monitoring_policy_id, process_id, pr try: if module.check_mode: cm_process = oneandone_conn.get_monitoring_policy_process( - monitoring_policy_id=monitoring_policy_id, - process_id=process_id) + monitoring_policy_id=monitoring_policy_id, process_id=process_id + ) if cm_process: return True return False monitoring_policy_process = oneandone.client.Process( - process=process['process'], - alert_if=process['alert_if'], - email_notification=process['email_notification'] + process=process["process"], alert_if=process["alert_if"], email_notification=process["email_notification"] ) monitoring_policy = oneandone_conn.modify_process( - monitoring_policy_id=monitoring_policy_id, - process_id=process_id, - process=monitoring_policy_process) + monitoring_policy_id=monitoring_policy_id, process_id=process_id, process=monitoring_policy_process + ) return monitoring_policy except Exception as ex: module.fail_json(msg=str(ex)) @@ -610,9 +603,7 @@ def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_i for _server_id in servers: server_id = get_server(oneandone_conn, _server_id) - attach_server = oneandone.client.AttachServer( - server_id=server_id - ) + attach_server = oneandone.client.AttachServer(server_id=server_id) attach_servers.append(attach_server) if module.check_mode: @@ -621,8 +612,8 @@ def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_i return False monitoring_policy = oneandone_conn.attach_monitoring_policy_server( - monitoring_policy_id=monitoring_policy_id, - servers=attach_servers) + monitoring_policy_id=monitoring_policy_id, servers=attach_servers + ) return monitoring_policy except Exception as ex: module.fail_json(msg=str(ex)) @@ -635,15 +626,15 @@ def _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_i try: if module.check_mode: mp_server = oneandone_conn.get_monitoring_policy_server( - monitoring_policy_id=monitoring_policy_id, - server_id=server_id) + monitoring_policy_id=monitoring_policy_id, server_id=server_id + ) if mp_server: return True return False monitoring_policy = oneandone_conn.detach_monitoring_policy_server( - monitoring_policy_id=monitoring_policy_id, - 
server_id=server_id) + monitoring_policy_id=monitoring_policy_id, server_id=server_id + ) return monitoring_policy except Exception as ex: module.fail_json(msg=str(ex)) @@ -661,19 +652,19 @@ def update_monitoring_policy(module, oneandone_conn): oneandone_conn: authenticated oneandone object """ try: - monitoring_policy_id = module.params.get('monitoring_policy') - name = module.params.get('name') - description = module.params.get('description') - email = module.params.get('email') - thresholds = module.params.get('thresholds') - add_ports = module.params.get('add_ports') - update_ports = module.params.get('update_ports') - remove_ports = module.params.get('remove_ports') - add_processes = module.params.get('add_processes') - update_processes = module.params.get('update_processes') - remove_processes = module.params.get('remove_processes') - add_servers = module.params.get('add_servers') - remove_servers = module.params.get('remove_servers') + monitoring_policy_id = module.params.get("monitoring_policy") + name = module.params.get("name") + description = module.params.get("description") + email = module.params.get("email") + thresholds = module.params.get("thresholds") + add_ports = module.params.get("add_ports") + update_ports = module.params.get("update_ports") + remove_ports = module.params.get("remove_ports") + add_processes = module.params.get("add_processes") + update_processes = module.params.get("update_processes") + remove_processes = module.params.get("remove_processes") + add_servers = module.params.get("add_servers") + remove_servers = module.params.get("remove_servers") changed = False @@ -681,16 +672,12 @@ def update_monitoring_policy(module, oneandone_conn): if monitoring_policy is None: _check_mode(module, False) - _monitoring_policy = oneandone.client.MonitoringPolicy( - name=name, - description=description, - email=email - ) + _monitoring_policy = oneandone.client.MonitoringPolicy(name=name, description=description, email=email) _thresholds = None if thresholds: - threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer'] + threshold_entities = ["cpu", "ram", "disk", "internal_ping", "transfer"] _thresholds = [] for threshold in thresholds: @@ -698,70 +685,56 @@ def update_monitoring_policy(module, oneandone_conn): if key in threshold_entities: _threshold = oneandone.client.Threshold( entity=key, - warning_value=threshold[key]['warning']['value'], - warning_alert=str(threshold[key]['warning']['alert']).lower(), - critical_value=threshold[key]['critical']['value'], - critical_alert=str(threshold[key]['critical']['alert']).lower()) + warning_value=threshold[key]["warning"]["value"], + warning_alert=str(threshold[key]["warning"]["alert"]).lower(), + critical_value=threshold[key]["critical"]["value"], + critical_alert=str(threshold[key]["critical"]["alert"]).lower(), + ) _thresholds.append(_threshold) if name or description or email or thresholds: _check_mode(module, True) monitoring_policy = oneandone_conn.modify_monitoring_policy( - monitoring_policy_id=monitoring_policy['id'], + monitoring_policy_id=monitoring_policy["id"], monitoring_policy=_monitoring_policy, - thresholds=_thresholds) + thresholds=_thresholds, + ) changed = True if add_ports: if module.check_mode: - _check_mode(module, _add_ports(module, - oneandone_conn, - monitoring_policy['id'], - add_ports)) + _check_mode(module, _add_ports(module, oneandone_conn, monitoring_policy["id"], add_ports)) - monitoring_policy = _add_ports(module, oneandone_conn, monitoring_policy['id'], add_ports) + 
monitoring_policy = _add_ports(module, oneandone_conn, monitoring_policy["id"], add_ports) changed = True if update_ports: chk_changed = False for update_port in update_ports: if module.check_mode: - chk_changed |= _modify_port(module, - oneandone_conn, - monitoring_policy['id'], - update_port['id'], - update_port) - - _modify_port(module, - oneandone_conn, - monitoring_policy['id'], - update_port['id'], - update_port) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) + chk_changed |= _modify_port( + module, oneandone_conn, monitoring_policy["id"], update_port["id"], update_port + ) + + _modify_port(module, oneandone_conn, monitoring_policy["id"], update_port["id"], update_port) + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy["id"], True) changed = True if remove_ports: chk_changed = False for port_id in remove_ports: if module.check_mode: - chk_changed |= _delete_monitoring_policy_port(module, - oneandone_conn, - monitoring_policy['id'], - port_id) - - _delete_monitoring_policy_port(module, - oneandone_conn, - monitoring_policy['id'], - port_id) + chk_changed |= _delete_monitoring_policy_port( + module, oneandone_conn, monitoring_policy["id"], port_id + ) + + _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy["id"], port_id) _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy["id"], True) changed = True if add_processes: - monitoring_policy = _add_processes(module, - oneandone_conn, - monitoring_policy['id'], - add_processes) + monitoring_policy = _add_processes(module, oneandone_conn, monitoring_policy["id"], add_processes) _check_mode(module, monitoring_policy) changed = True @@ -769,43 +742,32 @@ def update_monitoring_policy(module, oneandone_conn): chk_changed = False for update_process in update_processes: if module.check_mode: - chk_changed |= _modify_process(module, - oneandone_conn, - monitoring_policy['id'], - update_process['id'], - update_process) - - _modify_process(module, - oneandone_conn, - monitoring_policy['id'], - update_process['id'], - update_process) + chk_changed |= _modify_process( + module, oneandone_conn, monitoring_policy["id"], update_process["id"], update_process + ) + + _modify_process(module, oneandone_conn, monitoring_policy["id"], update_process["id"], update_process) _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy["id"], True) changed = True if remove_processes: chk_changed = False for process_id in remove_processes: if module.check_mode: - chk_changed |= _delete_monitoring_policy_process(module, - oneandone_conn, - monitoring_policy['id'], - process_id) - - _delete_monitoring_policy_process(module, - oneandone_conn, - monitoring_policy['id'], - process_id) + chk_changed |= _delete_monitoring_policy_process( + module, oneandone_conn, monitoring_policy["id"], process_id + ) + + _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy["id"], process_id) _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy["id"], True) changed = True if add_servers: - monitoring_policy = 
_attach_monitoring_policy_server(module, - oneandone_conn, - monitoring_policy['id'], - add_servers) + monitoring_policy = _attach_monitoring_policy_server( + module, oneandone_conn, monitoring_policy["id"], add_servers + ) _check_mode(module, monitoring_policy) changed = True @@ -815,17 +777,13 @@ def update_monitoring_policy(module, oneandone_conn): server_id = get_server(oneandone_conn, _server_id) if module.check_mode: - chk_changed |= _detach_monitoring_policy_server(module, - oneandone_conn, - monitoring_policy['id'], - server_id) - - _detach_monitoring_policy_server(module, - oneandone_conn, - monitoring_policy['id'], - server_id) + chk_changed |= _detach_monitoring_policy_server( + module, oneandone_conn, monitoring_policy["id"], server_id + ) + + _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy["id"], server_id) _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy["id"], True) changed = True return (changed, monitoring_policy) @@ -841,25 +799,27 @@ def create_monitoring_policy(module, oneandone_conn): oneandone_conn: authenticated oneandone object """ try: - name = module.params.get('name') - description = module.params.get('description') - email = module.params.get('email') - agent = module.params.get('agent') - thresholds = module.params.get('thresholds') - ports = module.params.get('ports') - processes = module.params.get('processes') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') - - _monitoring_policy = oneandone.client.MonitoringPolicy(name, - description, - email, - agent, ) - - _monitoring_policy.specs['agent'] = str(_monitoring_policy.specs['agent']).lower() - - threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer'] + name = module.params.get("name") + description = module.params.get("description") + email = module.params.get("email") + agent = module.params.get("agent") + thresholds = module.params.get("thresholds") + ports = module.params.get("ports") + processes = module.params.get("processes") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + wait_interval = module.params.get("wait_interval") + + _monitoring_policy = oneandone.client.MonitoringPolicy( + name, + description, + email, + agent, + ) + + _monitoring_policy.specs["agent"] = str(_monitoring_policy.specs["agent"]).lower() + + threshold_entities = ["cpu", "ram", "disk", "internal_ping", "transfer"] _thresholds = [] for threshold in thresholds: @@ -867,44 +827,45 @@ def create_monitoring_policy(module, oneandone_conn): if key in threshold_entities: _threshold = oneandone.client.Threshold( entity=key, - warning_value=threshold[key]['warning']['value'], - warning_alert=str(threshold[key]['warning']['alert']).lower(), - critical_value=threshold[key]['critical']['value'], - critical_alert=str(threshold[key]['critical']['alert']).lower()) + warning_value=threshold[key]["warning"]["value"], + warning_alert=str(threshold[key]["warning"]["alert"]).lower(), + critical_value=threshold[key]["critical"]["value"], + critical_alert=str(threshold[key]["critical"]["alert"]).lower(), + ) _thresholds.append(_threshold) _ports = [] for port in ports: _port = oneandone.client.Port( - protocol=port['protocol'], - port=port['port'], - alert_if=port['alert_if'], - 
email_notification=str(port['email_notification']).lower()) + protocol=port["protocol"], + port=port["port"], + alert_if=port["alert_if"], + email_notification=str(port["email_notification"]).lower(), + ) _ports.append(_port) _processes = [] for process in processes: _process = oneandone.client.Process( - process=process['process'], - alert_if=process['alert_if'], - email_notification=str(process['email_notification']).lower()) + process=process["process"], + alert_if=process["alert_if"], + email_notification=str(process["email_notification"]).lower(), + ) _processes.append(_process) _check_mode(module, True) monitoring_policy = oneandone_conn.create_monitoring_policy( - monitoring_policy=_monitoring_policy, - thresholds=_thresholds, - ports=_ports, - processes=_processes + monitoring_policy=_monitoring_policy, thresholds=_thresholds, ports=_ports, processes=_processes ) if wait: wait_for_resource_creation_completion( oneandone_conn, OneAndOneResources.monitoring_policy, - monitoring_policy['id'], + monitoring_policy["id"], wait_timeout, - wait_interval) + wait_interval, + ) changed = True if monitoring_policy else False @@ -923,7 +884,7 @@ def remove_monitoring_policy(module, oneandone_conn): oneandone_conn: authenticated oneandone object """ try: - mp_id = module.params.get('name') + mp_id = module.params.get("name") monitoring_policy_id = get_monitoring_policy(oneandone_conn, mp_id) if module.check_mode: if monitoring_policy_id is None: @@ -933,10 +894,7 @@ def remove_monitoring_policy(module, oneandone_conn): changed = True if monitoring_policy else False - return (changed, { - 'id': monitoring_policy['id'], - 'name': monitoring_policy['name'] - }) + return (changed, {"id": monitoring_policy["id"], "name": monitoring_policy["name"]}) except Exception as ex: module.fail_json(msg=str(ex)) @@ -944,74 +902,66 @@ def remove_monitoring_policy(module, oneandone_conn): def main(): module = AnsibleModule( argument_spec=dict( - auth_token=dict( - type='str', no_log=True, - default=os.environ.get('ONEANDONE_AUTH_TOKEN')), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - name=dict(type='str'), - monitoring_policy=dict(type='str'), - agent=dict(type='str'), - email=dict(type='str'), - description=dict(type='str'), - thresholds=dict(type='list', elements="dict", default=[]), - ports=dict(type='list', elements="dict", default=[]), - processes=dict(type='list', elements="dict", default=[]), - add_ports=dict(type='list', elements="dict", default=[]), - update_ports=dict(type='list', elements="dict", default=[]), - remove_ports=dict(type='list', elements="str", default=[]), - add_processes=dict(type='list', elements="dict", default=[]), - update_processes=dict(type='list', elements="dict", default=[]), - remove_processes=dict(type='list', elements="str", default=[]), - add_servers=dict(type='list', elements="str", default=[]), - remove_servers=dict(type='list', elements="str", default=[]), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + auth_token=dict(type="str", no_log=True, default=os.environ.get("ONEANDONE_AUTH_TOKEN")), + api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")), + name=dict(type="str"), + monitoring_policy=dict(type="str"), + agent=dict(type="str"), + email=dict(type="str"), + description=dict(type="str"), + thresholds=dict(type="list", elements="dict", default=[]), + 
ports=dict(type="list", elements="dict", default=[]), + processes=dict(type="list", elements="dict", default=[]), + add_ports=dict(type="list", elements="dict", default=[]), + update_ports=dict(type="list", elements="dict", default=[]), + remove_ports=dict(type="list", elements="str", default=[]), + add_processes=dict(type="list", elements="dict", default=[]), + update_processes=dict(type="list", elements="dict", default=[]), + remove_processes=dict(type="list", elements="str", default=[]), + add_servers=dict(type="list", elements="str", default=[]), + remove_servers=dict(type="list", elements="str", default=[]), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=600), + wait_interval=dict(type="int", default=5), + state=dict(type="str", default="present", choices=["present", "absent", "update"]), ), - supports_check_mode=True + supports_check_mode=True, ) if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') + module.fail_json(msg="1and1 required for this module") - if not module.params.get('auth_token'): - module.fail_json( - msg='auth_token parameter is required.') + if not module.params.get("auth_token"): + module.fail_json(msg="auth_token parameter is required.") - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) + if not module.params.get("api_url"): + oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token")) else: oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + api_token=module.params.get("auth_token"), api_url=module.params.get("api_url") + ) - state = module.params.get('state') + state = module.params.get("state") - if state == 'absent': - if not module.params.get('name'): - module.fail_json( - msg="'name' parameter is required to delete a monitoring policy.") + if state == "absent": + if not module.params.get("name"): + module.fail_json(msg="'name' parameter is required to delete a monitoring policy.") try: (changed, monitoring_policy) = remove_monitoring_policy(module, oneandone_conn) except Exception as ex: module.fail_json(msg=str(ex)) - elif state == 'update': - if not module.params.get('monitoring_policy'): - module.fail_json( - msg="'monitoring_policy' parameter is required to update a monitoring policy.") + elif state == "update": + if not module.params.get("monitoring_policy"): + module.fail_json(msg="'monitoring_policy' parameter is required to update a monitoring policy.") try: (changed, monitoring_policy) = update_monitoring_policy(module, oneandone_conn) except Exception as ex: module.fail_json(msg=str(ex)) - elif state == 'present': - for param in ('name', 'agent', 'email', 'thresholds', 'ports', 'processes'): + elif state == "present": + for param in ("name", "agent", "email", "thresholds", "ports", "processes"): if not module.params.get(param): - module.fail_json( - msg=f"{param} parameter is required for a new monitoring policy.") + module.fail_json(msg=f"{param} parameter is required for a new monitoring policy.") try: (changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn) except Exception as ex: @@ -1020,5 +970,5 @@ def main(): module.exit_json(changed=changed, monitoring_policy=monitoring_policy) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneandone_private_network.py b/plugins/modules/oneandone_private_network.py index 
1fafbde01da..f7959090cf3 100644 --- a/plugins/modules/oneandone_private_network.py +++ b/plugins/modules/oneandone_private_network.py @@ -157,7 +157,7 @@ get_datacenter, OneAndOneResources, wait_for_resource_creation_completion, - wait_for_resource_deletion_completion + wait_for_resource_deletion_completion, ) HAS_ONEANDONE_SDK = True @@ -167,14 +167,12 @@ except ImportError: HAS_ONEANDONE_SDK = False -DATACENTERS = ['US', 'ES', 'DE', 'GB'] +DATACENTERS = ["US", "ES", "DE", "GB"] def _check_mode(module, result): if module.check_mode: - module.exit_json( - changed=result - ) + module.exit_json(changed=result) def _add_servers(module, oneandone_conn, name, members): @@ -187,8 +185,8 @@ def _add_servers(module, oneandone_conn, name, members): return False network = oneandone_conn.attach_private_network_servers( - private_network_id=private_network_id, - server_ids=members) + private_network_id=private_network_id, server_ids=members + ) return network except Exception as e: @@ -202,15 +200,13 @@ def _remove_member(module, oneandone_conn, name, member_id): if module.check_mode: if private_network_id: network_member = oneandone_conn.get_private_network_server( - private_network_id=private_network_id, - server_id=member_id) + private_network_id=private_network_id, server_id=member_id + ) if network_member: return True return False - network = oneandone_conn.remove_private_network_server( - private_network_id=name, - server_id=member_id) + network = oneandone_conn.remove_private_network_server(private_network_id=name, server_id=member_id) return network except Exception as ex: @@ -227,20 +223,19 @@ def create_network(module, oneandone_conn): Returns a dictionary containing a 'changed' attribute indicating whether any network was added. """ - name = module.params.get('name') - description = module.params.get('description') - network_address = module.params.get('network_address') - subnet_mask = module.params.get('subnet_mask') - datacenter = module.params.get('datacenter') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') + name = module.params.get("name") + description = module.params.get("description") + network_address = module.params.get("network_address") + subnet_mask = module.params.get("subnet_mask") + datacenter = module.params.get("datacenter") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + wait_interval = module.params.get("wait_interval") if datacenter is not None: datacenter_id = get_datacenter(oneandone_conn, datacenter) if datacenter_id is None: - module.fail_json( - msg=f'datacenter {datacenter} not found.') + module.fail_json(msg=f"datacenter {datacenter} not found.") try: _check_mode(module, True) @@ -250,19 +245,15 @@ def create_network(module, oneandone_conn): description=description, network_address=network_address, subnet_mask=subnet_mask, - datacenter_id=datacenter_id - )) + datacenter_id=datacenter_id, + ) + ) if wait: wait_for_resource_creation_completion( - oneandone_conn, - OneAndOneResources.private_network, - network['id'], - wait_timeout, - wait_interval) - network = get_private_network(oneandone_conn, - network['id'], - True) + oneandone_conn, OneAndOneResources.private_network, network["id"], wait_timeout, wait_interval + ) + network = get_private_network(oneandone_conn, network["id"], True) changed = True if network else False @@ -281,30 +272,29 @@ def update_network(module, oneandone_conn): oneandone_conn: authenticated oneandone object 
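# A minimal sketch (illustrative, not part of this patch): the check-mode
# idiom shared by these oneandone modules. _check_mode(), reformatted above,
# exits the module early with the predicted 'changed' value when Ansible runs
# with --check, so the real API call is never reached. FakeModule is a
# hypothetical stand-in for an AnsibleModule instance, used only to make the
# sketch self-contained.

class FakeModule:
    check_mode = True

    def exit_json(self, **kwargs):
        # AnsibleModule.exit_json() terminates the module; SystemExit mimics that.
        raise SystemExit(kwargs)


def _check_mode(module, result):
    if module.check_mode:
        module.exit_json(changed=result)


def create_network(module, conn):
    _check_mode(module, True)  # check mode: report "would create" and stop here
    return conn.create_private_network()  # only reached on a real run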
""" try: - _private_network_id = module.params.get('private_network') - _name = module.params.get('name') - _description = module.params.get('description') - _network_address = module.params.get('network_address') - _subnet_mask = module.params.get('subnet_mask') - _add_members = module.params.get('add_members') - _remove_members = module.params.get('remove_members') + _private_network_id = module.params.get("private_network") + _name = module.params.get("name") + _description = module.params.get("description") + _network_address = module.params.get("network_address") + _subnet_mask = module.params.get("subnet_mask") + _add_members = module.params.get("add_members") + _remove_members = module.params.get("remove_members") changed = False - private_network = get_private_network(oneandone_conn, - _private_network_id, - True) + private_network = get_private_network(oneandone_conn, _private_network_id, True) if private_network is None: _check_mode(module, False) if _name or _description or _network_address or _subnet_mask: _check_mode(module, True) private_network = oneandone_conn.modify_private_network( - private_network_id=private_network['id'], + private_network_id=private_network["id"], name=_name, description=_description, network_address=_network_address, - subnet_mask=_subnet_mask) + subnet_mask=_subnet_mask, + ) changed = True if _add_members: @@ -315,7 +305,7 @@ def update_network(module, oneandone_conn): instance_obj = oneandone.client.AttachServer(server_id=instance_id) instances.extend([instance_obj]) - private_network = _add_servers(module, oneandone_conn, private_network['id'], instances) + private_network = _add_servers(module, oneandone_conn, private_network["id"], instances) _check_mode(module, private_network) changed = True @@ -325,19 +315,11 @@ def update_network(module, oneandone_conn): instance = get_server(oneandone_conn, member, True) if module.check_mode: - chk_changed |= _remove_member(module, - oneandone_conn, - private_network['id'], - instance['id']) + chk_changed |= _remove_member(module, oneandone_conn, private_network["id"], instance["id"]) _check_mode(module, instance and chk_changed) - _remove_member(module, - oneandone_conn, - private_network['id'], - instance['id']) - private_network = get_private_network(oneandone_conn, - private_network['id'], - True) + _remove_member(module, oneandone_conn, private_network["id"], instance["id"]) + private_network = get_private_network(oneandone_conn, private_network["id"], True) changed = True return (changed, private_network) @@ -353,9 +335,9 @@ def remove_network(module, oneandone_conn): oneandone_conn: authenticated oneandone object. 
""" try: - pn_id = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') + pn_id = module.params.get("name") + wait_timeout = module.params.get("wait_timeout") + wait_interval = module.params.get("wait_interval") private_network_id = get_private_network(oneandone_conn, pn_id) if module.check_mode: @@ -363,18 +345,13 @@ def remove_network(module, oneandone_conn): _check_mode(module, False) _check_mode(module, True) private_network = oneandone_conn.delete_private_network(private_network_id) - wait_for_resource_deletion_completion(oneandone_conn, - OneAndOneResources.private_network, - private_network['id'], - wait_timeout, - wait_interval) + wait_for_resource_deletion_completion( + oneandone_conn, OneAndOneResources.private_network, private_network["id"], wait_timeout, wait_interval + ) changed = True if private_network else False - return (changed, { - 'id': private_network['id'], - 'name': private_network['name'] - }) + return (changed, {"id": private_network["id"], "name": private_network["name"]}) except Exception as e: module.fail_json(msg=str(e)) @@ -382,65 +359,56 @@ def remove_network(module, oneandone_conn): def main(): module = AnsibleModule( argument_spec=dict( - auth_token=dict( - type='str', no_log=True, - default=os.environ.get('ONEANDONE_AUTH_TOKEN')), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - private_network=dict(type='str'), - name=dict(type='str'), - description=dict(type='str'), - network_address=dict(type='str'), - subnet_mask=dict(type='str'), - add_members=dict(type='list', elements="str", default=[]), - remove_members=dict(type='list', elements="str", default=[]), - datacenter=dict( - choices=DATACENTERS), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + auth_token=dict(type="str", no_log=True, default=os.environ.get("ONEANDONE_AUTH_TOKEN")), + api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")), + private_network=dict(type="str"), + name=dict(type="str"), + description=dict(type="str"), + network_address=dict(type="str"), + subnet_mask=dict(type="str"), + add_members=dict(type="list", elements="str", default=[]), + remove_members=dict(type="list", elements="str", default=[]), + datacenter=dict(choices=DATACENTERS), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=600), + wait_interval=dict(type="int", default=5), + state=dict(type="str", default="present", choices=["present", "absent", "update"]), ), - supports_check_mode=True + supports_check_mode=True, ) if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') + module.fail_json(msg="1and1 required for this module") - if not module.params.get('auth_token'): - module.fail_json( - msg='auth_token parameter is required.') + if not module.params.get("auth_token"): + module.fail_json(msg="auth_token parameter is required.") - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) + if not module.params.get("api_url"): + oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token")) else: oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + 
api_token=module.params.get("auth_token"), api_url=module.params.get("api_url") + ) - state = module.params.get('state') + state = module.params.get("state") - if state == 'absent': - if not module.params.get('name'): - module.fail_json( - msg="'name' parameter is required for deleting a network.") + if state == "absent": + if not module.params.get("name"): + module.fail_json(msg="'name' parameter is required for deleting a network.") try: (changed, private_network) = remove_network(module, oneandone_conn) except Exception as e: module.fail_json(msg=str(e)) - elif state == 'update': - if not module.params.get('private_network'): - module.fail_json( - msg="'private_network' parameter is required for updating a network.") + elif state == "update": + if not module.params.get("private_network"): + module.fail_json(msg="'private_network' parameter is required for updating a network.") try: (changed, private_network) = update_network(module, oneandone_conn) except Exception as e: module.fail_json(msg=str(e)) - elif state == 'present': - if not module.params.get('name'): - module.fail_json( - msg="'name' parameter is required for new networks.") + elif state == "present": + if not module.params.get("name"): + module.fail_json(msg="'name' parameter is required for new networks.") try: (changed, private_network) = create_network(module, oneandone_conn) except Exception as e: @@ -449,5 +417,5 @@ def main(): module.exit_json(changed=changed, private_network=private_network) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneandone_public_ip.py b/plugins/modules/oneandone_public_ip.py index b59f54719c8..7ab46d389f9 100644 --- a/plugins/modules/oneandone_public_ip.py +++ b/plugins/modules/oneandone_public_ip.py @@ -122,7 +122,7 @@ get_datacenter, get_public_ip, OneAndOneResources, - wait_for_resource_creation_completion + wait_for_resource_creation_completion, ) HAS_ONEANDONE_SDK = True @@ -132,16 +132,14 @@ except ImportError: HAS_ONEANDONE_SDK = False -DATACENTERS = ['US', 'ES', 'DE', 'GB'] +DATACENTERS = ["US", "ES", "DE", "GB"] -TYPES = ['IPV4', 'IPV6'] +TYPES = ["IPV4", "IPV6"] def _check_mode(module, result): if module.check_mode: - module.exit_json( - changed=result - ) + module.exit_json(changed=result) def create_public_ip(module, oneandone_conn): @@ -154,34 +152,30 @@ def create_public_ip(module, oneandone_conn): Returns a dictionary containing a 'changed' attribute indicating whether any public IP was added. 
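# A minimal sketch (illustrative, not part of this patch): the credential
# pattern every main() in this patch uses. auth_token falls back to the
# ONEANDONE_AUTH_TOKEN environment variable, no_log=True keeps the token out
# of logs, and fail_json() aborts when neither source supplies a value.

import os

from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        argument_spec=dict(
            auth_token=dict(type="str", no_log=True, default=os.environ.get("ONEANDONE_AUTH_TOKEN")),
            api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")),
        ),
        supports_check_mode=True,
    )
    if not module.params.get("auth_token"):
        module.fail_json(msg="auth_token parameter is required.")


if __name__ == "__main__":
    main()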
""" - reverse_dns = module.params.get('reverse_dns') - datacenter = module.params.get('datacenter') - ip_type = module.params.get('type') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') + reverse_dns = module.params.get("reverse_dns") + datacenter = module.params.get("datacenter") + ip_type = module.params.get("type") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + wait_interval = module.params.get("wait_interval") if datacenter is not None: datacenter_id = get_datacenter(oneandone_conn, datacenter) if datacenter_id is None: _check_mode(module, False) - module.fail_json( - msg=f'datacenter {datacenter} not found.') + module.fail_json(msg=f"datacenter {datacenter} not found.") try: _check_mode(module, True) public_ip = oneandone_conn.create_public_ip( - reverse_dns=reverse_dns, - ip_type=ip_type, - datacenter_id=datacenter_id) + reverse_dns=reverse_dns, ip_type=ip_type, datacenter_id=datacenter_id + ) if wait: - wait_for_resource_creation_completion(oneandone_conn, - OneAndOneResources.public_ip, - public_ip['id'], - wait_timeout, - wait_interval) - public_ip = oneandone_conn.get_public_ip(public_ip['id']) + wait_for_resource_creation_completion( + oneandone_conn, OneAndOneResources.public_ip, public_ip["id"], wait_timeout, wait_interval + ) + public_ip = oneandone_conn.get_public_ip(public_ip["id"]) changed = True if public_ip else False @@ -200,31 +194,26 @@ def update_public_ip(module, oneandone_conn): Returns a dictionary containing a 'changed' attribute indicating whether any public IP was changed. """ - reverse_dns = module.params.get('reverse_dns') - public_ip_id = module.params.get('public_ip_id') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') + reverse_dns = module.params.get("reverse_dns") + public_ip_id = module.params.get("public_ip_id") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + wait_interval = module.params.get("wait_interval") public_ip = get_public_ip(oneandone_conn, public_ip_id, True) if public_ip is None: _check_mode(module, False) - module.fail_json( - msg=f'public IP {public_ip_id} not found.') + module.fail_json(msg=f"public IP {public_ip_id} not found.") try: _check_mode(module, True) - public_ip = oneandone_conn.modify_public_ip( - ip_id=public_ip['id'], - reverse_dns=reverse_dns) + public_ip = oneandone_conn.modify_public_ip(ip_id=public_ip["id"], reverse_dns=reverse_dns) if wait: - wait_for_resource_creation_completion(oneandone_conn, - OneAndOneResources.public_ip, - public_ip['id'], - wait_timeout, - wait_interval) - public_ip = oneandone_conn.get_public_ip(public_ip['id']) + wait_for_resource_creation_completion( + oneandone_conn, OneAndOneResources.public_ip, public_ip["id"], wait_timeout, wait_interval + ) + public_ip = oneandone_conn.get_public_ip(public_ip["id"]) changed = True if public_ip else False @@ -243,24 +232,20 @@ def delete_public_ip(module, oneandone_conn): Returns a dictionary containing a 'changed' attribute indicating whether any public IP was deleted. 
""" - public_ip_id = module.params.get('public_ip_id') + public_ip_id = module.params.get("public_ip_id") public_ip = get_public_ip(oneandone_conn, public_ip_id, True) if public_ip is None: _check_mode(module, False) - module.fail_json( - msg=f'public IP {public_ip_id} not found.') + module.fail_json(msg=f"public IP {public_ip_id} not found.") try: _check_mode(module, True) - deleted_public_ip = oneandone_conn.delete_public_ip( - ip_id=public_ip['id']) + deleted_public_ip = oneandone_conn.delete_public_ip(ip_id=public_ip["id"]) changed = True if deleted_public_ip else False - return (changed, { - 'id': public_ip['id'] - }) + return (changed, {"id": public_ip["id"]}) except Exception as e: module.fail_json(msg=str(e)) @@ -268,62 +253,51 @@ def delete_public_ip(module, oneandone_conn): def main(): module = AnsibleModule( argument_spec=dict( - auth_token=dict( - type='str', no_log=True, - default=os.environ.get('ONEANDONE_AUTH_TOKEN')), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - public_ip_id=dict(type='str'), - reverse_dns=dict(type='str'), - datacenter=dict( - choices=DATACENTERS, - default='US'), - type=dict( - choices=TYPES, - default='IPV4'), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + auth_token=dict(type="str", no_log=True, default=os.environ.get("ONEANDONE_AUTH_TOKEN")), + api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")), + public_ip_id=dict(type="str"), + reverse_dns=dict(type="str"), + datacenter=dict(choices=DATACENTERS, default="US"), + type=dict(choices=TYPES, default="IPV4"), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=600), + wait_interval=dict(type="int", default=5), + state=dict(type="str", default="present", choices=["present", "absent", "update"]), ), - supports_check_mode=True + supports_check_mode=True, ) if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') + module.fail_json(msg="1and1 required for this module") - if not module.params.get('auth_token'): - module.fail_json( - msg='auth_token parameter is required.') + if not module.params.get("auth_token"): + module.fail_json(msg="auth_token parameter is required.") - if not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) + if not module.params.get("api_url"): + oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token")) else: oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + api_token=module.params.get("auth_token"), api_url=module.params.get("api_url") + ) - state = module.params.get('state') + state = module.params.get("state") - if state == 'absent': - if not module.params.get('public_ip_id'): - module.fail_json( - msg="'public_ip_id' parameter is required to delete a public ip.") + if state == "absent": + if not module.params.get("public_ip_id"): + module.fail_json(msg="'public_ip_id' parameter is required to delete a public ip.") try: (changed, public_ip) = delete_public_ip(module, oneandone_conn) except Exception as e: module.fail_json(msg=str(e)) - elif state == 'update': - if not module.params.get('public_ip_id'): - module.fail_json( - msg="'public_ip_id' parameter is required to update a public ip.") + elif state == 
"update": + if not module.params.get("public_ip_id"): + module.fail_json(msg="'public_ip_id' parameter is required to update a public ip.") try: (changed, public_ip) = update_public_ip(module, oneandone_conn) except Exception as e: module.fail_json(msg=str(e)) - elif state == 'present': + elif state == "present": try: (changed, public_ip) = create_public_ip(module, oneandone_conn) except Exception as e: @@ -332,5 +306,5 @@ def main(): module.exit_json(changed=changed, public_ip=public_ip) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneandone_server.py b/plugins/modules/oneandone_server.py index b835f603c8a..50235da77f0 100644 --- a/plugins/modules/oneandone_server.py +++ b/plugins/modules/oneandone_server.py @@ -216,7 +216,7 @@ get_server, OneAndOneResources, wait_for_resource_creation_completion, - wait_for_resource_deletion_completion + wait_for_resource_deletion_completion, ) HAS_ONEANDONE_SDK = True @@ -226,14 +226,14 @@ except ImportError: HAS_ONEANDONE_SDK = False -DATACENTERS = ['US', 'ES', 'DE', 'GB'] +DATACENTERS = ["US", "ES", "DE", "GB"] ONEANDONE_SERVER_STATES = ( - 'DEPLOYING', - 'POWERED_OFF', - 'POWERED_ON', - 'POWERING_ON', - 'POWERING_OFF', + "DEPLOYING", + "POWERED_OFF", + "POWERED_ON", + "POWERING_ON", + "POWERING_OFF", ) @@ -242,13 +242,28 @@ def _check_mode(module, result): module.exit_json(changed=result) -def _create_server(module, oneandone_conn, hostname, description, - fixed_instance_size_id, vcore, cores_per_processor, ram, - hdds, datacenter_id, appliance_id, ssh_key, - private_network_id, firewall_policy_id, load_balancer_id, - monitoring_policy_id, server_type, wait, wait_timeout, - wait_interval): - +def _create_server( + module, + oneandone_conn, + hostname, + description, + fixed_instance_size_id, + vcore, + cores_per_processor, + ram, + hdds, + datacenter_id, + appliance_id, + ssh_key, + private_network_id, + firewall_policy_id, + load_balancer_id, + monitoring_policy_id, + server_type, + wait, + wait_timeout, + wait_interval, +): try: existing_server = get_server(oneandone_conn, hostname) @@ -275,16 +290,16 @@ def _create_server(module, oneandone_conn, hostname, description, firewall_policy_id=firewall_policy_id, load_balancer_id=load_balancer_id, monitoring_policy_id=monitoring_policy_id, - server_type=server_type,), hdds) + server_type=server_type, + ), + hdds, + ) if wait: wait_for_resource_creation_completion( - oneandone_conn, - OneAndOneResources.server, - server['id'], - wait_timeout, - wait_interval) - server = oneandone_conn.get_server(server['id']) # refresh + oneandone_conn, OneAndOneResources.server, server["id"], wait_timeout, wait_interval + ) + server = oneandone_conn.get_server(server["id"]) # refresh return server except Exception as ex: @@ -292,11 +307,11 @@ def _create_server(module, oneandone_conn, hostname, description, def _insert_network_data(server): - for addr_data in server['ips']: - if addr_data['type'] == 'IPV6': - server['public_ipv6'] = addr_data['ip'] - elif addr_data['type'] == 'IPV4': - server['public_ipv4'] = addr_data['ip'] + for addr_data in server["ips"]: + if addr_data["type"] == "IPV6": + server["public_ipv6"] = addr_data["ip"] + elif addr_data["type"] == "IPV4": + server["public_ipv4"] = addr_data["ip"] return server @@ -311,88 +326,71 @@ def create_server(module, oneandone_conn): any server was added, and a 'servers' attribute with the list of the created servers' hostname, id and ip addresses. 
""" - hostname = module.params.get('hostname') - description = module.params.get('description') - auto_increment = module.params.get('auto_increment') - count = module.params.get('count') - fixed_instance_size = module.params.get('fixed_instance_size') - vcore = module.params.get('vcore') - cores_per_processor = module.params.get('cores_per_processor') - ram = module.params.get('ram') - hdds = module.params.get('hdds') - datacenter = module.params.get('datacenter') - appliance = module.params.get('appliance') - ssh_key = module.params.get('ssh_key') - private_network = module.params.get('private_network') - monitoring_policy = module.params.get('monitoring_policy') - firewall_policy = module.params.get('firewall_policy') - load_balancer = module.params.get('load_balancer') - server_type = module.params.get('server_type') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') + hostname = module.params.get("hostname") + description = module.params.get("description") + auto_increment = module.params.get("auto_increment") + count = module.params.get("count") + fixed_instance_size = module.params.get("fixed_instance_size") + vcore = module.params.get("vcore") + cores_per_processor = module.params.get("cores_per_processor") + ram = module.params.get("ram") + hdds = module.params.get("hdds") + datacenter = module.params.get("datacenter") + appliance = module.params.get("appliance") + ssh_key = module.params.get("ssh_key") + private_network = module.params.get("private_network") + monitoring_policy = module.params.get("monitoring_policy") + firewall_policy = module.params.get("firewall_policy") + load_balancer = module.params.get("load_balancer") + server_type = module.params.get("server_type") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + wait_interval = module.params.get("wait_interval") datacenter_id = get_datacenter(oneandone_conn, datacenter) if datacenter_id is None: _check_mode(module, False) - module.fail_json( - msg=f'datacenter {datacenter} not found.') + module.fail_json(msg=f"datacenter {datacenter} not found.") fixed_instance_size_id = None if fixed_instance_size: - fixed_instance_size_id = get_fixed_instance_size( - oneandone_conn, - fixed_instance_size) + fixed_instance_size_id = get_fixed_instance_size(oneandone_conn, fixed_instance_size) if fixed_instance_size_id is None: _check_mode(module, False) - module.fail_json( - msg=f'fixed_instance_size {fixed_instance_size} not found.') + module.fail_json(msg=f"fixed_instance_size {fixed_instance_size} not found.") appliance_id = get_appliance(oneandone_conn, appliance) if appliance_id is None: _check_mode(module, False) - module.fail_json( - msg=f'appliance {appliance} not found.') + module.fail_json(msg=f"appliance {appliance} not found.") private_network_id = None if private_network: - private_network_id = get_private_network( - oneandone_conn, - private_network) + private_network_id = get_private_network(oneandone_conn, private_network) if private_network_id is None: _check_mode(module, False) - module.fail_json( - msg=f'private network {private_network} not found.') + module.fail_json(msg=f"private network {private_network} not found.") monitoring_policy_id = None if monitoring_policy: - monitoring_policy_id = get_monitoring_policy( - oneandone_conn, - monitoring_policy) + monitoring_policy_id = get_monitoring_policy(oneandone_conn, monitoring_policy) if monitoring_policy_id is None: _check_mode(module, 
False) - module.fail_json( - msg=f'monitoring policy {monitoring_policy} not found.') + module.fail_json(msg=f"monitoring policy {monitoring_policy} not found.") firewall_policy_id = None if firewall_policy: - firewall_policy_id = get_firewall_policy( - oneandone_conn, - firewall_policy) + firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_policy) if firewall_policy_id is None: _check_mode(module, False) - module.fail_json( - msg=f'firewall policy {firewall_policy} not found.') + module.fail_json(msg=f"firewall policy {firewall_policy} not found.") load_balancer_id = None if load_balancer: - load_balancer_id = get_load_balancer( - oneandone_conn, - load_balancer) + load_balancer_id = get_load_balancer(oneandone_conn, load_balancer) if load_balancer_id is None: _check_mode(module, False) - module.fail_json( - msg=f'load balancer {load_balancer} not found.') + module.fail_json(msg=f"load balancer {load_balancer} not found.") if auto_increment: hostnames = _auto_increment_hostname(count, hostname) @@ -404,10 +402,7 @@ def create_server(module, oneandone_conn): hdd_objs = [] if hdds: for hdd in hdds: - hdd_objs.append(oneandone.client.Hdd( - size=hdd['size'], - is_main=hdd['is_main'] - )) + hdd_objs.append(oneandone.client.Hdd(size=hdd["size"], is_main=hdd["is_main"])) servers = [] for index, name in enumerate(hostnames): @@ -431,7 +426,8 @@ def create_server(module, oneandone_conn): server_type=server_type, wait=wait, wait_timeout=wait_timeout, - wait_interval=wait_interval) + wait_interval=wait_interval, + ) if server: servers.append(server) @@ -461,10 +457,10 @@ def remove_server(module, oneandone_conn): the server was removed, and a 'removed_server' attribute with the removed server's hostname and id. """ - server_id = module.params.get('server') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') + server_id = module.params.get("server") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + wait_interval = module.params.get("wait_interval") changed = False removed_server = None @@ -473,22 +469,16 @@ def remove_server(module, oneandone_conn): if server: _check_mode(module, True) try: - oneandone_conn.delete_server(server_id=server['id']) + oneandone_conn.delete_server(server_id=server["id"]) if wait: - wait_for_resource_deletion_completion(oneandone_conn, - OneAndOneResources.server, - server['id'], - wait_timeout, - wait_interval) + wait_for_resource_deletion_completion( + oneandone_conn, OneAndOneResources.server, server["id"], wait_timeout, wait_interval + ) changed = True except Exception as ex: - module.fail_json( - msg=f"failed to terminate the server: {ex}") + module.fail_json(msg=f"failed to terminate the server: {ex}") - removed_server = { - 'id': server['id'], - 'hostname': server['name'] - } + removed_server = {"id": server["id"], "hostname": server["name"]} _check_mode(module, False) return (changed, removed_server) @@ -506,11 +496,11 @@ def startstop_server(module, oneandone_conn): being run, and a 'server' attribute with basic information for the server. 
""" - state = module.params.get('state') - server_id = module.params.get('server') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - wait_interval = module.params.get('wait_interval') + state = module.params.get("state") + server_id = module.params.get("server") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + wait_interval = module.params.get("wait_interval") changed = False @@ -520,21 +510,14 @@ def startstop_server(module, oneandone_conn): # Attempt to change the server state, only if it is not already there # or on its way. try: - if state == 'stopped' and server['status']['state'] == 'POWERED_ON': + if state == "stopped" and server["status"]["state"] == "POWERED_ON": _check_mode(module, True) - oneandone_conn.modify_server_status( - server_id=server['id'], - action='POWER_OFF', - method='SOFTWARE') - elif state == 'running' and server['status']['state'] == 'POWERED_OFF': + oneandone_conn.modify_server_status(server_id=server["id"], action="POWER_OFF", method="SOFTWARE") + elif state == "running" and server["status"]["state"] == "POWERED_OFF": _check_mode(module, True) - oneandone_conn.modify_server_status( - server_id=server['id'], - action='POWER_ON', - method='SOFTWARE') + oneandone_conn.modify_server_status(server_id=server["id"], action="POWER_ON", method="SOFTWARE") except Exception as ex: - module.fail_json( - msg=f"failed to set server {server_id} to state {state}: {ex}") + module.fail_json(msg=f"failed to set server {server_id} to state {state}: {ex}") _check_mode(module, False) @@ -544,17 +527,16 @@ def startstop_server(module, oneandone_conn): wait_timeout = time.time() + wait_timeout while wait_timeout > time.time(): time.sleep(wait_interval) - server = oneandone_conn.get_server(server['id']) # refresh - server_state = server['status']['state'] - if state == 'stopped' and server_state == 'POWERED_OFF': + server = oneandone_conn.get_server(server["id"]) # refresh + server_state = server["status"]["state"] + if state == "stopped" and server_state == "POWERED_OFF": operation_completed = True break - if state == 'running' and server_state == 'POWERED_ON': + if state == "running" and server_state == "POWERED_ON": operation_completed = True break if not operation_completed: - module.fail_json( - msg=f"Timeout waiting for server {server_id} to get to state {state}") + module.fail_json(msg=f"Timeout waiting for server {server_id} to get to state {state}") changed = True server = _insert_network_data(server) @@ -570,13 +552,10 @@ def _auto_increment_hostname(count, hostname): string formatting (%) operator. Otherwise, increment using name-01, name-02, name-03, and so forth. """ - if '%' not in hostname: + if "%" not in hostname: hostname = "%s-%%01d" % hostname - return [ - hostname % i - for i in range(1, count + 1) - ] + return [hostname % i for i in range(1, count + 1)] def _auto_increment_description(count, description): @@ -584,11 +563,8 @@ def _auto_increment_description(count, description): Allow the incremental count in the description when defined with the string formatting (%) operator. Otherwise, repeat the same description. 
""" - if '%' in description: - return [ - description % i - for i in range(1, count + 1) - ] + if "%" in description: + return [description % i for i in range(1, count + 1)] else: return [description] * count @@ -596,85 +572,76 @@ def _auto_increment_description(count, description): def main(): module = AnsibleModule( argument_spec=dict( - auth_token=dict( - type='str', - default=os.environ.get('ONEANDONE_AUTH_TOKEN'), - no_log=True), - api_url=dict( - type='str', - default=os.environ.get('ONEANDONE_API_URL')), - hostname=dict(type='str'), - description=dict(type='str'), - appliance=dict(type='str'), - fixed_instance_size=dict(type='str'), - vcore=dict(type='int'), - cores_per_processor=dict(type='int'), - ram=dict(type='float'), - hdds=dict(type='list', elements='dict'), - count=dict(type='int', default=1), - ssh_key=dict(type='raw', no_log=False), - auto_increment=dict(type='bool', default=True), - server=dict(type='str'), - datacenter=dict( - choices=DATACENTERS, - default='US'), - private_network=dict(type='str'), - firewall_policy=dict(type='str'), - load_balancer=dict(type='str'), - monitoring_policy=dict(type='str'), - server_type=dict(type='str', default='cloud', choices=['cloud', 'baremetal', 'k8s_node']), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - wait_interval=dict(type='int', default=5), - state=dict(type='str', default='present', choices=['present', 'absent', 'running', 'stopped']), + auth_token=dict(type="str", default=os.environ.get("ONEANDONE_AUTH_TOKEN"), no_log=True), + api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")), + hostname=dict(type="str"), + description=dict(type="str"), + appliance=dict(type="str"), + fixed_instance_size=dict(type="str"), + vcore=dict(type="int"), + cores_per_processor=dict(type="int"), + ram=dict(type="float"), + hdds=dict(type="list", elements="dict"), + count=dict(type="int", default=1), + ssh_key=dict(type="raw", no_log=False), + auto_increment=dict(type="bool", default=True), + server=dict(type="str"), + datacenter=dict(choices=DATACENTERS, default="US"), + private_network=dict(type="str"), + firewall_policy=dict(type="str"), + load_balancer=dict(type="str"), + monitoring_policy=dict(type="str"), + server_type=dict(type="str", default="cloud", choices=["cloud", "baremetal", "k8s_node"]), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=600), + wait_interval=dict(type="int", default=5), + state=dict(type="str", default="present", choices=["present", "absent", "running", "stopped"]), ), supports_check_mode=True, - mutually_exclusive=(['fixed_instance_size', 'vcore'], ['fixed_instance_size', 'cores_per_processor'], - ['fixed_instance_size', 'ram'], ['fixed_instance_size', 'hdds'],), - required_together=(['vcore', 'cores_per_processor', 'ram', 'hdds'],) + mutually_exclusive=( + ["fixed_instance_size", "vcore"], + ["fixed_instance_size", "cores_per_processor"], + ["fixed_instance_size", "ram"], + ["fixed_instance_size", "hdds"], + ), + required_together=(["vcore", "cores_per_processor", "ram", "hdds"],), ) if not HAS_ONEANDONE_SDK: - module.fail_json(msg='1and1 required for this module') + module.fail_json(msg="1and1 required for this module") - if not module.params.get('auth_token'): - module.fail_json( - msg='The "auth_token" parameter or ONEANDONE_AUTH_TOKEN environment variable is required.') + if not module.params.get("auth_token"): + module.fail_json(msg='The "auth_token" parameter or ONEANDONE_AUTH_TOKEN environment variable is required.') - if 
not module.params.get('api_url'): - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token')) + if not module.params.get("api_url"): + oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token")) else: oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + api_token=module.params.get("auth_token"), api_url=module.params.get("api_url") + ) - state = module.params.get('state') + state = module.params.get("state") - if state == 'absent': - if not module.params.get('server'): - module.fail_json( - msg="'server' parameter is required for deleting a server.") + if state == "absent": + if not module.params.get("server"): + module.fail_json(msg="'server' parameter is required for deleting a server.") try: (changed, servers) = remove_server(module, oneandone_conn) except Exception as ex: module.fail_json(msg=str(ex)) - elif state in ('running', 'stopped'): - if not module.params.get('server'): - module.fail_json( - msg="'server' parameter is required for starting/stopping a server.") + elif state in ("running", "stopped"): + if not module.params.get("server"): + module.fail_json(msg="'server' parameter is required for starting/stopping a server.") try: (changed, servers) = startstop_server(module, oneandone_conn) except Exception as ex: module.fail_json(msg=str(ex)) - elif state == 'present': - for param in ('hostname', - 'appliance', - 'datacenter'): + elif state == "present": + for param in ("hostname", "appliance", "datacenter"): if not module.params.get(param): - module.fail_json( - msg=f"{param} parameter is required for new server.") + module.fail_json(msg=f"{param} parameter is required for new server.") try: (changed, servers) = create_server(module, oneandone_conn) except Exception as ex: @@ -683,5 +650,5 @@ def main(): module.exit_json(changed=changed, servers=servers) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/onepassword_info.py b/plugins/modules/onepassword_info.py index 74cce3b9647..fac87c6a799 100644 --- a/plugins/modules/onepassword_info.py +++ b/plugins/modules/onepassword_info.py @@ -178,14 +178,13 @@ def __repr__(self): class OnePasswordInfo: - def __init__(self): - self.cli_path = module.params.get('cli_path') - self.auto_login = module.params.get('auto_login') + self.cli_path = module.params.get("cli_path") + self.auto_login = module.params.get("auto_login") self.logged_in = False self.token = None - terms = module.params.get('search_terms') + terms = module.params.get("search_terms") self.terms = self.parse_search_terms(terms) self._config = OnePasswordConfig() @@ -193,7 +192,7 @@ def __init__(self): def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False): if self.token: # Adds the session token to all commands if we're logged in. - args += [to_bytes('--session=') + self.token] + args += [to_bytes("--session=") + self.token] command = [self.cli_path] + args p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE) @@ -206,53 +205,55 @@ def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False): def _parse_field(self, data_json, item_id, field_name, section_title=None): data = json.loads(data_json) - if 'documentAttributes' in data['details']: + if "documentAttributes" in data["details"]: # This is actually a document, let's fetch the document data instead! 
- document = self._run(["get", "document", data['overview']['title']]) - return {'document': document[1].strip()} + document = self._run(["get", "document", data["overview"]["title"]]) + return {"document": document[1].strip()} else: # This is not a document, let's try to find the requested field # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute, # not inside it, so we need to check there first. - if field_name in data['details']: - return {field_name: data['details'][field_name]} + if field_name in data["details"]: + return {field_name: data["details"][field_name]} # Otherwise we continue looking inside the 'fields' attribute for the specified field. else: if section_title is None: - for field_data in data['details'].get('fields', []): - if field_data.get('name', '').lower() == field_name.lower(): - return {field_name: field_data.get('value', '')} + for field_data in data["details"].get("fields", []): + if field_data.get("name", "").lower() == field_name.lower(): + return {field_name: field_data.get("value", "")} # Not found it yet, so now lets see if there are any sections defined # and search through those for the field. If a section was given, we skip # any non-matching sections, otherwise we search them all until we find the field. - for section_data in data['details'].get('sections', []): - if section_title is not None and section_title.lower() != section_data['title'].lower(): + for section_data in data["details"].get("sections", []): + if section_title is not None and section_title.lower() != section_data["title"].lower(): continue - for field_data in section_data.get('fields', []): - if field_data.get('t', '').lower() == field_name.lower(): - return {field_name: field_data.get('v', '')} + for field_data in section_data.get("fields", []): + if field_data.get("t", "").lower() == field_name.lower(): + return {field_name: field_data.get("v", "")} # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded. - optional_section_title = '' if section_title is None else f" in the section '{section_title}'" - module.fail_json(msg=f"Unable to find an item in 1Password named '{item_id}' with the field '{field_name}'{optional_section_title}.") + optional_section_title = "" if section_title is None else f" in the section '{section_title}'" + module.fail_json( + msg=f"Unable to find an item in 1Password named '{item_id}' with the field '{field_name}'{optional_section_title}." 
+ ) def parse_search_terms(self, terms): processed_terms = [] for term in terms: if not isinstance(term, dict): - term = {'name': term} + term = {"name": term} - if 'name' not in term: + if "name" not in term: module.fail_json(msg=f"Missing required 'name' field from search term, got: '{term}'") - term['field'] = term.get('field', 'password') - term['section'] = term.get('section', None) - term['vault'] = term.get('vault', None) + term["field"] = term.get("field", "password") + term["section"] = term.get("section", None) + term["vault"] = term.get("vault", None) processed_terms.append(term) @@ -262,7 +263,7 @@ def get_raw(self, item_id, vault=None): try: args = ["get", "item", item_id] if vault is not None: - args += [f'--vault={vault}'] + args += [f"--vault={vault}"] rc, output, dummy = self._run(args) return output @@ -274,50 +275,56 @@ def get_raw(self, item_id, vault=None): def get_field(self, item_id, field, section=None, vault=None): output = self.get_raw(item_id, vault) - return self._parse_field(output, item_id, field, section) if output != '' else '' + return self._parse_field(output, item_id, field, section) if output != "" else "" def full_login(self): if self.auto_login is not None: - if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'), - self.auto_login.get('secret_key'), self.auto_login.get('master_password')]: - module.fail_json(msg='Unable to perform initial sign in to 1Password. ' - 'subdomain, username, secret_key, and master_password are required to perform initial sign in.') + if None in [ + self.auto_login.get("subdomain"), + self.auto_login.get("username"), + self.auto_login.get("secret_key"), + self.auto_login.get("master_password"), + ]: + module.fail_json( + msg="Unable to perform initial sign in to 1Password. " + "subdomain, username, secret_key, and master_password are required to perform initial sign in." + ) args = [ - 'signin', + "signin", f"{self.auto_login['subdomain']}.1password.com", - to_bytes(self.auto_login['username']), - to_bytes(self.auto_login['secret_key']), - '--output=raw', + to_bytes(self.auto_login["username"]), + to_bytes(self.auto_login["secret_key"]), + "--output=raw", ] try: - rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password'])) + rc, out, err = self._run(args, command_input=to_bytes(self.auto_login["master_password"])) self.token = out.strip() except AnsibleModuleError as e: module.fail_json(msg=f"Failed to perform initial sign in to 1Password: {to_native(e)}") else: - module.fail_json(msg=f"Unable to perform an initial sign in to 1Password. Please run '{self.cli_path} signin' " - "or define credentials in 'auto_login'. See the module documentation for details.") + module.fail_json( + msg=f"Unable to perform an initial sign in to 1Password. Please run '{self.cli_path} signin' " + "or define credentials in 'auto_login'. See the module documentation for details." + ) def get_token(self): # If the config file exists, assume an initial signin has taken place and try basic sign in if os.path.isfile(self._config.config_file_path): - if self.auto_login is not None: - # Since we are not currently signed in, master_password is required at a minimum - if not self.auto_login.get('master_password'): + if not self.auto_login.get("master_password"): module.fail_json(msg="Unable to sign in to 1Password. 
'auto_login.master_password' is required.") # Try signing in using the master_password and a subdomain if one is provided try: - args = ['signin', '--output=raw'] + args = ["signin", "--output=raw"] - if self.auto_login.get('subdomain'): - args = ['signin', self.auto_login['subdomain'], '--output=raw'] + if self.auto_login.get("subdomain"): + args = ["signin", self.auto_login["subdomain"], "--output=raw"] - rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password'])) + rc, out, err = self._run(args, command_input=to_bytes(self.auto_login["master_password"])) self.token = out.strip() except AnsibleModuleError: @@ -332,7 +339,7 @@ def get_token(self): def assert_logged_in(self): try: - rc, out, err = self._run(['get', 'account'], ignore_errors=True) + rc, out, err = self._run(["get", "account"], ignore_errors=True) if rc == 0: self.logged_in = True if not self.logged_in: @@ -348,16 +355,16 @@ def run(self): self.assert_logged_in() for term in self.terms: - value = self.get_field(term['name'], term['field'], term['section'], term['vault']) + value = self.get_field(term["name"], term["field"], term["section"], term["vault"]) - if term['name'] in result: + if term["name"] in result: # If we already have a result for this key, we have to append this result dictionary # to the existing one. This is only applicable when there is a single item # in 1Password which has two different fields, and we want to retrieve both of them. - result[term['name']].update(value) + result[term["name"]].update(value) else: # If this is the first result for this key, simply set it. - result[term['name']] = value + result[term["name"]] = value return result @@ -366,22 +373,25 @@ def main(): global module module = AnsibleModule( argument_spec=dict( - cli_path=dict(type='path', default='op'), - auto_login=dict(type='dict', options=dict( - subdomain=dict(type='str'), - username=dict(type='str'), - master_password=dict(required=True, type='str', no_log=True), - secret_key=dict(type='str', no_log=True), - )), - search_terms=dict(required=True, type='list', elements='dict'), + cli_path=dict(type="path", default="op"), + auto_login=dict( + type="dict", + options=dict( + subdomain=dict(type="str"), + username=dict(type="str"), + master_password=dict(required=True, type="str", no_log=True), + secret_key=dict(type="str", no_log=True), + ), + ), + search_terms=dict(required=True, type="list", elements="dict"), ), - supports_check_mode=True + supports_check_mode=True, ) - results = {'onepassword': OnePasswordInfo().run()} + results = {"onepassword": OnePasswordInfo().run()} module.exit_json(changed=False, **results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneview_datacenter_info.py b/plugins/modules/oneview_datacenter_info.py index 3edc67343f4..31a486ee09c 100644 --- a/plugins/modules/oneview_datacenter_info.py +++ b/plugins/modules/oneview_datacenter_info.py @@ -121,11 +121,7 @@ class DatacenterInfoModule(OneViewModuleBase): - argument_spec = dict( - name=dict(type='str'), - options=dict(type='list', elements='str'), - params=dict(type='dict') - ) + argument_spec = dict(name=dict(type="str"), options=dict(type="list", elements="str"), params=dict(type="dict")) def __init__(self): super().__init__( @@ -134,22 +130,21 @@ def __init__(self): ) def execute_module(self): - client = self.oneview_client.datacenters info = {} - if self.module.params.get('name'): - datacenters = client.get_by('name', self.module.params['name']) + if 
self.module.params.get("name"): + datacenters = client.get_by("name", self.module.params["name"]) - if self.options and 'visualContent' in self.options: + if self.options and "visualContent" in self.options: if datacenters: - info['datacenter_visual_content'] = client.get_visual_content(datacenters[0]['uri']) + info["datacenter_visual_content"] = client.get_visual_content(datacenters[0]["uri"]) else: - info['datacenter_visual_content'] = None + info["datacenter_visual_content"] = None - info['datacenters'] = datacenters + info["datacenters"] = datacenters else: - info['datacenters'] = client.get_all(**self.facts_params) + info["datacenters"] = client.get_all(**self.facts_params) return dict(changed=False, **info) @@ -158,5 +153,5 @@ def main(): DatacenterInfoModule().run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneview_enclosure_info.py b/plugins/modules/oneview_enclosure_info.py index 16745a2e0f4..98d7cc2044e 100644 --- a/plugins/modules/oneview_enclosure_info.py +++ b/plugins/modules/oneview_enclosure_info.py @@ -175,11 +175,7 @@ class EnclosureInfoModule(OneViewModuleBase): - argument_spec = dict( - name=dict(type='str'), - options=dict(type='list', elements='raw'), - params=dict(type='dict') - ) + argument_spec = dict(name=dict(type="str"), options=dict(type="list", elements="raw"), params=dict(type="dict")) def __init__(self): super().__init__( @@ -188,58 +184,54 @@ def __init__(self): ) def execute_module(self): - info = {} - if self.module.params['name']: - enclosures = self._get_by_name(self.module.params['name']) + if self.module.params["name"]: + enclosures = self._get_by_name(self.module.params["name"]) if self.options and enclosures: info = self._gather_optional_info(self.options, enclosures[0]) else: enclosures = self.oneview_client.enclosures.get_all(**self.facts_params) - info['enclosures'] = enclosures + info["enclosures"] = enclosures return dict(changed=False, **info) def _gather_optional_info(self, options, enclosure): - enclosure_client = self.oneview_client.enclosures info = {} - if options.get('script'): - info['enclosure_script'] = enclosure_client.get_script(enclosure['uri']) - if options.get('environmentalConfiguration'): - env_config = enclosure_client.get_environmental_configuration(enclosure['uri']) - info['enclosure_environmental_configuration'] = env_config - if options.get('utilization'): - info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization']) + if options.get("script"): + info["enclosure_script"] = enclosure_client.get_script(enclosure["uri"]) + if options.get("environmentalConfiguration"): + env_config = enclosure_client.get_environmental_configuration(enclosure["uri"]) + info["enclosure_environmental_configuration"] = env_config + if options.get("utilization"): + info["enclosure_utilization"] = self._get_utilization(enclosure, options["utilization"]) return info def _get_utilization(self, enclosure, params): - fields = view = refresh = filter = '' + fields = view = refresh = filter = "" if isinstance(params, dict): - fields = params.get('fields') - view = params.get('view') - refresh = params.get('refresh') - filter = params.get('filter') + fields = params.get("fields") + view = params.get("view") + refresh = params.get("refresh") + filter = params.get("filter") - return self.oneview_client.enclosures.get_utilization(enclosure['uri'], - fields=fields, - filter=filter, - refresh=refresh, - view=view) + return self.oneview_client.enclosures.get_utilization( + 
enclosure["uri"], fields=fields, filter=filter, refresh=refresh, view=view + ) def _get_by_name(self, name): - return self.oneview_client.enclosures.get_by('name', name) + return self.oneview_client.enclosures.get_by("name", name) def main(): EnclosureInfoModule().run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneview_ethernet_network.py b/plugins/modules/oneview_ethernet_network.py index 2d423964a77..fa4a26d99da 100644 --- a/plugins/modules/oneview_ethernet_network.py +++ b/plugins/modules/oneview_ethernet_network.py @@ -122,29 +122,31 @@ type: dict """ -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound +from ansible_collections.community.general.plugins.module_utils.oneview import ( + OneViewModuleBase, + OneViewModuleResourceNotFound, +) class EthernetNetworkModule(OneViewModuleBase): - MSG_CREATED = 'Ethernet Network created successfully.' - MSG_UPDATED = 'Ethernet Network updated successfully.' - MSG_DELETED = 'Ethernet Network deleted successfully.' - MSG_ALREADY_PRESENT = 'Ethernet Network is already present.' - MSG_ALREADY_ABSENT = 'Ethernet Network is already absent.' + MSG_CREATED = "Ethernet Network created successfully." + MSG_UPDATED = "Ethernet Network updated successfully." + MSG_DELETED = "Ethernet Network deleted successfully." + MSG_ALREADY_PRESENT = "Ethernet Network is already present." + MSG_ALREADY_ABSENT = "Ethernet Network is already absent." - MSG_BULK_CREATED = 'Ethernet Networks created successfully.' - MSG_MISSING_BULK_CREATED = 'Some missing Ethernet Networks were created successfully.' - MSG_BULK_ALREADY_EXIST = 'The specified Ethernet Networks already exist.' - MSG_CONNECTION_TEMPLATE_RESET = 'Ethernet Network connection template was reset to the default.' - MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network was not found.' + MSG_BULK_CREATED = "Ethernet Networks created successfully." + MSG_MISSING_BULK_CREATED = "Some missing Ethernet Networks were created successfully." + MSG_BULK_ALREADY_EXIST = "The specified Ethernet Networks already exist." + MSG_CONNECTION_TEMPLATE_RESET = "Ethernet Network connection template was reset to the default." + MSG_ETHERNET_NETWORK_NOT_FOUND = "Ethernet Network was not found." 
- RESOURCE_FACT_NAME = 'ethernet_network' + RESOURCE_FACT_NAME = "ethernet_network" def __init__(self): - argument_spec = dict( - state=dict(type='str', default='present', choices=['absent', 'default_bandwidth_reset', 'present']), - data=dict(type='dict', required=True), + state=dict(type="str", default="present", choices=["absent", "default_bandwidth_reset", "present"]), + data=dict(type="dict", required=True), ) super().__init__(additional_arg_spec=argument_spec, validate_etag_support=True) @@ -152,79 +154,78 @@ def __init__(self): self.resource_client = self.oneview_client.ethernet_networks def execute_module(self): + changed, msg, ansible_facts, resource = False, "", {}, None - changed, msg, ansible_facts, resource = False, '', {}, None - - if self.data.get('name'): - resource = self.get_by_name(self.data['name']) + if self.data.get("name"): + resource = self.get_by_name(self.data["name"]) - if self.state == 'present': - if self.data.get('vlanIdRange'): + if self.state == "present": + if self.data.get("vlanIdRange"): return self._bulk_present() else: return self._present(resource) - elif self.state == 'absent': + elif self.state == "absent": return self.resource_absent(resource) - elif self.state == 'default_bandwidth_reset': + elif self.state == "default_bandwidth_reset": changed, msg, ansible_facts = self._default_bandwidth_reset(resource) return dict(changed=changed, msg=msg, ansible_facts=ansible_facts) def _present(self, resource): - - bandwidth = self.data.pop('bandwidth', None) - scope_uris = self.data.pop('scopeUris', None) + bandwidth = self.data.pop("bandwidth", None) + scope_uris = self.data.pop("scopeUris", None) result = self.resource_present(resource, self.RESOURCE_FACT_NAME) if bandwidth: - if self._update_connection_template(result['ansible_facts']['ethernet_network'], bandwidth)[0]: - result['changed'] = True - result['msg'] = self.MSG_UPDATED + if self._update_connection_template(result["ansible_facts"]["ethernet_network"], bandwidth)[0]: + result["changed"] = True + result["msg"] = self.MSG_UPDATED if scope_uris is not None: - result = self.resource_scopes_set(result, 'ethernet_network', scope_uris) + result = self.resource_scopes_set(result, "ethernet_network", scope_uris) return result def _bulk_present(self): - vlan_id_range = self.data['vlanIdRange'] + vlan_id_range = self.data["vlanIdRange"] result = dict(ansible_facts={}) - ethernet_networks = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range) + ethernet_networks = self.resource_client.get_range(self.data["namePrefix"], vlan_id_range) if not ethernet_networks: self.resource_client.create_bulk(self.data) - result['changed'] = True - result['msg'] = self.MSG_BULK_CREATED + result["changed"] = True + result["msg"] = self.MSG_BULK_CREATED else: vlan_ids = self.resource_client.dissociate_values_or_ranges(vlan_id_range) for net in ethernet_networks[:]: - vlan_ids.remove(net['vlanId']) + vlan_ids.remove(net["vlanId"]) if len(vlan_ids) == 0: - result['msg'] = self.MSG_BULK_ALREADY_EXIST - result['changed'] = False + result["msg"] = self.MSG_BULK_ALREADY_EXIST + result["changed"] = False else: if len(vlan_ids) == 1: - self.data['vlanIdRange'] = f'{vlan_ids[0]}-{vlan_ids[0]}' + self.data["vlanIdRange"] = f"{vlan_ids[0]}-{vlan_ids[0]}" else: - self.data['vlanIdRange'] = ','.join(str(s) for s in vlan_ids) + self.data["vlanIdRange"] = ",".join(str(s) for s in vlan_ids) self.resource_client.create_bulk(self.data) - result['changed'] = True - result['msg'] = self.MSG_MISSING_BULK_CREATED - 
result['ansible_facts']['ethernet_network_bulk'] = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range) + result["changed"] = True + result["msg"] = self.MSG_MISSING_BULK_CREATED + result["ansible_facts"]["ethernet_network_bulk"] = self.resource_client.get_range( + self.data["namePrefix"], vlan_id_range + ) return result def _update_connection_template(self, ethernet_network, bandwidth): - - if 'connectionTemplateUri' not in ethernet_network: + if "connectionTemplateUri" not in ethernet_network: return False, None - connection_template = self.oneview_client.connection_templates.get(ethernet_network['connectionTemplateUri']) + connection_template = self.oneview_client.connection_templates.get(ethernet_network["connectionTemplateUri"]) merged_data = connection_template.copy() - merged_data.update({'bandwidth': bandwidth}) + merged_data.update({"bandwidth": bandwidth}) if not self.compare(connection_template, merged_data): connection_template = self.oneview_client.connection_templates.update(merged_data) @@ -233,21 +234,25 @@ def _update_connection_template(self, ethernet_network, bandwidth): return False, None def _default_bandwidth_reset(self, resource): - if not resource: raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND) default_connection_template = self.oneview_client.connection_templates.get_default() - changed, connection_template = self._update_connection_template(resource, default_connection_template['bandwidth']) + changed, connection_template = self._update_connection_template( + resource, default_connection_template["bandwidth"] + ) - return changed, self.MSG_CONNECTION_TEMPLATE_RESET, dict( - ethernet_network_connection_template=connection_template) + return ( + changed, + self.MSG_CONNECTION_TEMPLATE_RESET, + dict(ethernet_network_connection_template=connection_template), + ) def main(): EthernetNetworkModule().run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneview_ethernet_network_info.py b/plugins/modules/oneview_ethernet_network_info.py index c963175e491..79298fa8d48 100644 --- a/plugins/modules/oneview_ethernet_network_info.py +++ b/plugins/modules/oneview_ethernet_network_info.py @@ -115,11 +115,7 @@ class EthernetNetworkInfoModule(OneViewModuleBase): - argument_spec = dict( - name=dict(type='str'), - options=dict(type='list', elements='str'), - params=dict(type='dict') - ) + argument_spec = dict(name=dict(type="str"), options=dict(type="list", elements="str"), params=dict(type="dict")) def __init__(self): super().__init__( @@ -131,35 +127,34 @@ def __init__(self): def execute_module(self): info = {} - if self.module.params['name']: - ethernet_networks = self.resource_client.get_by('name', self.module.params['name']) + if self.module.params["name"]: + ethernet_networks = self.resource_client.get_by("name", self.module.params["name"]) - if self.module.params.get('options') and ethernet_networks: + if self.module.params.get("options") and ethernet_networks: info = self.__gather_optional_info(ethernet_networks[0]) else: ethernet_networks = self.resource_client.get_all(**self.facts_params) - info['ethernet_networks'] = ethernet_networks + info["ethernet_networks"] = ethernet_networks return dict(changed=False, **info) def __gather_optional_info(self, ethernet_network): - info = {} - if self.options.get('associatedProfiles'): - info['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network) - if self.options.get('associatedUplinkGroups'): - 
info['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network) + if self.options.get("associatedProfiles"): + info["enet_associated_profiles"] = self.__get_associated_profiles(ethernet_network) + if self.options.get("associatedUplinkGroups"): + info["enet_associated_uplink_groups"] = self.__get_associated_uplink_groups(ethernet_network) return info def __get_associated_profiles(self, ethernet_network): - associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri']) + associated_profiles = self.resource_client.get_associated_profiles(ethernet_network["uri"]) return [self.oneview_client.server_profiles.get(x) for x in associated_profiles] def __get_associated_uplink_groups(self, ethernet_network): - uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri']) + uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network["uri"]) return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups] @@ -167,5 +162,5 @@ def main(): EthernetNetworkInfoModule().run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneview_fc_network.py b/plugins/modules/oneview_fc_network.py index d6563157cdf..f342aa90c39 100644 --- a/plugins/modules/oneview_fc_network.py +++ b/plugins/modules/oneview_fc_network.py @@ -84,37 +84,35 @@ class FcNetworkModule(OneViewModuleBase): - MSG_CREATED = 'FC Network created successfully.' - MSG_UPDATED = 'FC Network updated successfully.' - MSG_DELETED = 'FC Network deleted successfully.' - MSG_ALREADY_PRESENT = 'FC Network is already present.' - MSG_ALREADY_ABSENT = 'FC Network is already absent.' - RESOURCE_FACT_NAME = 'fc_network' + MSG_CREATED = "FC Network created successfully." + MSG_UPDATED = "FC Network updated successfully." + MSG_DELETED = "FC Network deleted successfully." + MSG_ALREADY_PRESENT = "FC Network is already present." + MSG_ALREADY_ABSENT = "FC Network is already absent." 
+ RESOURCE_FACT_NAME = "fc_network" def __init__(self): - - additional_arg_spec = dict(data=dict(required=True, type='dict'), - state=dict( - required=True, - choices=['present', 'absent'])) + additional_arg_spec = dict( + data=dict(required=True, type="dict"), state=dict(required=True, choices=["present", "absent"]) + ) super().__init__(additional_arg_spec=additional_arg_spec, validate_etag_support=True) self.resource_client = self.oneview_client.fc_networks def execute_module(self): - resource = self.get_by_name(self.data['name']) + resource = self.get_by_name(self.data["name"]) - if self.state == 'present': + if self.state == "present": return self._present(resource) else: return self.resource_absent(resource) def _present(self, resource): - scope_uris = self.data.pop('scopeUris', None) + scope_uris = self.data.pop("scopeUris", None) result = self.resource_present(resource, self.RESOURCE_FACT_NAME) if scope_uris is not None: - result = self.resource_scopes_set(result, 'fc_network', scope_uris) + result = self.resource_scopes_set(result, "fc_network", scope_uris) return result @@ -122,5 +120,5 @@ def main(): FcNetworkModule().run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneview_fc_network_info.py b/plugins/modules/oneview_fc_network_info.py index 19c1e2af030..680a12deceb 100644 --- a/plugins/modules/oneview_fc_network_info.py +++ b/plugins/modules/oneview_fc_network_info.py @@ -83,11 +83,7 @@ class FcNetworkInfoModule(OneViewModuleBase): def __init__(self): - - argument_spec = dict( - name=dict(type='str'), - params=dict(type='dict') - ) + argument_spec = dict(name=dict(type="str"), params=dict(type="dict")) super().__init__( additional_arg_spec=argument_spec, @@ -95,9 +91,8 @@ def __init__(self): ) def execute_module(self): - - if self.module.params['name']: - fc_networks = self.oneview_client.fc_networks.get_by('name', self.module.params['name']) + if self.module.params["name"]: + fc_networks = self.oneview_client.fc_networks.get_by("name", self.module.params["name"]) else: fc_networks = self.oneview_client.fc_networks.get_all(**self.facts_params) @@ -108,5 +103,5 @@ def main(): FcNetworkInfoModule().run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneview_fcoe_network.py b/plugins/modules/oneview_fcoe_network.py index 8e4502a3555..e62a688246a 100644 --- a/plugins/modules/oneview_fcoe_network.py +++ b/plugins/modules/oneview_fcoe_network.py @@ -81,36 +81,35 @@ class FcoeNetworkModule(OneViewModuleBase): - MSG_CREATED = 'FCoE Network created successfully.' - MSG_UPDATED = 'FCoE Network updated successfully.' - MSG_DELETED = 'FCoE Network deleted successfully.' - MSG_ALREADY_PRESENT = 'FCoE Network is already present.' - MSG_ALREADY_ABSENT = 'FCoE Network is already absent.' - RESOURCE_FACT_NAME = 'fcoe_network' + MSG_CREATED = "FCoE Network created successfully." + MSG_UPDATED = "FCoE Network updated successfully." + MSG_DELETED = "FCoE Network deleted successfully." + MSG_ALREADY_PRESENT = "FCoE Network is already present." + MSG_ALREADY_ABSENT = "FCoE Network is already absent." 
+ RESOURCE_FACT_NAME = "fcoe_network" def __init__(self): - - additional_arg_spec = dict(data=dict(required=True, type='dict'), - state=dict(default='present', - choices=['present', 'absent'])) + additional_arg_spec = dict( + data=dict(required=True, type="dict"), state=dict(default="present", choices=["present", "absent"]) + ) super().__init__(additional_arg_spec=additional_arg_spec, validate_etag_support=True) self.resource_client = self.oneview_client.fcoe_networks def execute_module(self): - resource = self.get_by_name(self.data.get('name')) + resource = self.get_by_name(self.data.get("name")) - if self.state == 'present': + if self.state == "present": return self.__present(resource) - elif self.state == 'absent': + elif self.state == "absent": return self.resource_absent(resource) def __present(self, resource): - scope_uris = self.data.pop('scopeUris', None) + scope_uris = self.data.pop("scopeUris", None) result = self.resource_present(resource, self.RESOURCE_FACT_NAME) if scope_uris is not None: - result = self.resource_scopes_set(result, 'fcoe_network', scope_uris) + result = self.resource_scopes_set(result, "fcoe_network", scope_uris) return result @@ -118,5 +117,5 @@ def main(): FcoeNetworkModule().run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneview_fcoe_network_info.py b/plugins/modules/oneview_fcoe_network_info.py index c1ab8a8c075..3b44a678d3b 100644 --- a/plugins/modules/oneview_fcoe_network_info.py +++ b/plugins/modules/oneview_fcoe_network_info.py @@ -83,8 +83,8 @@ class FcoeNetworkInfoModule(OneViewModuleBase): def __init__(self): argument_spec = dict( - name=dict(type='str'), - params=dict(type='dict'), + name=dict(type="str"), + params=dict(type="dict"), ) super().__init__( @@ -93,9 +93,8 @@ def __init__(self): ) def execute_module(self): - - if self.module.params['name']: - fcoe_networks = self.oneview_client.fcoe_networks.get_by('name', self.module.params['name']) + if self.module.params["name"]: + fcoe_networks = self.oneview_client.fcoe_networks.get_by("name", self.module.params["name"]) else: fcoe_networks = self.oneview_client.fcoe_networks.get_all(**self.facts_params) @@ -106,5 +105,5 @@ def main(): FcoeNetworkInfoModule().run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneview_logical_interconnect_group.py b/plugins/modules/oneview_logical_interconnect_group.py index 8da2be4aafd..01f60879d07 100644 --- a/plugins/modules/oneview_logical_interconnect_group.py +++ b/plugins/modules/oneview_logical_interconnect_group.py @@ -100,61 +100,64 @@ type: dict """ -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound +from ansible_collections.community.general.plugins.module_utils.oneview import ( + OneViewModuleBase, + OneViewModuleResourceNotFound, +) class LogicalInterconnectGroupModule(OneViewModuleBase): - MSG_CREATED = 'Logical Interconnect Group created successfully.' - MSG_UPDATED = 'Logical Interconnect Group updated successfully.' - MSG_DELETED = 'Logical Interconnect Group deleted successfully.' - MSG_ALREADY_PRESENT = 'Logical Interconnect Group is already present.' - MSG_ALREADY_ABSENT = 'Logical Interconnect Group is already absent.' - MSG_INTERCONNECT_TYPE_NOT_FOUND = 'Interconnect Type was not found.' + MSG_CREATED = "Logical Interconnect Group created successfully." + MSG_UPDATED = "Logical Interconnect Group updated successfully." 
+ MSG_DELETED = "Logical Interconnect Group deleted successfully." + MSG_ALREADY_PRESENT = "Logical Interconnect Group is already present." + MSG_ALREADY_ABSENT = "Logical Interconnect Group is already absent." + MSG_INTERCONNECT_TYPE_NOT_FOUND = "Interconnect Type was not found." - RESOURCE_FACT_NAME = 'logical_interconnect_group' + RESOURCE_FACT_NAME = "logical_interconnect_group" def __init__(self): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), - data=dict(required=True, type='dict') + state=dict(default="present", choices=["present", "absent"]), data=dict(required=True, type="dict") ) super().__init__(additional_arg_spec=argument_spec, validate_etag_support=True) self.resource_client = self.oneview_client.logical_interconnect_groups def execute_module(self): - resource = self.get_by_name(self.data['name']) + resource = self.get_by_name(self.data["name"]) - if self.state == 'present': + if self.state == "present": return self.__present(resource) - elif self.state == 'absent': + elif self.state == "absent": return self.resource_absent(resource) def __present(self, resource): - scope_uris = self.data.pop('scopeUris', None) + scope_uris = self.data.pop("scopeUris", None) self.__replace_name_by_uris(self.data) result = self.resource_present(resource, self.RESOURCE_FACT_NAME) if scope_uris is not None: - result = self.resource_scopes_set(result, 'logical_interconnect_group', scope_uris) + result = self.resource_scopes_set(result, "logical_interconnect_group", scope_uris) return result def __replace_name_by_uris(self, data): - map_template = data.get('interconnectMapTemplate') + map_template = data.get("interconnectMapTemplate") if map_template: - map_entry_templates = map_template.get('interconnectMapEntryTemplates') + map_entry_templates = map_template.get("interconnectMapEntryTemplates") if map_entry_templates: for value in map_entry_templates: - permitted_interconnect_type_name = value.pop('permittedInterconnectTypeName', None) + permitted_interconnect_type_name = value.pop("permittedInterconnectTypeName", None) if permitted_interconnect_type_name: - value['permittedInterconnectTypeUri'] = self.__get_interconnect_type_by_name( - permitted_interconnect_type_name).get('uri') + value["permittedInterconnectTypeUri"] = self.__get_interconnect_type_by_name( + permitted_interconnect_type_name + ).get("uri") def __get_interconnect_type_by_name(self, name): - i_type = self.oneview_client.interconnect_types.get_by('name', name) + i_type = self.oneview_client.interconnect_types.get_by("name", name) if i_type: return i_type[0] else: @@ -165,5 +168,5 @@ def main(): LogicalInterconnectGroupModule().run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneview_logical_interconnect_group_info.py b/plugins/modules/oneview_logical_interconnect_group_info.py index d7914879b62..accd6aa3307 100644 --- a/plugins/modules/oneview_logical_interconnect_group_info.py +++ b/plugins/modules/oneview_logical_interconnect_group_info.py @@ -95,10 +95,9 @@ class LogicalInterconnectGroupInfoModule(OneViewModuleBase): def __init__(self): - argument_spec = dict( - name=dict(type='str'), - params=dict(type='dict'), + name=dict(type="str"), + params=dict(type="dict"), ) super().__init__( @@ -107,8 +106,8 @@ def __init__(self): ) def execute_module(self): - if self.module.params.get('name'): - ligs = self.oneview_client.logical_interconnect_groups.get_by('name', self.module.params['name']) + if self.module.params.get("name"): + ligs = 
self.oneview_client.logical_interconnect_groups.get_by("name", self.module.params["name"]) else: ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params) @@ -119,5 +118,5 @@ def main(): LogicalInterconnectGroupInfoModule().run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneview_network_set.py b/plugins/modules/oneview_network_set.py index e7686628512..7649d613df5 100644 --- a/plugins/modules/oneview_network_set.py +++ b/plugins/modules/oneview_network_set.py @@ -92,36 +92,39 @@ type: dict """ -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound +from ansible_collections.community.general.plugins.module_utils.oneview import ( + OneViewModuleBase, + OneViewModuleResourceNotFound, +) class NetworkSetModule(OneViewModuleBase): - MSG_CREATED = 'Network Set created successfully.' - MSG_UPDATED = 'Network Set updated successfully.' - MSG_DELETED = 'Network Set deleted successfully.' - MSG_ALREADY_PRESENT = 'Network Set is already present.' - MSG_ALREADY_ABSENT = 'Network Set is already absent.' - MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network not found: ' - RESOURCE_FACT_NAME = 'network_set' + MSG_CREATED = "Network Set created successfully." + MSG_UPDATED = "Network Set updated successfully." + MSG_DELETED = "Network Set deleted successfully." + MSG_ALREADY_PRESENT = "Network Set is already present." + MSG_ALREADY_ABSENT = "Network Set is already absent." + MSG_ETHERNET_NETWORK_NOT_FOUND = "Ethernet Network not found: " + RESOURCE_FACT_NAME = "network_set" argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), - data=dict(required=True, type='dict')) + state=dict(default="present", choices=["present", "absent"]), data=dict(required=True, type="dict") + ) def __init__(self): super().__init__(additional_arg_spec=self.argument_spec, validate_etag_support=True) self.resource_client = self.oneview_client.network_sets def execute_module(self): - resource = self.get_by_name(self.data.get('name')) + resource = self.get_by_name(self.data.get("name")) - if self.state == 'present': + if self.state == "present": return self._present(resource) - elif self.state == 'absent': + elif self.state == "absent": return self.resource_absent(resource) def _present(self, resource): - scope_uris = self.data.pop('scopeUris', None) + scope_uris = self.data.pop("scopeUris", None) self._replace_network_name_by_uri(self.data) result = self.resource_present(resource, self.RESOURCE_FACT_NAME) if scope_uris is not None: @@ -129,27 +132,27 @@ def _present(self, resource): return result def _get_ethernet_network_by_name(self, name): - result = self.oneview_client.ethernet_networks.get_by('name', name) + result = self.oneview_client.ethernet_networks.get_by("name", name) return result[0] if result else None def _get_network_uri(self, network_name_or_uri): - if network_name_or_uri.startswith('/rest/ethernet-networks'): + if network_name_or_uri.startswith("/rest/ethernet-networks"): return network_name_or_uri else: enet_network = self._get_ethernet_network_by_name(network_name_or_uri) if enet_network: - return enet_network['uri'] + return enet_network["uri"] else: raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND + network_name_or_uri) def _replace_network_name_by_uri(self, data): - if 'networkUris' in data: - data['networkUris'] = [self._get_network_uri(x) for x in data['networkUris']] + if "networkUris" in data: + 
data["networkUris"] = [self._get_network_uri(x) for x in data["networkUris"]] def main(): NetworkSetModule().run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneview_network_set_info.py b/plugins/modules/oneview_network_set_info.py index 55053a90eae..b55ae60fa5e 100644 --- a/plugins/modules/oneview_network_set_info.py +++ b/plugins/modules/oneview_network_set_info.py @@ -135,9 +135,9 @@ class NetworkSetInfoModule(OneViewModuleBase): argument_spec = dict( - name=dict(type='str'), - options=dict(type='list', elements='str'), - params=dict(type='dict'), + name=dict(type="str"), + options=dict(type="list", elements="str"), + params=dict(type="dict"), ) def __init__(self): @@ -147,14 +147,13 @@ def __init__(self): ) def execute_module(self): + name = self.module.params.get("name") - name = self.module.params.get('name') - - if 'withoutEthernet' in self.options: - filter_by_name = f"\"'name'='{name}'\"" if name else '' + if "withoutEthernet" in self.options: + filter_by_name = f"\"'name'='{name}'\"" if name else "" network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name) elif name: - network_sets = self.oneview_client.network_sets.get_by('name', name) + network_sets = self.oneview_client.network_sets.get_by("name", name) else: network_sets = self.oneview_client.network_sets.get_all(**self.facts_params) @@ -165,5 +164,5 @@ def main(): NetworkSetInfoModule().run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneview_san_manager.py b/plugins/modules/oneview_san_manager.py index f77457e07b6..7438c8a5641 100644 --- a/plugins/modules/oneview_san_manager.py +++ b/plugins/modules/oneview_san_manager.py @@ -128,20 +128,23 @@ type: dict """ -from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError +from ansible_collections.community.general.plugins.module_utils.oneview import ( + OneViewModuleBase, + OneViewModuleValueError, +) class SanManagerModule(OneViewModuleBase): - MSG_CREATED = 'SAN Manager created successfully.' - MSG_UPDATED = 'SAN Manager updated successfully.' - MSG_DELETED = 'SAN Manager deleted successfully.' - MSG_ALREADY_PRESENT = 'SAN Manager is already present.' - MSG_ALREADY_ABSENT = 'SAN Manager is already absent.' + MSG_CREATED = "SAN Manager created successfully." + MSG_UPDATED = "SAN Manager updated successfully." + MSG_DELETED = "SAN Manager deleted successfully." + MSG_ALREADY_PRESENT = "SAN Manager is already present." + MSG_ALREADY_ABSENT = "SAN Manager is already absent." MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND = "The provider '{0}' was not found." 
argument_spec = dict( - state=dict(type='str', default='present', choices=['absent', 'present', 'connection_information_set']), - data=dict(type='dict', required=True) + state=dict(type="str", default="present", choices=["absent", "present", "connection_information_set"]), + data=dict(type="dict", required=True), ) def __init__(self): @@ -149,12 +152,12 @@ def __init__(self): self.resource_client = self.oneview_client.san_managers def execute_module(self): - if self.data.get('connectionInfo'): - for connection_hash in self.data.get('connectionInfo'): - if connection_hash.get('name') == 'Host': - resource_name = connection_hash.get('value') - elif self.data.get('name'): - resource_name = self.data.get('name') + if self.data.get("connectionInfo"): + for connection_hash in self.data.get("connectionInfo"): + if connection_hash.get("name") == "Host": + resource_name = connection_hash.get("value") + elif self.data.get("name"): + resource_name = self.data.get("name") else: msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. ' msg += 'If a "connectionInfo" is provided, the "Host" name is considered as the "name" for the resource.' @@ -162,33 +165,33 @@ def execute_module(self): resource = self.resource_client.get_by_name(resource_name) - if self.state == 'present': + if self.state == "present": changed, msg, san_manager = self._present(resource) return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager)) - elif self.state == 'absent': - return self.resource_absent(resource, method='remove') + elif self.state == "absent": + return self.resource_absent(resource, method="remove") - elif self.state == 'connection_information_set': + elif self.state == "connection_information_set": changed, msg, san_manager = self._connection_information_set(resource) return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager)) def _present(self, resource): if not resource: - provider_uri = self.data.get('providerUri', self._get_provider_uri_by_display_name(self.data)) + provider_uri = self.data.get("providerUri", self._get_provider_uri_by_display_name(self.data)) return True, self.MSG_CREATED, self.resource_client.add(self.data, provider_uri) else: merged_data = resource.copy() merged_data.update(self.data) # Remove 'connectionInfo' from comparison, since it is not possible to validate it. 
- resource.pop('connectionInfo', None) - merged_data.pop('connectionInfo', None) + resource.pop("connectionInfo", None) + merged_data.pop("connectionInfo", None) if self.compare(resource, merged_data): return False, self.MSG_ALREADY_PRESENT, resource else: - updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri']) + updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource["uri"]) return True, self.MSG_UPDATED, updated_san_manager def _connection_information_set(self, resource): @@ -197,14 +200,14 @@ def _connection_information_set(self, resource): else: merged_data = resource.copy() merged_data.update(self.data) - merged_data.pop('refreshState', None) - if not self.data.get('connectionInfo', None): - raise OneViewModuleValueError('A connectionInfo field is required for this operation.') - updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri']) + merged_data.pop("refreshState", None) + if not self.data.get("connectionInfo", None): + raise OneViewModuleValueError("A connectionInfo field is required for this operation.") + updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource["uri"]) return True, self.MSG_UPDATED, updated_san_manager def _get_provider_uri_by_display_name(self, data): - display_name = data.get('providerDisplayName') + display_name = data.get("providerDisplayName") provider_uri = self.resource_client.get_provider_uri(display_name) if not provider_uri: @@ -217,5 +220,5 @@ def main(): SanManagerModule().run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/oneview_san_manager_info.py b/plugins/modules/oneview_san_manager_info.py index 86d16491a36..c3f3692bcd1 100644 --- a/plugins/modules/oneview_san_manager_info.py +++ b/plugins/modules/oneview_san_manager_info.py @@ -89,10 +89,7 @@ class SanManagerInfoModule(OneViewModuleBase): - argument_spec = dict( - provider_display_name=dict(type='str'), - params=dict(type='dict') - ) + argument_spec = dict(provider_display_name=dict(type="str"), params=dict(type="dict")) def __init__(self): super().__init__( @@ -102,8 +99,8 @@ def __init__(self): self.resource_client = self.oneview_client.san_managers def execute_module(self): - if self.module.params.get('provider_display_name'): - provider_display_name = self.module.params['provider_display_name'] + if self.module.params.get("provider_display_name"): + provider_display_name = self.module.params["provider_display_name"] san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name) if san_manager: resources = [san_manager] @@ -119,5 +116,5 @@ def main(): SanManagerInfoModule().run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/online_server_info.py b/plugins/modules/online_server_info.py index 7177076b968..8e04373c20c 100644 --- a/plugins/modules/online_server_info.py +++ b/plugins/modules/online_server_info.py @@ -130,15 +130,16 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.online import ( - Online, OnlineException, online_argument_spec + Online, + OnlineException, + online_argument_spec, ) class OnlineServerInfo(Online): - def __init__(self, module): super().__init__(module) - self.name = 'api/v1/server' + self.name = "api/v1/server" def _get_server_detail(self, server_path): try: @@ -149,10 +150,7 @@ def _get_server_detail(self, 
server_path): def all_detailed_servers(self): servers_api_path = self.get_resources() - server_data = ( - self._get_server_detail(server_api_path) - for server_api_path in servers_api_path - ) + server_data = (self._get_server_detail(server_api_path) for server_api_path in servers_api_path) return [s for s in server_data if s is not None] @@ -165,12 +163,10 @@ def main(): try: servers_info = OnlineServerInfo(module).all_detailed_servers() - module.exit_json( - online_server_info=servers_info - ) + module.exit_json(online_server_info=servers_info) except OnlineException as exc: module.fail_json(msg=exc.message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/online_user_info.py b/plugins/modules/online_user_info.py index 2d7f2866bb9..5f7a384963e 100644 --- a/plugins/modules/online_user_info.py +++ b/plugins/modules/online_user_info.py @@ -47,15 +47,16 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.online import ( - Online, OnlineException, online_argument_spec + Online, + OnlineException, + online_argument_spec, ) class OnlineUserInfo(Online): - def __init__(self, module): super().__init__(module) - self.name = 'api/v1/user' + self.name = "api/v1/user" def main(): @@ -65,12 +66,10 @@ def main(): ) try: - module.exit_json( - online_user_info=OnlineUserInfo(module).get_resources() - ) + module.exit_json(online_user_info=OnlineUserInfo(module).get_resources()) except OnlineException as exc: module.fail_json(msg=exc.message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/open_iscsi.py b/plugins/modules/open_iscsi.py index 68be3943590..3687a4089ae 100644 --- a/plugins/modules/open_iscsi.py +++ b/plugins/modules/open_iscsi.py @@ -149,7 +149,7 @@ from ansible.module_utils.basic import AnsibleModule -ISCSIADM = 'iscsiadm' +ISCSIADM = "iscsiadm" iscsiadm_cmd = None @@ -160,7 +160,7 @@ def compare_nodelists(l1, l2): def iscsi_get_cached_nodes(module, portal=None): - cmd = [iscsiadm_cmd, '--mode', 'node'] + cmd = [iscsiadm_cmd, "--mode", "node"] rc, out, err = module.run_command(cmd) nodes = [] @@ -170,9 +170,9 @@ def iscsi_get_cached_nodes(module, portal=None): # line format is "ip:port,target_portal_group_tag targetname" parts = line.split() if len(parts) > 2: - module.fail_json(msg='error parsing output', cmd=cmd) + module.fail_json(msg="error parsing output", cmd=cmd) target = parts[1] - parts = parts[0].split(':') + parts = parts[0].split(":") target_portal = parts[0] if portal is None or portal == target_portal: @@ -190,21 +190,21 @@ def iscsi_get_cached_nodes(module, portal=None): def iscsi_discover(module, portal, port): - cmd = [iscsiadm_cmd, '--mode', 'discovery', '--type', 'sendtargets', '--portal', f'{portal}:{port}'] + cmd = [iscsiadm_cmd, "--mode", "discovery", "--type", "sendtargets", "--portal", f"{portal}:{port}"] module.run_command(cmd, check_rc=True) def iscsi_rescan(module, target=None): if target is None: - cmd = [iscsiadm_cmd, '--mode', 'session', '--rescan'] + cmd = [iscsiadm_cmd, "--mode", "session", "--rescan"] else: - cmd = [iscsiadm_cmd, '--mode', 'node', '--rescan', '-T', target] + cmd = [iscsiadm_cmd, "--mode", "node", "--rescan", "-T", target] rc, out, err = module.run_command(cmd) return out def target_loggedon(module, target, portal=None, port=None): - cmd = [iscsiadm_cmd, '--mode', 'session'] + cmd = [iscsiadm_cmd, "--mode", "session"] rc, out, err = module.run_command(cmd) if portal is None: @@ -222,38 
+222,61 @@ def target_loggedon(module, target, portal=None, port=None): def target_login(module, target, check_rc, portal=None, port=None): - node_auth = module.params['node_auth'] - node_user = module.params['node_user'] - node_pass = module.params['node_pass'] - node_user_in = module.params['node_user_in'] - node_pass_in = module.params['node_pass_in'] + node_auth = module.params["node_auth"] + node_user = module.params["node_user"] + node_pass = module.params["node_pass"] + node_user_in = module.params["node_user_in"] + node_pass_in = module.params["node_pass_in"] if node_user: - params = [('node.session.auth.authmethod', node_auth), - ('node.session.auth.username', node_user), - ('node.session.auth.password', node_pass)] - for (name, value) in params: - cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value] + params = [ + ("node.session.auth.authmethod", node_auth), + ("node.session.auth.username", node_user), + ("node.session.auth.password", node_pass), + ] + for name, value in params: + cmd = [ + iscsiadm_cmd, + "--mode", + "node", + "--targetname", + target, + "--op=update", + "--name", + name, + "--value", + value, + ] module.run_command(cmd, check_rc=check_rc) if node_user_in: - params = [('node.session.auth.username_in', node_user_in), - ('node.session.auth.password_in', node_pass_in)] - for (name, value) in params: - cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value] + params = [("node.session.auth.username_in", node_user_in), ("node.session.auth.password_in", node_pass_in)] + for name, value in params: + cmd = [ + iscsiadm_cmd, + "--mode", + "node", + "--targetname", + target, + "--op=update", + "--name", + name, + "--value", + value, + ] module.run_command(cmd, check_rc=check_rc) - cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--login'] + cmd = [iscsiadm_cmd, "--mode", "node", "--targetname", target, "--login"] if portal is not None and port is not None: - cmd.append('--portal') - cmd.append(f'{portal}:{port}') + cmd.append("--portal") + cmd.append(f"{portal}:{port}") rc, out, err = module.run_command(cmd, check_rc=check_rc) return rc def target_logout(module, target): - cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--logout'] + cmd = [iscsiadm_cmd, "--mode", "node", "--targetname", target, "--logout"] module.run_command(cmd, check_rc=True) @@ -261,7 +284,7 @@ def target_device_node(target): # if anyone know a better way to find out which devicenodes get created for # a given target... 
- devices = glob.glob(f'/dev/disk/by-path/*{target}*') + devices = glob.glob(f"/dev/disk/by-path/*{target}*") devdisks = [] for dev in devices: # exclude partitions @@ -274,37 +297,59 @@ def target_device_node(target): def target_isauto(module, target, portal=None, port=None): - cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target] + cmd = [iscsiadm_cmd, "--mode", "node", "--targetname", target] if portal is not None and port is not None: - cmd.append('--portal') - cmd.append(f'{portal}:{port}') + cmd.append("--portal") + cmd.append(f"{portal}:{port}") dummy, out, dummy = module.run_command(cmd, check_rc=True) lines = out.splitlines() for line in lines: - if 'node.startup' in line: - return 'automatic' in line + if "node.startup" in line: + return "automatic" in line return False def target_setauto(module, target, portal=None, port=None): - cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', 'node.startup', '--value', 'automatic'] + cmd = [ + iscsiadm_cmd, + "--mode", + "node", + "--targetname", + target, + "--op=update", + "--name", + "node.startup", + "--value", + "automatic", + ] if portal is not None and port is not None: - cmd.append('--portal') - cmd.append(f'{portal}:{port}') + cmd.append("--portal") + cmd.append(f"{portal}:{port}") module.run_command(cmd, check_rc=True) def target_setmanual(module, target, portal=None, port=None): - cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', 'node.startup', '--value', 'manual'] + cmd = [ + iscsiadm_cmd, + "--mode", + "node", + "--targetname", + target, + "--op=update", + "--name", + "node.startup", + "--value", + "manual", + ] if portal is not None and port is not None: - cmd.append('--portal') - cmd.append(f'{portal}:{port}') + cmd.append("--portal") + cmd.append(f"{portal}:{port}") module.run_command(cmd, check_rc=True) @@ -313,60 +358,58 @@ def main(): # load ansible module object module = AnsibleModule( argument_spec=dict( - # target - portal=dict(type='str', aliases=['ip']), - port=dict(type='str', default='3260'), - target=dict(type='str', aliases=['name', 'targetname']), - node_auth=dict(type='str', default='CHAP'), - node_user=dict(type='str'), - node_pass=dict(type='str', no_log=True), - node_user_in=dict(type='str'), - node_pass_in=dict(type='str', no_log=True), - + portal=dict(type="str", aliases=["ip"]), + port=dict(type="str", default="3260"), + target=dict(type="str", aliases=["name", "targetname"]), + node_auth=dict(type="str", default="CHAP"), + node_user=dict(type="str"), + node_pass=dict(type="str", no_log=True), + node_user_in=dict(type="str"), + node_pass_in=dict(type="str", no_log=True), # actions - login=dict(type='bool', aliases=['state']), - auto_node_startup=dict(type='bool', aliases=['automatic']), - auto_portal_startup=dict(type='bool'), - discover=dict(type='bool', default=False), - show_nodes=dict(type='bool', default=False), - rescan=dict(type='bool', default=False), + login=dict(type="bool", aliases=["state"]), + auto_node_startup=dict(type="bool", aliases=["automatic"]), + auto_portal_startup=dict(type="bool"), + discover=dict(type="bool", default=False), + show_nodes=dict(type="bool", default=False), + rescan=dict(type="bool", default=False), ), - - required_together=[['node_user', 'node_pass'], ['node_user_in', 'node_pass_in']], + required_together=[["node_user", "node_pass"], ["node_user_in", "node_pass_in"]], required_if=[ - ('discover', True, ['portal']), - ('auto_node_startup', True, ['target']), - ('auto_portal_startup', 
True, ['target'])],
+        ("discover", True, ["portal"]),
+        ("auto_node_startup", True, ["target"]),
+        ("auto_portal_startup", True, ["target"]),
+    ],
     supports_check_mode=True,
 )
 
     global iscsiadm_cmd
-    iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True)
+    iscsiadm_cmd = module.get_bin_path("iscsiadm", required=True)
 
     # parameters
-    portal = module.params['portal']
+    portal = module.params["portal"]
     if portal:
         try:
             portal = socket.getaddrinfo(portal, None)[0][4][0]
         except socket.gaierror:
             module.fail_json(msg="Portal address is incorrect")
 
-    target = module.params['target']
-    port = module.params['port']
-    login = module.params['login']
-    automatic = module.params['auto_node_startup']
-    automatic_portal = module.params['auto_portal_startup']
-    discover = module.params['discover']
-    show_nodes = module.params['show_nodes']
-    rescan = module.params['rescan']
+    target = module.params["target"]
+    port = module.params["port"]
+    login = module.params["login"]
+    automatic = module.params["auto_node_startup"]
+    automatic_portal = module.params["auto_portal_startup"]
+    discover = module.params["discover"]
+    show_nodes = module.params["show_nodes"]
+    rescan = module.params["rescan"]
 
     check = module.check_mode
 
     cached = iscsi_get_cached_nodes(module, portal)
 
     # return json dict
-    result = {'changed': False}
+    result = {"changed": False}
 
     login_to_all_nodes = False
     check_rc = True
@@ -377,8 +420,8 @@ def main():
             iscsi_discover(module, portal, port)
             nodes = iscsi_get_cached_nodes(module, portal)
             if not compare_nodelists(cached, nodes):
-                result['changed'] |= True
-                result['cache_updated'] = True
+                result["changed"] |= True
+                result["cache_updated"] = True
     else:
         nodes = cached
 
@@ -400,94 +443,94 @@ def main():
             module.fail_json(msg="Specified target not found")
 
     if show_nodes:
-        result['nodes'] = nodes
+        result["nodes"] = nodes
 
     if login is not None:
         if login_to_all_nodes:
-            result['devicenodes'] = []
+            result["devicenodes"] = []
             for index_target in nodes:
                 loggedon = target_loggedon(module, index_target, portal, port)
                 if (login and loggedon) or (not login and not loggedon):
-                    result['changed'] |= False
+                    result["changed"] |= False
                     if login:
-                        result['devicenodes'] += target_device_node(index_target)
+                        result["devicenodes"] += target_device_node(index_target)
                 elif not check:
                     if login:
                         login_result = target_login(module, index_target, check_rc, portal, port)
                         # give udev some time
                         time.sleep(1)
-                        result['devicenodes'] += target_device_node(index_target)
+                        result["devicenodes"] += target_device_node(index_target)
                     else:
                         target_logout(module, index_target)
                     # Check if there are multiple targets on a single portal and
                    # do not mark the task changed if host could not login to one of them
                     if len(nodes) > 1 and login_result == 24:
-                        result['changed'] |= False
-                        result['connection_changed'] = False
+                        result["changed"] |= False
+                        result["connection_changed"] = False
                     else:
-                        result['changed'] |= True
-                        result['connection_changed'] = True
+                        result["changed"] |= True
+                        result["connection_changed"] = True
                 else:
-                    result['changed'] |= True
-                    result['connection_changed'] = True
+                    result["changed"] |= True
+                    result["connection_changed"] = True
         else:
             loggedon = target_loggedon(module, target, portal, port)
             if (login and loggedon) or (not login and not loggedon):
-                result['changed'] |= False
+                result["changed"] |= False
                 if login:
-                    result['devicenodes'] = target_device_node(target)
+                    result["devicenodes"] = target_device_node(target)
             elif not check:
                 if login:
                     target_login(module, target, check_rc, portal, port)
                     # give udev some time
                     time.sleep(1)
-                    result['devicenodes'] = target_device_node(target)
+                    result["devicenodes"] = target_device_node(target)
                 else:
                     target_logout(module, target)
-                result['changed'] |= True
-                result['connection_changed'] = True
+                result["changed"] |= True
+                result["connection_changed"] = True
             else:
-                result['changed'] |= True
-                result['connection_changed'] = True
+                result["changed"] |= True
+                result["connection_changed"] = True
 
     if automatic is not None and not login_to_all_nodes:
         isauto = target_isauto(module, target)
         if (automatic and isauto) or (not automatic and not isauto):
-            result['changed'] |= False
-            result['automatic_changed'] = False
+            result["changed"] |= False
+            result["automatic_changed"] = False
         elif not check:
             if automatic:
                 target_setauto(module, target)
             else:
                 target_setmanual(module, target)
-            result['changed'] |= True
-            result['automatic_changed'] = True
+            result["changed"] |= True
+            result["automatic_changed"] = True
         else:
-            result['changed'] |= True
-            result['automatic_changed'] = True
+            result["changed"] |= True
+            result["automatic_changed"] = True
 
     if automatic_portal is not None and not login_to_all_nodes:
         isauto = target_isauto(module, target, portal, port)
         if (automatic_portal and isauto) or (not automatic_portal and not isauto):
-            result['changed'] |= False
-            result['automatic_portal_changed'] = False
+            result["changed"] |= False
+            result["automatic_portal_changed"] = False
         elif not check:
             if automatic_portal:
                 target_setauto(module, target, portal, port)
             else:
                 target_setmanual(module, target, portal, port)
-            result['changed'] |= True
-            result['automatic_portal_changed'] = True
+            result["changed"] |= True
+            result["automatic_portal_changed"] = True
         else:
-            result['changed'] |= True
-            result['automatic_portal_changed'] = True
+            result["changed"] |= True
+            result["automatic_portal_changed"] = True
 
     if rescan is not False:
-        result['changed'] = True
-        result['sessions'] = iscsi_rescan(module, target)
+        result["changed"] = True
+        result["sessions"] = iscsi_rescan(module, target)
 
     module.exit_json(**result)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/openbsd_pkg.py b/plugins/modules/openbsd_pkg.py
index 4f08ea3f685..d6b41e8c488 100644
--- a/plugins/modules/openbsd_pkg.py
+++ b/plugins/modules/openbsd_pkg.py
@@ -167,14 +167,14 @@ def execute_command(cmd, module):
     # We set TERM to 'dumb' to keep pkg_add happy if the machine running
     # ansible is using a TERM that the managed machine does not know about,
     # e.g.: "No progress meter: failed termcap lookup on xterm-kitty".
-    return module.run_command(cmd_args, environ_update={'TERM': 'dumb'})
+    return module.run_command(cmd_args, environ_update={"TERM": "dumb"})
 
 
 def get_all_installed(module):
     """ Get all installed packages. Used to support diff mode """
-    command = 'pkg_info -Iq'
+    command = "pkg_info -Iq"
 
     rc, stdout, stderr = execute_command(command, module)
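The get_all_installed() snapshot above exists to support Ansible's diff mode: main() later captures the package list once before and once after the requested actions, then derives both the reported diff and the final changed flag from the two snapshots. A minimal sketch of that pattern, with made-up package lists:

    before = "autoconf-2.71\nbzip2-1.0.8\n"            # pkg_info -Iq output before the run
    after = "autoconf-2.71\nbzip2-1.0.8\nnmap-7.94\n"  # pkg_info -Iq output after the run
    result = {"diff": dict(before=before, after=after)}
    result["changed"] = result["diff"]["before"] != result["diff"]["after"]
    # With --diff, Ansible renders the two strings as a unified diff for the user.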
@@ -186,7 +186,7 @@
 # Function used to find out if a package is currently installed.
 def get_package_state(names, pkg_spec, module):
-    info_cmd = 'pkg_info -Iq'
+    info_cmd = "pkg_info -Iq"
 
     for name in names:
         command = f"{info_cmd} inst:{name}"
@@ -196,63 +196,66 @@ def get_package_state(names, pkg_spec, module):
         if stderr:
             match = re.search(rf"^Can't find inst:{re.escape(name)}$", stderr)
             if match:
-                pkg_spec[name]['installed_state'] = False
+                pkg_spec[name]["installed_state"] = False
             else:
                 module.fail_json(msg=f"failed in get_package_state(): {stderr}")
 
         if stdout:
             # If the requested package name is just a stem, like "python", we may
             # find multiple packages with that name.
-            pkg_spec[name]['installed_names'] = stdout.splitlines()
+            pkg_spec[name]["installed_names"] = stdout.splitlines()
             module.debug(f"get_package_state(): installed_names = {pkg_spec[name]['installed_names']}")
-            pkg_spec[name]['installed_state'] = True
+            pkg_spec[name]["installed_state"] = True
         else:
-            pkg_spec[name]['installed_state'] = False
+            pkg_spec[name]["installed_state"] = False
 
 
 # Function used to make sure a package is present.
 def package_present(names, pkg_spec, module):
-    build = module.params['build']
+    build = module.params["build"]
 
     for name in names:
         # It is possible package_present() has been called from package_latest().
         # In that case we do not want to operate on the whole list of names,
         # only the leftovers.
-        if pkg_spec['package_latest_leftovers']:
-            if name not in pkg_spec['package_latest_leftovers']:
+        if pkg_spec["package_latest_leftovers"]:
+            if name not in pkg_spec["package_latest_leftovers"]:
                 module.debug(f"package_present(): ignoring '{name}' which is not a package_latest() leftover")
                 continue
             else:
                 module.debug(f"package_present(): handling package_latest() leftovers, installing '{name}'")
 
         if module.check_mode:
-            install_cmd = 'pkg_add -Imn'
+            install_cmd = "pkg_add -Imn"
         else:
             if build is True:
                 port_dir = f"{module.params['ports_dir']}/{get_package_source_path(name, pkg_spec, module)}"
                 if os.path.isdir(port_dir):
-                    if pkg_spec[name]['flavor']:
-                        flavors = pkg_spec[name]['flavor'].replace('-', ' ')
-                        install_cmd = f"cd {port_dir} && make clean=depends && FLAVOR=\"{flavors}\" make install && make clean=depends"
-                    elif pkg_spec[name]['subpackage']:
-                        install_cmd = f"cd {port_dir} && make clean=depends && SUBPACKAGE=\"{pkg_spec[name]['subpackage']}\" make install && make clean=depends"
+                    if pkg_spec[name]["flavor"]:
+                        flavors = pkg_spec[name]["flavor"].replace("-", " ")
+                        install_cmd = f'cd {port_dir} && make clean=depends && FLAVOR="{flavors}" make install && make clean=depends'
+                    elif pkg_spec[name]["subpackage"]:
+                        install_cmd = f'cd {port_dir} && make clean=depends && SUBPACKAGE="{pkg_spec[name]["subpackage"]}" make install && make clean=depends'
                     else:
                         install_cmd = f"cd {port_dir} && make install && make clean=depends"
                 else:
                     module.fail_json(msg=f"the port source directory {port_dir} does not exist")
             else:
-                install_cmd = 'pkg_add -Im'
+                install_cmd = "pkg_add -Im"
 
-        if module.params['snapshot'] is True:
-            install_cmd += ' -Dsnap'
-
-        if pkg_spec[name]['installed_state'] is False:
+        if module.params["snapshot"] is True:
+            install_cmd += " -Dsnap"
+        if pkg_spec[name]["installed_state"] is False:
             # Attempt to install the package
             if build is True and not module.check_mode:
-                (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, module, use_unsafe_shell=True)
+                (pkg_spec[name]["rc"], pkg_spec[name]["stdout"], pkg_spec[name]["stderr"]) = module.run_command(
+                    install_cmd, use_unsafe_shell=True
+                )
             else:
-                (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command(f"{install_cmd} {name}", module)
+                (pkg_spec[name]["rc"], pkg_spec[name]["stdout"], pkg_spec[name]["stderr"]) = execute_command(
+                    f"{install_cmd} {name}", module
+                )
 
             # The behaviour of pkg_add is a bit different depending on if a
             # specific version is supplied or not.
@@ -265,80 +268,81 @@ def package_present(names, pkg_spec, module):
             # packages-specs(7) notion of a version. If using the branch syntax
             # (like "python%3.5") even though a branch name may look like a
             # version string it is not used as one by pkg_add.
-            if pkg_spec[name]['version'] or build is True:
+            if pkg_spec[name]["version"] or build is True:
                 # Depend on the return code.
                 module.debug(f"package_present(): depending on return code for name '{name}'")
-                if pkg_spec[name]['rc']:
-                    pkg_spec[name]['changed'] = False
+                if pkg_spec[name]["rc"]:
+                    pkg_spec[name]["changed"] = False
             else:
                 # Depend on stderr instead.
                 module.debug(f"package_present(): depending on stderr for name '{name}'")
-                if pkg_spec[name]['stderr']:
+                if pkg_spec[name]["stderr"]:
                     # There is a corner case where having an empty directory in
                     # installpath prior to the right location will result in a
                     # "file:/local/package/directory/ is empty" message on stderr
                     # while still installing the package, so we need to look
                     # for a message like "packagename-1.0: ok" just in case.
-                    match = re.search(rf"\W{re.escape(pkg_spec[name]['stem'])}-[^:]+: ok\W", pkg_spec[name]['stdout'])
+                    match = re.search(rf"\W{re.escape(pkg_spec[name]['stem'])}-[^:]+: ok\W", pkg_spec[name]["stdout"])
 
                     if match:
                         # It turns out we were able to install the package.
                         module.debug(f"package_present(): we were able to install package for name '{name}'")
-                        pkg_spec[name]['changed'] = True
+                        pkg_spec[name]["changed"] = True
                     else:
                         # We really did fail, fake the return code.
                         module.debug(f"package_present(): we really did fail for name '{name}'")
-                        pkg_spec[name]['rc'] = 1
-                        pkg_spec[name]['changed'] = False
+                        pkg_spec[name]["rc"] = 1
+                        pkg_spec[name]["changed"] = False
                 else:
                     module.debug(f"package_present(): stderr was not set for name '{name}'")
-                    if pkg_spec[name]['rc'] == 0:
-                        pkg_spec[name]['changed'] = True
+                    if pkg_spec[name]["rc"] == 0:
+                        pkg_spec[name]["changed"] = True
         else:
-            pkg_spec[name]['rc'] = 0
-            pkg_spec[name]['stdout'] = ''
-            pkg_spec[name]['stderr'] = ''
-            pkg_spec[name]['changed'] = False
+            pkg_spec[name]["rc"] = 0
+            pkg_spec[name]["stdout"] = ""
+            pkg_spec[name]["stderr"] = ""
+            pkg_spec[name]["changed"] = False
 
 
 # Function used to make sure a package is the latest available version.
 def package_latest(names, pkg_spec, module):
-    if module.params['build'] is True:
+    if module.params["build"] is True:
         module.fail_json(msg=f"the combination of build={module.params['build']} and state=latest is not supported")
 
-    upgrade_cmd = 'pkg_add -um'
+    upgrade_cmd = "pkg_add -um"
 
     if module.check_mode:
-        upgrade_cmd += 'n'
+        upgrade_cmd += "n"
 
-    if module.params['clean']:
-        upgrade_cmd += 'c'
+    if module.params["clean"]:
+        upgrade_cmd += "c"
 
-    if module.params['quick']:
-        upgrade_cmd += 'q'
+    if module.params["quick"]:
+        upgrade_cmd += "q"
 
-    if module.params['snapshot']:
-        upgrade_cmd += ' -Dsnap'
+    if module.params["snapshot"]:
+        upgrade_cmd += " -Dsnap"
 
     for name in names:
-        if pkg_spec[name]['installed_state'] is True:
-
+        if pkg_spec[name]["installed_state"] is True:
             # Attempt to upgrade the package.
- (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command(f"{upgrade_cmd} {name}", module) + (pkg_spec[name]["rc"], pkg_spec[name]["stdout"], pkg_spec[name]["stderr"]) = execute_command( + f"{upgrade_cmd} {name}", module + ) # Look for output looking something like "nmap-6.01->6.25: ok" to see if # something changed (or would have changed). Use \W to delimit the match # from progress meter output. - pkg_spec[name]['changed'] = False - for installed_name in pkg_spec[name]['installed_names']: + pkg_spec[name]["changed"] = False + for installed_name in pkg_spec[name]["installed_names"]: module.debug(f"package_latest(): checking for pre-upgrade package name: {installed_name}") - match = re.search(rf"\W{re.escape(installed_name)}->.+: ok\W", pkg_spec[name]['stdout']) + match = re.search(rf"\W{re.escape(installed_name)}->.+: ok\W", pkg_spec[name]["stdout"]) if match: module.debug(f"package_latest(): pre-upgrade package name match: {installed_name}") - pkg_spec[name]['changed'] = True + pkg_spec[name]["changed"] = True break # FIXME: This part is problematic. Based on the issues mentioned (and @@ -350,74 +354,76 @@ def package_latest(names, pkg_spec, module): # parse out a successful update above. This way we will report a # successful run when we actually modify something but fail # otherwise. - if pkg_spec[name]['changed'] is not True: - if pkg_spec[name]['stderr']: - pkg_spec[name]['rc'] = 1 + if pkg_spec[name]["changed"] is not True: + if pkg_spec[name]["stderr"]: + pkg_spec[name]["rc"] = 1 else: # Note packages that need to be handled by package_present module.debug(f"package_latest(): package '{name}' is not installed, will be handled by package_present()") - pkg_spec['package_latest_leftovers'].append(name) + pkg_spec["package_latest_leftovers"].append(name) # If there were any packages that were not installed we call # package_present() which will handle those. - if pkg_spec['package_latest_leftovers']: + if pkg_spec["package_latest_leftovers"]: module.debug("package_latest(): calling package_present() to handle leftovers") package_present(names, pkg_spec, module) # Function used to make sure a package is not installed. def package_absent(names, pkg_spec, module): - remove_cmd = 'pkg_delete -I' + remove_cmd = "pkg_delete -I" if module.check_mode: - remove_cmd += 'n' + remove_cmd += "n" - if module.params['clean']: - remove_cmd += 'c' + if module.params["clean"]: + remove_cmd += "c" - if module.params['quick']: - remove_cmd += 'q' + if module.params["quick"]: + remove_cmd += "q" for name in names: - if pkg_spec[name]['installed_state'] is True: + if pkg_spec[name]["installed_state"] is True: # Attempt to remove the package. - (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command(f"{remove_cmd} {name}", module) + (pkg_spec[name]["rc"], pkg_spec[name]["stdout"], pkg_spec[name]["stderr"]) = execute_command( + f"{remove_cmd} {name}", module + ) - if pkg_spec[name]['rc'] == 0: - pkg_spec[name]['changed'] = True + if pkg_spec[name]["rc"] == 0: + pkg_spec[name]["changed"] = True else: - pkg_spec[name]['changed'] = False + pkg_spec[name]["changed"] = False else: - pkg_spec[name]['rc'] = 0 - pkg_spec[name]['stdout'] = '' - pkg_spec[name]['stderr'] = '' - pkg_spec[name]['changed'] = False + pkg_spec[name]["rc"] = 0 + pkg_spec[name]["stdout"] = "" + pkg_spec[name]["stderr"] = "" + pkg_spec[name]["changed"] = False # Function used to remove unused dependencies. 
 def package_rm_unused_deps(pkg_spec, module):
-    rm_unused_deps_cmd = 'pkg_delete -Ia'
+    rm_unused_deps_cmd = "pkg_delete -Ia"
 
     if module.check_mode:
-        rm_unused_deps_cmd += 'n'
+        rm_unused_deps_cmd += "n"
 
-    if module.params['clean']:
-        rm_unused_deps_cmd += 'c'
+    if module.params["clean"]:
+        rm_unused_deps_cmd += "c"
 
-    if module.params['quick']:
-        rm_unused_deps_cmd += 'q'
+    if module.params["quick"]:
+        rm_unused_deps_cmd += "q"
 
     # If we run the commands, we set changed to true to let
     # the package list change detection code do the actual work.
 
     # Create a minimal pkg_spec entry for '*' to store return values.
-    pkg_spec['*'] = {}
+    pkg_spec["*"] = {}
 
     # Attempt to remove unused dependencies.
-    pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command(rm_unused_deps_cmd, module)
-    pkg_spec['*']['changed'] = True
+    pkg_spec["*"]["rc"], pkg_spec["*"]["stdout"], pkg_spec["*"]["stderr"] = execute_command(rm_unused_deps_cmd, module)
+    pkg_spec["*"]["changed"] = True
 
 
 # Function used to parse the package name based on packages-specs(7).
@@ -427,9 +433,8 @@ def package_rm_unused_deps(pkg_spec, module):
 # packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a
 # description.
 def parse_package_name(names, pkg_spec, module):
-    # Initialize empty list of package_latest() leftovers.
-    pkg_spec['package_latest_leftovers'] = []
+    pkg_spec["package_latest_leftovers"] = []
 
     for name in names:
         module.debug(f"parse_package_name(): parsing name: {name}")
@@ -447,19 +452,23 @@ def parse_package_name(names, pkg_spec, module):
 
         # If name includes a version.
         if version_match:
-            match = re.search("^(?P<stem>[^%]+)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+            match = re.search(
+                "^(?P<stem>[^%]+)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$",
+                name,
+            )
             if match:
-                pkg_spec[name]['stem'] = match.group('stem')
-                pkg_spec[name]['version_separator'] = '-'
-                pkg_spec[name]['version'] = match.group('version')
-                pkg_spec[name]['flavor_separator'] = match.group('flavor_separator')
-                pkg_spec[name]['flavor'] = match.group('flavor')
-                pkg_spec[name]['branch'] = match.group('branch')
-                pkg_spec[name]['style'] = 'version'
+                pkg_spec[name]["stem"] = match.group("stem")
+                pkg_spec[name]["version_separator"] = "-"
+                pkg_spec[name]["version"] = match.group("version")
+                pkg_spec[name]["flavor_separator"] = match.group("flavor_separator")
+                pkg_spec[name]["flavor"] = match.group("flavor")
+                pkg_spec[name]["branch"] = match.group("branch")
+                pkg_spec[name]["style"] = "version"
                 module.debug(
                     f"version_match: stem: {pkg_spec[name]['stem']}, version: {pkg_spec[name]['version']}, "
                     f"flavor_separator: {pkg_spec[name]['flavor_separator']}, flavor: {pkg_spec[name]['flavor']}, branch: {pkg_spec[name]['branch']}, "
-                    f"style: {pkg_spec[name]['style']}")
+                    f"style: {pkg_spec[name]['style']}"
+                )
             else:
                 module.fail_json(msg=f"unable to parse package name at version_match: {name}")
 
@@ -467,15 +476,17 @@ def parse_package_name(names, pkg_spec, module):
         elif versionless_match:
             match = re.search("^(?P<stem>[^%]+)--(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
             if match:
-                pkg_spec[name]['stem'] = match.group('stem')
-                pkg_spec[name]['version_separator'] = '-'
-                pkg_spec[name]['version'] = None
-                pkg_spec[name]['flavor_separator'] = '-'
-                pkg_spec[name]['flavor'] = match.group('flavor')
-                pkg_spec[name]['branch'] = match.group('branch')
-                pkg_spec[name]['style'] = 'versionless'
-                module.debug(f"versionless_match: stem: {pkg_spec[name]['stem']}, flavor: {pkg_spec[name]['flavor']}, "
-                             f"branch: {pkg_spec[name]['branch']}, style: {pkg_spec[name]['style']}")
+                pkg_spec[name]["stem"] = match.group("stem")
+                pkg_spec[name]["version_separator"] = "-"
+                pkg_spec[name]["version"] = None
+                pkg_spec[name]["flavor_separator"] = "-"
+                pkg_spec[name]["flavor"] = match.group("flavor")
+                pkg_spec[name]["branch"] = match.group("branch")
+                pkg_spec[name]["style"] = "versionless"
+                module.debug(
+                    f"versionless_match: stem: {pkg_spec[name]['stem']}, flavor: {pkg_spec[name]['flavor']}, "
+                    f"branch: {pkg_spec[name]['branch']}, style: {pkg_spec[name]['style']}"
+                )
             else:
                 module.fail_json(msg=f"unable to parse package name at versionless_match: {name}")
 
@@ -485,45 +496,49 @@ def parse_package_name(names, pkg_spec, module):
         else:
             match = re.search("^(?P<stem>[^%]+)(%(?P<branch>.+))?$", name)
             if match:
-                pkg_spec[name]['stem'] = match.group('stem')
-                pkg_spec[name]['version_separator'] = None
-                pkg_spec[name]['version'] = None
-                pkg_spec[name]['flavor_separator'] = None
-                pkg_spec[name]['flavor'] = None
-                pkg_spec[name]['branch'] = match.group('branch')
-                pkg_spec[name]['style'] = 'stem'
-                module.debug(f"stem_match: stem: {pkg_spec[name]['stem']}, branch: {pkg_spec[name]['branch']}, style: {pkg_spec[name]['style']}")
+                pkg_spec[name]["stem"] = match.group("stem")
+                pkg_spec[name]["version_separator"] = None
+                pkg_spec[name]["version"] = None
+                pkg_spec[name]["flavor_separator"] = None
+                pkg_spec[name]["flavor"] = None
+                pkg_spec[name]["branch"] = match.group("branch")
+                pkg_spec[name]["style"] = "stem"
+                module.debug(
+                    f"stem_match: stem: {pkg_spec[name]['stem']}, branch: {pkg_spec[name]['branch']}, style: {pkg_spec[name]['style']}"
+                )
             else:
                 module.fail_json(msg=f"unable to parse package name at else: {name}")
 
         # Verify that the managed host is new enough to support branch syntax.
-        if pkg_spec[name]['branch']:
+        if pkg_spec[name]["branch"]:
             branch_release = "6.0"
             if LooseVersion(platform.release()) < LooseVersion(branch_release):
-                module.fail_json(msg=f"package name using 'branch' syntax requires at least OpenBSD {branch_release}: {name}")
+                module.fail_json(
+                    msg=f"package name using 'branch' syntax requires at least OpenBSD {branch_release}: {name}"
+                )
 
         # Sanity check that there are no trailing dashes in flavor.
         # Try to stop strange stuff early so we can be strict later.
-        if pkg_spec[name]['flavor']:
-            match = re.search("-$", pkg_spec[name]['flavor'])
+        if pkg_spec[name]["flavor"]:
+            match = re.search("-$", pkg_spec[name]["flavor"])
             if match:
                 module.fail_json(msg=f"trailing dash in flavor: {pkg_spec[name]['flavor']}")
 
 
 # Function used for figuring out the port path.
 def get_package_source_path(name, pkg_spec, module):
-    pkg_spec[name]['subpackage'] = None
-    if pkg_spec[name]['stem'] == 'sqlports':
-        return 'databases/sqlports'
+    pkg_spec[name]["subpackage"] = None
+    if pkg_spec[name]["stem"] == "sqlports":
+        return "databases/sqlports"
     else:
         # try for an exact match first
-        sqlports_db_file = '/usr/local/share/sqlports'
+        sqlports_db_file = "/usr/local/share/sqlports"
         if not os.path.isfile(sqlports_db_file):
             module.fail_json(msg=f"sqlports file '{sqlports_db_file}' is missing")
 
         conn = sqlite3.connect(sqlports_db_file)
-        first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
+        first_part_of_query = "SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname"
         query = f"{first_part_of_query} = ?"
module.debug(f"package_package_source_path(): exact query: {query}") cursor = conn.execute(query, (name,)) @@ -531,16 +546,26 @@ def get_package_source_path(name, pkg_spec, module): # next, try for a fuzzier match if len(results) < 1: - looking_for = pkg_spec[name]['stem'] + (pkg_spec[name]['version_separator'] or '-') + (pkg_spec[name]['version'] or '%') + looking_for = ( + pkg_spec[name]["stem"] + + (pkg_spec[name]["version_separator"] or "-") + + (pkg_spec[name]["version"] or "%") + ) query = f"{first_part_of_query} LIKE ?" - if pkg_spec[name]['flavor']: - looking_for += pkg_spec[name]['flavor_separator'] + pkg_spec[name]['flavor'] + if pkg_spec[name]["flavor"]: + looking_for += pkg_spec[name]["flavor_separator"] + pkg_spec[name]["flavor"] module.debug(f"package_package_source_path(): fuzzy flavor query: {query}") cursor = conn.execute(query, (looking_for,)) - elif pkg_spec[name]['style'] == 'versionless': - query += ' AND fullpkgname NOT LIKE ?' + elif pkg_spec[name]["style"] == "versionless": + query += " AND fullpkgname NOT LIKE ?" module.debug(f"package_package_source_path(): fuzzy versionless query: {query}") - cursor = conn.execute(query, (looking_for, f"{looking_for}-%",)) + cursor = conn.execute( + query, + ( + looking_for, + f"{looking_for}-%", + ), + ) else: module.debug(f"package_package_source_path(): fuzzy query: {query}") cursor = conn.execute(query, (looking_for,)) @@ -556,43 +581,43 @@ def get_package_source_path(name, pkg_spec, module): # there's exactly 1 match, so figure out the subpackage, if any, then return fullpkgpath = results[0][0] - parts = fullpkgpath.split(',') - if len(parts) > 1 and parts[1][0] == '-': - pkg_spec[name]['subpackage'] = parts[1] + parts = fullpkgpath.split(",") + if len(parts) > 1 and parts[1][0] == "-": + pkg_spec[name]["subpackage"] = parts[1] return parts[0] # Function used for upgrading all installed packages. def upgrade_packages(pkg_spec, module): if module.check_mode: - upgrade_cmd = 'pkg_add -Imnu' + upgrade_cmd = "pkg_add -Imnu" else: - upgrade_cmd = 'pkg_add -Imu' + upgrade_cmd = "pkg_add -Imu" - if module.params['snapshot']: - upgrade_cmd += ' -Dsnap' + if module.params["snapshot"]: + upgrade_cmd += " -Dsnap" # Create a minimal pkg_spec entry for '*' to store return values. - pkg_spec['*'] = {} + pkg_spec["*"] = {} # Attempt to upgrade all packages. - pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command(upgrade_cmd, module) + pkg_spec["*"]["rc"], pkg_spec["*"]["stdout"], pkg_spec["*"]["stderr"] = execute_command(upgrade_cmd, module) # Try to find any occurrence of a package changing version like: # "bzip2-1.0.6->1.0.6p0: ok". - match = re.search(r"\W\w.+->.+: ok\W", pkg_spec['*']['stdout']) + match = re.search(r"\W\w.+->.+: ok\W", pkg_spec["*"]["stdout"]) if match: - pkg_spec['*']['changed'] = True + pkg_spec["*"]["changed"] = True else: - pkg_spec['*']['changed'] = False + pkg_spec["*"]["changed"] = False # It seems we can not trust the return value, so depend on the presence of # stderr to know if something failed. 
- if pkg_spec['*']['stderr']: - pkg_spec['*']['rc'] = 1 + if pkg_spec["*"]["stderr"]: + pkg_spec["*"]["rc"] = 1 else: - pkg_spec['*']['rc'] = 0 + pkg_spec["*"]["rc"] = 0 # =========================================== @@ -600,29 +625,29 @@ def upgrade_packages(pkg_spec, module): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='list', elements='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']), - build=dict(type='bool', default=False), - snapshot=dict(type='bool', default=False), - ports_dir=dict(type='path', default='/usr/ports'), - quick=dict(type='bool', default=False), - clean=dict(type='bool', default=False), - autoremove=dict(type='bool', default=False), + name=dict(type="list", elements="str", required=True), + state=dict(type="str", default="present", choices=["absent", "installed", "latest", "present", "removed"]), + build=dict(type="bool", default=False), + snapshot=dict(type="bool", default=False), + ports_dir=dict(type="path", default="/usr/ports"), + quick=dict(type="bool", default=False), + clean=dict(type="bool", default=False), + autoremove=dict(type="bool", default=False), ), - mutually_exclusive=[['snapshot', 'build']], - supports_check_mode=True + mutually_exclusive=[["snapshot", "build"]], + supports_check_mode=True, ) - name = module.params['name'] - state = module.params['state'] - build = module.params['build'] - ports_dir = module.params['ports_dir'] + name = module.params["name"] + state = module.params["state"] + build = module.params["build"] + ports_dir = module.params["ports_dir"] result = {} - result['name'] = name - result['state'] = state - result['build'] = build - result['diff'] = {} + result["name"] = name + result["state"] = state + result["build"] = build + result["diff"] = {} # The data structure used to keep track of package information. pkg_spec = {} @@ -634,29 +659,29 @@ def main(): module.fail_json(msg=f"the ports source directory {ports_dir} does not exist") # build sqlports if its not installed yet - parse_package_name(['sqlports'], pkg_spec, module) - get_package_state(['sqlports'], pkg_spec, module) - if not pkg_spec['sqlports']['installed_state']: + parse_package_name(["sqlports"], pkg_spec, module) + get_package_state(["sqlports"], pkg_spec, module) + if not pkg_spec["sqlports"]["installed_state"]: module.debug(f"main(): installing 'sqlports' because build={module.params['build']}") - package_present(['sqlports'], pkg_spec, module) + package_present(["sqlports"], pkg_spec, module) asterisk_name = False for n in name: - if n == '*': + if n == "*": if len(name) != 1: module.fail_json(msg="the package name '*' can not be mixed with other names") asterisk_name = True if asterisk_name: - if state != 'latest' and not module.params['autoremove']: + if state != "latest" and not module.params["autoremove"]: module.fail_json(msg="the package name '*' is only valid when using state=latest or autoremove=true") - if state == 'latest': + if state == "latest": # Perform an upgrade of all installed packages. upgrade_packages(pkg_spec, module) - if module.params['autoremove']: + if module.params["autoremove"]: # Remove unused dependencies. package_rm_unused_deps(pkg_spec, module) else: @@ -666,22 +691,24 @@ def main(): # Not sure how the branch syntax is supposed to play together # with build mode. Disable it for now. 
for n in name: - if pkg_spec[n]['branch'] and module.params['build'] is True: - module.fail_json(msg=f"the combination of 'branch' syntax and build={module.params['build']} is not supported: {n}") + if pkg_spec[n]["branch"] and module.params["build"] is True: + module.fail_json( + msg=f"the combination of 'branch' syntax and build={module.params['build']} is not supported: {n}" + ) # Get state for all package names. get_package_state(name, pkg_spec, module) # Perform requested action. - if state in ['installed', 'present']: + if state in ["installed", "present"]: package_present(name, pkg_spec, module) - elif state in ['absent', 'removed']: + elif state in ["absent", "removed"]: package_absent(name, pkg_spec, module) - elif state == 'latest': + elif state == "latest": package_latest(name, pkg_spec, module) # Handle autoremove if requested for non-asterisk packages - if module.params['autoremove']: + if module.params["autoremove"]: package_rm_unused_deps(pkg_spec, module) # The combined changed status for all requested packages. If anything @@ -694,25 +721,25 @@ def main(): # We combine all error messages in this comma separated string, for example: # "msg": "Can't find nmapp\n, Can't find nmappp\n" - combined_error_message = '' + combined_error_message = "" # Loop over all requested package names and check if anything failed or # changed. for n in name: - if pkg_spec[n]['rc'] != 0: + if pkg_spec[n]["rc"] != 0: combined_failed = True - if pkg_spec[n]['stderr']: + if pkg_spec[n]["stderr"]: if combined_error_message: combined_error_message += f", {pkg_spec[n]['stderr']}" else: - combined_error_message = pkg_spec[n]['stderr'] + combined_error_message = pkg_spec[n]["stderr"] else: if combined_error_message: combined_error_message += f", {pkg_spec[n]['stdout']}" else: - combined_error_message = pkg_spec[n]['stdout'] + combined_error_message = pkg_spec[n]["stdout"] - if pkg_spec[n]['changed'] is True: + if pkg_spec[n]["changed"] is True: combined_changed = True # If combined_error_message contains anything at least some part of the @@ -720,15 +747,15 @@ def main(): if combined_failed: module.fail_json(msg=combined_error_message, **result) - result['changed'] = combined_changed + result["changed"] = combined_changed if not module.check_mode: new_package_list = get_all_installed(module) - result['diff'] = dict(before=original_package_list, after=new_package_list) - result['changed'] = (result['diff']['before'] != result['diff']['after']) + result["diff"] = dict(before=original_package_list, after=new_package_list) + result["changed"] = result["diff"]["before"] != result["diff"]["after"] module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/opendj_backendprop.py b/plugins/modules/opendj_backendprop.py index 2f6175f59af..27ccde07106 100644 --- a/plugins/modules/opendj_backendprop.py +++ b/plugins/modules/opendj_backendprop.py @@ -98,19 +98,24 @@ class BackendProp: - def __init__(self, module): self._module = module def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name): my_command = [ f"{opendj_bindir}/dsconfig", - 'get-backend-prop', - '-h', hostname, - '--port', str(port), - '--bindDN', username, - '--backend-name', backend_name, - '-n', '-X', '-s' + "get-backend-prop", + "-h", + hostname, + "--port", + str(port), + "--bindDN", + username, + "--backend-name", + backend_name, + "-n", + "-X", + "-s", ] + password_method rc, stdout, stderr = self._module.run_command(my_command) if rc == 0: @@ 
-121,13 +126,19 @@ def get_property(self, opendj_bindir, hostname, port, username, password_method, def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value): my_command = [ f"{opendj_bindir}/dsconfig", - 'set-backend-prop', - '-h', hostname, - '--port', str(port), - '--bindDN', username, - '--backend-name', backend_name, - '--set', f"{name}:{value}", - '-n', '-X' + "set-backend-prop", + "-h", + hostname, + "--port", + str(port), + "--bindDN", + username, + "--backend-name", + backend_name, + "--set", + f"{name}:{value}", + "-n", + "-X", ] + password_method rc, stdout, stderr = self._module.run_command(my_command) if rc == 0: @@ -136,7 +147,7 @@ def set_property(self, opendj_bindir, hostname, port, username, password_method, self._module.fail_json(msg=f"Error message: {stderr}") def validate_data(self, data=None, name=None, value=None): - for config_line in data.split('\n'): + for config_line in data.split("\n"): if config_line: split_line = config_line.split() if split_line[0] == name: @@ -160,46 +171,50 @@ def main(): state=dict(default="present"), ), supports_check_mode=True, - mutually_exclusive=[['password', 'passwordfile']], - required_one_of=[['password', 'passwordfile']] + mutually_exclusive=[["password", "passwordfile"]], + required_one_of=[["password", "passwordfile"]], ) - opendj_bindir = module.params['opendj_bindir'] - hostname = module.params['hostname'] - port = module.params['port'] - username = module.params['username'] - password = module.params['password'] - passwordfile = module.params['passwordfile'] - backend_name = module.params['backend'] - name = module.params['name'] - value = module.params['value'] - state = module.params['state'] + opendj_bindir = module.params["opendj_bindir"] + hostname = module.params["hostname"] + port = module.params["port"] + username = module.params["username"] + password = module.params["password"] + passwordfile = module.params["passwordfile"] + backend_name = module.params["backend"] + name = module.params["name"] + value = module.params["value"] + state = module.params["state"] if module.params["password"] is not None: - password_method = ['-w', password] + password_method = ["-w", password] elif module.params["passwordfile"] is not None: - password_method = ['-j', passwordfile] + password_method = ["-j", passwordfile] opendj = BackendProp(module) - validate = opendj.get_property(opendj_bindir=opendj_bindir, - hostname=hostname, - port=port, - username=username, - password_method=password_method, - backend_name=backend_name) + validate = opendj.get_property( + opendj_bindir=opendj_bindir, + hostname=hostname, + port=port, + username=username, + password_method=password_method, + backend_name=backend_name, + ) if validate: if not opendj.validate_data(data=validate, name=name, value=value): if module.check_mode: module.exit_json(changed=True) - if opendj.set_property(opendj_bindir=opendj_bindir, - hostname=hostname, - port=port, - username=username, - password_method=password_method, - backend_name=backend_name, - name=name, - value=value): + if opendj.set_property( + opendj_bindir=opendj_bindir, + hostname=hostname, + port=port, + username=username, + password_method=password_method, + backend_name=backend_name, + name=name, + value=value, + ): module.exit_json(changed=True) else: module.exit_json(changed=False) @@ -209,5 +224,5 @@ def main(): module.exit_json(changed=False) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/openwrt_init.py 
b/plugins/modules/openwrt_init.py index 9bb3ec779f1..472f65fdeca 100644 --- a/plugins/modules/openwrt_init.py +++ b/plugins/modules/openwrt_init.py @@ -84,7 +84,7 @@ # =============================== # Check if service is enabled def is_enabled(): - rc, dummy, dummy = module.run_command([init_script, 'enabled']) + rc, dummy, dummy = module.run_command([init_script, "enabled"]) return rc == 0 @@ -95,85 +95,85 @@ def main(): # init module = AnsibleModule( argument_spec=dict( - name=dict(required=True, type='str', aliases=['service']), - state=dict(type='str', choices=['started', 'stopped', 'restarted', 'reloaded']), - enabled=dict(type='bool'), - pattern=dict(type='str'), + name=dict(required=True, type="str", aliases=["service"]), + state=dict(type="str", choices=["started", "stopped", "restarted", "reloaded"]), + enabled=dict(type="bool"), + pattern=dict(type="str"), ), supports_check_mode=True, - required_one_of=[('state', 'enabled')], + required_one_of=[("state", "enabled")], ) # initialize - service = module.params['name'] + service = module.params["name"] init_script = f"/etc/init.d/{service}" result = { - 'name': service, - 'changed': False, + "name": service, + "changed": False, } # check if service exists if not os.path.exists(init_script): - module.fail_json(msg=f'service {service} does not exist') + module.fail_json(msg=f"service {service} does not exist") # Enable/disable service startup at boot if requested - if module.params['enabled'] is not None: + if module.params["enabled"] is not None: # do we need to enable the service? enabled = is_enabled() # default to current state - result['enabled'] = enabled + result["enabled"] = enabled # Change enable/disable if needed - if enabled != module.params['enabled']: - result['changed'] = True - action = 'enable' if module.params['enabled'] else 'disable' + if enabled != module.params["enabled"]: + result["changed"] = True + action = "enable" if module.params["enabled"] else "disable" if not module.check_mode: rc, dummy, err = module.run_command([init_script, action]) # openwrt init scripts can return a non-zero exit code on a successful 'enable' # command if the init script doesn't contain a STOP value, so we ignore the exit # code and explicitly check if the service is now in the desired state - if is_enabled() != module.params['enabled']: + if is_enabled() != module.params["enabled"]: module.fail_json(msg=f"Unable to {action} service {service}: {err}") - result['enabled'] = not enabled + result["enabled"] = not enabled - if module.params['state'] is not None: + if module.params["state"] is not None: running = False # check if service is currently running - if module.params['pattern']: + if module.params["pattern"]: # Find ps binary - psbin = module.get_bin_path('ps', True) + psbin = module.get_bin_path("ps", True) # this should be busybox ps, so we only want/need to the 'w' option - rc, psout, dummy = module.run_command([psbin, 'w']) + rc, psout, dummy = module.run_command([psbin, "w"]) # If rc is 0, set running as appropriate if rc == 0: lines = psout.split("\n") - running = any((module.params['pattern'] in line and "pattern=" not in line) for line in lines) + running = any((module.params["pattern"] in line and "pattern=" not in line) for line in lines) else: - rc, dummy, dummy = module.run_command([init_script, 'running']) + rc, dummy, dummy = module.run_command([init_script, "running"]) if rc == 0: running = True # default to desired state - result['state'] = module.params['state'] + result["state"] = module.params["state"] # 
determine action, if any action = None - if module.params['state'] == 'started': + if module.params["state"] == "started": if not running: - action = 'start' - result['changed'] = True - elif module.params['state'] == 'stopped': + action = "start" + result["changed"] = True + elif module.params["state"] == "stopped": if running: - action = 'stop' - result['changed'] = True + action = "stop" + result["changed"] = True else: - action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded - result['state'] = 'started' - result['changed'] = True + action = module.params["state"][:-2] # remove 'ed' from restarted/reloaded + result["state"] = "started" + result["changed"] = True if action: if not module.check_mode: @@ -184,5 +184,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/opkg.py b/plugins/modules/opkg.py index d3bd1029ab4..69afc8e9d94 100644 --- a/plugins/modules/opkg.py +++ b/plugins/modules/opkg.py @@ -123,9 +123,21 @@ class Opkg(StateModuleHelper): argument_spec=dict( name=dict(aliases=["pkg"], required=True, type="list", elements="str"), state=dict(default="present", choices=["present", "installed", "absent", "removed"]), - force=dict(choices=["depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", - "postinstall", "remove", "checksum", "removal-of-dependent-packages"]), - update_cache=dict(default=False, type='bool'), + force=dict( + choices=[ + "depends", + "maintainer", + "reinstall", + "overwrite", + "downgrade", + "space", + "postinstall", + "remove", + "checksum", + "removal-of-dependent-packages", + ] + ), + update_cache=dict(default=False, type="bool"), executable=dict(type="path"), ), ) @@ -168,8 +180,8 @@ def __init_module__(self): @staticmethod def split_name_and_version(package): - """ Split the name and the version when using the NAME=VERSION syntax """ - splitted = package.split('=', 1) + """Split the name and the version when using the NAME=VERSION syntax""" + splitted = package.split("=", 1) if len(splitted) == 1: return splitted[0], None else: @@ -185,7 +197,10 @@ def state_present(self): with self.runner("state force package") as ctx: for package in self.vars.name: pkg_name, pkg_version = self.split_name_and_version(package) - if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version) or self.vars.force == "reinstall": + if ( + not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version) + or self.vars.force == "reinstall" + ): ctx.run(package=package) self.vars.set("run_info", ctx.run_info, verbosity=4) if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version): @@ -219,5 +234,5 @@ def main(): Opkg.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/osx_defaults.py b/plugins/modules/osx_defaults.py index 66f6763764c..c30597e6d6e 100644 --- a/plugins/modules/osx_defaults.py +++ b/plugins/modules/osx_defaults.py @@ -148,38 +148,39 @@ def __init__(self, msg): # /exceptions -------------------------------------------------------------- }}} + # class MacDefaults -------------------------------------------------------- {{{ class OSXDefaults: - """ Class to manage Mac OS user defaults """ + """Class to manage Mac OS user defaults""" # init ---------------------------------------------------------------- {{{ def __init__(self, module): - """ Initialize this module. 
Finds 'defaults' executable and preps the parameters """ + """Initialize this module. Finds 'defaults' executable and preps the parameters""" # Initial var for storing current defaults value self.current_value = None self.module = module - self.domain = module.params['domain'] - self.host = module.params['host'] - self.key = module.params['key'] - self.check_type = module.params['check_type'] - self.type = module.params['type'] - self.array_add = module.params['array_add'] - self.value = module.params['value'] - self.state = module.params['state'] - self.path = module.params['path'] + self.domain = module.params["domain"] + self.host = module.params["host"] + self.key = module.params["key"] + self.check_type = module.params["check_type"] + self.type = module.params["type"] + self.array_add = module.params["array_add"] + self.value = module.params["value"] + self.state = module.params["state"] + self.path = module.params["path"] # Try to find the defaults executable self.executable = self.module.get_bin_path( - 'defaults', + "defaults", required=False, - opt_dirs=self.path.split(':'), + opt_dirs=self.path.split(":"), ) if not self.executable: raise OSXDefaultsException("Unable to locate defaults executable.") # Ensure the value is the correct type - if self.state != 'absent': + if self.state != "absent": self.value = self._convert_type(self.type, self.value) # /init --------------------------------------------------------------- }}} @@ -195,7 +196,7 @@ def is_int(value): @staticmethod def _convert_type(data_type, value): - """ Converts value to given type """ + """Converts value to given type""" if data_type == "string": return str(value) elif data_type in ["bool", "boolean"]: @@ -210,9 +211,7 @@ def _convert_type(data_type, value): try: return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S") except ValueError: - raise OSXDefaultsException( - f"Invalid date value: {value!r}. Required format yyy-mm-dd hh:mm:ss." - ) + raise OSXDefaultsException(f"Invalid date value: {value!r}. Required format yyyy-mm-dd hh:mm:ss.") elif data_type in ["int", "integer"]: if not OSXDefaults.is_int(value): raise OSXDefaultsException(f"Invalid integer value: {value!r}") @@ -228,24 +227,24 @@ def _convert_type(data_type, value): raise OSXDefaultsException("Invalid value. Expected value to be an array") return value - raise OSXDefaultsException(f'Type is not supported: {data_type}') + raise OSXDefaultsException(f"Type is not supported: {data_type}") def _host_args(self): - """ Returns a normalized list of commandline arguments based on the "host" attribute """ + """Returns a normalized list of commandline arguments based on the "host" attribute""" if self.host is None: return [] - elif self.host == 'currentHost': - return ['-currentHost'] + elif self.host == "currentHost": + return ["-currentHost"] else: - return ['-host', self.host] + return ["-host", self.host] def _base_command(self): - """ Returns a list containing the "defaults" executable and any common base arguments """ + """Returns a list containing the "defaults" executable and any common base arguments""" return [self.executable] + self._host_args() @staticmethod def _convert_defaults_str_to_list(value): - """ Converts array output from defaults to an list """ + """Converts array output from defaults to a list""" # Split output of defaults. 
Every line contains a value value = value.splitlines() @@ -254,7 +253,7 @@ def _convert_defaults_str_to_list(value): value.pop(-1) # Remove spaces at beginning and comma (,) at the end, unquote and unescape double quotes - value = [re.sub('^ *"?|"?,? *$', '', x.replace('\\"', '"')) for x in value] + value = [re.sub('^ *"?|"?,? *$', "", x.replace('\\"', '"')) for x in value] return value @@ -262,7 +261,7 @@ def _convert_defaults_str_to_list(value): # commands ------------------------------------------------------------ {{{ def read(self): - """ Reads value of this domain & key from defaults """ + """Reads value of this domain & key from defaults""" # First try to find out the type rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key]) @@ -275,7 +274,7 @@ def read(self): raise OSXDefaultsException(f"An error occurred while reading key type from defaults: {err}") # Ok, lets parse the type from output - data_type = out.strip().replace('Type is ', '') + data_type = out.strip().replace("Type is ", "") # Now get the current value rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key]) @@ -295,7 +294,7 @@ def read(self): self.current_value = self._convert_type(data_type, out) def write(self): - """ Writes value to this domain & key to defaults """ + """Writes value to this domain & key to defaults""" # We need to convert some values so the defaults commandline understands it if isinstance(self.value, bool): if self.value: @@ -307,7 +306,7 @@ def write(self): elif self.array_add and self.current_value is not None: value = list(set(self.value) - set(self.current_value)) elif isinstance(self.value, datetime): - value = self.value.strftime('%Y-%m-%d %H:%M:%S') + value = self.value.strftime("%Y-%m-%d %H:%M:%S") else: value = self.value @@ -319,15 +318,16 @@ def write(self): if not isinstance(value, list): value = [value] - rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, f"-{self.type}"] + value, - expand_user_and_vars=False) + rc, out, err = self.module.run_command( + self._base_command() + ["write", self.domain, self.key, f"-{self.type}"] + value, expand_user_and_vars=False + ) if rc != 0: - raise OSXDefaultsException(f'An error occurred while writing value to defaults: {err}') + raise OSXDefaultsException(f"An error occurred while writing value to defaults: {err}") def delete(self): - """ Deletes defaults key from domain """ - rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key]) + """Deletes defaults key from domain""" + rc, out, err = self.module.run_command(self._base_command() + ["delete", self.domain, self.key]) if rc != 0: raise OSXDefaultsException(f"An error occurred while deleting key from defaults: {err}") @@ -337,11 +337,10 @@ def delete(self): """ Does the magic! :) """ def run(self): - # Get the current value from defaults self.read() - if self.state == 'list': + if self.state == "list": self.module.exit_json(key=self.key, value=self.current_value) # Handle absent state @@ -360,10 +359,19 @@ def run(self): raise OSXDefaultsException(f"Type mismatch. Type in defaults: {type(self.current_value).__name__}") # Current value matches the given value. Nothing need to be done. 
Arrays need extra care - if self.type == "array" and self.current_value is not None and not self.array_add and \ - set(self.current_value) == set(self.value): + if ( + self.type == "array" + and self.current_value is not None + and not self.array_add + and set(self.current_value) == set(self.value) + ): return False - elif self.type == "array" and self.current_value is not None and self.array_add and len(list(set(self.value) - set(self.current_value))) == 0: + elif ( + self.type == "array" + and self.current_value is not None + and self.array_add + and len(list(set(self.value) - set(self.current_value))) == 0 + ): return False elif self.current_value == self.value: return False @@ -385,20 +393,22 @@ def run(self): def main(): module = AnsibleModule( argument_spec=dict( - domain=dict(type='str', default='NSGlobalDomain'), - host=dict(type='str'), - key=dict(type='str', no_log=False), - check_type=dict(type='bool', default=True), - type=dict(type='str', default='string', choices=['array', 'bool', 'boolean', 'date', 'float', 'int', 'integer', 'string']), - array_add=dict(type='bool', default=False), - value=dict(type='raw'), - state=dict(type='str', default='present', choices=['absent', 'list', 'present']), - path=dict(type='str', default='/usr/bin:/usr/local/bin'), + domain=dict(type="str", default="NSGlobalDomain"), + host=dict(type="str"), + key=dict(type="str", no_log=False), + check_type=dict(type="bool", default=True), + type=dict( + type="str", + default="string", + choices=["array", "bool", "boolean", "date", "float", "int", "integer", "string"], + ), + array_add=dict(type="bool", default=False), + value=dict(type="raw"), + state=dict(type="str", default="present", choices=["absent", "list", "present"]), + path=dict(type="str", default="/usr/bin:/usr/local/bin"), ), supports_check_mode=True, - required_if=( - ('state', 'present', ['value']), - ), + required_if=(("state", "present", ["value"]),), ) try: @@ -410,5 +420,5 @@ def main(): # /main ------------------------------------------------------------------- }}} -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ovh_ip_failover.py b/plugins/modules/ovh_ip_failover.py index c4a9c322d5a..b9e95975692 100644 --- a/plugins/modules/ovh_ip_failover.py +++ b/plugins/modules/ovh_ip_failover.py @@ -116,6 +116,7 @@ import ovh import ovh.exceptions from ovh.exceptions import APIError + HAS_OVH = True except ImportError: HAS_OVH = False @@ -124,24 +125,22 @@ def getOvhClient(ansibleModule): - endpoint = ansibleModule.params.get('endpoint') - application_key = ansibleModule.params.get('application_key') - application_secret = ansibleModule.params.get('application_secret') - consumer_key = ansibleModule.params.get('consumer_key') + endpoint = ansibleModule.params.get("endpoint") + application_key = ansibleModule.params.get("application_key") + application_secret = ansibleModule.params.get("application_secret") + consumer_key = ansibleModule.params.get("consumer_key") return ovh.Client( endpoint=endpoint, application_key=application_key, application_secret=application_secret, - consumer_key=consumer_key + consumer_key=consumer_key, ) def waitForNoTask(client, name, timeout): currentTimeout = timeout - while client.get(f'/ip/{quote_plus(name)}/task', - function='genericMoveFloatingIp', - status='todo'): + while client.get(f"/ip/{quote_plus(name)}/task", function="genericMoveFloatingIp", status="todo"): time.sleep(1) # Delay for 1 sec currentTimeout -= 1 if currentTimeout < 0: @@ -152,8 +151,8 @@ def 
waitForNoTask(client, name, timeout): def waitForTaskDone(client, name, taskId, timeout): currentTimeout = timeout while True: - task = client.get(f'/ip/{quote_plus(name)}/task/{taskId}') - if task['status'] == 'done': + task = client.get(f"/ip/{quote_plus(name)}/task/{taskId}") + if task["status"] == "done": return True time.sleep(5) # Delay for 5 sec to not harass the API currentTimeout -= 5 @@ -167,84 +166,87 @@ def main(): name=dict(required=True), service=dict(required=True), endpoint=dict(required=True), - wait_completion=dict(default=True, type='bool'), - wait_task_completion=dict(default=0, type='int'), + wait_completion=dict(default=True, type="bool"), + wait_task_completion=dict(default=0, type="int"), application_key=dict(required=True, no_log=True), application_secret=dict(required=True, no_log=True), consumer_key=dict(required=True, no_log=True), - timeout=dict(default=120, type='int') + timeout=dict(default=120, type="int"), ), - supports_check_mode=True + supports_check_mode=True, ) - result = dict( - changed=False - ) + result = dict(changed=False) if not HAS_OVH: - module.fail_json(msg='ovh-api python module is required to run this module ') + module.fail_json(msg="ovh-api python module is required to run this module ") # Get parameters - name = module.params.get('name') - service = module.params.get('service') - timeout = module.params.get('timeout') - wait_completion = module.params.get('wait_completion') - wait_task_completion = module.params.get('wait_task_completion') + name = module.params.get("name") + service = module.params.get("service") + timeout = module.params.get("timeout") + wait_completion = module.params.get("wait_completion") + wait_task_completion = module.params.get("wait_task_completion") # Connect to OVH API client = getOvhClient(module) # Check that the load balancing exists try: - ips = client.get('/ip', ip=name, type='failover') + ips = client.get("/ip", ip=name, type="failover") except APIError as apiError: module.fail_json( - msg=f'Unable to call OVH API for getting the list of ips, check application key, secret, consumerkey and parameters. ' - f'Error returned by OVH API was : {apiError}') + msg=f"Unable to call OVH API for getting the list of ips, check application key, secret, consumerkey and parameters. " + f"Error returned by OVH API was : {apiError}" + ) - if name not in ips and f'{name}/32' not in ips: - module.fail_json(msg=f'IP {name} does not exist') + if name not in ips and f"{name}/32" not in ips: + module.fail_json(msg=f"IP {name} does not exist") # Check that no task is pending before going on try: if not waitForNoTask(client, name, timeout): module.fail_json( - msg=f'Timeout of {timeout} seconds while waiting for no pending tasks before executing the module ') + msg=f"Timeout of {timeout} seconds while waiting for no pending tasks before executing the module " + ) except APIError as apiError: module.fail_json( - msg=f'Unable to call OVH API for getting the list of pending tasks of the ip, check application key, secret, consumerkey and parameters. ' - f'Error returned by OVH API was : {apiError}') + msg=f"Unable to call OVH API for getting the list of pending tasks of the ip, check application key, secret, consumerkey and parameters. 
" + f"Error returned by OVH API was : {apiError}" + ) try: - ipproperties = client.get(f'/ip/{quote_plus(name)}') + ipproperties = client.get(f"/ip/{quote_plus(name)}") except APIError as apiError: module.fail_json( - msg=f'Unable to call OVH API for getting the properties of the ip, check application key, secret, consumerkey and parameters. ' - f'Error returned by OVH API was : {apiError}') + msg=f"Unable to call OVH API for getting the properties of the ip, check application key, secret, consumerkey and parameters. " + f"Error returned by OVH API was : {apiError}" + ) - if ipproperties['routedTo']['serviceName'] != service: + if ipproperties["routedTo"]["serviceName"] != service: if not module.check_mode: if wait_task_completion == 0: # Move the IP and get the created taskId - task = client.post(f'/ip/{quote_plus(name)}/move', to=service) - taskId = task['taskId'] - result['moved'] = True + task = client.post(f"/ip/{quote_plus(name)}/move", to=service) + taskId = task["taskId"] + result["moved"] = True else: # Just wait for the given taskId to be completed taskId = wait_task_completion - result['moved'] = False - result['taskId'] = taskId + result["moved"] = False + result["taskId"] = taskId if wait_completion or wait_task_completion != 0: if not waitForTaskDone(client, name, taskId, timeout): module.fail_json( - msg=f'Timeout of {timeout} seconds while waiting for completion of move ip to service') - result['waited'] = True + msg=f"Timeout of {timeout} seconds while waiting for completion of move ip to service" + ) + result["waited"] = True else: - result['waited'] = False - result['changed'] = True + result["waited"] = False + result["changed"] = True module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ovh_ip_loadbalancing_backend.py b/plugins/modules/ovh_ip_loadbalancing_backend.py index aeb26bff116..47ff24ddb38 100644 --- a/plugins/modules/ovh_ip_loadbalancing_backend.py +++ b/plugins/modules/ovh_ip_loadbalancing_backend.py @@ -110,6 +110,7 @@ import ovh import ovh.exceptions from ovh.exceptions import APIError + HAS_OVH = True except ImportError: HAS_OVH = False @@ -118,22 +119,22 @@ def getOvhClient(ansibleModule): - endpoint = ansibleModule.params.get('endpoint') - application_key = ansibleModule.params.get('application_key') - application_secret = ansibleModule.params.get('application_secret') - consumer_key = ansibleModule.params.get('consumer_key') + endpoint = ansibleModule.params.get("endpoint") + application_key = ansibleModule.params.get("application_key") + application_secret = ansibleModule.params.get("application_secret") + consumer_key = ansibleModule.params.get("consumer_key") return ovh.Client( endpoint=endpoint, application_key=application_key, application_secret=application_secret, - consumer_key=consumer_key + consumer_key=consumer_key, ) def waitForNoTask(client, name, timeout): currentTimeout = timeout - while len(client.get(f'/ip/loadBalancing/{name}/task')) > 0: + while len(client.get(f"/ip/loadBalancing/{name}/task")) > 0: time.sleep(1) # Delay for 1 sec currentTimeout -= 1 if currentTimeout < 0: @@ -146,60 +147,65 @@ def main(): argument_spec=dict( name=dict(required=True), backend=dict(required=True), - weight=dict(default=8, type='int'), - probe=dict(default='none', - choices=['none', 'http', 'icmp', 'oco']), - state=dict(default='present', choices=['present', 'absent']), + weight=dict(default=8, type="int"), + probe=dict(default="none", choices=["none", "http", "icmp", "oco"]), + 
state=dict(default="present", choices=["present", "absent"]), endpoint=dict(required=True), application_key=dict(required=True, no_log=True), application_secret=dict(required=True, no_log=True), consumer_key=dict(required=True, no_log=True), - timeout=dict(default=120, type='int') + timeout=dict(default=120, type="int"), ) ) if not HAS_OVH: - module.fail_json(msg='ovh-api python module is required to run this module') + module.fail_json(msg="ovh-api python module is required to run this module") # Get parameters - name = module.params.get('name') - state = module.params.get('state') - backend = module.params.get('backend') - weight = module.params.get('weight') - probe = module.params.get('probe') - timeout = module.params.get('timeout') + name = module.params.get("name") + state = module.params.get("state") + backend = module.params.get("backend") + weight = module.params.get("weight") + probe = module.params.get("probe") + timeout = module.params.get("timeout") # Connect to OVH API client = getOvhClient(module) # Check that the load balancing exists try: - loadBalancings = client.get('/ip/loadBalancing') + loadBalancings = client.get("/ip/loadBalancing") except APIError as apiError: module.fail_json( - msg=f'Unable to call OVH API for getting the list of loadBalancing, check application key, secret, consumerkey and parameters. ' - f'Error returned by OVH API was : {apiError}') + msg=f"Unable to call OVH API for getting the list of loadBalancing, check application key, secret, consumerkey and parameters. " + f"Error returned by OVH API was : {apiError}" + ) if name not in loadBalancings: - module.fail_json(msg=f'IP LoadBalancing {name} does not exist') + module.fail_json(msg=f"IP LoadBalancing {name} does not exist") # Check that no task is pending before going on try: if not waitForNoTask(client, name, timeout): module.fail_json( - msg=f'Timeout of {timeout} seconds while waiting for no pending tasks before executing the module ') + msg=f"Timeout of {timeout} seconds while waiting for no pending tasks before executing the module " + ) except APIError as apiError: module.fail_json( - msg=f'Unable to call OVH API for getting the list of pending tasks of the loadBalancing, check application key, secret, consumerkey and ' - f'parameters. Error returned by OVH API was : {apiError}') + msg=f"Unable to call OVH API for getting the list of pending tasks of the loadBalancing, check application key, secret, consumerkey and " + f"parameters. Error returned by OVH API was : {apiError}" + ) try: - backends = client.get(f'/ip/loadBalancing/{name}/backend') + backends = client.get(f"/ip/loadBalancing/{name}/backend") except APIError as apiError: module.fail_json( - msg=('Unable to call OVH API for getting the list of backends ' - 'of the loadBalancing, check application key, secret, consumerkey ' - f'and parameters. Error returned by OVH API was : {apiError}')) + msg=( + "Unable to call OVH API for getting the list of backends " + "of the loadBalancing, check application key, secret, consumerkey " + f"and parameters. 
Error returned by OVH API was : {apiError}" + ) + ) backendExists = backend in backends moduleChanged = False @@ -207,78 +213,83 @@ def main(): if backendExists: # Remove backend try: - client.delete( - f'/ip/loadBalancing/{name}/backend/{backend}') + client.delete(f"/ip/loadBalancing/{name}/backend/{backend}") if not waitForNoTask(client, name, timeout): module.fail_json( - msg=f'Timeout of {timeout} seconds while waiting for completion of removing backend task') + msg=f"Timeout of {timeout} seconds while waiting for completion of removing backend task" + ) except APIError as apiError: module.fail_json( - msg=f'Unable to call OVH API for deleting the backend, check application key, secret, consumerkey and parameters. ' - f'Error returned by OVH API was : {apiError}') + msg=f"Unable to call OVH API for deleting the backend, check application key, secret, consumerkey and parameters. " + f"Error returned by OVH API was : {apiError}" + ) moduleChanged = True else: if backendExists: # Get properties try: - backendProperties = client.get( - f'/ip/loadBalancing/{name}/backend/{backend}') + backendProperties = client.get(f"/ip/loadBalancing/{name}/backend/{backend}") except APIError as apiError: module.fail_json( - msg=f'Unable to call OVH API for getting the backend properties, check application key, secret, consumerkey and parameters. ' - f'Error returned by OVH API was : {apiError}') + msg=f"Unable to call OVH API for getting the backend properties, check application key, secret, consumerkey and parameters. " + f"Error returned by OVH API was : {apiError}" + ) - if backendProperties['weight'] != weight: + if backendProperties["weight"] != weight: # Change weight try: - client.post( - f'/ip/loadBalancing/{name}/backend/{backend}/setWeight', weight=weight) + client.post(f"/ip/loadBalancing/{name}/backend/{backend}/setWeight", weight=weight) if not waitForNoTask(client, name, timeout): module.fail_json( - msg=f'Timeout of {timeout} seconds while waiting for completion of setWeight to backend task') + msg=f"Timeout of {timeout} seconds while waiting for completion of setWeight to backend task" + ) except APIError as apiError: module.fail_json( - msg=f'Unable to call OVH API for updating the weight of the backend, check application key, secret, consumerkey and parameters. ' - f'Error returned by OVH API was : {apiError}') + msg=f"Unable to call OVH API for updating the weight of the backend, check application key, secret, consumerkey and parameters. " + f"Error returned by OVH API was : {apiError}" + ) moduleChanged = True - if backendProperties['probe'] != probe: + if backendProperties["probe"] != probe: # Change probe - backendProperties['probe'] = probe + backendProperties["probe"] = probe try: - client.put( - f'/ip/loadBalancing/{name}/backend/{backend}', probe=probe) + client.put(f"/ip/loadBalancing/{name}/backend/{backend}", probe=probe) if not waitForNoTask(client, name, timeout): module.fail_json( - msg=f'Timeout of {timeout} seconds while waiting for completion of setProbe to backend task') + msg=f"Timeout of {timeout} seconds while waiting for completion of setProbe to backend task" + ) except APIError as apiError: module.fail_json( - msg=f'Unable to call OVH API for updating the probe of the backend, check application key, secret, consumerkey and parameters. ' - f'Error returned by OVH API was : {apiError}') + msg=f"Unable to call OVH API for updating the probe of the backend, check application key, secret, consumerkey and parameters. 
" + f"Error returned by OVH API was : {apiError}" + ) moduleChanged = True else: # Creates backend try: try: - client.post(f'/ip/loadBalancing/{name}/backend', - ipBackend=backend, probe=probe, weight=weight) + client.post(f"/ip/loadBalancing/{name}/backend", ipBackend=backend, probe=probe, weight=weight) except APIError as apiError: module.fail_json( - msg=f'Unable to call OVH API for creating the backend, check application key, secret, consumerkey and parameters. ' - f'Error returned by OVH API was : {apiError}') + msg=f"Unable to call OVH API for creating the backend, check application key, secret, consumerkey and parameters. " + f"Error returned by OVH API was : {apiError}" + ) if not waitForNoTask(client, name, timeout): module.fail_json( - msg=f'Timeout of {timeout} seconds while waiting for completion of backend creation task') + msg=f"Timeout of {timeout} seconds while waiting for completion of backend creation task" + ) except APIError as apiError: module.fail_json( - msg=f'Unable to call OVH API for creating the backend, check application key, secret, consumerkey and parameters. ' - f'Error returned by OVH API was : {apiError}') + msg=f"Unable to call OVH API for creating the backend, check application key, secret, consumerkey and parameters. " + f"Error returned by OVH API was : {apiError}" + ) moduleChanged = True module.exit_json(changed=moduleChanged) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ovh_monthly_billing.py b/plugins/modules/ovh_monthly_billing.py index 4022d664636..33c9770f216 100644 --- a/plugins/modules/ovh_monthly_billing.py +++ b/plugins/modules/ovh_monthly_billing.py @@ -83,6 +83,7 @@ import ovh import ovh.exceptions from ovh.exceptions import APIError + HAS_OVH = True except ImportError: HAS_OVH = False @@ -101,59 +102,59 @@ def main(): application_secret=dict(no_log=True), consumer_key=dict(no_log=True), ), - supports_check_mode=True + supports_check_mode=True, ) # Get parameters - project_id = module.params.get('project_id') - instance_id = module.params.get('instance_id') - endpoint = module.params.get('endpoint') - application_key = module.params.get('application_key') - application_secret = module.params.get('application_secret') - consumer_key = module.params.get('consumer_key') + project_id = module.params.get("project_id") + instance_id = module.params.get("instance_id") + endpoint = module.params.get("endpoint") + application_key = module.params.get("application_key") + application_secret = module.params.get("application_secret") + consumer_key = module.params.get("consumer_key") project = "" instance = "" ovh_billing_status = "" if not HAS_OVH: - module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh') + module.fail_json(msg="python-ovh is required to run this module, see https://github.com/ovh/python-ovh") # Connect to OVH API client = ovh.Client( endpoint=endpoint, application_key=application_key, application_secret=application_secret, - consumer_key=consumer_key + consumer_key=consumer_key, ) # Check that the instance exists try: - project = client.get(f'/cloud/project/{project_id}') + project = client.get(f"/cloud/project/{project_id}") except ovh.exceptions.ResourceNotFoundError: - module.fail_json(msg=f'project {project_id} does not exist') + module.fail_json(msg=f"project {project_id} does not exist") # Check that the instance exists try: - instance = client.get(f'/cloud/project/{project_id}/instance/{instance_id}') + instance = 
client.get(f"/cloud/project/{project_id}/instance/{instance_id}") except ovh.exceptions.ResourceNotFoundError: - module.fail_json(msg=f'instance {instance_id} does not exist in project {project_id}') + module.fail_json(msg=f"instance {instance_id} does not exist in project {project_id}") # Is monthlyBilling already enabled or pending ? - if instance['monthlyBilling'] is not None: - if instance['monthlyBilling']['status'] in ['ok', 'activationPending']: - module.exit_json(changed=False, ovh_billing_status=instance['monthlyBilling']) + if instance["monthlyBilling"] is not None: + if instance["monthlyBilling"]["status"] in ["ok", "activationPending"]: + module.exit_json(changed=False, ovh_billing_status=instance["monthlyBilling"]) if module.check_mode: module.exit_json(changed=True, msg="Dry Run!") try: - ovh_billing_status = client.post(f'/cloud/project/{project_id}/instance/{instance_id}/activeMonthlyBilling') - module.exit_json(changed=True, ovh_billing_status=ovh_billing_status['monthlyBilling']) + ovh_billing_status = client.post(f"/cloud/project/{project_id}/instance/{instance_id}/activeMonthlyBilling") + module.exit_json(changed=True, ovh_billing_status=ovh_billing_status["monthlyBilling"]) except APIError as apiError: module.fail_json(changed=False, msg=f"Failed to call OVH API: {apiError}") # We should never reach here - module.fail_json(msg='Internal ovh_monthly_billing module error') + module.fail_json(msg="Internal ovh_monthly_billing module error") if __name__ == "__main__": diff --git a/plugins/modules/pacemaker_cluster.py b/plugins/modules/pacemaker_cluster.py index ddd56283a12..b702e971e9c 100644 --- a/plugins/modules/pacemaker_cluster.py +++ b/plugins/modules/pacemaker_cluster.py @@ -66,17 +66,19 @@ """ from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper -from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner, get_pacemaker_maintenance_mode +from ansible_collections.community.general.plugins.module_utils.pacemaker import ( + pacemaker_runner, + get_pacemaker_maintenance_mode, +) class PacemakerCluster(StateModuleHelper): module = dict( argument_spec=dict( - state=dict(type='str', choices=[ - 'cleanup', 'offline', 'online', 'restart', 'maintenance'], required=True), - name=dict(type='str', aliases=['node']), - timeout=dict(type='int', default=300), - force=dict(type='bool', default=True) + state=dict(type="str", choices=["cleanup", "offline", "online", "restart", "maintenance"], required=True), + name=dict(type="str", aliases=["node"]), + timeout=dict(type="int", default=300), + force=dict(type="bool", default=True), ), supports_check_mode=True, ) @@ -84,86 +86,105 @@ class PacemakerCluster(StateModuleHelper): def __init_module__(self): self.runner = pacemaker_runner(self.module) - self.vars.set('apply_all', True if not self.module.params['name'] else False) - get_args = dict(cli_action='cluster', state='status', name=None, apply_all=self.vars.apply_all) - if self.module.params['state'] == "maintenance": - get_args['cli_action'] = "property" - get_args['state'] = "config" - get_args['name'] = "maintenance-mode" - elif self.module.params['state'] == "cleanup": - get_args['cli_action'] = "resource" - get_args['name'] = self.module.params['name'] - - self.vars.set('get_args', get_args) - self.vars.set('previous_value', self._get()['out']) - self.vars.set('value', self.vars.previous_value, change=True, diff=True) - - if self.module.params['state'] == "cleanup": + 
self.vars.set("apply_all", True if not self.module.params["name"] else False) + get_args = dict(cli_action="cluster", state="status", name=None, apply_all=self.vars.apply_all) + if self.module.params["state"] == "maintenance": + get_args["cli_action"] = "property" + get_args["state"] = "config" + get_args["name"] = "maintenance-mode" + elif self.module.params["state"] == "cleanup": + get_args["cli_action"] = "resource" + get_args["name"] = self.module.params["name"] + + self.vars.set("get_args", get_args) + self.vars.set("previous_value", self._get()["out"]) + self.vars.set("value", self.vars.previous_value, change=True, diff=True) + + if self.module.params["state"] == "cleanup": self.module.deprecate( 'The value `cleanup` for "state" is being deprecated, use pacemaker_resource module instead.', - version='14.0.0', - collection_name='community.general' + version="14.0.0", + collection_name="community.general", ) def __quit_module__(self): - self.vars.set('value', self._get()['out']) + self.vars.set("value", self._get()["out"]) def _process_command_output(self, fail_on_err, ignore_err_msg=""): def process(rc, out, err): if fail_on_err and rc != 0 and err and ignore_err_msg not in err: - self.do_raise(f'pcs failed with error (rc={rc}): {err}') + self.do_raise(f"pcs failed with error (rc={rc}): {err}") out = out.rstrip() return None if out == "" else out + return process def _get(self): - with self.runner('cli_action state name') as ctx: - result = ctx.run(cli_action=self.vars.get_args['cli_action'], state=self.vars.get_args['state'], name=self.vars.get_args['name']) - return dict(rc=result[0], - out=(result[1] if result[1] != "" else None), - err=result[2]) + with self.runner("cli_action state name") as ctx: + result = ctx.run( + cli_action=self.vars.get_args["cli_action"], + state=self.vars.get_args["state"], + name=self.vars.get_args["name"], + ) + return dict(rc=result[0], out=(result[1] if result[1] != "" else None), err=result[2]) def state_cleanup(self): - with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx: - ctx.run(cli_action='resource') + with self.runner( + "cli_action state name", output_process=self._process_command_output(True, "Fail"), check_mode_skip=True + ) as ctx: + ctx.run(cli_action="resource") def state_offline(self): - with self.runner('cli_action state name apply_all wait', - output_process=self._process_command_output(True, "not currently running"), - check_mode_skip=True) as ctx: - ctx.run(cli_action='cluster', apply_all=self.vars.apply_all, wait=self.module.params['timeout']) + with self.runner( + "cli_action state name apply_all wait", + output_process=self._process_command_output(True, "not currently running"), + check_mode_skip=True, + ) as ctx: + ctx.run(cli_action="cluster", apply_all=self.vars.apply_all, wait=self.module.params["timeout"]) def state_online(self): - with self.runner('cli_action state name apply_all wait', - output_process=self._process_command_output(True, "currently running"), - check_mode_skip=True) as ctx: - ctx.run(cli_action='cluster', apply_all=self.vars.apply_all, wait=self.module.params['timeout']) + with self.runner( + "cli_action state name apply_all wait", + output_process=self._process_command_output(True, "currently running"), + check_mode_skip=True, + ) as ctx: + ctx.run(cli_action="cluster", apply_all=self.vars.apply_all, wait=self.module.params["timeout"]) if get_pacemaker_maintenance_mode(self.runner): - with self.runner('cli_action state name', 
output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx: - ctx.run(cli_action='property', state='maintenance', name='maintenance-mode=false') + with self.runner( + "cli_action state name", output_process=self._process_command_output(True, "Fail"), check_mode_skip=True + ) as ctx: + ctx.run(cli_action="property", state="maintenance", name="maintenance-mode=false") def state_maintenance(self): - with self.runner('cli_action state name', - output_process=self._process_command_output(True, "Fail"), - check_mode_skip=True) as ctx: - ctx.run(cli_action='property', name='maintenance-mode=true') + with self.runner( + "cli_action state name", output_process=self._process_command_output(True, "Fail"), check_mode_skip=True + ) as ctx: + ctx.run(cli_action="property", name="maintenance-mode=true") def state_restart(self): - with self.runner('cli_action state name apply_all wait', - output_process=self._process_command_output(True, "not currently running"), - check_mode_skip=True) as ctx: - ctx.run(cli_action='cluster', state='offline', apply_all=self.vars.apply_all, wait=self.module.params['timeout']) - ctx.run(cli_action='cluster', state='online', apply_all=self.vars.apply_all, wait=self.module.params['timeout']) + with self.runner( + "cli_action state name apply_all wait", + output_process=self._process_command_output(True, "not currently running"), + check_mode_skip=True, + ) as ctx: + ctx.run( + cli_action="cluster", state="offline", apply_all=self.vars.apply_all, wait=self.module.params["timeout"] + ) + ctx.run( + cli_action="cluster", state="online", apply_all=self.vars.apply_all, wait=self.module.params["timeout"] + ) if get_pacemaker_maintenance_mode(self.runner): - with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx: - ctx.run(cli_action='property', state='maintenance', name='maintenance-mode=false') + with self.runner( + "cli_action state name", output_process=self._process_command_output(True, "Fail"), check_mode_skip=True + ) as ctx: + ctx.run(cli_action="property", state="maintenance", name="maintenance-mode=false") def main(): PacemakerCluster.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pacemaker_info.py b/plugins/modules/pacemaker_info.py index a9f065133f2..c75ef5ec409 100644 --- a/plugins/modules/pacemaker_info.py +++ b/plugins/modules/pacemaker_info.py @@ -72,7 +72,7 @@ class PacemakerInfo(ModuleHelper): "resource_info": "resource", "stonith_info": "stonith", "constraint_info": "constraint", - "property_info": "property" + "property_info": "property", } output_params = list(info_vars.keys()) @@ -85,13 +85,16 @@ def __init_module__(self): def _process_command_output(self, cli_action=""): def process(rc, out, err): if rc != 0: - self.do_raise(f'pcs {cli_action} config failed with error (rc={rc}): {err}') + self.do_raise(f"pcs {cli_action} config failed with error (rc={rc}): {err}") out = json.loads(out) return None if out == "" else out + return process def _get_info(self, cli_action): - with self.runner("cli_action config output_format", output_process=self._process_command_output(cli_action)) as ctx: + with self.runner( + "cli_action config output_format", output_process=self._process_command_output(cli_action) + ) as ctx: return ctx.run(cli_action=cli_action, output_format="json") def __run__(self): @@ -103,5 +106,5 @@ def main(): PacemakerInfo.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() 
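Reviewer note on the pacemaker_* rewrites in this patch (pacemaker_cluster and pacemaker_info above, pacemaker_resource and pacemaker_stonith below): they all re-wrap the same closure-based `output_process` hook, and the long `self.runner(...)` calls are easier to audit once that pattern is seen in isolation. Below is a minimal, self-contained sketch of the pattern, in plain Python outside Ansible; `make_output_processor` and the sample rc/out/err values are hypothetical stand-ins for `_process_command_output` and real `pcs` output, and `RuntimeError` stands in for `ModuleHelper.do_raise`.

    def make_output_processor(fail_on_err, ignore_err_msg=""):
        """Build the callback the runner context invokes with (rc, out, err)."""
        def process(rc, out, err):
            # Fail only on unexpected errors; stderr containing ignore_err_msg
            # (e.g. "not currently running") is tolerated as a no-op outcome.
            if fail_on_err and rc != 0 and err and ignore_err_msg not in err:
                raise RuntimeError(f"pcs failed with error (rc={rc}): {err}")
            out = out.rstrip()
            return None if out == "" else out
        return process

    # Illustrative values only:
    process = make_output_processor(True, "not currently running")
    print(process(0, "Cluster Name: demo\n", ""))                   # -> "Cluster Name: demo"
    print(process(1, "", "Error: node is not currently running"))   # -> None (tolerated)

One readability payoff of the reformat is visible against this sketch: with one keyword argument per line, the ignore string each state handler passes (for example "already exists" in state_present versus "does not exist" in state_absent) stands out at a glance.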
diff --git a/plugins/modules/pacemaker_resource.py b/plugins/modules/pacemaker_resource.py index 2768590080a..b8e8cf3c1f9 100644 --- a/plugins/modules/pacemaker_resource.py +++ b/plugins/modules/pacemaker_resource.py @@ -146,66 +146,80 @@ """ from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper -from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner, get_pacemaker_maintenance_mode +from ansible_collections.community.general.plugins.module_utils.pacemaker import ( + pacemaker_runner, + get_pacemaker_maintenance_mode, +) class PacemakerResource(StateModuleHelper): module = dict( argument_spec=dict( - state=dict(type='str', default='present', choices=[ - 'present', 'absent', 'cloned', 'enabled', 'disabled', 'cleanup']), - name=dict(type='str'), - resource_type=dict(type='dict', options=dict( - resource_name=dict(type='str'), - resource_standard=dict(type='str'), - resource_provider=dict(type='str'), - )), - resource_option=dict(type='list', elements='str', default=list()), - resource_operation=dict(type='list', elements='dict', default=list(), options=dict( - operation_action=dict(type='str'), - operation_option=dict(type='list', elements='str'), - )), - resource_meta=dict(type='list', elements='str'), - resource_argument=dict(type='dict', options=dict( - argument_action=dict(type='str', choices=['clone', 'master', 'group', 'promotable']), - argument_option=dict(type='list', elements='str'), - )), - resource_clone_ids=dict(type='list', elements='str'), - resource_clone_meta=dict(type='list', elements='str'), - wait=dict(type='int', default=300), + state=dict( + type="str", default="present", choices=["present", "absent", "cloned", "enabled", "disabled", "cleanup"] + ), + name=dict(type="str"), + resource_type=dict( + type="dict", + options=dict( + resource_name=dict(type="str"), + resource_standard=dict(type="str"), + resource_provider=dict(type="str"), + ), + ), + resource_option=dict(type="list", elements="str", default=list()), + resource_operation=dict( + type="list", + elements="dict", + default=list(), + options=dict( + operation_action=dict(type="str"), + operation_option=dict(type="list", elements="str"), + ), + ), + resource_meta=dict(type="list", elements="str"), + resource_argument=dict( + type="dict", + options=dict( + argument_action=dict(type="str", choices=["clone", "master", "group", "promotable"]), + argument_option=dict(type="list", elements="str"), + ), + ), + resource_clone_ids=dict(type="list", elements="str"), + resource_clone_meta=dict(type="list", elements="str"), + wait=dict(type="int", default=300), ), required_if=[ - ('state', 'present', ['resource_type', 'resource_option', 'name']), - ('state', 'absent', ['name']), - ('state', 'enabled', ['name']), - ('state', 'disabled', ['name']), + ("state", "present", ["resource_type", "resource_option", "name"]), + ("state", "absent", ["name"]), + ("state", "enabled", ["name"]), + ("state", "disabled", ["name"]), ], supports_check_mode=True, ) def __init_module__(self): self.runner = pacemaker_runner(self.module) - self.vars.set('previous_value', self._get()['out']) - self.vars.set('value', self.vars.previous_value, change=True, diff=True) - self.module.params['name'] = self.module.params['name'] or None + self.vars.set("previous_value", self._get()["out"]) + self.vars.set("value", self.vars.previous_value, change=True, diff=True) + self.module.params["name"] = self.module.params["name"] or None def __quit_module__(self): - 
self.vars.set('value', self._get()['out']) + self.vars.set("value", self._get()["out"]) def _process_command_output(self, fail_on_err, ignore_err_msg=""): def process(rc, out, err): if fail_on_err and rc != 0 and err and ignore_err_msg not in err: - self.do_raise(f'pcs failed with error (rc={rc}): {err}') + self.do_raise(f"pcs failed with error (rc={rc}): {err}") out = out.rstrip() return None if out == "" else out + return process def _get(self): - with self.runner('cli_action state name') as ctx: - result = ctx.run(cli_action="resource", state='status') - return dict(rc=result[0], - out=(result[1] if result[1] != "" else None), - err=result[2]) + with self.runner("cli_action state name") as ctx: + result = ctx.run(cli_action="resource", state="status") + return dict(rc=result[0], out=(result[1] if result[1] != "" else None), err=result[2]) def fmt_as_stack_argument(self, value, arg): if value is not None: @@ -213,44 +227,65 @@ def fmt_as_stack_argument(self, value, arg): def state_absent(self): force = get_pacemaker_maintenance_mode(self.runner) - with self.runner('cli_action state name force', output_process=self._process_command_output(True, "does not exist"), check_mode_skip=True) as ctx: - ctx.run(cli_action='resource', force=force) + with self.runner( + "cli_action state name force", + output_process=self._process_command_output(True, "does not exist"), + check_mode_skip=True, + ) as ctx: + ctx.run(cli_action="resource", force=force) def state_present(self): with self.runner( - 'cli_action state name resource_type resource_option resource_operation resource_meta resource_argument ' - 'resource_clone_ids resource_clone_meta wait', - output_process=self._process_command_output(not get_pacemaker_maintenance_mode(self.runner), "already exists"), - check_mode_skip=True) as ctx: - ctx.run(cli_action='resource', resource_clone_ids=self.fmt_as_stack_argument(self.module.params["resource_clone_ids"], "clone")) + "cli_action state name resource_type resource_option resource_operation resource_meta resource_argument " + "resource_clone_ids resource_clone_meta wait", + output_process=self._process_command_output( + not get_pacemaker_maintenance_mode(self.runner), "already exists" + ), + check_mode_skip=True, + ) as ctx: + ctx.run( + cli_action="resource", + resource_clone_ids=self.fmt_as_stack_argument(self.module.params["resource_clone_ids"], "clone"), + ) def state_cloned(self): with self.runner( - 'cli_action state name resource_clone_ids resource_clone_meta wait', + "cli_action state name resource_clone_ids resource_clone_meta wait", output_process=self._process_command_output( - not get_pacemaker_maintenance_mode(self.runner), - "already a clone resource"), check_mode_skip=True) as ctx: - ctx.run(cli_action='resource', resource_clone_meta=self.fmt_as_stack_argument(self.module.params["resource_clone_meta"], "meta")) + not get_pacemaker_maintenance_mode(self.runner), "already a clone resource" + ), + check_mode_skip=True, + ) as ctx: + ctx.run( + cli_action="resource", + resource_clone_meta=self.fmt_as_stack_argument(self.module.params["resource_clone_meta"], "meta"), + ) def state_enabled(self): - with self.runner('cli_action state name', output_process=self._process_command_output(True, "Starting"), check_mode_skip=True) as ctx: - ctx.run(cli_action='resource') + with self.runner( + "cli_action state name", output_process=self._process_command_output(True, "Starting"), check_mode_skip=True + ) as ctx: + ctx.run(cli_action="resource") def state_disabled(self): - with 
self.runner('cli_action state name', output_process=self._process_command_output(True, "Stopped"), check_mode_skip=True) as ctx: - ctx.run(cli_action='resource') + with self.runner( + "cli_action state name", output_process=self._process_command_output(True, "Stopped"), check_mode_skip=True + ) as ctx: + ctx.run(cli_action="resource") def state_cleanup(self): - runner_args = ['cli_action', 'state'] - if self.module.params['name']: - runner_args.append('name') - with self.runner(runner_args, output_process=self._process_command_output(True, "Clean"), check_mode_skip=True) as ctx: - ctx.run(cli_action='resource') + runner_args = ["cli_action", "state"] + if self.module.params["name"]: + runner_args.append("name") + with self.runner( + runner_args, output_process=self._process_command_output(True, "Clean"), check_mode_skip=True + ) as ctx: + ctx.run(cli_action="resource") def main(): PacemakerResource.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pacemaker_stonith.py b/plugins/modules/pacemaker_stonith.py index f33215a8883..d4eb229694f 100644 --- a/plugins/modules/pacemaker_stonith.py +++ b/plugins/modules/pacemaker_stonith.py @@ -6,7 +6,7 @@ from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = """ module: pacemaker_stonith short_description: Manage Pacemaker STONITH author: @@ -95,9 +95,9 @@ - Timeout period for polling the STONITH creation. type: int default: 300 -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Create virtual-ip STONITH community.general.pacemaker_stonith: state: present @@ -109,9 +109,9 @@ - operation_action: monitor operation_options: - "interval=30s" -''' +""" -RETURN = ''' +RETURN = """ previous_value: description: The value of the STONITH before executing the module. 
type: str @@ -122,7 +122,7 @@ type: str sample: " * virtual-stonith\t(stonith:fence_virt):\t Started" returned: on success -''' +""" from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner @@ -131,48 +131,55 @@ class PacemakerStonith(StateModuleHelper): module = dict( argument_spec=dict( - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - name=dict(type='str', required=True), - stonith_type=dict(type='str'), - stonith_options=dict(type='list', elements='str', default=[]), - stonith_operations=dict(type='list', elements='dict', default=[], options=dict( - operation_action=dict(type='str'), - operation_options=dict(type='list', elements='str'), - )), - stonith_metas=dict(type='list', elements='str'), - stonith_argument=dict(type='dict', options=dict( - argument_action=dict(type='str', choices=['before', 'after', 'group']), - argument_options=dict(type='list', elements='str'), - )), - agent_validation=dict(type='bool', default=False), - wait=dict(type='int', default=300), + state=dict(type="str", default="present", choices=["present", "absent", "enabled", "disabled"]), + name=dict(type="str", required=True), + stonith_type=dict(type="str"), + stonith_options=dict(type="list", elements="str", default=[]), + stonith_operations=dict( + type="list", + elements="dict", + default=[], + options=dict( + operation_action=dict(type="str"), + operation_options=dict(type="list", elements="str"), + ), + ), + stonith_metas=dict(type="list", elements="str"), + stonith_argument=dict( + type="dict", + options=dict( + argument_action=dict(type="str", choices=["before", "after", "group"]), + argument_options=dict(type="list", elements="str"), + ), + ), + agent_validation=dict(type="bool", default=False), + wait=dict(type="int", default=300), ), - required_if=[('state', 'present', ['stonith_type', 'stonith_options'])], - supports_check_mode=True + required_if=[("state", "present", ["stonith_type", "stonith_options"])], + supports_check_mode=True, ) def __init_module__(self): self.runner = pacemaker_runner(self.module) - self.vars.set('previous_value', self._get()['out']) - self.vars.set('value', self.vars.previous_value, change=True, diff=True) + self.vars.set("previous_value", self._get()["out"]) + self.vars.set("value", self.vars.previous_value, change=True, diff=True) def __quit_module__(self): - self.vars.set('value', self._get()['out']) + self.vars.set("value", self._get()["out"]) def _process_command_output(self, fail_on_err, ignore_err_msg=""): def process(rc, out, err): if fail_on_err and rc != 0 and err and ignore_err_msg not in err: - self.do_raise(f'pcs failed with error (rc={rc}): {err}') + self.do_raise(f"pcs failed with error (rc={rc}): {err}") out = out.rstrip() return None if out == "" else out + return process def _get(self): - with self.runner('cli_action state name') as ctx: - result = ctx.run(cli_action='stonith', state='status') - return dict(rc=result[0], - out=result[1] if result[1] != "" else None, - err=result[2]) + with self.runner("cli_action state name") as ctx: + result = ctx.run(cli_action="stonith", state="status") + return dict(rc=result[0], out=result[1] if result[1] != "" else None, err=result[2]) def fmt_stonith_resource(self): return dict(resource_name=self.vars.stonith_type) @@ -181,38 +188,53 @@ def fmt_stonith_resource(self): def fmt_stonith_operations(self): 
modified_stonith_operations = [] for stonith_operation in self.vars.stonith_operations: - modified_stonith_operations.append(dict(operation_action=stonith_operation.get('operation_action'), - operation_option=stonith_operation.get('operation_options'))) + modified_stonith_operations.append( + dict( + operation_action=stonith_operation.get("operation_action"), + operation_option=stonith_operation.get("operation_options"), + ) + ) return modified_stonith_operations def state_absent(self): - with self.runner('cli_action state name', output_process=self._process_command_output(True, "does not exist"), check_mode_skip=True) as ctx: - ctx.run(cli_action='stonith') + with self.runner( + "cli_action state name", + output_process=self._process_command_output(True, "does not exist"), + check_mode_skip=True, + ) as ctx: + ctx.run(cli_action="stonith") def state_present(self): with self.runner( - 'cli_action state name resource_type resource_option resource_operation resource_meta resource_argument agent_validation wait', - output_process=self._process_command_output(True, "already exists"), - check_mode_skip=True) as ctx: - ctx.run(cli_action='stonith', - resource_type=self.fmt_stonith_resource(), - resource_option=self.vars.stonith_options, - resource_operation=self.fmt_stonith_operations(), - resource_meta=self.vars.stonith_metas, - resource_argument=self.vars.stonith_argument) + "cli_action state name resource_type resource_option resource_operation resource_meta resource_argument agent_validation wait", + output_process=self._process_command_output(True, "already exists"), + check_mode_skip=True, + ) as ctx: + ctx.run( + cli_action="stonith", + resource_type=self.fmt_stonith_resource(), + resource_option=self.vars.stonith_options, + resource_operation=self.fmt_stonith_operations(), + resource_meta=self.vars.stonith_metas, + resource_argument=self.vars.stonith_argument, + ) def state_enabled(self): - with self.runner('cli_action state name', output_process=self._process_command_output(True, "Starting"), check_mode_skip=True) as ctx: - ctx.run(cli_action='stonith') + with self.runner( + "cli_action state name", output_process=self._process_command_output(True, "Starting"), check_mode_skip=True + ) as ctx: + ctx.run(cli_action="stonith") def state_disabled(self): - with self.runner('cli_action state name', output_process=self._process_command_output(True, "Stopped"), check_mode_skip=True) as ctx: - ctx.run(cli_action='stonith') + with self.runner( + "cli_action state name", output_process=self._process_command_output(True, "Stopped"), check_mode_skip=True + ) as ctx: + ctx.run(cli_action="stonith") def main(): PacemakerStonith.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/packet_device.py b/plugins/modules/packet_device.py index 8bb10cbcc35..23c26ecf4cb 100644 --- a/plugins/modules/packet_device.py +++ b/plugins/modules/packet_device.py @@ -287,25 +287,25 @@ HAS_PACKET_SDK = False -NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]') -HOSTNAME_RE = rf'({NAME_RE}\.)*{NAME_RE}$' +NAME_RE = r"({0}|{0}{1}*{0})".format(r"[a-zA-Z0-9]", r"[a-zA-Z0-9\-]") +HOSTNAME_RE = rf"({NAME_RE}\.)*{NAME_RE}$" MAX_DEVICES = 100 PACKET_DEVICE_STATES = ( - 'queued', - 'provisioning', - 'failed', - 'powering_on', - 'active', - 'powering_off', - 'inactive', - 'rebooting', + "queued", + "provisioning", + "failed", + "powering_on", + "active", + "powering_off", + "inactive", + "rebooting", ) PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" -ALLOWED_STATES 
= ['absent', 'active', 'inactive', 'rebooted', 'present'] +ALLOWED_STATES = ["absent", "active", "inactive", "rebooted", "present"] def serialize_device(device): @@ -342,16 +342,16 @@ def serialize_device(device): """ device_data = {} - device_data['id'] = device.id - device_data['hostname'] = device.hostname - device_data['tags'] = device.tags - device_data['locked'] = device.locked - device_data['state'] = device.state - device_data['ip_addresses'] = [ + device_data["id"] = device.id + device_data["hostname"] = device.hostname + device_data["tags"] = device.tags + device_data["locked"] = device.locked + device_data["state"] = device.state + device_data["ip_addresses"] = [ { - 'address': addr_data['address'], - 'address_family': addr_data['address_family'], - 'public': addr_data['public'], + "address": addr_data["address"], + "address_family": addr_data["address_family"], + "public": addr_data["public"], } for addr_data in device.ip_addresses ] @@ -361,19 +361,19 @@ def serialize_device(device): # - public_ipv6 # - private_ipv4 # - private_ipv6 (if there is one) - for ipdata in device_data['ip_addresses']: - if ipdata['public']: - if ipdata['address_family'] == 6: - device_data['public_ipv6'] = ipdata['address'] - elif ipdata['address_family'] == 4: - device_data['public_ipv4'] = ipdata['address'] - elif not ipdata['public']: - if ipdata['address_family'] == 6: + for ipdata in device_data["ip_addresses"]: + if ipdata["public"]: + if ipdata["address_family"] == 6: + device_data["public_ipv6"] = ipdata["address"] + elif ipdata["address_family"] == 4: + device_data["public_ipv4"] = ipdata["address"] + elif not ipdata["public"]: + if ipdata["address_family"] == 6: # Packet doesn't give public ipv6 yet, but maybe one # day they will - device_data['private_ipv6'] = ipdata['address'] - elif ipdata['address_family'] == 4: - device_data['private_ipv4'] = ipdata['address'] + device_data["private_ipv6"] = ipdata["address"] + elif ipdata["address_family"] == 4: + device_data["private_ipv4"] = ipdata["address"] return device_data @@ -390,8 +390,8 @@ def is_valid_uuid(myuuid): def listify_string_name_or_id(s): - if ',' in s: - return s.split(',') + if "," in s: + return s.split(",") else: return [s] @@ -400,9 +400,9 @@ def get_hostname_list(module): # hostname is a list-typed param, so I guess it should return list # (and it does, in Ansible 2.2.1) but in order to be defensive, # I keep here the code to convert an eventual string to list - hostnames = module.params.get('hostnames') - count = module.params.get('count') - count_offset = module.params.get('count_offset') + hostnames = module.params.get("hostnames") + count = module.params.get("count") + count_offset = module.params.get("count_offset") if isinstance(hostnames, str): hostnames = listify_string_name_or_id(hostnames) if not isinstance(hostnames, list): @@ -412,8 +412,9 @@ def get_hostname_list(module): hostnames = [h.strip() for h in hostnames] if len(hostnames) > 1 and count > 1: - _msg = ("If you set count>1, you should only specify one hostname " - "with the %d formatter, not a list of hostnames.") + _msg = ( + "If you set count>1, you should only specify one hostname with the %d formatter, not a list of hostnames." 
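For context on the `get_hostname_list` hunk above: when `count > 1`, the module expands a single `%d`-style hostname spec into `count` hostnames, appending a zero-padded counter if no formatter was given. A minimal, self-contained sketch of that expansion; the `expand_hostnames` helper is hypothetical and stands in for the module code, and the default `count_offset=1` matches the module's parameter default:

```python
# Sketch only: mirrors the %d-formatter expansion in get_hostname_list.
import re


def expand_hostnames(hostname_spec, count, count_offset=1):
    count_range = range(count_offset, count_offset + count)
    if re.search(r"%\d{0,2}d", hostname_spec):
        # The spec already carries a formatter, e.g. "web%02d".
        return [hostname_spec % i for i in count_range]
    if count > 1:
        # No formatter given: append a zero-padded counter, as the module does.
        hostname_spec = "%s%%02d" % hostname_spec
        return [hostname_spec % i for i in count_range]
    return [hostname_spec]


print(expand_hostnames("web%02d", 3))  # ['web01', 'web02', 'web03']
print(expand_hostnames("db", 2))       # ['db01', 'db02']
```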
+ ) raise Exception(_msg) if len(hostnames) == 1 and count > 0: @@ -422,7 +423,7 @@ def get_hostname_list(module): if re.search(r"%\d{0,2}d", hostname_spec): hostnames = [hostname_spec % i for i in count_range] elif count > 1: - hostname_spec = '%s%%02d' % hostname_spec + hostname_spec = "%s%%02d" % hostname_spec hostnames = [hostname_spec % i for i in count_range] for hn in hostnames: @@ -435,7 +436,7 @@ def get_hostname_list(module): def get_device_id_list(module): - device_ids = module.params.get('device_ids') + device_ids = module.params.get("device_ids") if isinstance(device_ids, str): device_ids = listify_string_name_or_id(device_ids) @@ -452,23 +453,22 @@ def get_device_id_list(module): def create_single_device(module, packet_conn, hostname): - - for param in ('hostnames', 'operating_system', 'plan'): + for param in ("hostnames", "operating_system", "plan"): if not module.params.get(param): raise Exception(f"{param} parameter is required for new device.") - project_id = module.params.get('project_id') - plan = module.params.get('plan') - tags = module.params.get('tags') - user_data = module.params.get('user_data') - facility = module.params.get('facility') - operating_system = module.params.get('operating_system') - locked = module.params.get('locked') - ipxe_script_url = module.params.get('ipxe_script_url') - always_pxe = module.params.get('always_pxe') - if operating_system != 'custom_ipxe': - for param in ('ipxe_script_url', 'always_pxe'): + project_id = module.params.get("project_id") + plan = module.params.get("plan") + tags = module.params.get("tags") + user_data = module.params.get("user_data") + facility = module.params.get("facility") + operating_system = module.params.get("operating_system") + locked = module.params.get("locked") + ipxe_script_url = module.params.get("ipxe_script_url") + always_pxe = module.params.get("always_pxe") + if operating_system != "custom_ipxe": + for param in ("ipxe_script_url", "always_pxe"): if module.params.get(param): - raise Exception(f'{param} parameter is not valid for non custom_ipxe operating_system.') + raise Exception(f"{param} parameter is not valid for non custom_ipxe operating_system.") device = packet_conn.create_device( project_id=project_id, @@ -480,7 +480,8 @@ def create_single_device(module, packet_conn, hostname): userdata=user_data, locked=locked, ipxe_script_url=ipxe_script_url, - always_pxe=always_pxe) + always_pxe=always_pxe, + ) return device @@ -491,29 +492,29 @@ def refresh_device_list(module, packet_conn, devices): def wait_for_devices_active(module, packet_conn, watched_devices): - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") wait_timeout = time.time() + wait_timeout refreshed = watched_devices while wait_timeout > time.time(): refreshed = refresh_device_list(module, packet_conn, watched_devices) - if all(d.state == 'active' for d in refreshed): + if all(d.state == "active" for d in refreshed): return refreshed time.sleep(5) - raise Exception("Waiting for state \"active\" timed out for devices: %s" - % [d.hostname for d in refreshed if d.state != "active"]) + raise Exception( + 'Waiting for state "active" timed out for devices: %s' % [d.hostname for d in refreshed if d.state != "active"] + ) def wait_for_public_IPv(module, packet_conn, created_devices): - def has_public_ip(addr_list, ip_v): - return any(a['public'] and a['address_family'] == ip_v and a['address'] for a in addr_list) + return any(a["public"] and a["address_family"] == ip_v and a["address"] for a in 
addr_list) def all_have_public_ip(ds, ip_v): return all(has_public_ip(d.ip_addresses, ip_v) for d in ds) - address_family = module.params.get('wait_for_public_IPv') + address_family = module.params.get("wait_for_public_IPv") - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") wait_timeout = time.time() + wait_timeout while wait_timeout > time.time(): refreshed = refresh_device_list(module, packet_conn, created_devices) @@ -521,23 +522,23 @@ def all_have_public_ip(ds, ip_v): return refreshed time.sleep(5) - raise Exception(f"Waiting for IPv{address_family} address timed out. Hostnames: {[d.hostname for d in created_devices]}") + raise Exception( + f"Waiting for IPv{address_family} address timed out. Hostnames: {[d.hostname for d in created_devices]}" + ) def get_existing_devices(module, packet_conn): - project_id = module.params.get('project_id') - return packet_conn.list_devices( - project_id, params={ - 'per_page': MAX_DEVICES}) + project_id = module.params.get("project_id") + return packet_conn.list_devices(project_id, params={"per_page": MAX_DEVICES}) def get_specified_device_identifiers(module): - if module.params.get('device_ids'): + if module.params.get("device_ids"): device_id_list = get_device_id_list(module) - return {'ids': device_id_list, 'hostnames': []} - elif module.params.get('hostnames'): + return {"ids": device_id_list, "hostnames": []} + elif module.params.get("hostnames"): hostname_list = get_hostname_list(module) - return {'hostnames': hostname_list, 'ids': []} + return {"hostnames": hostname_list, "ids": []} def act_on_devices(module, packet_conn, target_state): @@ -545,31 +546,32 @@ def act_on_devices(module, packet_conn, target_state): existing_devices = get_existing_devices(module, packet_conn) changed = False create_hostnames = [] - if target_state in ['present', 'active', 'rebooted']: + if target_state in ["present", "active", "rebooted"]: # states where we might create non-existing specified devices existing_devices_names = [ed.hostname for ed in existing_devices] - create_hostnames = [hn for hn in specified_identifiers['hostnames'] - if hn not in existing_devices_names] + create_hostnames = [hn for hn in specified_identifiers["hostnames"] if hn not in existing_devices_names] - process_devices = [d for d in existing_devices - if (d.id in specified_identifiers['ids']) or - (d.hostname in specified_identifiers['hostnames'])] + process_devices = [ + d + for d in existing_devices + if (d.id in specified_identifiers["ids"]) or (d.hostname in specified_identifiers["hostnames"]) + ] - if target_state != 'present': + if target_state != "present": _absent_state_map = {} for s in PACKET_DEVICE_STATES: _absent_state_map[s] = packet.Device.delete state_map = { - 'absent': _absent_state_map, - 'active': {'inactive': packet.Device.power_on, - 'provisioning': None, 'rebooting': None - }, - 'inactive': {'active': packet.Device.power_off}, - 'rebooted': {'active': packet.Device.reboot, - 'inactive': packet.Device.power_on, - 'provisioning': None, 'rebooting': None - }, + "absent": _absent_state_map, + "active": {"inactive": packet.Device.power_on, "provisioning": None, "rebooting": None}, + "inactive": {"active": packet.Device.power_off}, + "rebooted": { + "active": packet.Device.reboot, + "inactive": packet.Device.power_on, + "provisioning": None, + "rebooting": None, + }, } # First do non-creation actions, it might be faster @@ -582,80 +584,77 @@ def act_on_devices(module, packet_conn, target_state): api_operation(d) changed = 
True else: - _msg = ( - f"I don't know how to process existing device {d.hostname} from state {d.state} to state {target_state}") + _msg = f"I don't know how to process existing device {d.hostname} from state {d.state} to state {target_state}" raise Exception(_msg) # At last create missing devices created_devices = [] if create_hostnames: - created_devices = [create_single_device(module, packet_conn, n) - for n in create_hostnames] - if module.params.get('wait_for_public_IPv'): - created_devices = wait_for_public_IPv( - module, packet_conn, created_devices) + created_devices = [create_single_device(module, packet_conn, n) for n in create_hostnames] + if module.params.get("wait_for_public_IPv"): + created_devices = wait_for_public_IPv(module, packet_conn, created_devices) changed = True processed_devices = created_devices + process_devices - if target_state == 'active': - processed_devices = wait_for_devices_active( - module, packet_conn, processed_devices) + if target_state == "active": + processed_devices = wait_for_devices_active(module, packet_conn, processed_devices) - return { - 'changed': changed, - 'devices': [serialize_device(d) for d in processed_devices] - } + return {"changed": changed, "devices": [serialize_device(d) for d in processed_devices]} def main(): module = AnsibleModule( argument_spec=dict( - auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR), - no_log=True), - count=dict(type='int', default=1), - count_offset=dict(type='int', default=1), - device_ids=dict(type='list', elements='str'), + auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR), no_log=True), + count=dict(type="int", default=1), + count_offset=dict(type="int", default=1), + device_ids=dict(type="list", elements="str"), facility=dict(), - features=dict(type='dict'), - hostnames=dict(type='list', elements='str', aliases=['name']), - tags=dict(type='list', elements='str'), - locked=dict(type='bool', default=False, aliases=['lock']), + features=dict(type="dict"), + hostnames=dict(type="list", elements="str", aliases=["name"]), + tags=dict(type="list", elements="str"), + locked=dict(type="bool", default=False, aliases=["lock"]), operating_system=dict(), plan=dict(), project_id=dict(required=True), - state=dict(choices=ALLOWED_STATES, default='present'), + state=dict(choices=ALLOWED_STATES, default="present"), user_data=dict(), - wait_for_public_IPv=dict(type='int', choices=[4, 6]), - wait_timeout=dict(type='int', default=900), - ipxe_script_url=dict(default=''), - always_pxe=dict(type='bool', default=False), + wait_for_public_IPv=dict(type="int", choices=[4, 6]), + wait_timeout=dict(type="int", default=900), + ipxe_script_url=dict(default=""), + always_pxe=dict(type="bool", default=False), ), - required_one_of=[('device_ids', 'hostnames',)], + required_one_of=[ + ( + "device_ids", + "hostnames", + ) + ], mutually_exclusive=[ - ('hostnames', 'device_ids'), - ('count', 'device_ids'), - ('count_offset', 'device_ids'), - ] + ("hostnames", "device_ids"), + ("count", "device_ids"), + ("count_offset", "device_ids"), + ], ) if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') + module.fail_json(msg="packet required for this module") - if not module.params.get('auth_token'): + if not module.params.get("auth_token"): _fail_msg = f"if Packet API token is not in environment variable {PACKET_API_TOKEN_ENV_VAR}, the auth_token parameter is required" module.fail_json(msg=_fail_msg) - auth_token = module.params.get('auth_token') + auth_token = 
module.params.get("auth_token") packet_conn = packet.Manager(auth_token=auth_token) - state = module.params.get('state') + state = module.params.get("state") try: module.exit_json(**act_on_devices(module, packet_conn, state)) except Exception as e: - module.fail_json(msg=f'failed to set device state {state}, error: {e}', exception=traceback.format_exc()) + module.fail_json(msg=f"failed to set device state {state}, error: {e}", exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/packet_ip_subnet.py b/plugins/modules/packet_ip_subnet.py index 438a136e062..9f557a2119f 100644 --- a/plugins/modules/packet_ip_subnet.py +++ b/plugins/modules/packet_ip_subnet.py @@ -161,15 +161,15 @@ HAS_PACKET_SDK = False -NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]') -HOSTNAME_RE = rf'({NAME_RE}\.)*{NAME_RE}$' +NAME_RE = r"({0}|{0}{1}*{0})".format(r"[a-zA-Z0-9]", r"[a-zA-Z0-9\-]") +HOSTNAME_RE = rf"({NAME_RE}\.)*{NAME_RE}$" PROJECT_MAX_DEVICES = 100 PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" -ALLOWED_STATES = ['absent', 'present'] +ALLOWED_STATES = ["absent", "present"] def is_valid_hostname(hostname): @@ -185,28 +185,27 @@ def is_valid_uuid(myuuid): def get_existing_devices(module, packet_conn): - project_id = module.params.get('project_id') + project_id = module.params.get("project_id") if not is_valid_uuid(project_id): raise Exception(f"Project ID {project_id} does not seem to be valid") - per_page = module.params.get('device_count') - return packet_conn.list_devices( - project_id, params={'per_page': per_page}) + per_page = module.params.get("device_count") + return packet_conn.list_devices(project_id, params={"per_page": per_page}) def get_specified_device_identifiers(module): - if module.params.get('device_id'): - _d_id = module.params.get('device_id') + if module.params.get("device_id"): + _d_id = module.params.get("device_id") if not is_valid_uuid(_d_id): raise Exception(f"Device ID '{_d_id}' does not seem to be valid") - return {'device_id': _d_id, 'hostname': None} - elif module.params.get('hostname'): - _hn = module.params.get('hostname') + return {"device_id": _d_id, "hostname": None} + elif module.params.get("hostname"): + _hn = module.params.get("hostname") if not is_valid_hostname(_hn): raise Exception(f"Hostname '{_hn}' does not seem to be valid") - return {'hostname': _hn, 'device_id': None} + return {"hostname": _hn, "device_id": None} else: - return {'hostname': None, 'device_id': None} + return {"hostname": None, "device_id": None} def parse_subnet_cidr(cidr): @@ -221,7 +220,7 @@ def parse_subnet_cidr(cidr): def act_on_assignment(target_state, module, packet_conn): - return_dict = {'changed': False} + return_dict = {"changed": False} specified_cidr = module.params.get("cidr") address, prefixlen = parse_subnet_cidr(specified_cidr) @@ -230,26 +229,24 @@ def act_on_assignment(target_state, module, packet_conn): if module.check_mode: return return_dict - if (specified_identifier['hostname'] is None) and ( - specified_identifier['device_id'] is None): - if target_state == 'absent': + if (specified_identifier["hostname"] is None) and (specified_identifier["device_id"] is None): + if target_state == "absent": # The special case to release the IP from any assignment for d in get_existing_devices(module, packet_conn): for ia in d.ip_addresses: - if address == ia['address'] and prefixlen == ia['cidr']: - packet_conn.call_api(ia['href'], "DELETE") - return_dict['changed'] = True - return_dict['subnet'] 
= ia - return_dict['device_id'] = d.id + if address == ia["address"] and prefixlen == ia["cidr"]: + packet_conn.call_api(ia["href"], "DELETE") + return_dict["changed"] = True + return_dict["subnet"] = ia + return_dict["device_id"] = d.id return return_dict - raise Exception("If you assign an address, you must specify either " - "target device ID or target unique hostname.") + raise Exception("If you assign an address, you must specify either target device ID or target unique hostname.") - if specified_identifier['device_id'] is not None: - device = packet_conn.get_device(specified_identifier['device_id']) + if specified_identifier["device_id"] is not None: + device = packet_conn.get_device(specified_identifier["device_id"]) else: all_devices = get_existing_devices(module, packet_conn) - hn = specified_identifier['hostname'] + hn = specified_identifier["hostname"] matching_devices = [d for d in all_devices if d.hostname == hn] if len(matching_devices) > 1: raise Exception(f"There are more than one devices matching given hostname {hn}") @@ -257,68 +254,61 @@ def act_on_assignment(target_state, module, packet_conn): raise Exception(f"There is no device matching given hostname {hn}") device = matching_devices[0] - return_dict['device_id'] = device.id - assignment_dicts = [i for i in device.ip_addresses - if i['address'] == address and i['cidr'] == prefixlen] + return_dict["device_id"] = device.id + assignment_dicts = [i for i in device.ip_addresses if i["address"] == address and i["cidr"] == prefixlen] if len(assignment_dicts) > 1: raise Exception(f"IP address {specified_cidr} is assigned more than once for device {device.hostname}") if target_state == "absent": if len(assignment_dicts) == 1: - packet_conn.call_api(assignment_dicts[0]['href'], "DELETE") - return_dict['subnet'] = assignment_dicts[0] - return_dict['changed'] = True + packet_conn.call_api(assignment_dicts[0]["href"], "DELETE") + return_dict["subnet"] = assignment_dicts[0] + return_dict["changed"] = True elif target_state == "present": if len(assignment_dicts) == 0: - new_assignment = packet_conn.call_api( - f"devices/{device.id}/ips", "POST", {"address": f"{specified_cidr}"}) - return_dict['changed'] = True - return_dict['subnet'] = new_assignment + new_assignment = packet_conn.call_api(f"devices/{device.id}/ips", "POST", {"address": f"{specified_cidr}"}) + return_dict["changed"] = True + return_dict["subnet"] = new_assignment return return_dict def main(): module = AnsibleModule( argument_spec=dict( - auth_token=dict( - type='str', - fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), - no_log=True - ), - device_id=dict(type='str'), - hostname=dict(type='str'), - project_id=dict(type='str'), - device_count=dict(type='int', default=PROJECT_MAX_DEVICES), - cidr=dict(type='str', required=True, aliases=['name']), - state=dict(choices=ALLOWED_STATES, default='present'), + auth_token=dict(type="str", fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), no_log=True), + device_id=dict(type="str"), + hostname=dict(type="str"), + project_id=dict(type="str"), + device_count=dict(type="int", default=PROJECT_MAX_DEVICES), + cidr=dict(type="str", required=True, aliases=["name"]), + state=dict(choices=ALLOWED_STATES, default="present"), ), supports_check_mode=True, - mutually_exclusive=[('hostname', 'device_id')], - required_one_of=[['hostname', 'device_id', 'project_id']], + mutually_exclusive=[("hostname", "device_id")], + required_one_of=[["hostname", "device_id", "project_id"]], required_by=dict( - hostname=('project_id',), + 
hostname=("project_id",), ), ) if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') + module.fail_json(msg="packet required for this module") - if not module.params.get('auth_token'): + if not module.params.get("auth_token"): _fail_msg = f"if Packet API token is not in environment variable {PACKET_API_TOKEN_ENV_VAR}, the auth_token parameter is required" module.fail_json(msg=_fail_msg) - auth_token = module.params.get('auth_token') + auth_token = module.params.get("auth_token") packet_conn = packet.Manager(auth_token=auth_token) - state = module.params.get('state') + state = module.params.get("state") try: module.exit_json(**act_on_assignment(state, module, packet_conn)) except Exception as e: - module.fail_json( - msg=f"failed to set IP subnet to state {state}, error: {e}") + module.fail_json(msg=f"failed to set IP subnet to state {state}, error: {e}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/packet_project.py b/plugins/modules/packet_project.py index b7ebff3ccbf..4c39cf4ffa1 100644 --- a/plugins/modules/packet_project.py +++ b/plugins/modules/packet_project.py @@ -133,43 +133,34 @@ def act_on_project(target_state, module, packet_conn): - result_dict = {'changed': False} - given_id = module.params.get('id') - given_name = module.params.get('name') + result_dict = {"changed": False} + given_id = module.params.get("id") + given_name = module.params.get("name") if given_id: - matching_projects = [ - p for p in packet_conn.list_projects() if given_id == p.id] + matching_projects = [p for p in packet_conn.list_projects() if given_id == p.id] else: - matching_projects = [ - p for p in packet_conn.list_projects() if given_name == p.name] + matching_projects = [p for p in packet_conn.list_projects() if given_name == p.name] - if target_state == 'present': + if target_state == "present": if len(matching_projects) == 0: - org_id = module.params.get('org_id') - custom_data = module.params.get('custom_data') - payment_method = module.params.get('payment_method') + org_id = module.params.get("org_id") + custom_data = module.params.get("custom_data") + payment_method = module.params.get("payment_method") if not org_id: - params = { - "name": given_name, - "payment_method_id": payment_method, - "customdata": custom_data - } + params = {"name": given_name, "payment_method_id": payment_method, "customdata": custom_data} new_project_data = packet_conn.call_api("projects", "POST", params) new_project = packet.Project(new_project_data, packet_conn) else: new_project = packet_conn.create_organization_project( - org_id=org_id, - name=given_name, - payment_method_id=payment_method, - customdata=custom_data + org_id=org_id, name=given_name, payment_method_id=payment_method, customdata=custom_data ) - result_dict['changed'] = True + result_dict["changed"] = True matching_projects.append(new_project) - result_dict['name'] = matching_projects[0].name - result_dict['id'] = matching_projects[0].id + result_dict["name"] = matching_projects[0].name + result_dict["id"] = matching_projects[0].id else: if len(matching_projects) > 1: _msg = f"More than projects matched for module call with state = absent: {to_native(matching_projects)}" @@ -177,9 +168,9 @@ def act_on_project(target_state, module, packet_conn): if len(matching_projects) == 1: p = matching_projects[0] - result_dict['name'] = p.name - result_dict['id'] = p.id - result_dict['changed'] = True + result_dict["name"] = p.name + result_dict["id"] = p.id + result_dict["changed"] = True 
try: p.delete() except Exception as e: @@ -191,49 +182,49 @@ def act_on_project(target_state, module, packet_conn): def main(): module = AnsibleModule( argument_spec=dict( - state=dict(choices=['present', 'absent'], default='present'), - auth_token=dict( - type='str', - fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), - no_log=True - ), - name=dict(type='str'), - id=dict(type='str'), - org_id=dict(type='str'), - payment_method=dict(type='str'), - custom_data=dict(type='str'), + state=dict(choices=["present", "absent"], default="present"), + auth_token=dict(type="str", fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), no_log=True), + name=dict(type="str"), + id=dict(type="str"), + org_id=dict(type="str"), + payment_method=dict(type="str"), + custom_data=dict(type="str"), ), supports_check_mode=True, - required_one_of=[("name", "id",)], + required_one_of=[ + ( + "name", + "id", + ) + ], mutually_exclusive=[ - ('name', 'id'), - ] + ("name", "id"), + ], ) if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') + module.fail_json(msg="packet required for this module") - if not module.params.get('auth_token'): + if not module.params.get("auth_token"): _fail_msg = f"if Packet API token is not in environment variable {PACKET_API_TOKEN_ENV_VAR}, the auth_token parameter is required" module.fail_json(msg=_fail_msg) - auth_token = module.params.get('auth_token') + auth_token = module.params.get("auth_token") packet_conn = packet.Manager(auth_token=auth_token) - state = module.params.get('state') + state = module.params.get("state") - if state in ['present', 'absent']: + if state in ["present", "absent"]: if module.check_mode: module.exit_json(changed=False) try: module.exit_json(**act_on_project(state, module, packet_conn)) except Exception as e: - module.fail_json( - msg=f"failed to set project state {state}: {e}") + module.fail_json(msg=f"failed to set project state {state}: {e}") else: module.fail_json(msg=f"{state} is not a valid state for this module") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/packet_sshkey.py b/plugins/modules/packet_sshkey.py index 09adbdc5b7a..a9e221a864e 100644 --- a/plugins/modules/packet_sshkey.py +++ b/plugins/modules/packet_sshkey.py @@ -115,7 +115,7 @@ def serialize_sshkey(sshkey): sshkey_data = {} - copy_keys = ['id', 'key', 'label', 'fingerprint'] + copy_keys = ["id", "key", "label", "fingerprint"] for name in copy_keys: sshkey_data[name] = getattr(sshkey, name) return sshkey_data @@ -132,42 +132,43 @@ def is_valid_uuid(myuuid): def load_key_string(key_str): ret_dict = {} key_str = key_str.strip() - ret_dict['key'] = key_str + ret_dict["key"] = key_str cut_key = key_str.split() if len(cut_key) in [2, 3]: if len(cut_key) == 3: - ret_dict['label'] = cut_key[2] + ret_dict["label"] = cut_key[2] else: raise Exception(f"Public key {key_str} is in wrong format") return ret_dict def get_sshkey_selector(module): - key_id = module.params.get('id') + key_id = module.params.get("id") if key_id: if not is_valid_uuid(key_id): raise Exception(f"sshkey ID {key_id} is not valid UUID") - selecting_fields = ['label', 'fingerprint', 'id', 'key'] + selecting_fields = ["label", "fingerprint", "id", "key"] select_dict = {} for f in selecting_fields: if module.params.get(f) is not None: select_dict[f] = module.params.get(f) - if module.params.get('key_file'): - with open(module.params.get('key_file')) as _file: + if module.params.get("key_file"): + with open(module.params.get("key_file")) as _file: loaded_key = 
load_key_string(_file.read()) - select_dict['key'] = loaded_key['key'] - if module.params.get('label') is None: - if loaded_key.get('label'): - select_dict['label'] = loaded_key['label'] + select_dict["key"] = loaded_key["key"] + if module.params.get("label") is None: + if loaded_key.get("label"): + select_dict["label"] = loaded_key["label"] def selector(k): - if 'key' in select_dict: + if "key" in select_dict: # if key string is specified, compare only the key strings - return k.key == select_dict['key'] + return k.key == select_dict["key"] else: # if key string not specified, all the fields must match return all(select_dict[f] == getattr(k, f) for f in select_dict) + return selector @@ -176,27 +177,28 @@ def act_on_sshkeys(target_state, module, packet_conn): existing_sshkeys = packet_conn.list_ssh_keys() matching_sshkeys = list(filter(selector, existing_sshkeys)) changed = False - if target_state == 'present': + if target_state == "present": if matching_sshkeys == []: # there is no key matching the fields from module call # => create the key, label and newkey = {} - if module.params.get('key_file'): - with open(module.params.get('key_file')) as f: + if module.params.get("key_file"): + with open(module.params.get("key_file")) as f: newkey = load_key_string(f.read()) - if module.params.get('key'): - newkey = load_key_string(module.params.get('key')) - if module.params.get('label'): - newkey['label'] = module.params.get('label') - for param in ('label', 'key'): + if module.params.get("key"): + newkey = load_key_string(module.params.get("key")) + if module.params.get("label"): + newkey["label"] = module.params.get("label") + for param in ("label", "key"): if param not in newkey: - _msg = ("If you want to ensure a key is present, you must " - "supply both a label and a key string, either in " - f"module params, or in a key file. {param} is missing") + _msg = ( + "If you want to ensure a key is present, you must " + "supply both a label and a key string, either in " + f"module params, or in a key file. 
{param} is missing" + ) raise Exception(_msg) matching_sshkeys = [] - new_key_response = packet_conn.create_ssh_key( - newkey['label'], newkey['key']) + new_key_response = packet_conn.create_ssh_key(newkey["label"], newkey["key"]) changed = True matching_sshkeys.append(new_key_response) @@ -210,55 +212,51 @@ def act_on_sshkeys(target_state, module, packet_conn): _msg = f"while trying to remove sshkey {k.label}, id {k.id} {target_state}, got error: {e}" raise Exception(_msg) - return { - 'changed': changed, - 'sshkeys': [serialize_sshkey(k) for k in matching_sshkeys] - } + return {"changed": changed, "sshkeys": [serialize_sshkey(k) for k in matching_sshkeys]} def main(): module = AnsibleModule( argument_spec=dict( - state=dict(choices=['present', 'absent'], default='present'), - auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR), - no_log=True), - label=dict(type='str', aliases=['name']), - id=dict(type='str'), - fingerprint=dict(type='str'), - key=dict(type='str', no_log=True), - key_file=dict(type='path'), + state=dict(choices=["present", "absent"], default="present"), + auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR), no_log=True), + label=dict(type="str", aliases=["name"]), + id=dict(type="str"), + fingerprint=dict(type="str"), + key=dict(type="str", no_log=True), + key_file=dict(type="path"), ), mutually_exclusive=[ - ('label', 'id'), - ('label', 'fingerprint'), - ('id', 'fingerprint'), - ('key', 'fingerprint'), - ('key', 'id'), - ('key_file', 'key'), - ] + ("label", "id"), + ("label", "fingerprint"), + ("id", "fingerprint"), + ("key", "fingerprint"), + ("key", "id"), + ("key_file", "key"), + ], ) if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') + module.fail_json(msg="packet required for this module") - if not module.params.get('auth_token'): + if not module.params.get("auth_token"): _fail_msg = f"if Packet API token is not in environment variable {PACKET_API_TOKEN_ENV_VAR}, the auth_token parameter is required" module.fail_json(msg=_fail_msg) - auth_token = module.params.get('auth_token') + auth_token = module.params.get("auth_token") packet_conn = packet.Manager(auth_token=auth_token) - state = module.params.get('state') + state = module.params.get("state") - if state in ['present', 'absent']: + if state in ["present", "absent"]: try: module.exit_json(**act_on_sshkeys(state, module, packet_conn)) except Exception as e: - module.fail_json(msg=f'failed to set sshkey state: {e}') + module.fail_json(msg=f"failed to set sshkey state: {e}") else: - module.fail_json(msg=f'{state} is not a valid state for this module') + module.fail_json(msg=f"{state} is not a valid state for this module") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/packet_volume.py b/plugins/modules/packet_volume.py index 40308edf31c..e49f4af37e7 100644 --- a/plugins/modules/packet_volume.py +++ b/plugins/modules/packet_volume.py @@ -198,17 +198,17 @@ def is_valid_uuid(myuuid): def get_volume_selector(module): - if module.params.get('id'): - i = module.params.get('id') + if module.params.get("id"): + i = module.params.get("id") if not is_valid_uuid(i): raise Exception(f"Volume ID '{i}' is not a valid UUID") - return lambda v: v['id'] == i - elif module.params.get('name'): - n = module.params.get('name') - return lambda v: v['name'] == n - elif module.params.get('description'): - d = module.params.get('description') - return lambda v: v['description'] == d + return lambda v: v["id"] == i + elif 
module.params.get("name"): + n = module.params.get("name") + return lambda v: v["name"] == n + elif module.params.get("description"): + d = module.params.get("description") + return lambda v: v["description"] == d def get_or_fail(params, key): @@ -219,11 +219,11 @@ def get_or_fail(params, key): def act_on_volume(target_state, module, packet_conn): - return_dict = {'changed': False} + return_dict = {"changed": False} s = get_volume_selector(module) project_id = module.params.get("project_id") api_method = f"projects/{project_id}/storage" - all_volumes = packet_conn.call_api(api_method, "GET")['volumes'] + all_volumes = packet_conn.call_api(api_method, "GET")["volumes"] matching_volumes = [v for v in all_volumes if s(v)] if target_state == "present": @@ -239,12 +239,12 @@ def act_on_volume(target_state, module, packet_conn): } new_volume_data = packet_conn.call_api(api_method, "POST", params) - return_dict['changed'] = True - for k in ['id', 'name', 'description']: + return_dict["changed"] = True + for k in ["id", "name", "description"]: return_dict[k] = new_volume_data[k] else: - for k in ['id', 'name', 'description']: + for k in ["id", "name", "description"]: return_dict[k] = matching_volumes[0][k] else: @@ -255,8 +255,8 @@ def act_on_volume(target_state, module, packet_conn): if len(matching_volumes) == 1: volume = matching_volumes[0] packet_conn.call_api(f"storage/{volume['id']}", "DELETE") - return_dict['changed'] = True - for k in ['id', 'name', 'description']: + return_dict["changed"] = True + for k in ["id", "name", "description"]: return_dict[k] = volume[k] return return_dict @@ -265,44 +265,40 @@ def act_on_volume(target_state, module, packet_conn): def main(): module = AnsibleModule( argument_spec=dict( - id=dict(type='str'), + id=dict(type="str"), description=dict(type="str"), - name=dict(type='str'), + name=dict(type="str"), state=dict(choices=VOLUME_STATES, default="present"), - auth_token=dict( - type='str', - fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), - no_log=True - ), + auth_token=dict(type="str", fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), no_log=True), project_id=dict(required=True), plan=dict(choices=VOLUME_PLANS, default="storage_1"), facility=dict(type="str"), size=dict(type="int"), locked=dict(type="bool", default=False), - snapshot_policy=dict(type='dict'), - billing_cycle=dict(type='str', choices=BILLING, default="hourly"), + snapshot_policy=dict(type="dict"), + billing_cycle=dict(type="str", choices=BILLING, default="hourly"), ), supports_check_mode=True, required_one_of=[("name", "id", "description")], mutually_exclusive=[ - ('name', 'id'), - ('id', 'description'), - ('name', 'description'), - ] + ("name", "id"), + ("id", "description"), + ("name", "description"), + ], ) if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') + module.fail_json(msg="packet required for this module") - if not module.params.get('auth_token'): + if not module.params.get("auth_token"): _fail_msg = f"if Packet API token is not in environment variable {PACKET_API_TOKEN_ENV_VAR}, the auth_token parameter is required" module.fail_json(msg=_fail_msg) - auth_token = module.params.get('auth_token') + auth_token = module.params.get("auth_token") packet_conn = packet.Manager(auth_token=auth_token) - state = module.params.get('state') + state = module.params.get("state") if state in VOLUME_STATES: if module.check_mode: @@ -311,11 +307,10 @@ def main(): try: module.exit_json(**act_on_volume(state, module, packet_conn)) except Exception as e: - 
module.fail_json( - msg=f"failed to set volume state {state}: {to_native(e)}") + module.fail_json(msg=f"failed to set volume state {state}: {to_native(e)}") else: module.fail_json(msg=f"{state} is not a valid state for this module") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/packet_volume_attachment.py b/plugins/modules/packet_volume_attachment.py index 0d1872c72fd..29f96a7eb90 100644 --- a/plugins/modules/packet_volume_attachment.py +++ b/plugins/modules/packet_volume_attachment.py @@ -158,32 +158,30 @@ def is_valid_uuid(myuuid): def get_volume_selector(spec): if is_valid_uuid(spec): - return lambda v: v['id'] == spec + return lambda v: v["id"] == spec else: - return lambda v: v['name'] == spec or v['description'] == spec + return lambda v: v["name"] == spec or v["description"] == spec def get_device_selector(spec): if is_valid_uuid(spec): - return lambda v: v['id'] == spec + return lambda v: v["id"] == spec else: - return lambda v: v['hostname'] == spec + return lambda v: v["hostname"] == spec def do_attach(packet_conn, vol_id, dev_id): api_method = f"storage/{vol_id}/attachments" - packet_conn.call_api( - api_method, - params={"device_id": dev_id}, - type="POST") + packet_conn.call_api(api_method, params={"device_id": dev_id}, type="POST") def do_detach(packet_conn, vol, dev_id=None): def dev_match(a): - return (dev_id is None) or (a['device']['id'] == dev_id) - for a in vol['attachments']: + return (dev_id is None) or (a["device"]["id"] == dev_id) + + for a in vol["attachments"]: if dev_match(a): - packet_conn.call_api(a['href'], type="DELETE") + packet_conn.call_api(a["href"], type="DELETE") def validate_selected(l, resource_type, spec): @@ -196,45 +194,44 @@ def validate_selected(l, resource_type, spec): def get_attached_dev_ids(volume_dict): - if len(volume_dict['attachments']) == 0: + if len(volume_dict["attachments"]) == 0: return [] else: - return [a['device']['id'] for a in volume_dict['attachments']] + return [a["device"]["id"] for a in volume_dict["attachments"]] def act_on_volume_attachment(target_state, module, packet_conn): - return_dict = {'changed': False} + return_dict = {"changed": False} volspec = module.params.get("volume") devspec = module.params.get("device") - if devspec is None and target_state == 'present': + if devspec is None and target_state == "present": raise Exception("If you want to attach a volume, you must specify a device.") project_id = module.params.get("project_id") volumes_api_method = f"projects/{project_id}/storage" - volumes = packet_conn.call_api(volumes_api_method, - params={'include': 'facility,attachments.device'})['volumes'] + volumes = packet_conn.call_api(volumes_api_method, params={"include": "facility,attachments.device"})["volumes"] v_match = get_volume_selector(volspec) matching_volumes = [v for v in volumes if v_match(v)] validate_selected(matching_volumes, "volume", volspec) volume = matching_volumes[0] - return_dict['volume_id'] = volume['id'] + return_dict["volume_id"] = volume["id"] device = None if devspec is not None: devices_api_method = f"projects/{project_id}/devices" - devices = packet_conn.call_api(devices_api_method)['devices'] + devices = packet_conn.call_api(devices_api_method)["devices"] d_match = get_device_selector(devspec) matching_devices = [d for d in devices if d_match(d)] validate_selected(matching_devices, "device", devspec) device = matching_devices[0] - return_dict['device_id'] = device['id'] + return_dict["device_id"] = device["id"] attached_device_ids = 
get_attached_dev_ids(volume) if target_state == "present": if len(attached_device_ids) == 0: - do_attach(packet_conn, volume['id'], device['id']) - return_dict['changed'] = True - elif device['id'] not in attached_device_ids: + do_attach(packet_conn, volume["id"], device["id"]) + return_dict["changed"] = True + elif device["id"] not in attached_device_ids: # Don't reattach volume which is attached to a different device. # Rather fail than force remove a device on state == 'present'. raise Exception(f"volume {volume} is already attached to device {attached_device_ids}") @@ -242,10 +239,10 @@ def act_on_volume_attachment(target_state, module, packet_conn): if device is None: if len(attached_device_ids) > 0: do_detach(packet_conn, volume) - return_dict['changed'] = True - elif device['id'] in attached_device_ids: - do_detach(packet_conn, volume, device['id']) - return_dict['changed'] = True + return_dict["changed"] = True + elif device["id"] in attached_device_ids: + do_detach(packet_conn, volume, device["id"]) + return_dict["changed"] = True return return_dict @@ -254,11 +251,7 @@ def main(): module = AnsibleModule( argument_spec=dict( state=dict(choices=STATES, default="present"), - auth_token=dict( - type='str', - fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), - no_log=True - ), + auth_token=dict(type="str", fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), no_log=True), volume=dict(type="str", required=True), project_id=dict(type="str", required=True), device=dict(type="str"), @@ -267,31 +260,29 @@ def main(): ) if not HAS_PACKET_SDK: - module.fail_json(msg='packet required for this module') + module.fail_json(msg="packet required for this module") - if not module.params.get('auth_token'): + if not module.params.get("auth_token"): _fail_msg = f"if Packet API token is not in environment variable {PACKET_API_TOKEN_ENV_VAR}, the auth_token parameter is required" module.fail_json(msg=_fail_msg) - auth_token = module.params.get('auth_token') + auth_token = module.params.get("auth_token") packet_conn = packet.Manager(auth_token=auth_token) - state = module.params.get('state') + state = module.params.get("state") if state in STATES: if module.check_mode: module.exit_json(changed=False) try: - module.exit_json( - **act_on_volume_attachment(state, module, packet_conn)) + module.exit_json(**act_on_volume_attachment(state, module, packet_conn)) except Exception as e: - module.fail_json( - msg=f"failed to set volume_attachment state {state}: {e}") + module.fail_json(msg=f"failed to set volume_attachment state {state}: {e}") else: module.fail_json(msg=f"{state} is not a valid state for this module") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pacman.py b/plugins/modules/pacman.py index fffdc1c69c7..ab21861c294 100644 --- a/plugins/modules/pacman.py +++ b/plugins/modules/pacman.py @@ -344,7 +344,7 @@ def run(self): self.success() # This happens if an empty list has been provided for name - self.add_exit_infos(msg='Nothing to do') + self.add_exit_infos(msg="Nothing to do") self.success() def install_packages(self, pkgs): @@ -399,11 +399,13 @@ def _build_install_diff(pacman_verb, pkglist): for p in name_ver: # With Pacman v6.0.1 - libalpm v13.0.1, --upgrade outputs "loading packages..." on stdout. strip that. # When installing from URLs, pacman can also output a 'nothing to do' message. strip that too. 
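The comments above describe filtering pacman's dry-run output before splitting each remaining line into a name/version pair. A standalone sketch of that filtering, using a made-up `sample` string for the stdout:

```python
# Sketch only: the noise markers come from the comments in _build_install_diff.
NOISE = ("loading packages", "there is nothing to do", "Avoid running")


def parse_name_versions(stdout):
    pairs = []
    for line in stdout.splitlines():
        if any(marker in line for marker in NOISE):
            continue  # skip pacman status chatter
        name, version = line.split()
        pairs.append((name, version))
    return pairs


sample = "loading packages...\npacman 6.0.1-2\nglibc 2.38-3\n"
print(parse_name_versions(sample))  # [('pacman', '6.0.1-2'), ('glibc', '2.38-3')]
```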
- if "loading packages" in p or "there is nothing to do" in p or 'Avoid running' in p: + if "loading packages" in p or "there is nothing to do" in p or "Avoid running" in p: continue name, version = p.split() if name in self.inventory["installed_pkgs"]: - before.append(f"{name}-{self.inventory['installed_pkgs'][name]}-{self.inventory['pkg_reasons'][name]}") + before.append( + f"{name}-{self.inventory['installed_pkgs'][name]}-{self.inventory['pkg_reasons'][name]}" + ) if name in pkgs_to_set_reason: after.append(f"{name}-{version}-{self.m.params['reason']}") elif name in self.inventory["pkg_reasons"]: @@ -437,8 +439,8 @@ def _build_install_diff(pacman_verb, pkglist): self.changed = True - _before_joined = '\n'.join(sorted(before)) - _after_joined = '\n'.join(sorted(after)) + _before_joined = "\n".join(sorted(before)) + _after_joined = "\n".join(sorted(after)) self.exit_params["diff"] = { "before": f"{_before_joined}\n" if before else "", "after": f"{_after_joined}\n" if after else "", @@ -511,7 +513,7 @@ def remove_packages(self, pkgs): removed_pkgs = stdout.split() self.exit_params["packages"] = removed_pkgs - _remove_pkgs_joined = '\n'.join(removed_pkgs) + _remove_pkgs_joined = "\n".join(removed_pkgs) self.exit_params["diff"] = { "before": f"{_remove_pkgs_joined}\n", # trailing \n to avoid diff complaints "after": "", @@ -550,9 +552,7 @@ def upgrade(self): self.exit_params["packages"] = self.inventory["upgradable_pkgs"].keys() if self.m.check_mode: - self.add_exit_infos( - f"{len(self.inventory['upgradable_pkgs'])} packages would have been upgraded" - ) + self.add_exit_infos(f"{len(self.inventory['upgradable_pkgs'])} packages would have been upgraded") else: cmd = [ self.pacman_path, @@ -573,7 +573,7 @@ def upgrade(self): def _list_database(self): """runs pacman --sync --list with some caching""" if self._cached_database is None: - dummy, packages, dummy = self.m.run_command([self.pacman_path, '--sync', '--list'], check_rc=True) + dummy, packages, dummy = self.m.run_command([self.pacman_path, "--sync", "--list"], check_rc=True) self._cached_database = packages.splitlines() return self._cached_database @@ -690,7 +690,7 @@ def _build_inventory(self): installed_pkgs = {} dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query"], check_rc=True) # Format of a line: "pacman 6.0.1-2" - query_re = re.compile(r'^\s*(?P\S+)\s+(?P\S+)\s*$') + query_re = re.compile(r"^\s*(?P\S+)\s+(?P\S+)\s*$") for l in stdout.splitlines(): query_match = query_re.match(l) if not query_match: @@ -699,14 +699,12 @@ def _build_inventory(self): installed_pkgs[pkg] = ver installed_groups = defaultdict(set) - dummy, stdout, dummy = self.m.run_command( - [self.pacman_path, "--query", "--groups"], check_rc=True - ) + dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query", "--groups"], check_rc=True) # Format of lines: # base-devel file # base-devel findutils # ... 
- query_groups_re = re.compile(r'^\s*(?P<group>\S+)\s+(?P<pkg>\S+)\s*$') + query_groups_re = re.compile(r"^\s*(?P<group>\S+)\s+(?P<pkg>\S+)\s*$") for l in stdout.splitlines(): query_groups_match = query_groups_re.match(l) if not query_groups_match: @@ -725,15 +723,13 @@ def _build_inventory(self): available_pkgs[pkg] = ver available_groups = defaultdict(set) - dummy, stdout, dummy = self.m.run_command( - [self.pacman_path, "--sync", "--groups", "--groups"], check_rc=True - ) + dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--sync", "--groups", "--groups"], check_rc=True) # Format of lines: # vim-plugins vim-airline # vim-plugins vim-airline-themes # vim-plugins vim-ale # ... - sync_groups_re = re.compile(r'^\s*(?P<group>\S+)\s+(?P<pkg>\S+)\s*$') + sync_groups_re = re.compile(r"^\s*(?P<group>\S+)\s+(?P<pkg>\S+)\s*$") for l in stdout.splitlines(): sync_groups_match = sync_groups_re.match(l) if not sync_groups_match: @@ -742,9 +738,7 @@ def _build_inventory(self): available_groups[group].add(pkg) upgradable_pkgs = {} - rc, stdout, stderr = self.m.run_command( - [self.pacman_path, "--query", "--upgrades"], check_rc=False - ) + rc, stdout, stderr = self.m.run_command([self.pacman_path, "--query", "--upgrades"], check_rc=False) stdout = stdout.splitlines() if stdout and "Avoid running" in stdout[0]: @@ -843,7 +837,6 @@ def setup_module(): def main(): - Pacman(setup_module()).run() diff --git a/plugins/modules/pacman_key.py b/plugins/modules/pacman_key.py index 75555a5dda4..7f5f8237566 100644 --- a/plugins/modules/pacman_key.py +++ b/plugins/modules/pacman_key.py @@ -137,7 +137,7 @@ class GpgListResult: """Wraps gpg --list-* output.""" def __init__(self, line): - self._parts = line.split(':') + self._parts = line.split(":") @property def kind(self): @@ -149,7 +149,7 @@ def valid(self): @property def is_fully_valid(self): - return self.valid == 'f' + return self.valid == "f" @property def key(self): @@ -180,20 +180,20 @@ class PacmanKey: def __init__(self, module): self.module = module # obtain binary paths for gpg & pacman-key - self.gpg_binary = module.get_bin_path('gpg', required=True) - self.pacman_key_binary = module.get_bin_path('pacman-key', required=True) + self.gpg_binary = module.get_bin_path("gpg", required=True) + self.pacman_key_binary = module.get_bin_path("pacman-key", required=True) # obtain module parameters - keyid = module.params['id'] - url = module.params['url'] - data = module.params['data'] - file = module.params['file'] - keyserver = module.params['keyserver'] - verify = module.params['verify'] - force_update = module.params['force_update'] - keyring = module.params['keyring'] - state = module.params['state'] - ensure_trusted = module.params['ensure_trusted'] + keyid = module.params["id"] + url = module.params["url"] + data = module.params["data"] + file = module.params["file"] + keyserver = module.params["keyserver"] + verify = module.params["verify"] + force_update = module.params["force_update"] + keyring = module.params["keyring"] + state = module.params["state"] + ensure_trusted = module.params["ensure_trusted"] self.keylength = 40 # sanitise key ID & check if key exists in the keyring @@ -204,15 +204,15 @@ def __init__(self, module): # check mode if module.check_mode: - if state == 'present': + if state == "present": changed = (key_present and force_update) or not key_present if not changed and ensure_trusted: changed = not (key_valid and self.key_is_trusted(keyring, keyid)) module.exit_json(changed=changed) - if state == 'absent': + if state == "absent": module.exit_json(changed=key_present) - if 
state == 'present': + if state == "present": trusted = key_valid and self.key_is_trusted(keyring, keyid) if not force_update and key_present and (not ensure_trusted or trusted): module.exit_json(changed=False) @@ -236,7 +236,7 @@ def __init__(self, module): self.lsign_key(keyring=keyring, keyid=keyid) changed = True module.exit_json(changed=changed) - elif state == 'absent': + elif state == "absent": if key_present: self.remove_key(keyring, keyid) module.exit_json(changed=True) @@ -245,18 +245,16 @@ def __init__(self, module): def gpg(self, args, keyring=None, **kwargs): cmd = [self.gpg_binary] if keyring: - cmd.append(f'--homedir={keyring}') - cmd.extend(['--no-permission-warning', '--with-colons', '--quiet', '--batch', '--no-tty']) + cmd.append(f"--homedir={keyring}") + cmd.extend(["--no-permission-warning", "--with-colons", "--quiet", "--batch", "--no-tty"]) return self.module.run_command(cmd + args, **kwargs) def pacman_key(self, args, keyring, **kwargs): - return self.module.run_command( - [self.pacman_key_binary, '--gpgdir', keyring] + args, - **kwargs) + return self.module.run_command([self.pacman_key_binary, "--gpgdir", keyring] + args, **kwargs) def pacman_machine_key(self, keyring): - unused_rc, stdout, unused_stderr = self.gpg(['--list-secret-key'], keyring=keyring) - return gpg_get_first_attr_of_kind(stdout.splitlines(), 'sec', 'key') + unused_rc, stdout, unused_stderr = self.gpg(["--list-secret-key"], keyring=keyring) + return gpg_get_first_attr_of_kind(stdout.splitlines(), "sec", "key") def is_hexadecimal(self, string): """Check if a given string is valid hexadecimal""" @@ -271,7 +269,7 @@ def sanitise_keyid(self, keyid): Strips whitespace, uppercases all characters, and strips leading `0X`. """ - sanitised_keyid = keyid.strip().upper().replace(' ', '').replace('0X', '') + sanitised_keyid = keyid.strip().upper().replace(" ", "").replace("0X", "") if len(sanitised_keyid) != self.keylength: self.module.fail_json(msg=f"key ID is not full-length: {sanitised_keyid}") if not self.is_hexadecimal(sanitised_keyid): @@ -281,17 +279,17 @@ def sanitise_keyid(self, keyid): def fetch_key(self, url): """Downloads a key from url""" response, info = fetch_url(self.module, url) - if info['status'] != 200: + if info["status"] != 200: self.module.fail_json(msg=f"failed to fetch key at {url}, error was {info['msg']}") return to_native(response.read()) def recv_key(self, keyring, keyid, keyserver): """Receives key via keyserver""" - self.pacman_key(['--keyserver', keyserver, '--recv-keys', keyid], keyring=keyring, check_rc=True) + self.pacman_key(["--keyserver", keyserver, "--recv-keys", keyid], keyring=keyring, check_rc=True) def lsign_key(self, keyring, keyid): """Locally sign key""" - self.pacman_key(['--lsign-key', keyid], keyring=keyring, check_rc=True) + self.pacman_key(["--lsign-key", keyid], keyring=keyring, check_rc=True) def save_key(self, data): "Saves key data to a temporary file" @@ -306,11 +304,11 @@ def add_key(self, keyring, keyfile, keyid, verify): """Add key to pacman's keyring""" if verify: self.verify_keyfile(keyfile, keyid) - self.pacman_key(['--add', keyfile], keyring=keyring, check_rc=True) + self.pacman_key(["--add", keyfile], keyring=keyring, check_rc=True) def remove_key(self, keyring, keyid): """Remove key from pacman's keyring""" - self.pacman_key(['--delete', keyid], keyring=keyring, check_rc=True) + self.pacman_key(["--delete", keyid], keyring=keyring, check_rc=True) def verify_keyfile(self, keyfile, keyid): """Verify that keyfile matches the specified key ID""" @@ 
-320,50 +318,50 @@ def verify_keyfile(self, keyfile, keyid): self.module.fail_json(msg="expected a key ID, got none") rc, stdout, stderr = self.gpg( - ['--with-fingerprint', '--show-keys', keyfile], + ["--with-fingerprint", "--show-keys", keyfile], check_rc=True, ) - extracted_keyid = gpg_get_first_attr_of_kind(stdout.splitlines(), 'fpr', 'user_id') + extracted_keyid = gpg_get_first_attr_of_kind(stdout.splitlines(), "fpr", "user_id") if extracted_keyid != keyid: self.module.fail_json(msg=f"key ID does not match. expected {keyid}, got {extracted_keyid}") def key_validity(self, keyring, keyid): "Check if the key ID is in pacman's keyring and not expired" - rc, stdout, stderr = self.gpg(['--no-default-keyring', '--list-keys', keyid], keyring=keyring, check_rc=False) + rc, stdout, stderr = self.gpg(["--no-default-keyring", "--list-keys", keyid], keyring=keyring, check_rc=False) if rc != 0: if stderr.find("No public key") >= 0: return [] else: self.module.fail_json(msg=f"gpg returned an error: {stderr}") - return gpg_get_all_attrs_of_kind(stdout.splitlines(), 'uid', 'is_fully_valid') + return gpg_get_all_attrs_of_kind(stdout.splitlines(), "uid", "is_fully_valid") def key_is_trusted(self, keyring, keyid): """Check if key is signed and not expired.""" - unused_rc, stdout, unused_stderr = self.gpg(['--check-signatures', keyid], keyring=keyring) - return self.pacman_machine_key(keyring) in gpg_get_all_attrs_of_kind(stdout.splitlines(), 'sig', 'key') + unused_rc, stdout, unused_stderr = self.gpg(["--check-signatures", keyid], keyring=keyring) + return self.pacman_machine_key(keyring) in gpg_get_all_attrs_of_kind(stdout.splitlines(), "sig", "key") def main(): module = AnsibleModule( argument_spec=dict( - id=dict(type='str', required=True), - data=dict(type='str'), - file=dict(type='path'), - url=dict(type='str'), - keyserver=dict(type='str'), - verify=dict(type='bool', default=True), - force_update=dict(type='bool', default=False), - keyring=dict(type='path', default='/etc/pacman.d/gnupg'), - ensure_trusted=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['absent', 'present']), + id=dict(type="str", required=True), + data=dict(type="str"), + file=dict(type="path"), + url=dict(type="str"), + keyserver=dict(type="str"), + verify=dict(type="bool", default=True), + force_update=dict(type="bool", default=False), + keyring=dict(type="path", default="/etc/pacman.d/gnupg"), + ensure_trusted=dict(type="bool", default=False), + state=dict(type="str", default="present", choices=["absent", "present"]), ), supports_check_mode=True, - mutually_exclusive=(('data', 'file', 'url', 'keyserver'),), - required_if=[('state', 'present', ('data', 'file', 'url', 'keyserver'), True)], + mutually_exclusive=(("data", "file", "url", "keyserver"),), + required_if=[("state", "present", ("data", "file", "url", "keyserver"), True)], ) PacmanKey(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pagerduty.py b/plugins/modules/pagerduty.py index c6439053e61..75ba60f0e34 100644 --- a/plugins/modules/pagerduty.py +++ b/plugins/modules/pagerduty.py @@ -160,9 +160,9 @@ def __init__(self, module, name, user, token): self.user = user self.token = token self.headers = { - 'Content-Type': 'application/json', + "Content-Type": "application/json", "Authorization": self._auth_header(), - 'Accept': 'application/vnd.pagerduty+json;version=2' + "Accept": "application/vnd.pagerduty+json;version=2", } def ongoing(self, http_call=fetch_url): @@ -170,7 +170,7 @@ def 
ongoing(self, http_call=fetch_url): headers = dict(self.headers) response, info = http_call(self.module, url, headers=headers) - if info['status'] != 200: + if info["status"] != 200: self.module.fail_json(msg=f"failed to lookup the ongoing window: {info['msg']}") json_out = self._read_response(response) @@ -181,19 +181,21 @@ def create(self, requester_id, service, hours, minutes, desc, http_call=fetch_ur if not requester_id: self.module.fail_json(msg="requester_id is required when maintenance window should be created") - url = 'https://api.pagerduty.com/maintenance_windows' + url = "https://api.pagerduty.com/maintenance_windows" headers = dict(self.headers) - headers.update({'From': requester_id}) + headers.update({"From": requester_id}) start, end = self._compute_start_end_time(hours, minutes) services = self._create_services_payload(service) - request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'services': services}} + request_data = { + "maintenance_window": {"start_time": start, "end_time": end, "description": desc, "services": services} + } data = json.dumps(request_data) - response, info = http_call(self.module, url, data=data, headers=headers, method='POST') - if info['status'] != 201: + response, info = http_call(self.module, url, data=data, headers=headers, method="POST") + if info["status"] != 201: self.module.fail_json(msg=f"failed to create the window: {info['msg']}") json_out = self._read_response(response) @@ -202,9 +204,9 @@ def create(self, requester_id, service, hours, minutes, desc, http_call=fetch_ur def _create_services_payload(self, service): if isinstance(service, list): - return [{'id': s, 'type': 'service_reference'} for s in service] + return [{"id": s, "type": "service_reference"} for s in service] else: - return [{'id': service, 'type': 'service_reference'}] + return [{"id": service, "type": "service_reference"}] def _compute_start_end_time(self, hours, minutes): now_t = now() @@ -217,8 +219,8 @@ def absent(self, window_id, http_call=fetch_url): url = f"https://api.pagerduty.com/maintenance_windows/{window_id}" headers = dict(self.headers) - response, info = http_call(self.module, url, headers=headers, method='DELETE') - if info['status'] != 204: + response, info = http_call(self.module, url, headers=headers, method="DELETE") + if info["status"] != 204: self.module.fail_json(msg=f"failed to delete the window: {info['msg']}") json_out = self._read_response(response) @@ -236,33 +238,32 @@ def _read_response(self, response): def main(): - module = AnsibleModule( argument_spec=dict( - state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']), + state=dict(required=True, choices=["running", "started", "ongoing", "absent"]), name=dict(), user=dict(), token=dict(required=True, no_log=True), - service=dict(type='list', elements='str', aliases=["services"]), + service=dict(type="list", elements="str", aliases=["services"]), window_id=dict(), requester_id=dict(), - hours=dict(default='1'), # @TODO change to int? - minutes=dict(default='0'), # @TODO change to int? - desc=dict(default='Created by Ansible'), - validate_certs=dict(default=True, type='bool'), + hours=dict(default="1"), # @TODO change to int? + minutes=dict(default="0"), # @TODO change to int? 
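The `hours`/`minutes` options above are still string-typed (hence the `# @TODO change to int?` notes), so `_compute_start_end_time` has to coerce them before any date arithmetic. A minimal standalone sketch of that computation; the `int()` coercion and the use of plain `datetime` instead of the collection's `now()` helper are this sketch's assumptions, since the body of `_compute_start_end_time` is outside this hunk:

```python
from datetime import datetime, timedelta, timezone

def compute_start_end_time(hours, minutes):
    # hours/minutes arrive as strings ("1", "0"), so coerce before building
    # the timedelta (the int() here is an assumption of this sketch).
    start = datetime.now(timezone.utc)
    end = start + timedelta(hours=int(hours), minutes=int(minutes))
    # PagerDuty expects ISO 8601 timestamps for start_time/end_time.
    return start.strftime("%Y-%m-%dT%H:%M:%SZ"), end.strftime("%Y-%m-%dT%H:%M:%SZ")

print(compute_start_end_time("1", "30"))  # window opens now, closes 90 minutes later
```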
+ desc=dict(default="Created by Ansible"), + validate_certs=dict(default=True, type="bool"), ) ) - state = module.params['state'] - name = module.params['name'] - user = module.params['user'] - service = module.params['service'] - window_id = module.params['window_id'] - hours = module.params['hours'] - minutes = module.params['minutes'] - token = module.params['token'] - desc = module.params['desc'] - requester_id = module.params['requester_id'] + state = module.params["state"] + name = module.params["name"] + user = module.params["user"] + service = module.params["service"] + window_id = module.params["window_id"] + hours = module.params["hours"] + minutes = module.params["minutes"] + token = module.params["token"] + desc = module.params["desc"] + requester_id = module.params["requester_id"] pd = PagerDutyRequest(module, name, user, token) @@ -285,5 +286,5 @@ def main(): module.exit_json(msg="success", result=out, changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pagerduty_alert.py b/plugins/modules/pagerduty_alert.py index 4b08d2587d0..53bd06284b6 100644 --- a/plugins/modules/pagerduty_alert.py +++ b/plugins/modules/pagerduty_alert.py @@ -223,36 +223,32 @@ def check(module, name, state, service_id, integration_key, api_key, incident_key=None, http_call=fetch_url): - url = 'https://api.pagerduty.com/incidents' + url = "https://api.pagerduty.com/incidents" headers = { "Content-type": "application/json", "Authorization": f"Token token={api_key}", - 'Accept': 'application/vnd.pagerduty+json;version=2' + "Accept": "application/vnd.pagerduty+json;version=2", } - params = { - 'service_ids[]': service_id, - 'sort_by': 'incident_number:desc', - 'time_zone': 'UTC' - } + params = {"service_ids[]": service_id, "sort_by": "incident_number:desc", "time_zone": "UTC"} if incident_key: - params['incident_key'] = incident_key + params["incident_key"] = incident_key url_parts = list(urlparse(url)) url_parts[4] = urlencode(params, True) url = urlunparse(url_parts) - response, info = http_call(module, url, method='get', headers=headers) + response, info = http_call(module, url, method="get", headers=headers) - if info['status'] != 200: + if info["status"] != 200: module.fail_json(msg=f"failed to check current incident status.Reason: {info['msg']}") incidents = json.loads(response.read())["incidents"] msg = "No corresponding incident" if len(incidents) == 0: - if state in ('acknowledged', 'resolved'): + if state in ("acknowledged", "resolved"): return msg, False return msg, True elif state != incidents[0]["status"]: @@ -261,12 +257,9 @@ def check(module, name, state, service_id, integration_key, api_key, incident_ke return incidents[0], False -def send_event_v1(module, service_key, event_type, desc, - incident_key=None, client=None, client_url=None): +def send_event_v1(module, service_key, event_type, desc, incident_key=None, client=None, client_url=None): url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" - headers = { - "Content-type": "application/json" - } + headers = {"Content-type": "application/json"} data = { "service_key": service_key, @@ -274,23 +267,19 @@ def send_event_v1(module, service_key, event_type, desc, "incident_key": incident_key, "description": desc, "client": client, - "client_url": client_url + "client_url": client_url, } - response, info = fetch_url(module, url, method='post', - headers=headers, data=json.dumps(data)) - if info['status'] != 200: + response, info = fetch_url(module, url, method="post", 
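`check()` above rebuilds the incidents URL by swapping an encoded query string into element 4 of the `urlparse` tuple. The same round trip in isolation (the service ID below is a made-up placeholder):

```python
from urllib.parse import urlencode, urlparse, urlunparse

params = {"service_ids[]": "PXXXXXX", "sort_by": "incident_number:desc", "time_zone": "UTC"}
url_parts = list(urlparse("https://api.pagerduty.com/incidents"))
url_parts[4] = urlencode(params, True)  # element 4 of the 6-tuple is the query string
print(urlunparse(url_parts))
# https://api.pagerduty.com/incidents?service_ids%5B%5D=PXXXXXX&sort_by=incident_number%3Adesc&time_zone=UTC
```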
headers=headers, data=json.dumps(data)) + if info["status"] != 200: module.fail_json(msg=f"failed to {event_type}. Reason: {info['msg']}") json_out = json.loads(response.read()) return json_out -def send_event_v2(module, service_key, event_type, payload, link, - incident_key=None, client=None, client_url=None): +def send_event_v2(module, service_key, event_type, payload, link, incident_key=None, client=None, client_url=None): url = "https://events.pagerduty.com/v2/enqueue" - headers = { - "Content-type": "application/json" - } + headers = {"Content-type": "application/json"} data = { "routing_key": service_key, "event_action": event_type, @@ -304,8 +293,7 @@ def send_event_v2(module, service_key, event_type, payload, link, data["dedup_key"] = incident_key if event_type != "trigger": data.pop("payload") - response, info = fetch_url(module, url, method="post", - headers=headers, data=json.dumps(data)) + response, info = fetch_url(module, url, method="post", headers=headers, data=json.dumps(data)) if info["status"] != 202: module.fail_json(msg=f"failed to {event_type}. Reason: {info['msg']}") json_out = json.loads(response.read()) @@ -320,80 +308,71 @@ def main(): integration_key=dict(no_log=True), service_id=dict(), service_key=dict(no_log=True), - state=dict( - required=True, choices=['triggered', 'acknowledged', 'resolved'] - ), - api_version=dict(type='str', default='v1', choices=['v1', 'v2']), + state=dict(required=True, choices=["triggered", "acknowledged", "resolved"]), + api_version=dict(type="str", default="v1", choices=["v1", "v2"]), client=dict(), client_url=dict(), component=dict(), - custom_details=dict(type='dict'), - desc=dict(default='Created via Ansible'), + custom_details=dict(type="dict"), + desc=dict(default="Created via Ansible"), incident_class=dict(), incident_key=dict(no_log=False), link_url=dict(), link_text=dict(), source=dict(), - severity=dict( - default='critical', choices=['critical', 'warning', 'error', 'info'] - ), + severity=dict(default="critical", choices=["critical", "warning", "error", "info"]), ), required_if=[ - ('api_version', 'v1', ['service_id', 'api_key']), - ('state', 'acknowledged', ['incident_key']), - ('state', 'resolved', ['incident_key']), + ("api_version", "v1", ["service_id", "api_key"]), + ("state", "acknowledged", ["incident_key"]), + ("state", "resolved", ["incident_key"]), ], - required_one_of=[('service_key', 'integration_key')], + required_one_of=[("service_key", "integration_key")], supports_check_mode=True, ) - name = module.params['name'] - service_id = module.params.get('service_id') - integration_key = module.params.get('integration_key') - service_key = module.params.get('service_key') - api_key = module.params.get('api_key') - state = module.params.get('state') - client = module.params.get('client') - client_url = module.params.get('client_url') - desc = module.params.get('desc') - incident_key = module.params.get('incident_key') + name = module.params["name"] + service_id = module.params.get("service_id") + integration_key = module.params.get("integration_key") + service_key = module.params.get("service_key") + api_key = module.params.get("api_key") + state = module.params.get("state") + client = module.params.get("client") + client_url = module.params.get("client_url") + desc = module.params.get("desc") + incident_key = module.params.get("incident_key") payload = { - 'summary': desc, - 'source': module.params.get('source'), - 'timestamp': datetime.now().isoformat(), - 'severity': module.params.get('severity'), - 'component': 
module.params.get('component'), - 'class': module.params.get('incident_class'), - 'custom_details': module.params.get('custom_details'), + "summary": desc, + "source": module.params.get("source"), + "timestamp": datetime.now().isoformat(), + "severity": module.params.get("severity"), + "component": module.params.get("component"), + "class": module.params.get("incident_class"), + "custom_details": module.params.get("custom_details"), } link = {} - if module.params.get('link_url'): - link['href'] = module.params.get('link_url') - if module.params.get('link_text'): - link['text'] = module.params.get('link_text') + if module.params.get("link_url"): + link["href"] = module.params.get("link_url") + if module.params.get("link_text"): + link["text"] = module.params.get("link_text") if integration_key is None: integration_key = service_key - module.warn( - '"service_key" is obsolete parameter and will be removed.' - ' Please, use "integration_key" instead' - ) + module.warn('"service_key" is obsolete parameter and will be removed. Please, use "integration_key" instead') state_event_dict = { - 'triggered': 'trigger', - 'acknowledged': 'acknowledge', - 'resolved': 'resolve', + "triggered": "trigger", + "acknowledged": "acknowledge", + "resolved": "resolve", } event_type = state_event_dict[state] - if module.params.get('api_version') == 'v1': - out, changed = check(module, name, state, service_id, - integration_key, api_key, incident_key) + if module.params.get("api_version") == "v1": + out, changed = check(module, name, state, service_id, integration_key, api_key, incident_key) if not module.check_mode and changed is True: - out = send_event_v1(module, integration_key, event_type, desc, - incident_key, client, client_url) + out = send_event_v1(module, integration_key, event_type, desc, incident_key, client, client_url) else: changed = True - if event_type == 'trigger' and not payload['source']: + if event_type == "trigger" and not payload["source"]: module.fail_json(msg='"service" is a required variable for v2 api endpoint.') out, changed = send_event_v2( module, @@ -409,5 +388,5 @@ def main(): module.exit_json(result=out, changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pagerduty_change.py b/plugins/modules/pagerduty_change.py index ff5792bbc5a..8447821a51f 100644 --- a/plugins/modules/pagerduty_change.py +++ b/plugins/modules/pagerduty_change.py @@ -117,83 +117,76 @@ def main(): module = AnsibleModule( argument_spec=dict( - integration_key=dict(required=True, type='str', no_log=True), - summary=dict(required=True, type='str'), - source=dict(default='Ansible', type='str'), - user=dict(type='str'), - repo=dict(type='str'), - revision=dict(type='str'), - environment=dict(type='str'), - link_url=dict(type='str'), - link_text=dict(type='str'), - url=dict(default='https://events.pagerduty.com/v2/change/enqueue', type='str'), - validate_certs=dict(default=True, type='bool') + integration_key=dict(required=True, type="str", no_log=True), + summary=dict(required=True, type="str"), + source=dict(default="Ansible", type="str"), + user=dict(type="str"), + repo=dict(type="str"), + revision=dict(type="str"), + environment=dict(type="str"), + link_url=dict(type="str"), + link_text=dict(type="str"), + url=dict(default="https://events.pagerduty.com/v2/change/enqueue", type="str"), + validate_certs=dict(default=True, type="bool"), ), - supports_check_mode=True + supports_check_mode=True, ) # API documented at 
https://developer.pagerduty.com/docs/events-api-v2/send-change-events/ - url = module.params['url'] - headers = {'Content-Type': 'application/json'} + url = module.params["url"] + headers = {"Content-Type": "application/json"} if module.check_mode: - _response, info = fetch_url( - module, url, headers=headers, method='POST') + _response, info = fetch_url(module, url, headers=headers, method="POST") - if info['status'] == 400: + if info["status"] == 400: module.exit_json(changed=True) else: module.fail_json( - msg=f"Checking the PagerDuty change event API returned an unexpected response: {info['status']}") + msg=f"Checking the PagerDuty change event API returned an unexpected response: {info['status']}" + ) custom_details = {} - if module.params['user']: - custom_details['user'] = module.params['user'] + if module.params["user"]: + custom_details["user"] = module.params["user"] - if module.params['repo']: - custom_details['repo'] = module.params['repo'] + if module.params["repo"]: + custom_details["repo"] = module.params["repo"] - if module.params['revision']: - custom_details['revision'] = module.params['revision'] + if module.params["revision"]: + custom_details["revision"] = module.params["revision"] - if module.params['environment']: - custom_details['environment'] = module.params['environment'] + if module.params["environment"]: + custom_details["environment"] = module.params["environment"] timestamp = now().strftime("%Y-%m-%dT%H:%M:%S.%fZ") payload = { - 'summary': module.params['summary'], - 'source': module.params['source'], - 'timestamp': timestamp, - 'custom_details': custom_details + "summary": module.params["summary"], + "source": module.params["source"], + "timestamp": timestamp, + "custom_details": custom_details, } - event = { - 'routing_key': module.params['integration_key'], - 'payload': payload - } + event = {"routing_key": module.params["integration_key"], "payload": payload} - if module.params['link_url']: - link = { - 'href': module.params['link_url'] - } + if module.params["link_url"]: + link = {"href": module.params["link_url"]} - if module.params['link_text']: - link['text'] = module.params['link_text'] + if module.params["link_text"]: + link["text"] = module.params["link_text"] - event['links'] = [link] + event["links"] = [link] - _response, info = fetch_url( - module, url, data=module.jsonify(event), headers=headers, method='POST') + _response, info = fetch_url(module, url, data=module.jsonify(event), headers=headers, method="POST") - if info['status'] == 202: + if info["status"] == 202: module.exit_json(changed=True) else: - module.fail_json( - msg=f"Creating PagerDuty change event failed with {info['status']}") + module.fail_json(msg=f"Creating PagerDuty change event failed with {info['status']}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pagerduty_user.py b/plugins/modules/pagerduty_user.py index 36fda8f2e40..0f0016b1b74 100644 --- a/plugins/modules/pagerduty_user.py +++ b/plugins/modules/pagerduty_user.py @@ -97,81 +97,80 @@ def __init__(self, module, session): # check if the user exists def does_user_exist(self, pd_email): - for user in self._apisession.iter_all('users'): - if user['email'] == pd_email: - return user['id'] + for user in self._apisession.iter_all("users"): + if user["email"] == pd_email: + return user["id"] # create a user account on PD def add_pd_user(self, pd_name, pd_email, pd_role): try: - user = self._apisession.persist('users', 'email', { - "name": pd_name, - "email": pd_email, - "type": 
"user", - "role": pd_role, - }) + user = self._apisession.persist( + "users", + "email", + { + "name": pd_name, + "email": pd_email, + "type": "user", + "role": pd_role, + }, + ) return user except PDClientError as e: if e.response.status_code == 400: - self._module.fail_json( - msg=f"Failed to add {pd_name} due to invalid argument") + self._module.fail_json(msg=f"Failed to add {pd_name} due to invalid argument") if e.response.status_code == 401: self._module.fail_json(msg=f"Failed to add {pd_name} due to invalid API key") if e.response.status_code == 402: self._module.fail_json( - msg=f"Failed to add {pd_name} due to inability to perform the action within the API token") + msg=f"Failed to add {pd_name} due to inability to perform the action within the API token" + ) if e.response.status_code == 403: self._module.fail_json( - msg=f"Failed to add {pd_name} due to inability to review the requested resource within the API token") + msg=f"Failed to add {pd_name} due to inability to review the requested resource within the API token" + ) if e.response.status_code == 429: - self._module.fail_json( - msg=f"Failed to add {pd_name} due to reaching the limit of making requests") + self._module.fail_json(msg=f"Failed to add {pd_name} due to reaching the limit of making requests") # delete a user account from PD def delete_user(self, pd_user_id, pd_name): try: - user_path = path.join('/users/', pd_user_id) + user_path = path.join("/users/", pd_user_id) self._apisession.rdelete(user_path) except PDClientError as e: if e.response.status_code == 404: - self._module.fail_json( - msg=f"Failed to remove {pd_name} as user was not found") + self._module.fail_json(msg=f"Failed to remove {pd_name} as user was not found") if e.response.status_code == 403: self._module.fail_json( - msg=f"Failed to remove {pd_name} due to inability to review the requested resource within the API token") + msg=f"Failed to remove {pd_name} due to inability to review the requested resource within the API token" + ) if e.response.status_code == 401: # print out the list of incidents pd_incidents = self.get_incidents_assigned_to_user(pd_user_id) self._module.fail_json(msg=f"Failed to remove {pd_name} as user has assigned incidents {pd_incidents}") if e.response.status_code == 429: - self._module.fail_json( - msg=f"Failed to remove {pd_name} due to reaching the limit of making requests") + self._module.fail_json(msg=f"Failed to remove {pd_name} due to reaching the limit of making requests") # get incidents assigned to a user def get_incidents_assigned_to_user(self, pd_user_id): incident_info = {} - incidents = self._apisession.list_all('incidents', params={'user_ids[]': [pd_user_id]}) + incidents = self._apisession.list_all("incidents", params={"user_ids[]": [pd_user_id]}) for incident in incidents: - incident_info = { - 'title': incident['title'], - 'key': incident['incident_key'], - 'status': incident['status'] - } + incident_info = {"title": incident["title"], "key": incident["incident_key"], "status": incident["status"]} return incident_info # add a user to a team/teams def add_user_to_teams(self, pd_user_id, pd_teams, pd_role): updated_team = None for team in pd_teams: - team_info = self._apisession.find('teams', team, attribute='name') + team_info = self._apisession.find("teams", team, attribute="name") if team_info is not None: try: - updated_team = self._apisession.rput(f"/teams/{team_info['id']}/users/{pd_user_id}", json={ - 'role': pd_role - }) + updated_team = self._apisession.rput( + 
f"/teams/{team_info['id']}/users/{pd_user_id}", json={"role": pd_role} + ) except PDClientError: updated_team = None return updated_team @@ -180,35 +179,49 @@ def add_user_to_teams(self, pd_user_id, pd_teams, pd_role): def main(): module = AnsibleModule( argument_spec=dict( - access_token=dict(type='str', required=True, no_log=True), - pd_user=dict(type='str', required=True), - pd_email=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - pd_role=dict(type='str', default='responder', - choices=['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']), - pd_teams=dict(type='list', elements='str')), - required_if=[['state', 'present', ['pd_teams']], ], + access_token=dict(type="str", required=True, no_log=True), + pd_user=dict(type="str", required=True), + pd_email=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["present", "absent"]), + pd_role=dict( + type="str", + default="responder", + choices=[ + "global_admin", + "manager", + "responder", + "observer", + "stakeholder", + "limited_stakeholder", + "restricted_access", + ], + ), + pd_teams=dict(type="list", elements="str"), + ), + required_if=[ + ["state", "present", ["pd_teams"]], + ], supports_check_mode=True, ) deps.validate(module) - access_token = module.params['access_token'] - pd_user = module.params['pd_user'] - pd_email = module.params['pd_email'] - state = module.params['state'] - pd_role = module.params['pd_role'] - pd_teams = module.params['pd_teams'] + access_token = module.params["access_token"] + pd_user = module.params["pd_user"] + pd_email = module.params["pd_email"] + state = module.params["state"] + pd_role = module.params["pd_role"] + pd_teams = module.params["pd_teams"] if pd_role: pd_role_gui_value = { - 'global_admin': 'admin', - 'manager': 'user', - 'responder': 'limited_user', - 'observer': 'observer', - 'stakeholder': 'read_only_user', - 'limited_stakeholder': 'read_only_limited_user', - 'restricted_access': 'restricted_access' + "global_admin": "admin", + "manager": "user", + "responder": "limited_user", + "observer": "observer", + "stakeholder": "read_only_user", + "limited_stakeholder": "read_only_limited_user", + "restricted_access": "restricted_access", } pd_role = pd_role_gui_value[pd_role] diff --git a/plugins/modules/pam_limits.py b/plugins/modules/pam_limits.py index 0f551cdfeb4..0b93ade8117 100644 --- a/plugins/modules/pam_limits.py +++ b/plugins/modules/pam_limits.py @@ -148,53 +148,76 @@ from ansible.module_utils.common.text.converters import to_native -def _assert_is_valid_value(module, item, value, prefix=''): - if item in ['nice', 'priority']: +def _assert_is_valid_value(module, item, value, prefix=""): + if item in ["nice", "priority"]: try: valid = -20 <= int(value) <= 19 except ValueError: valid = False if not valid: - module.fail_json(msg=f"{prefix} Value of {value!r} for item {item!r} is invalid. Value must be a number in the range -20 to 19 inclusive. " - "Refer to the limits.conf(5) manual pages for more details.") - elif not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()): - module.fail_json(msg=f"{prefix} Value of {value!r} for item {item!r} is invalid. Value must either be 'unlimited', 'infinity' or -1, all of " - "which indicate no limit, or a limit of 0 or larger. Refer to the limits.conf(5) manual pages for " - "more details." 
% (prefix, value, item)) +        module.fail_json( +            msg=f"{prefix} Value of {value!r} for item {item!r} is invalid. Value must be a number in the range -20 to 19 inclusive. " +            "Refer to the limits.conf(5) manual pages for more details." +        ) +    elif not (value in ["unlimited", "infinity", "-1"] or value.isdigit()): +        module.fail_json( +            msg=f"{prefix} Value of {value!r} for item {item!r} is invalid. Value must either be 'unlimited', 'infinity' or -1, all of " +            "which indicate no limit, or a limit of 0 or larger. Refer to the limits.conf(5) manual pages for " +            "more details." +        )   def main(): -    pam_items = ['core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks', -                 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot'] - -    pam_types = ['soft', 'hard', '-'] - -    limits_conf = '/etc/security/limits.conf' +    pam_items = [ +        "core", +        "data", +        "fsize", +        "memlock", +        "nofile", +        "rss", +        "stack", +        "cpu", +        "nproc", +        "as", +        "maxlogins", +        "maxsyslogins", +        "priority", +        "locks", +        "sigpending", +        "msgqueue", +        "nice", +        "rtprio", +        "chroot", +    ] + +    pam_types = ["soft", "hard", "-"] + +    limits_conf = "/etc/security/limits.conf"   module = AnsibleModule( argument_spec=dict( -            domain=dict(required=True, type='str'), -            limit_type=dict(required=True, type='str', choices=pam_types), -            limit_item=dict(required=True, type='str', choices=pam_items), -            value=dict(required=True, type='str'), -            use_max=dict(default=False, type='bool'), -            use_min=dict(default=False, type='bool'), -            backup=dict(default=False, type='bool'), -            dest=dict(default=limits_conf, type='str'), -            comment=dict(default='', type='str') +            domain=dict(required=True, type="str"), +            limit_type=dict(required=True, type="str", choices=pam_types), +            limit_item=dict(required=True, type="str", choices=pam_items), +            value=dict(required=True, type="str"), +            use_max=dict(default=False, type="bool"), +            use_min=dict(default=False, type="bool"), +            backup=dict(default=False, type="bool"), +            dest=dict(default=limits_conf, type="str"), +            comment=dict(default="", type="str"), ), supports_check_mode=True, )  -    domain = module.params['domain'] -    limit_type = module.params['limit_type'] -    limit_item = module.params['limit_item'] -    value = module.params['value'] -    use_max = module.params['use_max'] -    use_min = module.params['use_min'] -    backup = module.params['backup'] -    limits_conf = module.params['dest'] -    new_comment = module.params['comment'] +    domain = module.params["domain"] +    limit_type = module.params["limit_type"] +    limit_item = module.params["limit_item"] +    value = module.params["value"] +    use_max = module.params["use_max"] +    use_min = module.params["use_min"] +    backup = module.params["backup"] +    limits_conf = module.params["dest"] +    new_comment = module.params["comment"]   changed = False does_not_exist = False @@ -208,7 +231,9 @@ def main(): does_not_exist = True changed = True else: -            module.fail_json(msg=f"directory {limits_conf_dir} is not writable (check presence, access rights, use sudo)") +            module.fail_json( +                msg=f"directory {limits_conf_dir} is not writable (check presence, access rights, use sudo)" +            )   if use_max and use_min: module.fail_json(msg="Cannot use use_min and use_max at the same time.") @@ -219,45 +244,45 @@ def main(): if backup: backup_file = module.backup_local(limits_conf)  -    space_pattern = re.compile(r'\s+') +    space_pattern = re.compile(r"\s+")   if does_not_exist: lines = [] else: -        with open(limits_conf, 'rb') as f: +        with 
open(limits_conf, "rb") as f: lines = list(f) - message = '' + message = "" # Tempfile - nf = tempfile.NamedTemporaryFile(mode='w+') + nf = tempfile.NamedTemporaryFile(mode="w+") found = False new_value = value for line in lines: - line = to_native(line, errors='surrogate_or_strict') - if line.startswith('#'): + line = to_native(line, errors="surrogate_or_strict") + if line.startswith("#"): nf.write(line) continue - newline = re.sub(space_pattern, ' ', line).strip() + newline = re.sub(space_pattern, " ", line).strip() if not newline: nf.write(line) continue # Remove comment in line - newline = newline.split('#', 1)[0] + newline = newline.split("#", 1)[0] try: - old_comment = line.split('#', 1)[1] + old_comment = line.split("#", 1)[1] except Exception: - old_comment = '' + old_comment = "" newline = newline.rstrip() if not new_comment: new_comment = old_comment - line_fields = newline.split(' ') + line_fields = newline.split(" ") if len(line_fields) != 4: nf.write(line) @@ -268,8 +293,9 @@ def main(): line_item = line_fields[2] actual_value = line_fields[3] - _assert_is_valid_value(module, line_item, actual_value, - prefix=f"Invalid configuration found in '{limits_conf}'.") + _assert_is_valid_value( + module, line_item, actual_value, prefix=f"Invalid configuration found in '{limits_conf}'." + ) # Found the line if line_domain == domain and line_type == limit_type and line_item == limit_item: @@ -279,9 +305,9 @@ def main(): nf.write(line) continue - if line_type not in ['nice', 'priority']: - actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1'] - value_unlimited = value in ['unlimited', 'infinity', '-1'] + if line_type not in ["nice", "priority"]: + actual_value_unlimited = actual_value in ["unlimited", "infinity", "-1"] + value_unlimited = value in ["unlimited", "infinity", "-1"] else: actual_value_unlimited = value_unlimited = False @@ -327,12 +353,12 @@ def main(): nf.flush() - with open(nf.name, 'r') as content: + with open(nf.name, "r") as content: content_new = content.read() if not module.check_mode: if does_not_exist: - with open(limits_conf, 'a'): + with open(limits_conf, "a"): pass # Move tempfile to newfile @@ -346,14 +372,14 @@ def main(): res_args = dict( changed=changed, msg=message, - diff=dict(before=b''.join(lines), after=content_new), + diff=dict(before=b"".join(lines), after=content_new), ) if backup: - res_args['backup_file'] = backup_file + res_args["backup_file"] = backup_file module.exit_json(**res_args) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pamd.py b/plugins/modules/pamd.py index 24107233921..71cd3ed50d4 100644 --- a/plugins/modules/pamd.py +++ b/plugins/modules/pamd.py @@ -235,17 +235,19 @@ from datetime import datetime -RULE_REGEX = re.compile(r"""(?P-?(?:auth|account|session|password))\s+ +RULE_REGEX = re.compile( + r"""(?P-?(?:auth|account|session|password))\s+ (?P\[.*\]|\S*)\s+ (?P\S*)\s* - (?P.*)\s*""", re.X) + (?P.*)\s*""", + re.X, +) RULE_ARG_REGEX = re.compile(r"(\[.*\]|\S*)") -VALID_TYPES = ['account', '-account', 'auth', '-auth', 'password', '-password', 'session', '-session'] +VALID_TYPES = ["account", "-account", "auth", "-auth", "password", "-password", "session", "-session"] class PamdLine: - def __init__(self, line): self.line = line self.prev = None @@ -253,7 +255,7 @@ def __init__(self, line): @property def is_valid(self): - if self.line.strip() == '': + if self.line.strip() == "": return True return False @@ -275,13 +277,12 @@ class PamdEmptyLine(PamdLine): class 
PamdComment(PamdLine): - def __init__(self, line): super().__init__(line) @property def is_valid(self): - if self.line.startswith('#'): + if self.line.startswith("#"): return True return False @@ -292,22 +293,49 @@ def __init__(self, line): @property def is_valid(self): - if self.line.startswith('@include'): + if self.line.startswith("@include"): return True return False class PamdRule(PamdLine): - - valid_simple_controls = ['required', 'requisite', 'sufficient', 'optional', 'include', 'substack', 'definitive'] - valid_control_values = ['success', 'open_err', 'symbol_err', 'service_err', 'system_err', 'buf_err', - 'perm_denied', 'auth_err', 'cred_insufficient', 'authinfo_unavail', 'user_unknown', - 'maxtries', 'new_authtok_reqd', 'acct_expired', 'session_err', 'cred_unavail', - 'cred_expired', 'cred_err', 'no_module_data', 'conv_err', 'authtok_err', - 'authtok_recover_err', 'authtok_lock_busy', 'authtok_disable_aging', 'try_again', - 'ignore', 'abort', 'authtok_expired', 'module_unknown', 'bad_item', 'conv_again', - 'incomplete', 'default'] - valid_control_actions = ['ignore', 'bad', 'die', 'ok', 'done', 'reset'] + valid_simple_controls = ["required", "requisite", "sufficient", "optional", "include", "substack", "definitive"] + valid_control_values = [ + "success", + "open_err", + "symbol_err", + "service_err", + "system_err", + "buf_err", + "perm_denied", + "auth_err", + "cred_insufficient", + "authinfo_unavail", + "user_unknown", + "maxtries", + "new_authtok_reqd", + "acct_expired", + "session_err", + "cred_unavail", + "cred_expired", + "cred_err", + "no_module_data", + "conv_err", + "authtok_err", + "authtok_recover_err", + "authtok_lock_busy", + "authtok_disable_aging", + "try_again", + "ignore", + "abort", + "authtok_expired", + "module_unknown", + "bad_item", + "conv_again", + "incomplete", + "default", + ] + valid_control_actions = ["ignore", "bad", "die", "ok", "done", "reset"] def __init__(self, rule_type, rule_control, rule_path, rule_args=None): self.prev = None @@ -322,20 +350,18 @@ def __init__(self, rule_type, rule_control, rule_path, rule_args=None): # Method to check if a rule matches the type, control and path. 
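`rule_from_string` (just below) turns each line into a `PamdRule` via `RULE_REGEX`, whose named groups split a pam.d rule into type, control, module path, and arguments. A standalone parse with an equivalent pattern, reproduced here only so the snippet runs on its own:

```python
import re

# Same shape as the module's RULE_REGEX.
RULE = re.compile(
    r"""(?P<rule_type>-?(?:auth|account|session|password))\s+
        (?P<control>\[.*\]|\S*)\s+
        (?P<path>\S*)\s*
        (?P<args>.*)\s*""",
    re.X,
)

m = RULE.search("auth       [success=1 default=ignore]      pam_unix.so nullok")
print(m.group("rule_type"))  # auth
print(m.group("control"))    # [success=1 default=ignore]
print(m.group("path"))       # pam_unix.so
print(m.group("args"))       # nullok
```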
def matches(self, rule_type, rule_control, rule_path, rule_args=None): - return (rule_type == self.rule_type and - rule_control == self.rule_control and - rule_path == self.rule_path) + return rule_type == self.rule_type and rule_control == self.rule_control and rule_path == self.rule_path @classmethod def rule_from_string(cls, line): rule_match = RULE_REGEX.search(line) - rule_args = parse_module_arguments(rule_match.group('args')) - return cls(rule_match.group('rule_type'), rule_match.group('control'), rule_match.group('path'), rule_args) + rule_args = parse_module_arguments(rule_match.group("args")) + return cls(rule_match.group("rule_type"), rule_match.group("control"), rule_match.group("path"), rule_args) def __str__(self): if self.rule_args: return f"{self.rule_type: <11}{self.rule_control} {self.rule_path} {' '.join(self.rule_args)}" - return f'{self.rule_type: <11}{self.rule_control} {self.rule_path}' + return f"{self.rule_type: <11}{self.rule_control} {self.rule_path}" @property def rule_control(self): @@ -345,9 +371,9 @@ def rule_control(self): @rule_control.setter def rule_control(self, control): - if control.startswith('['): - control = control.replace(' = ', '=').replace('[', '').replace(']', '') - self._control = control.split(' ') + if control.startswith("["): + control = control.replace(" = ", "=").replace("[", "").replace("]", "") + self._control = control.split(" ") else: self._control = control @@ -404,16 +430,15 @@ def validate(self): # PamdService encapsulates an entire service and contains one or more rules. It seems the best way is to do this # as a doubly linked list. class PamdService: - def __init__(self, content): self._head = None self._tail = None for line in content.splitlines(): - if line.lstrip().startswith('#'): + if line.lstrip().startswith("#"): pamd_line = PamdComment(line) - elif line.lstrip().startswith('@include'): + elif line.lstrip().startswith("@include"): pamd_line = PamdInclude(line) - elif line.strip() == '': + elif line.strip() == "": pamd_line = PamdEmptyLine(line) else: pamd_line = PamdRule.rule_from_string(line) @@ -451,7 +476,6 @@ def get(self, rule_type, rule_control, rule_path): lines = [] current_line = self._head while current_line is not None: - if isinstance(current_line, PamdRule) and current_line.matches(rule_type, rule_control, rule_path): lines.append(current_line) @@ -464,8 +488,9 @@ def has_rule(self, rule_type, rule_control, rule_path): return True return False - def update_rule(self, rule_type, rule_control, rule_path, - new_type=None, new_control=None, new_path=None, new_args=None): + def update_rule( + self, rule_type, rule_control, rule_path, new_type=None, new_control=None, new_path=None, new_args=None + ): # Get a list of rules we want to change rules_to_find = self.get(rule_type, rule_control, rule_path) @@ -496,8 +521,9 @@ def update_rule(self, rule_type, rule_control, rule_path, return changes - def insert_before(self, rule_type, rule_control, rule_path, - new_type=None, new_control=None, new_path=None, new_args=None): + def insert_before( + self, rule_type, rule_control, rule_path, new_type=None, new_control=None, new_path=None, new_args=None + ): # Get a list of rules we want to change rules_to_find = self.get(rule_type, rule_control, rule_path) changes = 0 @@ -544,8 +570,9 @@ def insert_before(self, rule_type, rule_control, rule_path, return changes - def insert_after(self, rule_type, rule_control, rule_path, - new_type=None, new_control=None, new_path=None, new_args=None): + def insert_after( + self, rule_type, 
rule_control, rule_path, new_type=None, new_control=None, new_path=None, new_args=None + ): # Get a list of rules we want to change rules_to_find = self.get(rule_type, rule_control, rule_path) changes = 0 @@ -710,7 +737,7 @@ def __str__(self): else: lines.insert(1, mark) - lines_joined = '\n'.join(lines) + lines_joined = "\n".join(lines) return f"{lines_joined}\n" @@ -739,20 +766,23 @@ def parse_module_arguments(module_arguments, return_none=False): def main(): - module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), - type=dict(type='str', required=True, choices=VALID_TYPES), - control=dict(type='str', required=True), - module_path=dict(type='str', required=True), - new_type=dict(type='str', choices=VALID_TYPES), - new_control=dict(type='str'), - new_module_path=dict(type='str'), - module_arguments=dict(type='list', elements='str'), - state=dict(type='str', default='updated', choices=['absent', 'after', 'args_absent', 'args_present', 'before', 'updated']), - path=dict(type='path', default='/etc/pam.d'), - backup=dict(type='bool', default=False), + name=dict(type="str", required=True), + type=dict(type="str", required=True, choices=VALID_TYPES), + control=dict(type="str", required=True), + module_path=dict(type="str", required=True), + new_type=dict(type="str", choices=VALID_TYPES), + new_control=dict(type="str"), + new_module_path=dict(type="str"), + module_arguments=dict(type="list", elements="str"), + state=dict( + type="str", + default="updated", + choices=["absent", "after", "args_absent", "args_present", "before", "updated"], + ), + path=dict(type="path", default="/etc/pam.d"), + backup=dict(type="bool", default=False), ), supports_check_mode=True, required_if=[ @@ -767,43 +797,71 @@ def main(): # Open the file and read the content or fail try: - with open(fname, 'r') as service_file_obj: + with open(fname, "r") as service_file_obj: content = service_file_obj.read() except IOError as e: # If unable to read the file, fail out - module.fail_json(msg=f'Unable to open/read PAM module file {fname} with error {e}.') + module.fail_json(msg=f"Unable to open/read PAM module file {fname} with error {e}.") # Assuming we didn't fail, create the service service = PamdService(content) # Set the action - action = module.params['state'] + action = module.params["state"] changes = 0 # Take action - if action == 'updated': - changes = service.update_rule(module.params['type'], module.params['control'], module.params['module_path'], - module.params['new_type'], module.params['new_control'], module.params['new_module_path'], - module.params['module_arguments']) - elif action == 'before': - changes = service.insert_before(module.params['type'], module.params['control'], module.params['module_path'], - module.params['new_type'], module.params['new_control'], module.params['new_module_path'], - module.params['module_arguments']) - elif action == 'after': - changes = service.insert_after(module.params['type'], module.params['control'], module.params['module_path'], - module.params['new_type'], module.params['new_control'], module.params['new_module_path'], - module.params['module_arguments']) - elif action == 'args_absent': - changes = service.remove_module_arguments(module.params['type'], module.params['control'], module.params['module_path'], - module.params['module_arguments']) - elif action == 'args_present': - if [arg for arg in parse_module_arguments(module.params['module_arguments']) if arg.startswith("[")]: - module.fail_json(msg="Unable to process bracketed '[' complex 
arguments with 'args_present'. Please use 'updated'.") - - changes = service.add_module_arguments(module.params['type'], module.params['control'], module.params['module_path'], - module.params['module_arguments']) - elif action == 'absent': - changes = service.remove(module.params['type'], module.params['control'], module.params['module_path']) + if action == "updated": + changes = service.update_rule( + module.params["type"], + module.params["control"], + module.params["module_path"], + module.params["new_type"], + module.params["new_control"], + module.params["new_module_path"], + module.params["module_arguments"], + ) + elif action == "before": + changes = service.insert_before( + module.params["type"], + module.params["control"], + module.params["module_path"], + module.params["new_type"], + module.params["new_control"], + module.params["new_module_path"], + module.params["module_arguments"], + ) + elif action == "after": + changes = service.insert_after( + module.params["type"], + module.params["control"], + module.params["module_path"], + module.params["new_type"], + module.params["new_control"], + module.params["new_module_path"], + module.params["module_arguments"], + ) + elif action == "args_absent": + changes = service.remove_module_arguments( + module.params["type"], + module.params["control"], + module.params["module_path"], + module.params["module_arguments"], + ) + elif action == "args_present": + if [arg for arg in parse_module_arguments(module.params["module_arguments"]) if arg.startswith("[")]: + module.fail_json( + msg="Unable to process bracketed '[' complex arguments with 'args_present'. Please use 'updated'." + ) + + changes = service.add_module_arguments( + module.params["type"], + module.params["control"], + module.params["module_path"], + module.params["module_arguments"], + ) + elif action == "absent": + changes = service.remove(module.params["type"], module.params["control"], module.params["module_path"]) valid, msg = service.validate() @@ -814,26 +872,26 @@ def main(): result = dict( changed=(changes > 0), change_count=changes, - backupdest='', + backupdest="", ) # If not check mode and something changed, backup the original if necessary then write out the file or fail - if not module.check_mode and result['changed']: + if not module.check_mode and result["changed"]: # First, create a backup if desired. 
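Below, the rewritten service file is first written to a `NamedTemporaryFile` and then swapped into place with `module.atomic_move`, so a failure mid-write can never truncate the live pam.d file. Outside of `AnsibleModule`, the same write-to-temp-then-rename idea looks roughly like this, with `os.replace` standing in for `atomic_move` (which additionally preserves ownership and SELinux context):

```python
import os
import tempfile

def atomic_write(path, content):
    # Create the temp file in the target directory so the final rename
    # stays on one filesystem and remains a single atomic step.
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(path) or ".")
    try:
        with os.fdopen(fd, "w") as fh:
            fh.write(content)
        os.replace(tmp_path, path)  # atomic on POSIX
    except BaseException:
        os.unlink(tmp_path)
        raise

atomic_write("demo.conf", "auth required pam_unix.so\n")
```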
-    if module.params['backup']: -        result['backupdest'] = module.backup_local(fname) +    if module.params["backup"]: +        result["backupdest"] = module.backup_local(fname)   try: -        temp_file = NamedTemporaryFile(mode='w', dir=module.tmpdir, delete=False) -        with open(temp_file.name, 'w') as fd: +        temp_file = NamedTemporaryFile(mode="w", dir=module.tmpdir, delete=False) +        with open(temp_file.name, "w") as fd: fd.write(str(service)) except IOError: -        module.fail_json(msg=f'Unable to create temporary file {temp_file}') +        module.fail_json(msg=f"Unable to create temporary file {temp_file}")   module.atomic_move(temp_file.name, os.path.realpath(fname))   module.exit_json(**result)   -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/parted.py b/plugins/modules/parted.py index a5c7b034df5..cbd093cdcbc 100644 --- a/plugins/modules/parted.py +++ b/plugins/modules/parted.py @@ -230,30 +230,24 @@   # Reference prefixes (International System of Units and IEC) -units_si = ['B', 'KB', 'MB', 'GB', 'TB'] -units_iec = ['KiB', 'MiB', 'GiB', 'TiB'] -parted_units = units_si + units_iec + ['s', '%', 'cyl', 'chs', 'compact'] +units_si = ["B", "KB", "MB", "GB", "TB"] +units_iec = ["KiB", "MiB", "GiB", "TiB"] +parted_units = units_si + units_iec + ["s", "%", "cyl", "chs", "compact"]   -def parse_unit(size_str, unit=''): +def parse_unit(size_str, unit=""): """ Parses a string containing a size or boundary information """ -    matches = re.search(r'^(-?[\d.]+) *([\w%]+)?$', size_str) +    matches = re.search(r"^(-?[\d.]+) *([\w%]+)?$", size_str) if matches is None: # "<cylinder>,<head>,<sector>" format -        matches = re.search(r'^(\d+),(\d+),(\d+)$', size_str) +        matches = re.search(r"^(\d+),(\d+),(\d+)$", size_str) if matches is None: -            module.fail_json( -                msg=f"Error interpreting parted size output: '{size_str}'" -            ) +            module.fail_json(msg=f"Error interpreting parted size output: '{size_str}'")  -        size = { -            'cylinder': int(matches.group(1)), -            'head': int(matches.group(2)), -            'sector': int(matches.group(3)) -        } -        unit = 'chs' +        size = {"cylinder": int(matches.group(1)), "head": int(matches.group(2)), "sector": int(matches.group(3))} +        unit = "chs"   else: # Normal format: "<number>[<unit>]" @@ -288,46 +282,46 @@ def parse_partition_info(parted_output, unit): (for CHS/CYL) "number":"begin":"end":"filesystem-type":"partition-name":"flags-set"; """ -    lines = [x for x in parted_output.split('\n') if x.strip() != ''] +    lines = [x for x in parted_output.split("\n") if x.strip() != ""]   # Generic device info -    generic_params = lines[1].rstrip(';').split(':') +    generic_params = lines[1].rstrip(";").split(":")   # The unit is read once, because parted always returns the same unit size, unit = parse_unit(generic_params[1], unit)   generic = { -        'dev': generic_params[0], -        'size': size, -        'unit': unit.lower(), -        'table': generic_params[5], -        'model': generic_params[6], -        'logical_block': int(generic_params[3]), -        'physical_block': int(generic_params[4]) +        "dev": generic_params[0], +        "size": size, +        "unit": unit.lower(), +        "table": generic_params[5], +        "model": generic_params[6], +        "logical_block": int(generic_params[3]), +        "physical_block": int(generic_params[4]), }   # CYL and CHS have an additional line in the output -    if unit in ['cyl', 'chs']: -        chs_info = lines[2].rstrip(';').split(':') +    if unit in ["cyl", "chs"]: +        chs_info = lines[2].rstrip(";").split(":") cyl_size, cyl_unit = parse_unit(chs_info[3]) -        generic['chs_info'] = { -            'cylinders': int(chs_info[0]), -            'heads': int(chs_info[1]), -            'sectors': int(chs_info[2]), -            'cyl_size': cyl_size, - 
'cyl_size_unit': cyl_unit.lower() + generic["chs_info"] = { + "cylinders": int(chs_info[0]), + "heads": int(chs_info[1]), + "sectors": int(chs_info[2]), + "cyl_size": cyl_size, + "cyl_size_unit": cyl_unit.lower(), } lines = lines[1:] parts = [] for line in lines[2:]: - part_params = line.rstrip(';').split(':') + part_params = line.rstrip(";").split(":") # CHS use a different format than BYT, but contrary to what stated by # the author, CYL is the same as BYT. I've tested this undocumented # behaviour down to parted version 1.8.3, which is the first version # that supports the machine parseable output. - if unit != 'chs': + if unit != "chs": size = parse_unit(part_params[3])[0] fstype = part_params[4] name = part_params[5] @@ -339,18 +333,20 @@ def parse_partition_info(parted_output, unit): name = part_params[4] flags = part_params[5] - parts.append({ - 'num': int(part_params[0]), - 'begin': parse_unit(part_params[1])[0], - 'end': parse_unit(part_params[2])[0], - 'size': size, - 'fstype': fstype, - 'name': name, - 'flags': [f.strip() for f in flags.split(', ') if f != ''], - 'unit': unit.lower(), - }) + parts.append( + { + "num": int(part_params[0]), + "begin": parse_unit(part_params[1])[0], + "end": parse_unit(part_params[2])[0], + "size": size, + "fstype": fstype, + "name": name, + "flags": [f.strip() for f in flags.split(", ") if f != ""], + "unit": unit.lower(), + } + ) - return {'generic': generic, 'partitions': parts} + return {"generic": generic, "partitions": parts} def format_disk_size(size_bytes, unit): @@ -366,14 +362,12 @@ def format_disk_size(size_bytes, unit): # Shortcut if size_bytes == 0: - return 0.0, 'b' + return 0.0, "b" # Cases where we default to 'compact' - if unit in ['', 'compact', 'cyl', 'chs']: - index = max(0, int( - (math.log10(size_bytes) - 1.0) / 3.0 - )) - unit = 'b' + if unit in ["", "compact", "cyl", "chs"]: + index = max(0, int((math.log10(size_bytes) - 1.0) / 3.0)) + unit = "b" if index < len(units_si): unit = units_si[index] @@ -384,7 +378,7 @@ def format_disk_size(size_bytes, unit): elif unit in units_iec: multiplier = 1024.0 ** units_iec.index(unit) - output = size_bytes // multiplier * (1 + 1E-16) + output = size_bytes // multiplier * (1 + 1e-16) # Corrections to round up as per IEEE754 standard if output < 10: @@ -412,7 +406,7 @@ def convert_to_bytes(size_str, unit): multiplier = 1000.0 ** units_si.index(unit) elif unit in units_iec: multiplier = 1024.0 ** (units_iec.index(unit) + 1) - elif unit in ['', 'compact', 'cyl', 'chs']: + elif unit in ["", "compact", "cyl", "chs"]: # As per format_disk_size, default to compact, which defaults to megabytes multiplier = 1000.0 ** units_si.index("MB") @@ -438,16 +432,16 @@ def get_unlabeled_device_info(device, unit): size, unit = format_disk_size(size_bytes, unit) return { - 'generic': { - 'dev': device, - 'table': "unknown", - 'size': size, - 'unit': unit, - 'logical_block': logic_block, - 'physical_block': phys_block, - 'model': f"{vendor} {model}", + "generic": { + "dev": device, + "table": "unknown", + "size": size, + "unit": unit, + "logical_block": logic_block, + "physical_block": phys_block, + "model": f"{vendor} {model}", }, - 'partitions': [] + "partitions": [], } @@ -467,10 +461,12 @@ def get_device_info(device, unit): command = [parted_exec, "-s", "-m", device, "--", "unit", unit, "print"] rc, out, err = module.run_command(command) - if rc != 0 and 'unrecognised disk label' not in err: - module.fail_json(msg=( - f"Error while getting device information with parted script: '{' '.join(command)}'"), - 
rc=rc, out=out, err=err + if rc != 0 and "unrecognised disk label" not in err: + module.fail_json( + msg=(f"Error while getting device information with parted script: '{' '.join(command)}'"), + rc=rc, + out=out, + err=err, ) return parse_partition_info(out, unit) @@ -491,7 +487,7 @@ def check_parted_label(device): # Older parted versions return a message in the stdout and RC > 0. rc, out, err = module.run_command([parted_exec, "-s", "-m", device, "print"]) - if rc != 0 and 'unrecognised disk label' in out.lower(): + if rc != 0 and "unrecognised disk label" in out.lower(): return True return False @@ -501,7 +497,7 @@ def parse_parted_version(out): """ Returns version tuple from the output of "parted --version" command """ - lines = [x for x in out.split('\n') if x.strip() != ''] + lines = [x for x in out.split("\n") if x.strip() != ""] if len(lines) == 0: return None, None, None @@ -509,7 +505,7 @@ def parse_parted_version(out): # parted (GNU parted) 3.3 # parted (GNU parted) 3.4.5 # parted (GNU parted) 3.3.14-dfc61 - matches = re.search(r'^parted.+\s(\d+)\.(\d+)(?:\.(\d+))?', lines[0].strip()) + matches = re.search(r"^parted.+\s(\d+)\.(\d+)(?:\.(\d+))?", lines[0].strip()) if matches is None: return None, None, None @@ -532,9 +528,7 @@ def parted_version(): rc, out, err = module.run_command([parted_exec, "--version"]) if rc != 0: - module.fail_json( - msg="Failed to get parted version.", rc=rc, out=out, err=err - ) + module.fail_json(msg="Failed to get parted version.", rc=rc, out=out, err=err) (major, minor, rev) = parse_parted_version(out) if major is None: @@ -549,8 +543,8 @@ def parted(script, device, align): """ global module, parted_exec # pylint: disable=global-variable-not-assigned - align_option = ['-a', align] - if align == 'undefined': + align_option = ["-a", align] + if align == "undefined": align_option = [] """ @@ -559,18 +553,17 @@ def parted(script, device, align): http://savannah.gnu.org/news/?id=10114 """ if parted_version() >= (3, 4, 64): - script_option = ['-s', '-f'] + script_option = ["-s", "-f"] else: - script_option = ['-s'] + script_option = ["-s"] if script and not module.check_mode: - command = [parted_exec] + script_option + ['-m'] + align_option + [device, '--'] + script + command = [parted_exec] + script_option + ["-m"] + align_option + [device, "--"] + script rc, out, err = module.run_command(command) if rc != 0: module.fail_json( - msg=f"Error while running parted script: {' '.join(command).strip()}", - rc=rc, out=out, err=err + msg=f"Error while running parted script: {' '.join(command).strip()}", rc=rc, out=out, err=err ) @@ -579,7 +572,7 @@ def read_record(file_path, default=None): Reads the first line of a file and returns it. 
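`parse_parted_version` above reduces the `parted --version` banner to a comparable tuple, which `parted()` then checks against `(3, 4, 64)` to decide whether the `-f` flag is available. Exercising the same regex against the banner formats listed in the comments (treating a missing patch level as 0 is this sketch's assumption):

```python
import re

VERSION_RE = re.compile(r"^parted.+\s(\d+)\.(\d+)(?:\.(\d+))?")

for banner in (
    "parted (GNU parted) 3.3",
    "parted (GNU parted) 3.4.5",
    "parted (GNU parted) 3.3.14-dfc61",
    "parted (GNU parted) 3.5",
):
    m = VERSION_RE.search(banner.strip())
    version = tuple(int(g) if g is not None else 0 for g in m.groups())
    print(version, version >= (3, 4, 64))
# (3, 3, 0) False / (3, 4, 5) False / (3, 3, 14) False / (3, 5, 0) True
```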
""" try: - with open(file_path, 'r') as f: + with open(file_path, "r") as f: return f.readline().strip() except IOError: return default @@ -609,89 +602,83 @@ def main(): script = [] module = AnsibleModule( argument_spec=dict( - device=dict(type='str', required=True), - align=dict(type='str', default='optimal', choices=['cylinder', 'minimal', 'none', 'optimal', 'undefined']), - number=dict(type='int'), - + device=dict(type="str", required=True), + align=dict(type="str", default="optimal", choices=["cylinder", "minimal", "none", "optimal", "undefined"]), + number=dict(type="int"), # unit command - unit=dict(type='str', default='KiB', choices=parted_units), - + unit=dict(type="str", default="KiB", choices=parted_units), # mklabel command - label=dict(type='str', default='msdos', choices=['aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop', 'mac', 'msdos', 'pc98', 'sun']), - + label=dict( + type="str", + default="msdos", + choices=["aix", "amiga", "bsd", "dvh", "gpt", "loop", "mac", "msdos", "pc98", "sun"], + ), # mkpart [] command - part_type=dict(type='str', default='primary', choices=['extended', 'logical', 'primary']), - part_start=dict(type='str', default='0%'), - part_end=dict(type='str', default='100%'), - fs_type=dict(type='str'), - + part_type=dict(type="str", default="primary", choices=["extended", "logical", "primary"]), + part_start=dict(type="str", default="0%"), + part_end=dict(type="str", default="100%"), + fs_type=dict(type="str"), # name command - name=dict(type='str'), - + name=dict(type="str"), # set command - flags=dict(type='list', elements='str'), - + flags=dict(type="list", elements="str"), # rm/mkpart command - state=dict(type='str', default='info', choices=['absent', 'info', 'present']), - + state=dict(type="str", default="info", choices=["absent", "info", "present"]), # resize part - resize=dict(type='bool', default=False), + resize=dict(type="bool", default=False), ), required_if=[ - ['state', 'present', ['number']], - ['state', 'absent', ['number']], + ["state", "present", ["number"]], + ["state", "absent", ["number"]], ], supports_check_mode=True, ) - module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C', 'LC_CTYPE': 'C'} + module.run_command_environ_update = {"LANG": "C", "LC_ALL": "C", "LC_MESSAGES": "C", "LC_CTYPE": "C"} # Data extraction - device = module.params['device'] - align = module.params['align'] - number = module.params['number'] - unit = module.params['unit'] - label = module.params['label'] - part_type = module.params['part_type'] - part_start = module.params['part_start'] - part_end = module.params['part_end'] - name = module.params['name'] - state = module.params['state'] - flags = module.params['flags'] - fs_type = module.params['fs_type'] - resize = module.params['resize'] + device = module.params["device"] + align = module.params["align"] + number = module.params["number"] + unit = module.params["unit"] + label = module.params["label"] + part_type = module.params["part_type"] + part_start = module.params["part_start"] + part_end = module.params["part_end"] + name = module.params["name"] + state = module.params["state"] + flags = module.params["flags"] + fs_type = module.params["fs_type"] + resize = module.params["resize"] # Parted executable - parted_exec = module.get_bin_path('parted', True) + parted_exec = module.get_bin_path("parted", True) # Conditioning if number is not None and number < 1: module.fail_json(msg="The partition number must be greater then 0.") if not check_size_format(part_start): module.fail_json( - 
msg="The argument 'part_start' doesn't respect required format." - "The size unit is case sensitive.", - err=parse_unit(part_start) + msg="The argument 'part_start' doesn't respect required format.The size unit is case sensitive.", + err=parse_unit(part_start), ) if not check_size_format(part_end): module.fail_json( - msg="The argument 'part_end' doesn't respect required format." - "The size unit is case sensitive.", - err=parse_unit(part_end) + msg="The argument 'part_end' doesn't respect required format.The size unit is case sensitive.", + err=parse_unit(part_end), ) # Read the current disk information current_device = get_device_info(device, unit) - current_parts = current_device['partitions'] - - if state == 'present': + current_parts = current_device["partitions"] + if state == "present": # Assign label if required - mklabel_needed = current_device['generic'].get('table', None) != label + mklabel_needed = current_device["generic"].get("table", None) != label if mklabel_needed: script += ["mklabel", label] # Create partition if required - if part_type and (mklabel_needed or not part_exists(current_parts, 'num', number)): + if part_type and (mklabel_needed or not part_exists(current_parts, "num", number)): script += ["mkpart"] script += [part_type] if fs_type is not None: @@ -703,14 +690,14 @@ def main(): script = ["unit", unit] + script # If partition exists, try to resize - if resize and part_exists(current_parts, 'num', number): + if resize and part_exists(current_parts, "num", number): # Ensure new end is different to current - partition = [p for p in current_parts if p['num'] == number][0] - current_part_end = convert_to_bytes(partition['end'], unit) + partition = [p for p in current_parts if p["num"] == number][0] + current_part_end = convert_to_bytes(partition["end"], unit) size, parsed_unit = parse_unit(part_end, unit) if parsed_unit == "%": - size = int((int(current_device['generic']['size']) * size) / 100) + size = int((int(current_device["generic"]["size"]) * size) / 100) parsed_unit = unit desired_part_end = convert_to_bytes(size, parsed_unit) @@ -727,29 +714,29 @@ def main(): script = [] if not module.check_mode: - current_parts = get_device_info(device, unit)['partitions'] + current_parts = get_device_info(device, unit)["partitions"] - if part_exists(current_parts, 'num', number) or module.check_mode: + if part_exists(current_parts, "num", number) or module.check_mode: if changed and module.check_mode: - partition = {'flags': []} # Empty structure for the check-mode + partition = {"flags": []} # Empty structure for the check-mode else: - partition = [p for p in current_parts if p['num'] == number][0] + partition = [p for p in current_parts if p["num"] == number][0] # Assign name to the partition - if name is not None and partition.get('name', None) != name: + if name is not None and partition.get("name", None) != name: # The double quotes need to be included in the arg passed to parted - script += ['name', str(number), f'"{name}"'] + script += ["name", str(number), f'"{name}"'] # Manage flags if flags: # Parted infers boot with esp, if you assign esp, boot is set # and if boot is unset, esp is also unset. 
- if 'esp' in flags and 'boot' not in flags: - flags.append('boot') + if "esp" in flags and "boot" not in flags: + flags.append("boot") # Compute only the changes in flags status - flags_off = list(set(partition['flags']) - set(flags)) - flags_on = list(set(flags) - set(partition['flags'])) + flags_off = list(set(partition["flags"]) - set(flags)) + flags_on = list(set(flags) - set(partition["flags"])) for f in flags_on: script += ["set", str(number), f, "on"] @@ -767,25 +754,25 @@ def main(): changed = True parted(script, device, align) - elif state == 'absent': + elif state == "absent": # Remove the partition - if part_exists(current_parts, 'num', number) or module.check_mode: + if part_exists(current_parts, "num", number) or module.check_mode: script = ["rm", str(number)] output_script += script changed = True parted(script, device, align) - elif state == 'info': + elif state == "info": output_script = ["unit", unit, "print"] # Final status of the device final_device_status = get_device_info(device, unit) module.exit_json( changed=changed, - disk=final_device_status['generic'], - partitions=final_device_status['partitions'], - script=output_script + disk=final_device_status["generic"], + partitions=final_device_status["partitions"], + script=output_script, ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pear.py b/plugins/modules/pear.py index 0df5b03f077..2fad89ff94a 100644 --- a/plugins/modules/pear.py +++ b/plugins/modules/pear.py @@ -122,29 +122,29 @@ def get_local_version(pear_output): """Take pear remoteinfo output and get the installed version""" - lines = pear_output.split('\n') + lines = pear_output.split("\n") for line in lines: - if 'Installed ' in line: + if "Installed " in line: installed = line.rsplit(None, 1)[-1].strip() - if installed == '-': + if installed == "-": continue return installed return None def _get_pear_path(module): - if module.params['executable'] and os.path.isfile(module.params['executable']): - result = module.params['executable'] + if module.params["executable"] and os.path.isfile(module.params["executable"]): + result = module.params["executable"] else: - result = module.get_bin_path('pear', True, [module.params['executable']]) + result = module.get_bin_path("pear", True, [module.params["executable"]]) return result def get_repository_version(pear_output): """Take pear remote-info output and get the latest version""" - lines = pear_output.split('\n') + lines = pear_output.split("\n") for line in lines: - if 'Latest ' in line: + if "Latest " in line: return line.rsplit(None, 1)[-1].strip() return None @@ -195,7 +195,6 @@ def remove_packages(module, packages): remove_c += 1 if remove_c > 0: - module.exit_json(changed=True, msg=f"removed {remove_c} package(s)") module.exit_json(changed=False, msg="package(s) already absent") @@ -241,14 +240,14 @@ def install_packages(module, state, packages, prompts): # if the package is installed and state == present # or state == latest and is up-to-date then skip installed, updated = query_package(module, package) - if installed and (state == 'present' or (state == 'latest' and updated)): + if installed and (state == "present" or (state == "latest" and updated)): continue - if state == 'present': - command = 'install' + if state == "present": + command = "install" - if state == 'latest': - command = 'upgrade' + if state == "latest": + command = "upgrade" if has_prompt and i < len(prompts): prompt_regex = prompts[i][0] @@ -258,7 +257,9 @@ def install_packages(module, state, 
packages, prompts): data = default_stdin cmd = [_get_pear_path(module), command, package] - rc, stdout, stderr = module.run_command(cmd, check_rc=False, prompt_regex=prompt_regex, data=data, binary_data=True) + rc, stdout, stderr = module.run_command( + cmd, check_rc=False, prompt_regex=prompt_regex, data=data, binary_data=True + ) if rc != 0: module.fail_json(msg=f"failed to install {package}: {to_text(stdout + stderr)}") @@ -274,9 +275,11 @@ def check_packages(module, packages, state): would_be_changed = [] for package in packages: installed, updated = query_package(module, package) - if ((state in ["present", "latest"] and not installed) or - (state == "absent" and installed) or - (state == "latest" and not updated)): + if ( + (state in ["present", "latest"] and not installed) + or (state == "absent" and installed) + or (state == "latest" and not updated) + ): would_be_changed.append(package) if would_be_changed: if state == "absent": @@ -289,36 +292,37 @@ def check_packages(module, packages, state): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(aliases=['pkg'], required=True), - state=dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']), - executable=dict(type='path'), - prompts=dict(type='list', elements='raw'), + name=dict(aliases=["pkg"], required=True), + state=dict(default="present", choices=["present", "installed", "latest", "absent", "removed"]), + executable=dict(type="path"), + prompts=dict(type="list", elements="raw"), ), - supports_check_mode=True) + supports_check_mode=True, + ) p = module.params # normalize the state parameter - if p['state'] in ['present', 'installed']: - p['state'] = 'present' - elif p['state'] in ['absent', 'removed']: - p['state'] = 'absent' + if p["state"] in ["present", "installed"]: + p["state"] = "present" + elif p["state"] in ["absent", "removed"]: + p["state"] = "absent" - if p['name']: - pkgs = p['name'].split(',') + if p["name"]: + pkgs = p["name"].split(",") pkg_files = [] for i, pkg in enumerate(pkgs): pkg_files.append(None) if module.check_mode: - check_packages(module, pkgs, p['state']) + check_packages(module, pkgs, p["state"]) - if p['state'] in ['present', 'latest']: - install_packages(module, p['state'], pkgs, p["prompts"]) - elif p['state'] == 'absent': + if p["state"] in ["present", "latest"]: + install_packages(module, p["state"], pkgs, p["prompts"]) + elif p["state"] == "absent": remove_packages(module, pkgs) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pids.py b/plugins/modules/pids.py index 20402997176..fb8cce2d107 100644 --- a/plugins/modules/pids.py +++ b/plugins/modules/pids.py @@ -80,8 +80,8 @@ class PSAdapterError(Exception): class PSAdapter(metaclass=abc.ABCMeta): - NAME_ATTRS = ('name', 'cmdline') - PATTERN_ATTRS = ('name', 'exe', 'cmdline') + NAME_ATTRS = ("name", "cmdline") + PATTERN_ATTRS = ("name", "exe", "cmdline") def __init__(self, psutil): self._psutil = psutil @@ -89,9 +89,9 @@ def __init__(self, psutil): @staticmethod def from_package(psutil): version = LooseVersion(psutil.__version__) - if version < LooseVersion('2.0.0'): + if version < LooseVersion("2.0.0"): return PSAdapter100(psutil) - elif version < LooseVersion('5.3.0'): + elif version < LooseVersion("5.3.0"): return PSAdapter200(psutil) else: return PSAdapter530(psutil) @@ -104,8 +104,11 @@ def _process_iter(self, *attrs): def _has_name(self, proc, name): attributes = self._get_proc_attributes(proc, *self.NAME_ATTRS) - return 
(compare_lower(attributes['name'], name) or - attributes['cmdline'] and compare_lower(attributes['cmdline'][0], name)) + return ( + compare_lower(attributes["name"], name) + or attributes["cmdline"] + and compare_lower(attributes["cmdline"][0], name) + ) def _get_proc_attributes(self, proc, *attributes): return {attribute: self._get_attribute_from_proc(proc, attribute) for attribute in attributes} @@ -130,9 +133,9 @@ def get_pids_by_pattern(self, pattern, ignore_case): def _matches_regex(self, proc, regex): # See https://psutil.readthedocs.io/en/latest/#find-process-by-name for more information attributes = self._get_proc_attributes(proc, *self.PATTERN_ATTRS) - matches_name = regex.search(to_native(attributes['name'])) - matches_exe = attributes['exe'] and regex.search(basename(to_native(attributes['exe']))) - matches_cmd = attributes['cmdline'] and regex.search(to_native(' '.join(attributes['cmdline']))) + matches_name = regex.search(to_native(attributes["name"])) + matches_exe = attributes["exe"] and regex.search(basename(to_native(attributes["exe"]))) + matches_cmd = attributes["cmdline"] and regex.search(to_native(" ".join(attributes["cmdline"]))) return any([matches_name, matches_exe, matches_cmd]) @@ -178,15 +181,14 @@ def compare_lower(a, b): class Pids: def __init__(self, module): - deps.validate(module) self._ps = PSAdapter.from_package(psutil) self._module = module - self._name = module.params['name'] - self._pattern = module.params['pattern'] - self._ignore_case = module.params['ignore_case'] + self._name = module.params["name"] + self._pattern = module.params["pattern"] + self._ignore_case = module.params["ignore_case"] self._pids = [] @@ -204,7 +206,7 @@ def execute(self): @property def result(self): return { - 'pids': self._pids, + "pids": self._pids, } @@ -215,17 +217,13 @@ def main(): pattern=dict(type="str"), ignore_case=dict(type="bool", default=False), ), - required_one_of=[ - ('name', 'pattern') - ], - mutually_exclusive=[ - ('name', 'pattern') - ], + required_one_of=[("name", "pattern")], + mutually_exclusive=[("name", "pattern")], supports_check_mode=True, ) Pids(module).execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pingdom.py b/plugins/modules/pingdom.py index 5c6ad6f88cc..92185062bf8 100644 --- a/plugins/modules/pingdom.py +++ b/plugins/modules/pingdom.py @@ -77,6 +77,7 @@ PINGDOM_IMP_ERR = None try: import pingdom + HAS_PINGDOM = True except Exception: PINGDOM_IMP_ERR = traceback.format_exc() @@ -86,7 +87,6 @@ def pause(checkid, uid, passwd, key): - c = pingdom.PingdomConnection(uid, passwd, key) c.modify_check(checkid, paused=True) check = c.get_check(checkid) @@ -98,7 +98,6 @@ def pause(checkid, uid, passwd, key): def unpause(checkid, uid, passwd, key): - c = pingdom.PingdomConnection(uid, passwd, key) c.modify_check(checkid, paused=False) check = c.get_check(checkid) @@ -110,10 +109,9 @@ def unpause(checkid, uid, passwd, key): def main(): - module = AnsibleModule( argument_spec=dict( - state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']), + state=dict(required=True, choices=["running", "paused", "started", "stopped"]), checkid=dict(required=True), uid=dict(required=True), passwd=dict(required=True, no_log=True), @@ -124,11 +122,11 @@ def main(): if not HAS_PINGDOM: module.fail_json(msg=missing_required_lib("pingdom"), exception=PINGDOM_IMP_ERR) - checkid = module.params['checkid'] - state = module.params['state'] - uid = module.params['uid'] - passwd = module.params['passwd'] - key 
= module.params['key'] + checkid = module.params["checkid"] + state = module.params["state"] + uid = module.params["uid"] + passwd = module.params["passwd"] + key = module.params["key"] if state == "paused" or state == "stopped": (rc, name, result) = pause(checkid, uid, passwd, key) @@ -142,5 +140,5 @@ def main(): module.exit_json(checkid=checkid, name=name, status=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pip_package_info.py b/plugins/modules/pip_package_info.py index 146ca6f06b5..f4105d47883 100644 --- a/plugins/modules/pip_package_info.py +++ b/plugins/modules/pip_package_info.py @@ -101,40 +101,37 @@ class PIP(CLIMgr): - def __init__(self, pip, module): - self.CLI = pip self.module = module def list_installed(self): - rc, out, err = self.module.run_command([self._cli, 'list', '-l', '--format=json']) + rc, out, err = self.module.run_command([self._cli, "list", "-l", "--format=json"]) if rc != 0: raise Exception(f"Unable to list packages rc={rc} : {err}") return json.loads(out) def get_package_details(self, package): - package['source'] = self.CLI + package["source"] = self.CLI return package def main(): - # start work module = AnsibleModule( argument_spec=dict( - clients=dict(type='list', elements='path', default=['pip']), + clients=dict(type="list", elements="path", default=["pip"]), ), - supports_check_mode=True) + supports_check_mode=True, + ) packages = {} - results = {'packages': {}} - clients = module.params['clients'] + results = {"packages": {}} + clients = module.params["clients"] found = 0 for pip in clients: - - if not os.path.basename(pip).startswith('pip'): - module.warn(f'Skipping invalid pip client: {pip}') + if not os.path.basename(pip).startswith("pip"): + module.warn(f"Skipping invalid pip client: {pip}") continue try: pip_mgr = PIP(pip, module) @@ -142,16 +139,16 @@ def main(): found += 1 packages[pip] = pip_mgr.get_packages() except Exception as e: - module.warn(f'Failed to retrieve packages with {pip}: {e}') + module.warn(f"Failed to retrieve packages with {pip}: {e}") continue if found == 0: - module.fail_json(msg=f'Unable to use any of the supplied pip clients: {clients}') + module.fail_json(msg=f"Unable to use any of the supplied pip clients: {clients}") # return info - results['packages'] = packages + results["packages"] = packages module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py index e81be23d638..979e38212da 100644 --- a/plugins/modules/pipx.py +++ b/plugins/modules/pipx.py @@ -213,7 +213,11 @@ from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper -from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_dict +from ansible_collections.community.general.plugins.module_utils.pipx import ( + pipx_runner, + pipx_common_argspec, + make_process_dict, +) from ansible_collections.community.general.plugins.module_utils.pkg_req import PackageRequirement from ansible_collections.community.general.plugins.module_utils.version import LooseVersion @@ -225,44 +229,61 @@ def _make_name(name, suffix): class PipX(StateModuleHelper): - output_params = ['name', 'source', 'index_url', 'force', 'installdeps'] + output_params = ["name", "source", "index_url", "force", "installdeps"] argument_spec = dict( - state=dict(type='str', default='install', - choices=[ - 'present', 'absent', 'install', 'install_all', 
'uninstall', 'uninstall_all', 'inject', 'uninject', - 'upgrade', 'upgrade_shared', 'upgrade_all', 'reinstall', 'reinstall_all', 'latest', 'pin', 'unpin', - ]), - name=dict(type='str'), - source=dict(type='str'), - install_apps=dict(type='bool', default=False), - install_deps=dict(type='bool', default=False), - inject_packages=dict(type='list', elements='str'), - force=dict(type='bool', default=False), - include_injected=dict(type='bool', default=False), - index_url=dict(type='str'), - python=dict(type='str'), - system_site_packages=dict(type='bool', default=False), - editable=dict(type='bool', default=False), - pip_args=dict(type='str'), - suffix=dict(type='str'), - spec_metadata=dict(type='path'), + state=dict( + type="str", + default="install", + choices=[ + "present", + "absent", + "install", + "install_all", + "uninstall", + "uninstall_all", + "inject", + "uninject", + "upgrade", + "upgrade_shared", + "upgrade_all", + "reinstall", + "reinstall_all", + "latest", + "pin", + "unpin", + ], + ), + name=dict(type="str"), + source=dict(type="str"), + install_apps=dict(type="bool", default=False), + install_deps=dict(type="bool", default=False), + inject_packages=dict(type="list", elements="str"), + force=dict(type="bool", default=False), + include_injected=dict(type="bool", default=False), + index_url=dict(type="str"), + python=dict(type="str"), + system_site_packages=dict(type="bool", default=False), + editable=dict(type="bool", default=False), + pip_args=dict(type="str"), + suffix=dict(type="str"), + spec_metadata=dict(type="path"), ) argument_spec.update(pipx_common_argspec) module = dict( argument_spec=argument_spec, required_if=[ - ('state', 'present', ['name']), - ('state', 'install', ['name']), - ('state', 'install_all', ['spec_metadata']), - ('state', 'absent', ['name']), - ('state', 'uninstall', ['name']), - ('state', 'upgrade', ['name']), - ('state', 'reinstall', ['name']), - ('state', 'latest', ['name']), - ('state', 'inject', ['name', 'inject_packages']), - ('state', 'pin', ['name']), - ('state', 'unpin', ['name']), + ("state", "present", ["name"]), + ("state", "install", ["name"]), + ("state", "install_all", ["spec_metadata"]), + ("state", "absent", ["name"]), + ("state", "uninstall", ["name"]), + ("state", "upgrade", ["name"]), + ("state", "reinstall", ["name"]), + ("state", "latest", ["name"]), + ("state", "inject", ["name", "inject_packages"]), + ("state", "pin", ["name"]), + ("state", "unpin", ["name"]), ], required_by=dict( suffix="name", @@ -272,7 +293,7 @@ class PipX(StateModuleHelper): def _retrieve_installed(self): output_process = make_process_dict(include_injected=True) - installed, dummy = self.runner('_list global', output_process=output_process).run() + installed, dummy = self.runner("_list global", output_process=output_process).run() if self.app_name is None: return installed @@ -283,8 +304,8 @@ def __init_module__(self): if self.vars.executable: self.command = [self.vars.executable] else: - facts = ansible_facts(self.module, gather_subset=['python']) - self.command = [facts['python']['executable'], '-m', 'pipx'] + facts = ansible_facts(self.module, gather_subset=["python"]) + self.command = [facts["python"]["executable"], "-m", "pipx"] self.runner = pipx_runner(self.module, self.command) pkg_req = PackageRequirement(self.module, self.vars.name) @@ -292,7 +313,7 @@ def __init_module__(self): self.parsed_req = pkg_req.requirement self.app_name = _make_name(self.parsed_name, self.vars.suffix) - self.vars.set('application', self._retrieve_installed(), change=True, 
diff=True) + self.vars.set("application", self._retrieve_installed(), change=True, diff=True) with self.runner("version") as ctx: rc, out, err = ctx.run() @@ -308,7 +329,7 @@ def _capture_results(self, ctx): self.vars.stdout = ctx.results_out self.vars.stderr = ctx.results_err self.vars.cmd = ctx.cmd - self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set("run_info", ctx.run_info, verbosity=4) def state_install(self): # If we have a version spec and no source, use the version spec as source @@ -317,7 +338,11 @@ def state_install(self): if self.vars.application.get(self.app_name): is_installed = True - version_match = self.vars.application[self.app_name]['version'] in self.parsed_req.specifier if self.parsed_req else True + version_match = ( + self.vars.application[self.app_name]["version"] in self.parsed_req.specifier + if self.parsed_req + else True + ) force = self.vars.force or (not version_match) else: is_installed = False @@ -328,7 +353,9 @@ def state_install(self): return self.changed = True - args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source' + args_order = ( + "state global index_url install_deps force python system_site_packages editable pip_args suffix name_source" + ) with self.runner(args_order, check_mode_skip=True) as ctx: ctx.run(name_source=[self.parsed_name, self.vars.source], force=force) self._capture_results(ctx) @@ -337,7 +364,10 @@ def state_install(self): def state_install_all(self): self.changed = True - with self.runner('state global index_url force python system_site_packages editable pip_args spec_metadata', check_mode_skip=True) as ctx: + with self.runner( + "state global index_url force python system_site_packages editable pip_args spec_metadata", + check_mode_skip=True, + ) as ctx: ctx.run() self._capture_results(ctx) @@ -348,14 +378,16 @@ def state_upgrade(self): if self.vars.force: self.changed = True - with self.runner('state global include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx: + with self.runner( + "state global include_injected index_url force editable pip_args name", check_mode_skip=True + ) as ctx: ctx.run(name=name) self._capture_results(ctx) def state_uninstall(self): if self.vars.application: name = _make_name(self.vars.name, self.vars.suffix) - with self.runner('state global name', check_mode_skip=True) as ctx: + with self.runner("state global name", check_mode_skip=True) as ctx: ctx.run(name=name) self._capture_results(ctx) @@ -366,7 +398,7 @@ def state_reinstall(self): if not self.vars.application: self.do_raise(f"Trying to reinstall a non-existent application: {name}") self.changed = True - with self.runner('state global name python', check_mode_skip=True) as ctx: + with self.runner("state global name python", check_mode_skip=True) as ctx: ctx.run(name=name) self._capture_results(ctx) @@ -376,7 +408,10 @@ def state_inject(self): self.do_raise(f"Trying to inject packages into a non-existent application: {name}") if self.vars.force: self.changed = True - with self.runner('state global index_url install_apps install_deps force editable pip_args name inject_packages', check_mode_skip=True) as ctx: + with self.runner( + "state global index_url install_apps install_deps force editable pip_args name inject_packages", + check_mode_skip=True, + ) as ctx: ctx.run(name=name) self._capture_results(ctx) @@ -384,51 +419,53 @@ def state_uninject(self): name = _make_name(self.vars.name, self.vars.suffix) if not self.vars.application: 
self.do_raise(f"Trying to uninject packages into a non-existent application: {name}") - with self.runner('state global name inject_packages', check_mode_skip=True) as ctx: + with self.runner("state global name inject_packages", check_mode_skip=True) as ctx: ctx.run(name=name) self._capture_results(ctx) def state_uninstall_all(self): - with self.runner('state global', check_mode_skip=True) as ctx: + with self.runner("state global", check_mode_skip=True) as ctx: ctx.run() self._capture_results(ctx) def state_reinstall_all(self): - with self.runner('state global python', check_mode_skip=True) as ctx: + with self.runner("state global python", check_mode_skip=True) as ctx: ctx.run() self._capture_results(ctx) def state_upgrade_all(self): if self.vars.force: self.changed = True - with self.runner('state global include_injected force', check_mode_skip=True) as ctx: + with self.runner("state global include_injected force", check_mode_skip=True) as ctx: ctx.run() self._capture_results(ctx) def state_upgrade_shared(self): - with self.runner('state global pip_args', check_mode_skip=True) as ctx: + with self.runner("state global pip_args", check_mode_skip=True) as ctx: ctx.run() self._capture_results(ctx) def state_latest(self): if not self.vars.application or self.vars.force: self.changed = True - args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source' + args_order = "state global index_url install_deps force python system_site_packages editable pip_args suffix name_source" with self.runner(args_order, check_mode_skip=True) as ctx: - ctx.run(state='install', name_source=[self.vars.name, self.vars.source]) + ctx.run(state="install", name_source=[self.vars.name, self.vars.source]) self._capture_results(ctx) - with self.runner('state global include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx: - ctx.run(state='upgrade') + with self.runner( + "state global include_injected index_url force editable pip_args name", check_mode_skip=True + ) as ctx: + ctx.run(state="upgrade") self._capture_results(ctx) def state_pin(self): - with self.runner('state global name', check_mode_skip=True) as ctx: + with self.runner("state global name", check_mode_skip=True) as ctx: ctx.run() self._capture_results(ctx) def state_unpin(self): - with self.runner('state global name', check_mode_skip=True) as ctx: + with self.runner("state global name", check_mode_skip=True) as ctx: ctx.run() self._capture_results(ctx) @@ -437,5 +474,5 @@ def main(): PipX.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pipx_info.py b/plugins/modules/pipx_info.py index 85d094c8379..e4082af67f5 100644 --- a/plugins/modules/pipx_info.py +++ b/plugins/modules/pipx_info.py @@ -132,19 +132,23 @@ """ from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper -from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_dict +from ansible_collections.community.general.plugins.module_utils.pipx import ( + pipx_runner, + pipx_common_argspec, + make_process_dict, +) from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from ansible.module_utils.facts.compat import ansible_facts class PipXInfo(ModuleHelper): - output_params = ['name'] + output_params = ["name"] argument_spec = dict( - name=dict(type='str'), - include_deps=dict(type='bool', default=False), - 
include_injected=dict(type='bool', default=False), - include_raw=dict(type='bool', default=False), + name=dict(type="str"), + include_deps=dict(type="bool", default=False), + include_injected=dict(type="bool", default=False), + include_raw=dict(type="bool", default=False), ) argument_spec.update(pipx_common_argspec) module = dict( @@ -156,8 +160,8 @@ def __init_module__(self): if self.vars.executable: self.command = [self.vars.executable] else: - facts = ansible_facts(self.module, gather_subset=['python']) - self.command = [facts['python']['executable'], '-m', 'pipx'] + facts = ansible_facts(self.module, gather_subset=["python"]) + self.command = [facts["python"]["executable"], "-m", "pipx"] self.runner = pipx_runner(self.module, self.command) with self.runner("version") as ctx: rc, out, err = ctx.run() @@ -168,17 +172,13 @@ def __init_module__(self): def __run__(self): output_process = make_process_dict(self.vars.include_injected, self.vars.include_deps) - with self.runner('_list global', output_process=output_process) as ctx: + with self.runner("_list global", output_process=output_process) as ctx: applications, raw_data = ctx.run() if self.vars.include_raw: self.vars.raw_output = raw_data if self.vars.name: - self.vars.application = [ - v - for k, v in applications.items() - if k == self.vars.name - ] + self.vars.application = [v for k, v in applications.items() if k == self.vars.name] else: self.vars.application = list(applications.values()) self._capture_results(ctx) @@ -193,5 +193,5 @@ def main(): PipXInfo.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pkg5.py b/plugins/modules/pkg5.py index 2faea511dc4..aa980f740ca 100644 --- a/plugins/modules/pkg5.py +++ b/plugins/modules/pkg5.py @@ -88,12 +88,16 @@ def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='list', elements='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'uninstalled']), - accept_licenses=dict(type='bool', default=False, aliases=['accept', 'accept_licences']), - be_name=dict(type='str'), - refresh=dict(type='bool', default=True), - verbose=dict(type='bool', default=False), + name=dict(type="list", elements="str", required=True), + state=dict( + type="str", + default="present", + choices=["absent", "installed", "latest", "present", "removed", "uninstalled"], + ), + accept_licenses=dict(type="bool", default=False, aliases=["accept", "accept_licences"]), + be_name=dict(type="str"), + refresh=dict(type="bool", default=True), + verbose=dict(type="bool", default=False), ), supports_check_mode=True, ) @@ -104,78 +108,84 @@ def main(): # pkg(5) FMRIs include a comma before the release number, but # AnsibleModule will have split this into multiple items for us. # Try to spot where this has happened and fix it.
- for fragment in params['name']: - if re.search(r'^\d+(?:\.\d+)*', fragment) and packages and re.search(r'@[^,]*$', packages[-1]): + for fragment in params["name"]: + if re.search(r"^\d+(?:\.\d+)*", fragment) and packages and re.search(r"@[^,]*$", packages[-1]): packages[-1] += f",{fragment}" else: packages.append(fragment) - if params['state'] in ['present', 'installed']: - ensure(module, 'present', packages, params) - elif params['state'] in ['latest']: - ensure(module, 'latest', packages, params) - elif params['state'] in ['absent', 'uninstalled', 'removed']: - ensure(module, 'absent', packages, params) + if params["state"] in ["present", "installed"]: + ensure(module, "present", packages, params) + elif params["state"] in ["latest"]: + ensure(module, "latest", packages, params) + elif params["state"] in ["absent", "uninstalled", "removed"]: + ensure(module, "absent", packages, params) def ensure(module, state, packages, params): response = { - 'results': [], - 'msg': '', + "results": [], + "msg": "", } behaviour = { - 'present': { - 'filter': lambda p: not is_installed(module, p), - 'subcommand': 'install', + "present": { + "filter": lambda p: not is_installed(module, p), + "subcommand": "install", }, - 'latest': { - 'filter': lambda p: ( - not is_installed(module, p) or not is_latest(module, p) - ), - 'subcommand': 'install', + "latest": { + "filter": lambda p: (not is_installed(module, p) or not is_latest(module, p)), + "subcommand": "install", }, - 'absent': { - 'filter': lambda p: is_installed(module, p), - 'subcommand': 'uninstall', + "absent": { + "filter": lambda p: is_installed(module, p), + "subcommand": "uninstall", }, } if module.check_mode: - dry_run = ['-n'] + dry_run = ["-n"] else: dry_run = [] - if params['accept_licenses']: - accept_licenses = ['--accept'] + if params["accept_licenses"]: + accept_licenses = ["--accept"] else: accept_licenses = [] - if params['be_name']: + if params["be_name"]: beadm = [f"--be-name={module.params['be_name']}"] else: beadm = [] - if params['refresh']: + if params["refresh"]: no_refresh = [] else: - no_refresh = ['--no-refresh'] + no_refresh = ["--no-refresh"] - if params['verbose']: + if params["verbose"]: verbosity = [] else: - verbosity = ['-q'] + verbosity = ["-q"] - to_modify = list(filter(behaviour[state]['filter'], packages)) + to_modify = list(filter(behaviour[state]["filter"], packages)) if to_modify: rc, out, err = module.run_command( - ['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + verbosity + ['--'] + to_modify) - response['rc'] = rc - response['results'].append(out) - response['msg'] += err - response['changed'] = True + ["pkg", behaviour[state]["subcommand"]] + + dry_run + + accept_licenses + + beadm + + no_refresh + + verbosity + + ["--"] + + to_modify + ) + response["rc"] = rc + response["results"].append(out) + response["msg"] += err + response["changed"] = True if rc == 4: - response['changed'] = False - response['failed'] = False + response["changed"] = False + response["failed"] = False elif rc != 0: module.fail_json(**response) @@ -183,14 +193,14 @@ def ensure(module, state, packages, params): def is_installed(module, package): - rc, out, err = module.run_command(['pkg', 'list', '--', package]) + rc, out, err = module.run_command(["pkg", "list", "--", package]) return not bool(int(rc)) def is_latest(module, package): - rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package]) + rc, out, err = module.run_command(["pkg", "list", "-u", "--", package]) return 
bool(int(rc)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pkg5_publisher.py b/plugins/modules/pkg5_publisher.py index 2e9d6ca488b..765ee499795 100644 --- a/plugins/modules/pkg5_publisher.py +++ b/plugins/modules/pkg5_publisher.py @@ -75,33 +75,33 @@ def main(): module = AnsibleModule( argument_spec=dict( - name=dict(required=True, aliases=['publisher']), - state=dict(default='present', choices=['present', 'absent']), - sticky=dict(type='bool'), - enabled=dict(type='bool'), + name=dict(required=True, aliases=["publisher"]), + state=dict(default="present", choices=["present", "absent"]), + sticky=dict(type="bool"), + enabled=dict(type="bool"), # search_after=dict(), # search_before=dict(), - origin=dict(type='list', elements='str'), - mirror=dict(type='list', elements='str'), + origin=dict(type="list", elements="str"), + mirror=dict(type="list", elements="str"), ) ) - for option in ['origin', 'mirror']: - if module.params[option] == ['']: + for option in ["origin", "mirror"]: + if module.params[option] == [""]: module.params[option] = [] - if module.params['state'] == 'present': + if module.params["state"] == "present": modify_publisher(module, module.params) else: - unset_publisher(module, module.params['name']) + unset_publisher(module, module.params["name"]) def modify_publisher(module, params): - name = params['name'] + name = params["name"] existing = get_publishers(module) if name in existing: - for option in ['origin', 'mirror', 'sticky', 'enabled']: + for option in ["origin", "mirror", "sticky", "enabled"]: if params[option] is not None: if params[option] != existing[name][option]: return set_publisher(module, params) @@ -112,35 +112,32 @@ def modify_publisher(module, params): def set_publisher(module, params): - name = params['name'] + name = params["name"] args = [] - if params['origin'] is not None: - args.append('--remove-origin=*') - args.extend([f"--add-origin={u}" for u in params['origin']]) - if params['mirror'] is not None: - args.append('--remove-mirror=*') - args.extend([f"--add-mirror={u}" for u in params['mirror']]) - - if params['sticky'] is not None and params['sticky']: - args.append('--sticky') - elif params['sticky'] is not None: - args.append('--non-sticky') - - if params['enabled'] is not None and params['enabled']: - args.append('--enable') - elif params['enabled'] is not None: - args.append('--disable') - - rc, out, err = module.run_command( - ["pkg", "set-publisher"] + args + [name], - check_rc=True - ) + if params["origin"] is not None: + args.append("--remove-origin=*") + args.extend([f"--add-origin={u}" for u in params["origin"]]) + if params["mirror"] is not None: + args.append("--remove-mirror=*") + args.extend([f"--add-mirror={u}" for u in params["mirror"]]) + + if params["sticky"] is not None and params["sticky"]: + args.append("--sticky") + elif params["sticky"] is not None: + args.append("--non-sticky") + + if params["enabled"] is not None and params["enabled"]: + args.append("--enable") + elif params["enabled"] is not None: + args.append("--disable") + + rc, out, err = module.run_command(["pkg", "set-publisher"] + args + [name], check_rc=True) response = { - 'rc': rc, - 'results': [out], - 'msg': err, - 'changed': True, + "rc": rc, + "results": [out], + "msg": err, + "changed": True, } if rc != 0: module.fail_json(**response) @@ -151,15 +148,12 @@ def unset_publisher(module, publisher): if publisher not in get_publishers(module): module.exit_json() - rc, out, err = module.run_command( - ["pkg", 
"unset-publisher", publisher], - check_rc=True - ) + rc, out, err = module.run_command(["pkg", "unset-publisher", publisher], check_rc=True) response = { - 'rc': rc, - 'results': [out], - 'msg': err, - 'changed': True, + "rc": rc, + "results": [out], + "msg": err, + "changed": True, } if rc != 0: module.fail_json(**response) @@ -175,21 +169,21 @@ def get_publishers(module): publishers = {} for line in lines: values = dict(zip(keys, map(unstringify, line.split("\t")))) - name = values['publisher'] + name = values["publisher"] if name not in publishers: - publishers[name] = {k: values[k] for k in ['sticky', 'enabled']} - publishers[name]['origin'] = [] - publishers[name]['mirror'] = [] + publishers[name] = {k: values[k] for k in ["sticky", "enabled"]} + publishers[name]["origin"] = [] + publishers[name]["mirror"] = [] - if values['type'] is not None: - publishers[name][values['type']].append(values['uri']) + if values["type"] is not None: + publishers[name][values["type"]].append(values["uri"]) return publishers def unstringify(val): - if val == "-" or val == '': + if val == "-" or val == "": return None elif val == "true": return True @@ -199,5 +193,5 @@ def unstringify(val): return val -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pkgin.py b/plugins/modules/pkgin.py index 4f8dd88c5ab..0af59b67096 100644 --- a/plugins/modules/pkgin.py +++ b/plugins/modules/pkgin.py @@ -136,18 +136,17 @@ class PackageState: def query_package(module, name): - """Search for the package by name and return state of the package. - """ + """Search for the package by name and return state of the package.""" # test whether '-p' (parsable) flag is supported. rc, out, err = module.run_command([PKGIN_PATH, "-p", "-v"]) if rc == 0: - pflag = ['-p'] - splitchar = ';' + pflag = ["-p"] + splitchar = ";" else: pflag = [] - splitchar = ' ' + splitchar = " " # Use "pkgin search" to find the package. The regular expression will # only match on the complete name. @@ -155,13 +154,11 @@ def query_package(module, name): # rc will not be 0 unless the search was a success if rc == 0: - # Search results may contain more than one line (e.g., 'emacs'), so iterate # through each line to see if we have a match. - packages = out.split('\n') + packages = out.split("\n") for package in packages: - # Break up line at spaces. The first part will be the package with its # version (e.g. 
'gcc47-libs-4.7.2nb4'), and the second will be the state # of the package: @@ -170,17 +167,17 @@ def query_package(module, name): # '=' - installed and up to date # '>' - installed but newer than the repository version - if (package in ('reading local summary...', - 'processing local summary...', - 'downloading pkg_summary.xz done.')) or \ - (package.startswith('processing remote summary (')): + if ( + package + in ("reading local summary...", "processing local summary...", "downloading pkg_summary.xz done.") + ) or (package.startswith("processing remote summary (")): continue pkgname_with_version, raw_state = package.split(splitchar)[0:2] # Search for package, stripping version # (results in sth like 'gcc47-libs' or 'emacs24-nox11') - pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M) + pkg_search_obj = re.search(r"^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*", pkgname_with_version, re.M) # Do not proceed unless we have a match if not pkg_search_obj: @@ -193,9 +190,9 @@ def query_package(module, name): continue # The package was found; now return its state - if raw_state == '<': + if raw_state == "<": return PackageState.OUTDATED - elif raw_state == '=' or raw_state == '>': + elif raw_state == "=" or raw_state == ">": return PackageState.PRESENT else: # Package found but not installed @@ -210,8 +207,7 @@ def query_package(module, name): def format_action_message(module, action, count): - vars = {"actioned": action, - "count": count} + vars = {"actioned": action, "count": count} if module.check_mode: message = f"would have {vars['actioned']} {vars['count']} package" @@ -245,7 +241,6 @@ def format_pkgin_command(module, command, package=None): def remove_packages(module, packages): - remove_c = 0 # Using a for loop in case of error, we can report the package that failed @@ -254,8 +249,7 @@ def remove_packages(module, packages): if query_package(module, package) in [PackageState.NOT_INSTALLED, PackageState.NOT_FOUND]: continue - rc, out, err = module.run_command( - format_pkgin_command(module, "remove", package)) + rc, out, err = module.run_command(format_pkgin_command(module, "remove", package)) if not module.check_mode and query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]: module.fail_json(msg=f"failed to remove {package}: {out}", stdout=out, stderr=err) @@ -269,7 +263,6 @@ def remove_packages(module, packages): def install_packages(module, packages): - install_c = 0 for package in packages: @@ -279,26 +272,29 @@ def install_packages(module, packages): elif query_result is PackageState.NOT_FOUND: module.fail_json(msg=f"failed to find package {package} for installation") - rc, out, err = module.run_command( - format_pkgin_command(module, "install", package)) + rc, out, err = module.run_command(format_pkgin_command(module, "install", package)) - if not module.check_mode and not query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]: + if not module.check_mode and not query_package(module, package) in [ + PackageState.PRESENT, + PackageState.OUTDATED, + ]: module.fail_json(msg=f"failed to install {package}: {out}", stdout=out, stderr=err) install_c += 1 if install_c > 0: - module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c), stdout=out, stderr=err) + module.exit_json( + changed=True, msg=format_action_message(module, "installed", install_c), stdout=out, stderr=err + ) module.exit_json(changed=False, msg="package(s) already present") def update_package_db(module): - rc, out, 
err = module.run_command( - format_pkgin_command(module, "update")) + rc, out, err = module.run_command(format_pkgin_command(module, "update")) if rc == 0: - if re.search('database for.*is up-to-date\n$', out): + if re.search("database for.*is up-to-date\n$", out): return False, "database is up-to-date" else: return True, "updated repository database" @@ -312,11 +308,10 @@ def do_upgrade_packages(module, full=False): else: cmd = "upgrade" - rc, out, err = module.run_command( - format_pkgin_command(module, cmd)) + rc, out, err = module.run_command(format_pkgin_command(module, cmd)) if rc == 0: - if re.search('^(.*\n|)nothing to do.\n$', out): + if re.search("^(.*\n|)nothing to do.\n$", out): module.exit_json(changed=False, msg="nothing left to upgrade") else: module.fail_json(msg=f"could not {cmd} packages", stdout=out, stderr=err) @@ -331,8 +326,7 @@ def full_upgrade_packages(module): def clean_cache(module): - rc, out, err = module.run_command( - format_pkgin_command(module, "clean")) + rc, out, err = module.run_command(format_pkgin_command(module, "clean")) if rc == 0: # There's no indication if 'clean' actually removed anything, @@ -346,41 +340,43 @@ def main(): module = AnsibleModule( argument_spec=dict( state=dict(default="present", choices=["present", "absent"]), - name=dict(aliases=["pkg"], type='list', elements='str'), - update_cache=dict(default=False, type='bool'), - upgrade=dict(default=False, type='bool'), - full_upgrade=dict(default=False, type='bool'), - clean=dict(default=False, type='bool'), - force=dict(default=False, type='bool')), - required_one_of=[['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']], - supports_check_mode=True) + name=dict(aliases=["pkg"], type="list", elements="str"), + update_cache=dict(default=False, type="bool"), + upgrade=dict(default=False, type="bool"), + full_upgrade=dict(default=False, type="bool"), + clean=dict(default=False, type="bool"), + force=dict(default=False, type="bool"), + ), + required_one_of=[["name", "update_cache", "upgrade", "full_upgrade", "clean"]], + supports_check_mode=True, + ) global PKGIN_PATH - PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin']) + PKGIN_PATH = module.get_bin_path("pkgin", True, ["/opt/local/bin"]) - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C") p = module.params if p["update_cache"]: c, msg = update_package_db(module) - if not (p['name'] or p["upgrade"] or p["full_upgrade"]): + if not (p["name"] or p["upgrade"] or p["full_upgrade"]): module.exit_json(changed=c, msg=msg) if p["upgrade"]: upgrade_packages(module) - if not p['name']: - module.exit_json(changed=True, msg='upgraded packages') + if not p["name"]: + module.exit_json(changed=True, msg="upgraded packages") if p["full_upgrade"]: full_upgrade_packages(module) - if not p['name']: - module.exit_json(changed=True, msg='upgraded all packages') + if not p["name"]: + module.exit_json(changed=True, msg="upgraded all packages") if p["clean"]: clean_cache(module) - if not p['name']: - module.exit_json(changed=True, msg='cleaned caches') + if not p["name"]: + module.exit_json(changed=True, msg="cleaned caches") pkgs = p["name"] @@ -391,5 +387,5 @@ def main(): remove_packages(module, pkgs) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pkgng.py b/plugins/modules/pkgng.py index 5c6e75f82b3..ad1eb9ba757 100644 --- a/plugins/modules/pkgng.py +++ 
b/plugins/modules/pkgng.py @@ -150,26 +150,23 @@ def query_package(module, run_pkgng, name): - - rc, out, err = run_pkgng('info', '-e', name) + rc, out, err = run_pkgng("info", "-e", name) return rc == 0 def query_update(module, run_pkgng, name): - # Check to see if a package upgrade is available. # rc = 0, no updates available or package not installed # rc = 1, updates available - rc, out, err = run_pkgng('upgrade', '-n', name) + rc, out, err = run_pkgng("upgrade", "-n", name) return rc == 1 def pkgng_older_than(module, pkgng_path, compare_version): - - rc, out, err = module.run_command([pkgng_path, '-v']) - version = [int(x) for x in re.split(r'[\._]', out)] + rc, out, err = module.run_command([pkgng_path, "-v"]) + version = [int(x) for x in re.split(r"[\._]", out)] i = 0 new_pkgng = True @@ -187,11 +184,11 @@ def upgrade_packages(module, run_pkgng): # Run a 'pkg upgrade', updating all packages. upgraded_c = 0 - pkgng_args = ['upgrade'] - pkgng_args.append('-n' if module.check_mode else '-y') + pkgng_args = ["upgrade"] + pkgng_args.append("-n" if module.check_mode else "-y") rc, out, err = run_pkgng(*pkgng_args, check_rc=(not module.check_mode)) - matches = re.findall('^Number of packages to be (?:upgraded|reinstalled): ([0-9]+)', out, re.MULTILINE) + matches = re.findall("^Number of packages to be (?:upgraded|reinstalled): ([0-9]+)", out, re.MULTILINE) for match in matches: upgraded_c += int(match) @@ -211,7 +208,7 @@ def remove_packages(module, run_pkgng, packages): continue if not module.check_mode: - rc, out, err = run_pkgng('delete', '-y', package) + rc, out, err = run_pkgng("delete", "-y", package) stdout += out stderr += err @@ -233,7 +230,7 @@ def install_packages(module, run_pkgng, packages, cached, state): stderr = "" if not module.check_mode and not cached: - rc, out, err = run_pkgng('update') + rc, out, err = run_pkgng("update") stdout += out stderr += err if rc != 0: @@ -244,10 +241,7 @@ def install_packages(module, run_pkgng, packages, cached, state): if already_installed and state == "present": continue - if ( - already_installed and state == "latest" - and not query_update(module, run_pkgng, package) - ): + if already_installed and state == "latest" and not query_update(module, run_pkgng, package): continue if already_installed: @@ -256,7 +250,7 @@ def install_packages(module, run_pkgng, packages, cached, state): action_queue["install"].append(package) # install/upgrade all named packages with one pkg command - for (action, package_list) in action_queue.items(): + for action, package_list in action_queue.items(): if module.check_mode: # Do nothing, but count up how many actions # would be performed so that the changed/msg @@ -264,7 +258,7 @@ def install_packages(module, run_pkgng, packages, cached, state): action_count[action] += len(package_list) continue - pkgng_args = [action, '-U', '-y'] + package_list + pkgng_args = [action, "-U", "-y"] + package_list rc, out, err = run_pkgng(*pkgng_args) stdout += out stderr += err @@ -272,9 +266,9 @@ def install_packages(module, run_pkgng, packages, cached, state): # individually verify packages are in requested state for package in package_list: verified = False - if action == 'install': + if action == "install": verified = query_package(module, run_pkgng, package) - elif action == 'upgrade': + elif action == "upgrade": verified = not query_update(module, run_pkgng, package) if verified: @@ -283,21 +277,21 @@ def install_packages(module, run_pkgng, packages, cached, state): module.fail_json(msg=f"failed to {action} {package}", 
stdout=stdout, stderr=stderr) if sum(action_count.values()) > 0: - past_tense = {'install': 'installed', 'upgrade': 'upgraded'} + past_tense = {"install": "installed", "upgrade": "upgraded"} messages = [] - for (action, count) in action_count.items(): + for action, count in action_count.items(): messages.append(f"{past_tense.get(action, action)} {count} package{'s' if count != 1 else ''}") - return (True, '; '.join(messages), stdout, stderr) + return (True, "; ".join(messages), stdout, stderr) return (False, f"package(s) already {state}", stdout, stderr) def annotation_query(module, run_pkgng, package, tag): - rc, out, err = run_pkgng('info', '-A', package) - match = re.search(rf'^\s*(?P<tag>{tag})\s*:\s*(?P<value>\w+)', out, flags=re.MULTILINE) + rc, out, err = run_pkgng("info", "-A", package) + match = re.search(rf"^\s*(?P<tag>{tag})\s*:\s*(?P<value>\w+)", out, flags=re.MULTILINE) if match: - return match.group('value') + return match.group("value") return False @@ -306,14 +300,15 @@ def annotation_add(module, run_pkgng, package, tag, value): if not _value: # Annotation does not exist, add it. if not module.check_mode: - rc, out, err = run_pkgng('annotate', '-y', '-A', package, tag, data=value, binary_data=True) + rc, out, err = run_pkgng("annotate", "-y", "-A", package, tag, data=value, binary_data=True) if rc != 0: module.fail_json(msg=f"could not annotate {package}: {out}", stderr=err) return True elif _value != value: # Annotation exists, but value differs module.fail_json( - msg=f"failed to annotate {package}, because {tag} is already set to {_value}, but should be set to {value}") + msg=f"failed to annotate {package}, because {tag} is already set to {_value}, but should be set to {value}" + ) return False else: # Annotation exists, nothing to do @@ -324,7 +319,7 @@ def annotation_delete(module, run_pkgng, package, tag, value): _value = annotation_query(module, run_pkgng, package, tag) if _value: if not module.check_mode: - rc, out, err = run_pkgng('annotate', '-y', '-D', package, tag) + rc, out, err = run_pkgng("annotate", "-y", "-D", package, tag) if rc != 0: module.fail_json(msg=f"could not delete annotation to {package}: {out}", stderr=err) return True @@ -341,15 +336,17 @@ def annotation_modify(module, run_pkgng, package, tag, value): return False else: if not module.check_mode: - rc, out, err = run_pkgng('annotate', '-y', '-M', package, tag, data=value, binary_data=True) + rc, out, err = run_pkgng("annotate", "-y", "-M", package, tag, data=value, binary_data=True) # pkg sometimes exits with rc == 1, even though the modification succeeded # Check the output for a success message if ( rc != 0 - and re.search(rf'^{package}-[^:]+: Modified annotation tagged: {tag}', out, flags=re.MULTILINE) is None + and re.search(rf"^{package}-[^:]+: Modified annotation tagged: {tag}", out, flags=re.MULTILINE) is None ): - module.fail_json(msg=f"failed to annotate {package}, could not change annotation {tag} to {value}: {out}", stderr=err) + module.fail_json( + msg=f"failed to annotate {package}, could not change annotation {tag} to {value}: {out}", stderr=err + ) return True @@ -359,29 +356,22 @@ def annotate_packages(module, run_pkgng, packages, annotations): # Split on commas with optional trailing whitespace, # to support the old style of multiple annotations # on a single line, rather than YAML list syntax - annotations = re.split(r'\s*,\s*', annotations[0]) - operation = { - '+': annotation_add, - '-': annotation_delete, - ':': annotation_modify - } + annotations = re.split(r"\s*,\s*", annotations[0]) + operation
= {"+": annotation_add, "-": annotation_delete, ":": annotation_modify} for package in packages: for annotation_string in annotations: # Note to future maintainers: A dash (-) in a regex character class ([-+:] below) # must appear as the first character in the class, or it will be interpreted # as a range of characters. - annotation = \ - re.match(r'(?P[-+:])(?P[^=]+)(=(?P.+))?', annotation_string) + annotation = re.match(r"(?P[-+:])(?P[^=]+)(=(?P.+))?", annotation_string) if annotation is None: - module.fail_json( - msg=f"failed to annotate {package}, invalid annotate string: {annotation_string}" - ) + module.fail_json(msg=f"failed to annotate {package}, invalid annotate string: {annotation_string}") annotation = annotation.groupdict() - if operation[annotation['operation']](module, run_pkgng, package, annotation['tag'], annotation['value']): + if operation[annotation["operation"]](module, run_pkgng, package, annotation["tag"], annotation["value"]): annotate_c += 1 if annotate_c > 0: @@ -392,11 +382,11 @@ def annotate_packages(module, run_pkgng, packages, annotations): def autoremove_packages(module, run_pkgng): stdout = "" stderr = "" - rc, out, err = run_pkgng('autoremove', '-n') + rc, out, err = run_pkgng("autoremove", "-n") autoremove_c = 0 - match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE) + match = re.search("^Deinstallation has been requested for the following ([0-9]+) packages", out, re.MULTILINE) if match: autoremove_c = int(match.group(1)) @@ -404,7 +394,7 @@ def autoremove_packages(module, run_pkgng): return (False, "no package(s) to autoremove", stdout, stderr) if not module.check_mode: - rc, out, err = run_pkgng('autoremove', '-y') + rc, out, err = run_pkgng("autoremove", "-y") stdout += out stderr += err @@ -415,21 +405,22 @@ def main(): module = AnsibleModule( argument_spec=dict( state=dict(default="present", choices=["present", "latest", "absent"]), - name=dict(aliases=["pkg"], required=True, type='list', elements='str'), - cached=dict(default=False, type='bool'), - ignore_osver=dict(default=False, type='bool'), - annotation=dict(type='list', elements='str'), + name=dict(aliases=["pkg"], required=True, type="list", elements="str"), + cached=dict(default=False, type="bool"), + ignore_osver=dict(default=False, type="bool"), + annotation=dict(type="list", elements="str"), pkgsite=dict(), - rootdir=dict(type='path'), - chroot=dict(type='path'), - jail=dict(type='str'), - autoremove=dict(default=False, type='bool'), - use_globs=dict(default=True, type='bool'), + rootdir=dict(type="path"), + chroot=dict(type="path"), + jail=dict(type="str"), + autoremove=dict(default=False, type="bool"), + use_globs=dict(default=True, type="bool"), ), supports_check_mode=True, - mutually_exclusive=[["rootdir", "chroot", "jail"]]) + mutually_exclusive=[["rootdir", "chroot", "jail"]], + ) - pkgng_path = module.get_bin_path('pkg', True) + pkgng_path = module.get_bin_path("pkg", True) p = module.params @@ -466,27 +457,35 @@ def main(): def run_pkgng(action, *args, **kwargs): cmd = [pkgng_path, dir_arg, action] - if p["use_globs"] and action in ('info', 'install', 'upgrade',): - args = ('-g',) + args + if p["use_globs"] and action in ( + "info", + "install", + "upgrade", + ): + args = ("-g",) + args - pkgng_env = {'BATCH': 'yes'} + pkgng_env = {"BATCH": "yes"} if p["ignore_osver"]: - pkgng_env['IGNORE_OSVERSION'] = 'yes' + pkgng_env["IGNORE_OSVERSION"] = "yes" - if p['pkgsite'] is not None and action in ('update', 'install', 'upgrade',): + 
if p["pkgsite"] is not None and action in ( + "update", + "install", + "upgrade", + ): if repo_flag_not_supported: - pkgng_env['PACKAGESITE'] = p['pkgsite'] + pkgng_env["PACKAGESITE"] = p["pkgsite"] else: cmd.append(f"--repository={p['pkgsite']}") # If environ_update is specified to be "passed through" # to module.run_command, then merge its values into pkgng_env - pkgng_env.update(kwargs.pop('environ_update', dict())) + pkgng_env.update(kwargs.pop("environ_update", dict())) return module.run_command(cmd + list(args), environ_update=pkgng_env, **kwargs) - if pkgs == ['*'] and p["state"] == 'latest': + if pkgs == ["*"] and p["state"] == "latest": # Operate on all installed packages. Only state: latest makes sense here. _changed, _msg, _stdout, _stderr = upgrade_packages(module, run_pkgng) changed = changed or _changed @@ -500,11 +499,10 @@ def run_pkgng(action, *args, **kwargs): # with comma or space delimiters. That doesn't result in a YAML list, and # wrong actions (install vs upgrade) can be reported if those # comma- or space-delimited strings make it to the pkg command line. - pkgs = re.split(r'[,\s]', pkgs[0]) - named_packages = [pkg for pkg in pkgs if pkg != '*'] + pkgs = re.split(r"[,\s]", pkgs[0]) + named_packages = [pkg for pkg in pkgs if pkg != "*"] if p["state"] in ("present", "latest") and named_packages: - _changed, _msg, _out, _err = install_packages(module, run_pkgng, named_packages, - p["cached"], p["state"]) + _changed, _msg, _out, _err = install_packages(module, run_pkgng, named_packages, p["cached"], p["state"]) stdout += _out stderr += _err changed = changed or _changed @@ -532,5 +530,5 @@ def run_pkgng(action, *args, **kwargs): module.exit_json(changed=changed, msg=", ".join(msgs), stdout=stdout, stderr=stderr) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pkgutil.py b/plugins/modules/pkgutil.py index 3d4616bbcb6..2fb72a4df35 100644 --- a/plugins/modules/pkgutil.py +++ b/plugins/modules/pkgutil.py @@ -108,44 +108,44 @@ def packages_not_installed(module, names): - ''' Check if each package is installed and return list of the ones absent ''' + """Check if each package is installed and return list of the ones absent""" pkgs = [] for pkg in names: - rc, out, err = run_command(module, ['pkginfo', '-q', pkg]) + rc, out, err = run_command(module, ["pkginfo", "-q", pkg]) if rc != 0: pkgs.append(pkg) return pkgs def packages_installed(module, names): - ''' Check if each package is installed and return list of the ones present ''' + """Check if each package is installed and return list of the ones present""" pkgs = [] for pkg in names: - if not pkg.startswith('CSW'): + if not pkg.startswith("CSW"): continue - rc, out, err = run_command(module, ['pkginfo', '-q', pkg]) + rc, out, err = run_command(module, ["pkginfo", "-q", pkg]) if rc == 0: pkgs.append(pkg) return pkgs def packages_not_latest(module, names, site, update_catalog): - ''' Check status of each package and return list of the ones with an upgrade available ''' - cmd = ['pkgutil'] + """Check status of each package and return list of the ones with an upgrade available""" + cmd = ["pkgutil"] if update_catalog: - cmd.append('-U') - cmd.append('-c') + cmd.append("-U") + cmd.append("-c") if site is not None: - cmd.extend(['-t', site]) - if names != ['*']: + cmd.extend(["-t", site]) + if names != ["*"]: cmd.extend(names) rc, out, err = run_command(module, cmd) # Find packages in the catalog which are not up to date packages = [] - for line in out.split('\n')[1:-1]: - if 'catalog' 
-            packages.append(line.split(' ')[0])
+    for line in out.split("\n")[1:-1]:
+        if "catalog" not in line and "SAME" not in line:
+            packages.append(line.split(" ")[0])

     # Remove duplicates
     return list(set(packages))
@@ -153,45 +153,45 @@ def packages_not_latest(module, names, site, update_catalog):
 def run_command(module, cmd, **kwargs):
     progname = cmd[0]
-    cmd[0] = module.get_bin_path(progname, True, ['/opt/csw/bin'])
+    cmd[0] = module.get_bin_path(progname, True, ["/opt/csw/bin"])
     return module.run_command(cmd, **kwargs)


 def package_install(module, state, pkgs, site, update_catalog, force):
-    cmd = ['pkgutil']
+    cmd = ["pkgutil"]
     if module.check_mode:
-        cmd.append('-n')
-    cmd.append('-iy')
+        cmd.append("-n")
+    cmd.append("-iy")
     if update_catalog:
-        cmd.append('-U')
+        cmd.append("-U")
     if site is not None:
-        cmd.extend(['-t', site])
+        cmd.extend(["-t", site])
     if force:
-        cmd.append('-f')
+        cmd.append("-f")
     cmd.extend(pkgs)
     return run_command(module, cmd)


 def package_upgrade(module, pkgs, site, update_catalog, force):
-    cmd = ['pkgutil']
+    cmd = ["pkgutil"]
     if module.check_mode:
-        cmd.append('-n')
-    cmd.append('-uy')
+        cmd.append("-n")
+    cmd.append("-uy")
     if update_catalog:
-        cmd.append('-U')
+        cmd.append("-U")
     if site is not None:
-        cmd.extend(['-t', site])
+        cmd.extend(["-t", site])
     if force:
-        cmd.append('-f')
+        cmd.append("-f")
     cmd += pkgs
     return run_command(module, cmd)


 def package_uninstall(module, pkgs):
-    cmd = ['pkgutil']
+    cmd = ["pkgutil"]
     if module.check_mode:
-        cmd.append('-n')
-    cmd.append('-ry')
+        cmd.append("-n")
+    cmd.append("-ry")
     cmd.extend(pkgs)
     return run_command(module, cmd)
@@ -199,31 +199,31 @@ def package_uninstall(module, pkgs):
 def main():
     module = AnsibleModule(
         argument_spec=dict(
-            name=dict(type='list', elements='str', required=True, aliases=['pkg']),
-            state=dict(type='str', required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']),
-            site=dict(type='str'),
-            update_catalog=dict(type='bool', default=False),
-            force=dict(type='bool', default=False),
+            name=dict(type="list", elements="str", required=True, aliases=["pkg"]),
+            state=dict(type="str", required=True, choices=["absent", "installed", "latest", "present", "removed"]),
+            site=dict(type="str"),
+            update_catalog=dict(type="bool", default=False),
+            force=dict(type="bool", default=False),
         ),
         supports_check_mode=True,
     )

-    name = module.params['name']
-    state = module.params['state']
-    site = module.params['site']
-    update_catalog = module.params['update_catalog']
-    force = module.params['force']
+    name = module.params["name"]
+    state = module.params["state"]
+    site = module.params["site"]
+    update_catalog = module.params["update_catalog"]
+    force = module.params["force"]

     rc = None
-    out = ''
-    err = ''
+    out = ""
+    err = ""
     result = dict(
         name=name,
         state=state,
     )

-    if state in ['installed', 'present']:
+    if state in ["installed", "present"]:
         # Fail with an explicit error when trying to "install" '*'
-        if name == ['*']:
+        if name == ["*"]:
             module.fail_json(msg="Can not use 'state: present' with name: '*'")

         # Build list of packages that are actually not installed from the ones requested
@@ -237,9 +237,9 @@ def main():
             if rc != 0:
                 module.fail_json(msg=(err or out))

-    elif state in ['latest']:
+    elif state in ["latest"]:
         # When using latest for *
-        if name == ['*']:
+        if name == ["*"]:
             # Check for packages that are actually outdated
             pkgs = packages_not_latest(module, name, site, update_catalog)
@@ -266,7 +266,7 @@ def main():
             if rc != 0:
                module.fail_json(msg=(err or out))

-    elif state in ['absent', 'removed']:
+    elif state in ["absent", "removed"]:
         # Build list of packages requested for removal that are actually present
         pkgs = packages_installed(module, name)
@@ -280,20 +280,20 @@ def main():
     if rc is None:
         # pkgutil was not executed because the package was already present/absent/up to date
-        result['changed'] = False
+        result["changed"] = False
     elif rc == 0:
-        result['changed'] = True
+        result["changed"] = True
     else:
-        result['changed'] = False
-        result['failed'] = True
+        result["changed"] = False
+        result["failed"] = True

     if out:
-        result['stdout'] = out
+        result["stdout"] = out
     if err:
-        result['stderr'] = err
+        result["stderr"] = err

     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/pmem.py b/plugins/modules/pmem.py
index 72b40bde6cd..b7c14d307c1 100644
--- a/plugins/modules/pmem.py
+++ b/plugins/modules/pmem.py
@@ -224,63 +224,59 @@ class PersistentMemory:
     def __init__(self):
         module = AnsibleModule(
             argument_spec=dict(
-                appdirect=dict(type='int'),
-                appdirect_interleaved=dict(type='bool', default=True),
-                memorymode=dict(type='int'),
-                reserved=dict(type='int'),
+                appdirect=dict(type="int"),
+                appdirect_interleaved=dict(type="bool", default=True),
+                memorymode=dict(type="int"),
+                reserved=dict(type="int"),
                 socket=dict(
-                    type='list', elements='dict',
+                    type="list",
+                    elements="dict",
                     options=dict(
-                        id=dict(required=True, type='int'),
-                        appdirect=dict(required=True, type='int'),
-                        appdirect_interleaved=dict(type='bool', default=True),
-                        memorymode=dict(required=True, type='int'),
-                        reserved=dict(type='int'),
+                        id=dict(required=True, type="int"),
+                        appdirect=dict(required=True, type="int"),
+                        appdirect_interleaved=dict(type="bool", default=True),
+                        memorymode=dict(required=True, type="int"),
+                        reserved=dict(type="int"),
                     ),
                 ),
                 namespace=dict(
-                    type='list', elements='dict',
+                    type="list",
+                    elements="dict",
                     options=dict(
-                        mode=dict(required=True, type='str', choices=['raw', 'sector', 'fsdax', 'devdax']),
-                        type=dict(type='str', choices=['pmem', 'blk']),
-                        size=dict(type='str'),
+                        mode=dict(required=True, type="str", choices=["raw", "sector", "fsdax", "devdax"]),
+                        type=dict(type="str", choices=["pmem", "blk"]),
+                        size=dict(type="str"),
                     ),
                 ),
-                namespace_append=dict(type='bool', default=False),
-            ),
-            required_together=(
-                ['appdirect', 'memorymode'],
-            ),
-            required_one_of=(
-                ['appdirect', 'memorymode', 'socket', 'namespace'],
+                namespace_append=dict(type="bool", default=False),
             ),
+            required_together=(["appdirect", "memorymode"],),
+            required_one_of=(["appdirect", "memorymode", "socket", "namespace"],),
             mutually_exclusive=(
-                ['appdirect', 'socket'],
-                ['memorymode', 'socket'],
-                ['appdirect', 'namespace'],
-                ['memorymode', 'namespace'],
-                ['socket', 'namespace'],
-                ['appdirect', 'namespace_append'],
-                ['memorymode', 'namespace_append'],
-                ['socket', 'namespace_append'],
+                ["appdirect", "socket"],
+                ["memorymode", "socket"],
+                ["appdirect", "namespace"],
+                ["memorymode", "namespace"],
+                ["socket", "namespace"],
+                ["appdirect", "namespace_append"],
+                ["memorymode", "namespace_append"],
+                ["socket", "namespace_append"],
             ),
         )

         if not HAS_XMLTODICT_LIBRARY:
-            module.fail_json(
-                msg=missing_required_lib('xmltodict'),
-                exception=XMLTODICT_LIBRARY_IMPORT_ERROR)
+            module.fail_json(msg=missing_required_lib("xmltodict"), exception=XMLTODICT_LIBRARY_IMPORT_ERROR)

-        self.ipmctl_exec = module.get_bin_path('ipmctl', True)
-        self.ndctl_exec = module.get_bin_path('ndctl', True)
+        self.ipmctl_exec = module.get_bin_path("ipmctl", True)
+        self.ndctl_exec = module.get_bin_path("ndctl", True)

-        self.appdirect = module.params['appdirect']
-        self.interleaved = module.params['appdirect_interleaved']
-        self.memmode = module.params['memorymode']
-        self.reserved = module.params['reserved']
-        self.socket = module.params['socket']
-        self.namespace = module.params['namespace']
-        self.namespace_append = module.params['namespace_append']
+        self.appdirect = module.params["appdirect"]
+        self.interleaved = module.params["appdirect_interleaved"]
+        self.memmode = module.params["memorymode"]
+        self.reserved = module.params["reserved"]
+        self.socket = module.params["socket"]
+        self.namespace = module.params["namespace"]
+        self.namespace_append = module.params["namespace_append"]

         self.module = module
         self.changed = False
@@ -290,77 +286,75 @@ def pmem_run_command(self, command, returnCheck=True):
         # in case command[] has number
         cmd = [str(part) for part in command]

-        self.module.log(msg=f'pmem_run_command: execute: {cmd}')
+        self.module.log(msg=f"pmem_run_command: execute: {cmd}")
         rc, out, err = self.module.run_command(cmd)
-        self.module.log(msg=f'pmem_run_command: result: {out}')
+        self.module.log(msg=f"pmem_run_command: result: {out}")

         if returnCheck and rc != 0:
-            self.module.fail_json(msg=f'Error while running: {cmd}', rc=rc, out=out, err=err)
+            self.module.fail_json(msg=f"Error while running: {cmd}", rc=rc, out=out, err=err)

         return out

     def pmem_run_ipmctl(self, command, returnCheck=True):
-
         command = [self.ipmctl_exec] + command
         return self.pmem_run_command(command, returnCheck)

     def pmem_run_ndctl(self, command, returnCheck=True):
-
         command = [self.ndctl_exec] + command
         return self.pmem_run_command(command, returnCheck)

     def pmem_is_dcpmm_installed(self):
         # To check this system has dcpmm
-        command = ['show', '-system', '-capabilities']
+        command = ["show", "-system", "-capabilities"]
         return self.pmem_run_ipmctl(command)

     def pmem_get_region_align_size(self, region):
         aligns = []
         for rg in region:
-            if rg['align'] not in aligns:
-                aligns.append(rg['align'])
+            if rg["align"] not in aligns:
+                aligns.append(rg["align"])
         return aligns

     def pmem_get_available_region_size(self, region):
         available_size = []
         for rg in region:
-            available_size.append(rg['available_size'])
+            available_size.append(rg["available_size"])
         return available_size

     def pmem_get_available_region_type(self, region):
         types = []
         for rg in region:
-            if rg['type'] not in types:
-                types.append(rg['type'])
+            if rg["type"] not in types:
+                types.append(rg["type"])
         return types

     def pmem_argument_check(self):
         def namespace_check(self):
-            command = ['list', '-R']
+            command = ["list", "-R"]
             out = self.pmem_run_ndctl(command)
             if not out:
-                return 'Available region(s) is not in this system.'
+                return "Available region(s) is not in this system."

             region = json.loads(out)

             aligns = self.pmem_get_region_align_size(region)
             if len(aligns) != 1:
-                return 'Not supported the regions whose alignment size is different.'
+                return "Not supported the regions whose alignment size is different."
             available_size = self.pmem_get_available_region_size(region)
             types = self.pmem_get_available_region_type(region)

             for ns in self.namespace:
-                if ns['size']:
+                if ns["size"]:
                     try:
-                        size_byte = human_to_bytes(ns['size'])
+                        size_byte = human_to_bytes(ns["size"])
                     except ValueError:
-                        return 'The format of size: NNN TB|GB|MB|KB|T|G|M|K|B'
+                        return "The format of size: NNN TB|GB|MB|KB|T|G|M|K|B"

                     if size_byte % aligns[0] != 0:
                         return f"size: {ns['size']} should be align with {aligns[0]}"
@@ -375,41 +369,41 @@ def namespace_check(self):
                     if is_space_enough is False:
                         return f"There is not available region for size: {ns['size']}"

-                    ns['size_byte'] = size_byte
+                    ns["size_byte"] = size_byte

                 elif len(self.namespace) != 1:
-                    return 'size option is required to configure multiple namespaces'
+                    return "size option is required to configure multiple namespaces"

-                if ns['type'] not in types:
+                if ns["type"] not in types:
                     return f"type {ns['type']} is not supported in this system. Supported type: {types}"

             return None

         def percent_check(self, appdirect, memmode, reserved=None):
             if appdirect is None or (appdirect < 0 or appdirect > 100):
-                return 'appdirect percent should be from 0 to 100.'
+                return "appdirect percent should be from 0 to 100."

             if memmode is None or (memmode < 0 or memmode > 100):
-                return 'memorymode percent should be from 0 to 100.'
+                return "memorymode percent should be from 0 to 100."

             if reserved is None:
                 if appdirect + memmode > 100:
-                    return 'Total percent should be less equal 100.'
+                    return "Total percent should be less equal 100."
             else:
                 if reserved < 0 or reserved > 100:
-                    return 'reserved percent should be from 0 to 100.'
+                    return "reserved percent should be from 0 to 100."

                 if appdirect + memmode + reserved != 100:
-                    return 'Total percent should be 100.'
+                    return "Total percent should be 100."

         def socket_id_check(self):
-            command = ['show', '-o', 'nvmxml', '-socket']
+            command = ["show", "-o", "nvmxml", "-socket"]
             out = self.pmem_run_ipmctl(command)
-            sockets_dict = xmltodict.parse(out, dict_constructor=dict)['SocketList']['Socket']
+            sockets_dict = xmltodict.parse(out, dict_constructor=dict)["SocketList"]["Socket"]
             socket_ids = []
             for sl in sockets_dict:
-                socket_ids.append(int(sl['SocketID'], 16))
+                socket_ids.append(int(sl["SocketID"], 16))

             for skt in self.socket:
-                if skt['id'] not in socket_ids:
+                if skt["id"] not in socket_ids:
                     return f"Invalid socket number: {skt['id']}"

             return None
@@ -424,15 +418,14 @@ def socket_id_check(self):
                 return ret

         for skt in self.socket:
-            ret = percent_check(
-                self, skt['appdirect'], skt['memorymode'], skt['reserved'])
+            ret = percent_check(self, skt["appdirect"], skt["memorymode"], skt["reserved"])
             if ret is not None:
                 return ret

         return None

     def pmem_remove_namespaces(self):
-        command = ['list', '-N']
+        command = ["list", "-N"]
         out = self.pmem_run_ndctl(command)

         # There's nothing namespaces in this system. Nothing to do.
@@ -443,17 +436,17 @@ def pmem_remove_namespaces(self):
         # Disable and destroy all namespaces
         for ns in namespaces:
-            command = ['disable-namespace', ns['dev']]
+            command = ["disable-namespace", ns["dev"]]
             self.pmem_run_ndctl(command)

-            command = ['destroy-namespace', ns['dev']]
+            command = ["destroy-namespace", ns["dev"]]
             self.pmem_run_ndctl(command)

         return

     def pmem_delete_goal(self):
         # delete the goal request
-        command = ['delete', '-goal']
+        command = ["delete", "-goal"]
         self.pmem_run_ipmctl(command)

     def pmem_init_env(self):
@@ -463,16 +456,16 @@ def pmem_init_env(self):
         self.pmem_delete_goal()

     def pmem_get_capacity(self, skt=None):
-        command = ['show', '-d', 'Capacity', '-u', 'B', '-o', 'nvmxml', '-dimm']
+        command = ["show", "-d", "Capacity", "-u", "B", "-o", "nvmxml", "-dimm"]
         if skt:
-            command += ['-socket', skt['id']]
+            command += ["-socket", skt["id"]]

         out = self.pmem_run_ipmctl(command)
-        dimm_list = xmltodict.parse(out, dict_constructor=dict)['DimmList']['Dimm']
+        dimm_list = xmltodict.parse(out, dict_constructor=dict)["DimmList"]["Dimm"]
         capacity = 0
         for entry in dimm_list:
             for key, v in entry.items():
-                if key == 'Capacity':
+                if key == "Capacity":
                     capacity += int(v.split()[0])

         return capacity
@@ -482,11 +475,11 @@ def build_ipmctl_creation_opts(self, skt=None):
             ipmctl_opts = []

             if skt:
-                appdirect = skt['appdirect']
-                memmode = skt['memorymode']
-                reserved = skt['reserved']
-                socket_id = skt['id']
-                ipmctl_opts += ['-socket', socket_id]
+                appdirect = skt["appdirect"]
+                memmode = skt["memorymode"]
+                reserved = skt["reserved"]
+                socket_id = skt["id"]
+                ipmctl_opts += ["-socket", socket_id]
             else:
                 appdirect = self.appdirect
                 memmode = self.memmode
@@ -494,61 +487,59 @@
             if reserved is None:
                 res = 100 - memmode - appdirect
-                ipmctl_opts += [f'memorymode={memmode}', f'reserved={res}']
+                ipmctl_opts += [f"memorymode={memmode}", f"reserved={res}"]
             else:
-                ipmctl_opts += [f'memorymode={memmode}', f'reserved={reserved}']
+                ipmctl_opts += [f"memorymode={memmode}", f"reserved={reserved}"]

             if self.interleaved:
-                ipmctl_opts += ['PersistentMemoryType=AppDirect']
+                ipmctl_opts += ["PersistentMemoryType=AppDirect"]
             else:
-                ipmctl_opts += ['PersistentMemoryType=AppDirectNotInterleaved']
+                ipmctl_opts += ["PersistentMemoryType=AppDirectNotInterleaved"]

             return ipmctl_opts

         def is_allocation_good(self, ipmctl_out, command):
-            warning = re.compile('WARNING')
-            error = re.compile('.*Error.*')
-            ignore_error = re.compile(
-                'Do you want to continue? [y/n] Error: Invalid data input.')
+            warning = re.compile("WARNING")
+            error = re.compile(".*Error.*")
+            ignore_error = re.compile("Do you want to continue? [y/n] Error: Invalid data input.")
[y/n] Error: Invalid data input.") - errmsg = '' + errmsg = "" rc = True for line in ipmctl_out.splitlines(): if warning.match(line): - errmsg = f'{line} (command: {command})' + errmsg = f"{line} (command: {command})" rc = False break elif error.match(line): if not ignore_error: - errmsg = f'{line} (command: {command})' + errmsg = f"{line} (command: {command})" rc = False break return rc, errmsg def get_allocation_result(self, goal, skt=None): - ret = {'appdirect': 0, 'memorymode': 0} + ret = {"appdirect": 0, "memorymode": 0} if skt: - ret['socket'] = skt['id'] + ret["socket"] = skt["id"] - out = xmltodict.parse(goal, dict_constructor=dict)['ConfigGoalList']['ConfigGoal'] + out = xmltodict.parse(goal, dict_constructor=dict)["ConfigGoalList"]["ConfigGoal"] for entry in out: - # Probably it is a bug of ipmctl to show the socket goal # which isn't specified by the -socket option. # Anyway, filter the noise out here: - if skt and skt['id'] != int(entry['SocketID'], 16): + if skt and skt["id"] != int(entry["SocketID"], 16): continue for key, v in entry.items(): - if key == 'MemorySize': - ret['memorymode'] += int(v.split()[0]) - elif key == 'AppDirect1Size' or key == 'AapDirect2Size': - ret['appdirect'] += int(v.split()[0]) + if key == "MemorySize": + ret["memorymode"] += int(v.split()[0]) + elif key == "AppDirect1Size" or key == "AapDirect2Size": + ret["appdirect"] += int(v.split()[0]) capacity = self.pmem_get_capacity(skt) - ret['reserved'] = capacity - ret['appdirect'] - ret['memorymode'] + ret["reserved"] = capacity - ret["appdirect"] - ret["memorymode"] return ret @@ -557,26 +548,26 @@ def get_allocation_result(self, goal, skt=None): ipmctl_opts = build_ipmctl_creation_opts(self, skt) # First, do dry run ipmctl create command to check the error and warning. 
-        command = ['create', '-goal'] + ipmctl_opts
+        command = ["create", "-goal"] + ipmctl_opts
         out = self.pmem_run_ipmctl(command, returnCheck=False)
         rc, errmsg = is_allocation_good(self, out, command)
         if rc is False:
             return reboot_required, {}, errmsg

         # Run actual creation here
-        command = ['create', '-u', 'B', '-o', 'nvmxml', '-force', '-goal'] + ipmctl_opts
+        command = ["create", "-u", "B", "-o", "nvmxml", "-force", "-goal"] + ipmctl_opts
         goal = self.pmem_run_ipmctl(command)
         ret = get_allocation_result(self, goal, skt)
         reboot_required = True

-        return reboot_required, ret, ''
+        return reboot_required, ret, ""

     def pmem_config_namespaces(self, namespace):
-        command = ['create-namespace', '-m', namespace['mode']]
-        if namespace['type']:
-            command += ['-t', namespace['type']]
-        if 'size_byte' in namespace:
-            command += ['-s', namespace['size_byte']]
+        command = ["create-namespace", "-m", namespace["mode"]]
+        if namespace["type"]:
+            command += ["-t", namespace["type"]]
+        if "size_byte" in namespace:
+            command += ["-s", namespace["size_byte"]]

         self.pmem_run_ndctl(command)
@@ -584,7 +575,6 @@ def pmem_config_namespaces(self, namespace):
 def main():
-
     pmem = PersistentMemory()

     pmem.pmem_is_dcpmm_installed()
@@ -600,7 +590,7 @@ def main():
         for ns in pmem.namespace:
             pmem.pmem_config_namespaces(ns)

-        command = ['list', '-N']
+        command = ["list", "-N"]
         out = pmem.pmem_run_ndctl(command)
         all_ns = json.loads(out)
@@ -623,12 +613,8 @@ def main():
             pmem.result.append(skt_ret)

-    pmem.module.exit_json(
-        changed=pmem.changed,
-        reboot_required=reboot_required,
-        result=pmem.result
-    )
+    pmem.module.exit_json(changed=pmem.changed, reboot_required=reboot_required, result=pmem.result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/pnpm.py b/plugins/modules/pnpm.py
index 8b5611299e9..262c33a2ee1 100644
--- a/plugins/modules/pnpm.py
+++ b/plugins/modules/pnpm.py
@@ -217,12 +217,8 @@ def _exec(self, args, run_in_check_mode=False, check_rc=True):
         if not os.path.isdir(self.path):
             self.module.fail_json(msg=f"Path {self.path} is not a directory")

-        if not self.alias_name_ver and not os.path.isfile(
-            os.path.join(self.path, "package.json")
-        ):
-            self.module.fail_json(
-                msg="package.json does not exist in provided path"
-            )
+        if not self.alias_name_ver and not os.path.isfile(os.path.join(self.path, "package.json")):
+            self.module.fail_json(msg="package.json does not exist in provided path")

         cwd = self.path
@@ -247,9 +243,7 @@ def missing(self):
             data = json.loads(out)
         except Exception as e:
-            self.module.fail_json(
-                msg=f"Failed to parse pnpm output with error {e}"
-            )
+            self.module.fail_json(msg=f"Failed to parse pnpm output with error {e}")

         if "error" in data:
             return True
@@ -324,9 +318,7 @@ def list_outdated(self):
             data = json.loads(out)
         except Exception as e:
-            self.module.fail_json(
-                msg=f"Failed to parse pnpm output with error {e}"
-            )
+            self.module.fail_json(msg=f"Failed to parse pnpm output with error {e}")

         return data.keys()
@@ -377,17 +369,13 @@ def main():
         module.fail_json(msg="Cannot specify path when doing global installation")

     if globally and (production or dev or optional):
-        module.fail_json(
-            msg="Options production, dev, and optional is meaningless when installing packages globally"
-        )
+        module.fail_json(msg="Options production, dev, and optional is meaningless when installing packages globally")

     if name is not None and path is not None and globally:
         module.fail_json(msg="path should not be mentioned when installing globally")

     if production and dev and optional:
-        module.fail_json(
-            msg="Options production and dev and optional don't go together"
-        )
+        module.fail_json(msg="Options production and dev and optional don't go together")

     if production and dev:
         module.fail_json(msg="Options production and dev don't go together")
@@ -402,9 +390,7 @@ def main():
         module.fail_json(msg="Semver not supported on remote url downloads")

     if name is None and optional:
-        module.fail_json(
-            msg="Optional not available when package name not provided, use no_optional instead"
-        )
+        module.fail_json(msg="Optional not available when package name not provided, use no_optional instead")

     if state == "absent" and name is None:
         module.fail_json(msg="Package name is required for uninstalling")
diff --git a/plugins/modules/portage.py b/plugins/modules/portage.py
index b9653801261..13e76fbeb20 100644
--- a/plugins/modules/portage.py
+++ b/plugins/modules/portage.py
@@ -258,6 +258,7 @@
 try:
     from portage.dbapi import vartree
     from portage.exception import InvalidAtom
+
     HAS_PORTAGE = True
     PORTAGE_IMPORT_ERROR = None
 except ImportError:
@@ -266,7 +267,7 @@

 def query_package(module, package, action):
-    if package.startswith('@'):
+    if package.startswith("@"):
         return query_set(module, package, action)
     return query_atom(module, package, action)
@@ -282,26 +283,26 @@ def query_atom(module, atom, action):

 def query_set(module, set_, action):
     system_sets = [
-        '@live-rebuild',
-        '@module-rebuild',
-        '@preserved-rebuild',
-        '@security',
-        '@selected',
-        '@system',
-        '@world',
-        '@x11-module-rebuild',
+        "@live-rebuild",
+        "@module-rebuild",
+        "@preserved-rebuild",
+        "@security",
+        "@selected",
+        "@system",
+        "@world",
+        "@x11-module-rebuild",
     ]

     if set_ in system_sets:
-        if action == 'unmerge':
-            module.fail_json(msg=f'set {set_} cannot be removed')
+        if action == "unmerge":
+            module.fail_json(msg=f"set {set_} cannot be removed")
         return False

-    world_sets_path = '/var/lib/portage/world_sets'
+    world_sets_path = "/var/lib/portage/world_sets"
     if not os.path.exists(world_sets_path):
         return False

-    cmd = ['grep', set_, world_sets_path]
+    cmd = ["grep", set_, world_sets_path]
     rc, out, err = module.run_command(cmd)
     return rc == 0
@@ -309,17 +310,17 @@ def query_set(module, set_, action):
 def sync_repositories(module, webrsync=False):
     if module.check_mode:
-        module.exit_json(msg='check mode not supported by sync')
+        module.exit_json(msg="check mode not supported by sync")

     if webrsync:
-        webrsync_path = module.get_bin_path('emerge-webrsync', required=True)
-        cmd = [webrsync_path, '--quiet']
+        webrsync_path = module.get_bin_path("emerge-webrsync", required=True)
+        cmd = [webrsync_path, "--quiet"]
     else:
-        cmd = [module.emerge_path, '--sync', '--quiet', '--ask=n']
+        cmd = [module.emerge_path, "--sync", "--quiet", "--ask=n"]

     rc, out, err = module.run_command(cmd)
     if rc != 0:
-        module.fail_json(msg='could not sync package repositories')
+        module.fail_json(msg="could not sync package repositories")


 # Note: In the 3 functions below, package querying is done one-by-one,
@@ -332,48 +333,53 @@ def emerge_packages(module, packages):
     """Run emerge command against given list of atoms."""
     p = module.params

-    if p['noreplace'] and not p['changed_use'] and not p['newuse'] and not (p['update'] or p['state'] == 'latest'):
+    if p["noreplace"] and not p["changed_use"] and not p["newuse"] and not (p["update"] or p["state"] == "latest"):
         for package in packages:
-            if p['noreplace'] and not p['changed_use'] and not p['newuse'] and not query_package(module, package, 'emerge'):
+            if (
+                p["noreplace"]
p["changed_use"] + and not p["newuse"] + and not query_package(module, package, "emerge") + ): break else: - module.exit_json(changed=False, msg='Packages already present.') + module.exit_json(changed=False, msg="Packages already present.") if module.check_mode: - module.exit_json(changed=True, msg='Packages would be installed.') + module.exit_json(changed=True, msg="Packages would be installed.") args = [] emerge_flags = { - 'update': '--update', - 'deep': '--deep', - 'newuse': '--newuse', - 'changed_use': '--changed-use', - 'oneshot': '--oneshot', - 'noreplace': '--noreplace', - 'nodeps': '--nodeps', - 'onlydeps': '--onlydeps', - 'quiet': '--quiet', - 'verbose': '--verbose', - 'getbinpkgonly': '--getbinpkgonly', - 'getbinpkg': '--getbinpkg', - 'usepkgonly': '--usepkgonly', - 'usepkg': '--usepkg', - 'keepgoing': '--keep-going', - 'quietbuild': '--quiet-build', - 'quietfail': '--quiet-fail', + "update": "--update", + "deep": "--deep", + "newuse": "--newuse", + "changed_use": "--changed-use", + "oneshot": "--oneshot", + "noreplace": "--noreplace", + "nodeps": "--nodeps", + "onlydeps": "--onlydeps", + "quiet": "--quiet", + "verbose": "--verbose", + "getbinpkgonly": "--getbinpkgonly", + "getbinpkg": "--getbinpkg", + "usepkgonly": "--usepkgonly", + "usepkg": "--usepkg", + "keepgoing": "--keep-going", + "quietbuild": "--quiet-build", + "quietfail": "--quiet-fail", } for flag, arg in emerge_flags.items(): if p[flag]: args.append(arg) - if p['state'] and p['state'] == 'latest': + if p["state"] and p["state"] == "latest": args.append("--update") emerge_flags = { - 'jobs': '--jobs', - 'loadavg': '--load-average', - 'backtrack': '--backtrack', - 'withbdeps': '--with-bdeps', - 'select': '--select', + "jobs": "--jobs", + "loadavg": "--load-average", + "backtrack": "--backtrack", + "withbdeps": "--with-bdeps", + "select": "--select", } for flag, arg in emerge_flags.items(): @@ -385,7 +391,7 @@ def emerge_packages(module, packages): """Add the --flag=value pair.""" if isinstance(flag_val, bool): - args.extend((arg, to_native('y' if flag_val else 'n'))) + args.extend((arg, to_native("y" if flag_val else "n"))) elif not flag_val: """If the value is 0 or 0.0: add the flag, but not the value.""" args.append(arg) @@ -395,34 +401,42 @@ def emerge_packages(module, packages): cmd, (rc, out, err) = run_emerge(module, packages, *args) if rc != 0: module.fail_json( - cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Packages not installed.', + cmd=cmd, + rc=rc, + stdout=out, + stderr=err, + msg="Packages not installed.", ) # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite # this error - if (p['usepkgonly'] or p['getbinpkg'] or p['getbinpkgonly']) \ - and 'Permission denied (publickey).' in err: + if (p["usepkgonly"] or p["getbinpkg"] or p["getbinpkgonly"]) and "Permission denied (publickey)." in err: module.fail_json( - cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Please check your PORTAGE_BINHOST configuration in make.conf ' - 'and your SSH authorized_keys file', + cmd=cmd, + rc=rc, + stdout=out, + stderr=err, + msg="Please check your PORTAGE_BINHOST configuration in make.conf and your SSH authorized_keys file", ) changed = True for line in out.splitlines(): - if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line): - msg = 'Packages installed.' + if re.match(r"(?:>+) Emerging (?:binary )?\(1 of", line): + msg = "Packages installed." break - elif module.check_mode and re.match(r'\[(binary|ebuild)', line): - msg = 'Packages would be installed.' 
+        elif module.check_mode and re.match(r"\[(binary|ebuild)", line):
+            msg = "Packages would be installed."
             break
     else:
         changed = False
-        msg = 'No packages installed.'
+        msg = "No packages installed."

     module.exit_json(
-        changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
+        changed=changed,
+        cmd=cmd,
+        rc=rc,
+        stdout=out,
+        stderr=err,
         msg=msg,
     )
@@ -431,28 +445,35 @@ def unmerge_packages(module, packages):
     p = module.params

     for package in packages:
-        if query_package(module, package, 'unmerge'):
+        if query_package(module, package, "unmerge"):
             break
     else:
-        module.exit_json(changed=False, msg='Packages already absent.')
+        module.exit_json(changed=False, msg="Packages already absent.")

-    args = ['--unmerge']
+    args = ["--unmerge"]

-    for flag in ['quiet', 'verbose']:
+    for flag in ["quiet", "verbose"]:
         if p[flag]:
-            args.append(f'--{flag}')
+            args.append(f"--{flag}")

     cmd, (rc, out, err) = run_emerge(module, packages, *args)
     if rc != 0:
         module.fail_json(
-            cmd=cmd, rc=rc, stdout=out, stderr=err,
-            msg='Packages not removed.',
+            cmd=cmd,
+            rc=rc,
+            stdout=out,
+            stderr=err,
+            msg="Packages not removed.",
         )

     module.exit_json(
-        changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err,
-        msg='Packages removed.',
+        changed=True,
+        cmd=cmd,
+        rc=rc,
+        stdout=out,
+        stderr=err,
+        msg="Packages removed.",
     )
@@ -461,16 +482,16 @@ def cleanup_packages(module, packages):
     if packages:
         for package in packages:
-            if query_package(module, package, 'unmerge'):
+            if query_package(module, package, "unmerge"):
                 break
         else:
-            module.exit_json(changed=False, msg='Packages already absent.')
+            module.exit_json(changed=False, msg="Packages already absent.")

-    args = ['--depclean']
+    args = ["--depclean"]

-    for flag in ['quiet', 'verbose']:
+    for flag in ["quiet", "verbose"]:
         if p[flag]:
-            args.append(f'--{flag}')
+            args.append(f"--{flag}")

     cmd, (rc, out, err) = run_emerge(module, packages, *args)
     if rc != 0:
@@ -478,111 +499,114 @@ def cleanup_packages(module, packages):
     removed = 0
     for line in out.splitlines():
-        if not line.startswith('Number removed:'):
+        if not line.startswith("Number removed:"):
             continue
-        parts = line.split(':')
+        parts = line.split(":")
         removed = int(parts[1].strip())
     changed = removed > 0

     module.exit_json(
-        changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
-        msg='Depclean completed.',
+        changed=changed,
+        cmd=cmd,
+        rc=rc,
+        stdout=out,
+        stderr=err,
+        msg="Depclean completed.",
     )


 def run_emerge(module, packages, *args):
     args = list(args)

-    args.append('--ask=n')
+    args.append("--ask=n")
     if module.check_mode:
-        args.append('--pretend')
+        args.append("--pretend")

     cmd = [module.emerge_path] + args + packages
     return cmd, module.run_command(cmd)


-portage_present_states = ['present', 'emerged', 'installed', 'latest']
-portage_absent_states = ['absent', 'unmerged', 'removed']
+portage_present_states = ["present", "emerged", "installed", "latest"]
+portage_absent_states = ["absent", "unmerged", "removed"]


 def main():
     module = AnsibleModule(
         argument_spec=dict(
-            package=dict(type='list', elements='str', aliases=['name']),
+            package=dict(type="list", elements="str", aliases=["name"]),
             state=dict(
                 default=portage_present_states[0],
                 choices=portage_present_states + portage_absent_states,
             ),
-            update=dict(default=False, type='bool'),
-            backtrack=dict(type='int'),
-            deep=dict(default=False, type='bool'),
-            newuse=dict(default=False, type='bool'),
-            changed_use=dict(default=False, type='bool'),
-            oneshot=dict(default=False, type='bool'),
-            noreplace=dict(default=True, type='bool'),
-            nodeps=dict(default=False, type='bool'),
-            onlydeps=dict(default=False, type='bool'),
-            depclean=dict(default=False, type='bool'),
-            select=dict(type='bool'),
-            quiet=dict(default=False, type='bool'),
-            verbose=dict(default=False, type='bool'),
-            sync=dict(choices=['yes', 'web', 'no']),
-            getbinpkgonly=dict(default=False, type='bool'),
-            getbinpkg=dict(default=False, type='bool'),
-            usepkgonly=dict(default=False, type='bool'),
-            usepkg=dict(default=False, type='bool'),
-            keepgoing=dict(default=False, type='bool'),
-            jobs=dict(type='int'),
-            loadavg=dict(type='float'),
-            withbdeps=dict(type='bool'),
-            quietbuild=dict(default=False, type='bool'),
-            quietfail=dict(default=False, type='bool'),
+            update=dict(default=False, type="bool"),
+            backtrack=dict(type="int"),
+            deep=dict(default=False, type="bool"),
+            newuse=dict(default=False, type="bool"),
+            changed_use=dict(default=False, type="bool"),
+            oneshot=dict(default=False, type="bool"),
+            noreplace=dict(default=True, type="bool"),
+            nodeps=dict(default=False, type="bool"),
+            onlydeps=dict(default=False, type="bool"),
+            depclean=dict(default=False, type="bool"),
+            select=dict(type="bool"),
+            quiet=dict(default=False, type="bool"),
+            verbose=dict(default=False, type="bool"),
+            sync=dict(choices=["yes", "web", "no"]),
+            getbinpkgonly=dict(default=False, type="bool"),
+            getbinpkg=dict(default=False, type="bool"),
+            usepkgonly=dict(default=False, type="bool"),
+            usepkg=dict(default=False, type="bool"),
+            keepgoing=dict(default=False, type="bool"),
+            jobs=dict(type="int"),
+            loadavg=dict(type="float"),
+            withbdeps=dict(type="bool"),
+            quietbuild=dict(default=False, type="bool"),
+            quietfail=dict(default=False, type="bool"),
         ),
-        required_one_of=[['package', 'sync', 'depclean']],
+        required_one_of=[["package", "sync", "depclean"]],
         mutually_exclusive=[
-            ['nodeps', 'onlydeps'],
-            ['quiet', 'verbose'],
-            ['quietbuild', 'verbose'],
-            ['quietfail', 'verbose'],
-            ['oneshot', 'select'],
+            ["nodeps", "onlydeps"],
+            ["quiet", "verbose"],
+            ["quietbuild", "verbose"],
+            ["quietfail", "verbose"],
+            ["oneshot", "select"],
         ],
         supports_check_mode=True,
     )

     if not HAS_PORTAGE:
-        if sys.executable != '/usr/bin/python' and not has_respawned():
-            respawn_module('/usr/bin/python')
+        if sys.executable != "/usr/bin/python" and not has_respawned():
+            respawn_module("/usr/bin/python")
         else:
-            module.fail_json(msg=missing_required_lib('portage'),
-                             exception=PORTAGE_IMPORT_ERROR)
+            module.fail_json(msg=missing_required_lib("portage"), exception=PORTAGE_IMPORT_ERROR)

-    module.emerge_path = module.get_bin_path('emerge', required=True)
+    module.emerge_path = module.get_bin_path("emerge", required=True)

     p = module.params

-    if p['sync'] and p['sync'].strip() != 'no':
-        sync_repositories(module, webrsync=(p['sync'] == 'web'))
-        if not p['package']:
-            module.exit_json(msg='Sync successfully finished.')
+    if p["sync"] and p["sync"].strip() != "no":
+        sync_repositories(module, webrsync=(p["sync"] == "web"))
+        if not p["package"]:
+            module.exit_json(msg="Sync successfully finished.")

     packages = []
-    if p['package']:
-        packages.extend(p['package'])
+    if p["package"]:
+        packages.extend(p["package"])

-    if p['depclean']:
-        if packages and p['state'] not in portage_absent_states:
+    if p["depclean"]:
+        if packages and p["state"] not in portage_absent_states:
             module.fail_json(
-                msg=f'Depclean can only be used with package when the state is one of: {portage_absent_states}',
+                msg=f"Depclean can only be used with package when the state is one of: {portage_absent_states}",
             )
         cleanup_packages(module, packages)

-    elif p['state'] in portage_present_states:
+    elif p["state"] in portage_present_states:
         emerge_packages(module, packages)
-    elif p['state'] in portage_absent_states:
+    elif p["state"] in portage_absent_states:
         unmerge_packages(module, packages)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/portinstall.py b/plugins/modules/portinstall.py
index 3f13d3b66c4..e05005c744c 100644
--- a/plugins/modules/portinstall.py
+++ b/plugins/modules/portinstall.py
@@ -69,19 +69,18 @@

 def query_package(module, name):
-
-    pkg_info_path = module.get_bin_path('pkg_info', False)
+    pkg_info_path = module.get_bin_path("pkg_info", False)

     # Assume that if we have pkg_info, we haven't upgraded to pkgng
     if pkg_info_path:
         pkgng = False
-        pkg_glob_path = module.get_bin_path('pkg_glob', True)
+        pkg_glob_path = module.get_bin_path("pkg_glob", True)
         # TODO: convert run_comand() argument to list!
         rc, out, err = module.run_command(f"{pkg_info_path} -e `pkg_glob {shlex_quote(name)}`", use_unsafe_shell=True)
         pkg_info_path = [pkg_info_path]
     else:
         pkgng = True
-        pkg_info_path = [module.get_bin_path('pkg', True), "info"]
+        pkg_info_path = [module.get_bin_path("pkg", True), "info"]
         rc, out, err = module.run_command(pkg_info_path + [name])

     found = rc == 0
@@ -90,7 +89,7 @@ def query_package(module, name):
         # databases/mysql55-client installs as mysql-client, so try solving
         # that the ugly way. Pity FreeBSD doesn't have a fool proof way of checking
         # some package is installed
-        name_without_digits = re.sub('[0-9]', '', name)
+        name_without_digits = re.sub("[0-9]", "", name)
         if name != name_without_digits:
             rc, out, err = module.run_command(pkg_info_path + [name_without_digits])
@@ -100,28 +99,26 @@ def query_package(module, name):

 def matching_packages(module, name):
-
-    ports_glob_path = module.get_bin_path('ports_glob', True)
+    ports_glob_path = module.get_bin_path("ports_glob", True)
     rc, out, err = module.run_command([ports_glob_path, name])

     # counts the number of packages found
-    occurrences = out.count('\n')
+    occurrences = out.count("\n")
     if occurrences == 0:
-        name_without_digits = re.sub('[0-9]', '', name)
+        name_without_digits = re.sub("[0-9]", "", name)
         if name != name_without_digits:
             rc, out, err = module.run_command([ports_glob_path, name_without_digits])
-            occurrences = out.count('\n')
+            occurrences = out.count("\n")
     return occurrences


 def remove_packages(module, packages):
-
     remove_c = 0
-    pkg_glob_path = module.get_bin_path('pkg_glob', True)
+    pkg_glob_path = module.get_bin_path("pkg_glob", True)

     # If pkg_delete not found, we assume pkgng
-    pkg_delete_path = module.get_bin_path('pkg_delete', False)
+    pkg_delete_path = module.get_bin_path("pkg_delete", False)
     if not pkg_delete_path:
-        pkg_delete_path = module.get_bin_path('pkg', True)
+        pkg_delete_path = module.get_bin_path("pkg", True)
         pkg_delete_path = f"{pkg_delete_path} delete -y"

     # Using a for loop in case of error, we can report the package that failed
@@ -131,36 +128,37 @@ def remove_packages(module, packages):
             continue

         # TODO: convert run_comand() argument to list!
-        rc, out, err = module.run_command(f"{pkg_delete_path} `{pkg_glob_path} {shlex_quote(package)}`", use_unsafe_shell=True)
+        rc, out, err = module.run_command(
+            f"{pkg_delete_path} `{pkg_glob_path} {shlex_quote(package)}`", use_unsafe_shell=True
+        )

         if query_package(module, package):
-            name_without_digits = re.sub('[0-9]', '', package)
+            name_without_digits = re.sub("[0-9]", "", package)
             # TODO: convert run_comand() argument to list!
-            rc, out, err = module.run_command(f"{pkg_delete_path} `{pkg_glob_path} {shlex_quote(name_without_digits)}`",
-                                              use_unsafe_shell=True)
+            rc, out, err = module.run_command(
+                f"{pkg_delete_path} `{pkg_glob_path} {shlex_quote(name_without_digits)}`", use_unsafe_shell=True
+            )
             if query_package(module, package):
                 module.fail_json(msg=f"failed to remove {package}: {out}")

         remove_c += 1

     if remove_c > 0:
-
         module.exit_json(changed=True, msg=f"removed {remove_c} package(s)")

     module.exit_json(changed=False, msg="package(s) already absent")


 def install_packages(module, packages, use_packages):
-
     install_c = 0

     # If portinstall not found, automagically install
-    portinstall_path = module.get_bin_path('portinstall', False)
+    portinstall_path = module.get_bin_path("portinstall", False)
     if not portinstall_path:
-        pkg_path = module.get_bin_path('pkg', False)
+        pkg_path = module.get_bin_path("pkg", False)
         if pkg_path:
             module.run_command([pkg_path, "install", "-y", "portupgrade"])
-        portinstall_path = module.get_bin_path('portinstall', True)
+        portinstall_path = module.get_bin_path("portinstall", True)

     if use_packages:
         portinstall_params = ["--use-packages"]
@@ -195,7 +193,9 @@ def main():
         argument_spec=dict(
             state=dict(default="present", choices=["present", "absent"]),
             name=dict(aliases=["pkg"], required=True),
-            use_packages=dict(type='bool', default=True)))
+            use_packages=dict(type="bool", default=True),
+        )
+    )

     p = module.params
@@ -208,5 +208,5 @@ def main():
         remove_packages(module, pkgs)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/pritunl_org_info.py b/plugins/modules/pritunl_org_info.py
index 9d2c4dbc9d9..616b62834ec 100644
--- a/plugins/modules/pritunl_org_info.py
+++ b/plugins/modules/pritunl_org_info.py
@@ -109,11 +109,7 @@ def get_pritunl_organizations(module):
 def main():
     argument_spec = pritunl_argument_spec()

-    argument_spec.update(
-        dict(
-            organization=dict(type="str", aliases=["org"])
-        )
-    )
+    argument_spec.update(dict(organization=dict(type="str", aliases=["org"])))

     module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
diff --git a/plugins/modules/pritunl_user.py b/plugins/modules/pritunl_user.py
index f25c1042b2b..79080043233 100644
--- a/plugins/modules/pritunl_user.py
+++ b/plugins/modules/pritunl_user.py
@@ -184,9 +184,7 @@ def add_or_update_pritunl_user(module):
     )

     if len(org_obj_list) == 0:
-        module.fail_json(
-            msg=f"Can not add user to organization '{org_name}' which does not exist"
-        )
+        module.fail_json(msg=f"Can not add user to organization '{org_name}' which does not exist")

     org_id = org_obj_list[0]["id"]
@@ -274,9 +272,7 @@ def remove_pritunl_user(module):
     )

     if len(org_obj_list) == 0:
-        module.fail_json(
-            msg=f"Can not remove user '{user_name}' from a non existing organization '{org_name}'"
-        )
+        module.fail_json(msg=f"Can not remove user '{user_name}' from a non existing organization '{org_name}'")

     org_id = org_obj_list[0]["id"]
diff --git a/plugins/modules/pritunl_user_info.py b/plugins/modules/pritunl_user_info.py
index 83445c60b12..a89886a0415 100644
--- a/plugins/modules/pritunl_user_info.py
+++ b/plugins/modules/pritunl_user_info.py
@@ -115,9 +115,7 @@ def get_pritunl_user(module):
     )

     if len(org_obj_list) == 0:
-        module.fail_json(
-            msg=f"Can not list users from the organization '{org_name}' which does not exist"
-        )
+        module.fail_json(msg=f"Can not list users from the organization '{org_name}' which does not exist")

     org_id = org_obj_list[0]["id"]
@@ -126,11 +124,7 @@ def get_pritunl_user(module):
             get_pritunl_settings(module),
             {
                 "organization_id": org_id,
-                "filters": (
-                    {"type": user_type}
-                    if user_name is None
-                    else {"name": user_name, "type": user_type}
-                ),
+                "filters": ({"type": user_type} if user_name is None else {"name": user_name, "type": user_type}),
             },
         )
     )
diff --git a/plugins/modules/pubnub_blocks.py b/plugins/modules/pubnub_blocks.py
index 9171be63fcf..8b669834d56 100644
--- a/plugins/modules/pubnub_blocks.py
+++ b/plugins/modules/pubnub_blocks.py
@@ -219,6 +219,7 @@
     from pubnub_blocks_client import User
     from pubnub_blocks_client import Block, EventHandler
     from pubnub_blocks_client import exceptions
+
     HAS_PUBNUB_BLOCKS_CLIENT = True
 except ImportError:
     HAS_PUBNUB_BLOCKS_CLIENT = False
@@ -249,18 +250,19 @@ def pubnub_user(module):
     user = None
     params = module.params

-    if params.get('cache') and params['cache'].get('module_cache'):
-        cache = params['cache']['module_cache']
+    if params.get("cache") and params["cache"].get("module_cache"):
+        cache = params["cache"]["module_cache"]
         user = User()
-        user.restore(cache=copy.deepcopy(cache['pnm_user']))
-    elif params.get('email') and params.get('password'):
-        user = User(email=params.get('email'), password=params.get('password'))
+        user.restore(cache=copy.deepcopy(cache["pnm_user"]))
+    elif params.get("email") and params.get("password"):
+        user = User(email=params.get("email"), password=params.get("password"))
     else:
-        err_msg = 'It looks like not account credentials has been passed or ' \
-                  '\'cache\' field doesn\'t have result of previous module ' \
-                  'call.'
-        module.fail_json(msg='Missing account credentials.',
-                         description=err_msg, changed=False)
+        err_msg = (
+            "It looks like not account credentials has been passed or "
+            "'cache' field doesn't have result of previous module "
+            "call."
+        )
+        module.fail_json(msg="Missing account credentials.", description=err_msg, changed=False)

     return user
@@ -280,16 +282,16 @@ def pubnub_account(module, user):
     case if not all required information has been passed to block.
     """
     params = module.params
-    if params.get('account'):
-        account_name = params.get('account')
-        account = user.account(name=params.get('account'))
+    if params.get("account"):
+        account_name = params.get("account")
+        account = user.account(name=params.get("account"))
         if account is None:
-            err_frmt = 'It looks like there is no \'{0}\' account for ' \
-                       'authorized user. Please make sure what correct ' \
-                       'name has been passed during module configuration.'
-            module.fail_json(msg='Missing account.',
-                             description=err_frmt.format(account_name),
-                             changed=False)
+            err_frmt = (
+                "It looks like there is no '{0}' account for "
+                "authorized user. Please make sure what correct "
+                "name has been passed during module configuration."
+ ) + module.fail_json(msg="Missing account.", description=err_frmt.format(account_name), changed=False) else: account = user.accounts()[0] @@ -314,21 +316,22 @@ def pubnub_application(module, account): application = None params = module.params try: - application = account.application(params['application']) + application = account.application(params["application"]) except (exceptions.AccountError, exceptions.GeneralPubNubError) as exc: exc_msg = _failure_title_from_exception(exc) - exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0] - module.fail_json(msg=exc_msg, description=exc_descr, - changed=account.changed, - module_cache=dict(account)) + exc_descr = exc.message if hasattr(exc, "message") else exc.args[0] + module.fail_json(msg=exc_msg, description=exc_descr, changed=account.changed, module_cache=dict(account)) if application is None: - err_fmt = 'There is no \'{0}\' application for {1}. Make sure what ' \ - 'correct application name has been passed. If application ' \ - 'doesn\'t exist you can create it on admin.pubnub.com.' + err_fmt = ( + "There is no '{0}' application for {1}. Make sure what " + "correct application name has been passed. If application " + "doesn't exist you can create it on admin.pubnub.com." + ) email = account.owner.email - module.fail_json(msg=err_fmt.format(params['application'], email), - changed=account.changed, module_cache=dict(account)) + module.fail_json( + msg=err_fmt.format(params["application"], email), changed=account.changed, module_cache=dict(account) + ) return application @@ -352,14 +355,16 @@ def pubnub_keyset(module, account, application): :return: Reference on initialized and ready to use keyset model. """ params = module.params - keyset = application.keyset(params['keyset']) + keyset = application.keyset(params["keyset"]) if keyset is None: - err_fmt = 'There is no \'{0}\' keyset for \'{1}\' application. Make ' \ - 'sure what correct keyset name has been passed. If keyset ' \ - 'doesn\'t exist you can create it on admin.pubnub.com.' - module.fail_json(msg=err_fmt.format(params['keyset'], - application.name), - changed=account.changed, module_cache=dict(account)) + err_fmt = ( + "There is no '{0}' keyset for '{1}' application. Make " + "sure what correct keyset name has been passed. If keyset " + "doesn't exist you can create it on admin.pubnub.com." + ) + module.fail_json( + msg=err_fmt.format(params["keyset"], application.name), changed=account.changed, module_cache=dict(account) + ) return keyset @@ -385,30 +390,30 @@ def pubnub_block(module, account, keyset): block = None params = module.params try: - block = keyset.block(params['name']) + block = keyset.block(params["name"]) except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc: exc_msg = _failure_title_from_exception(exc) - exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0] - module.fail_json(msg=exc_msg, description=exc_descr, - changed=account.changed, module_cache=dict(account)) + exc_descr = exc.message if hasattr(exc, "message") else exc.args[0] + module.fail_json(msg=exc_msg, description=exc_descr, changed=account.changed, module_cache=dict(account)) # Report error because block doesn't exists and at the same time # requested to start/stop. 
-    if block is None and params['state'] in ['started', 'stopped']:
-        block_name = params.get('name')
-        module.fail_json(msg=f"'{block_name}' block doesn't exists.", changed=account.changed, module_cache=dict(account))
-
-    if block is None and params['state'] == 'present':
-        block = Block(name=params.get('name'),
-                      description=params.get('description'))
+    if block is None and params["state"] in ["started", "stopped"]:
+        block_name = params.get("name")
+        module.fail_json(
+            msg=f"'{block_name}' block doesn't exists.", changed=account.changed, module_cache=dict(account)
+        )
+
+    if block is None and params["state"] == "present":
+        block = Block(name=params.get("name"), description=params.get("description"))
         keyset.add_block(block)

     if block:
         # Update block information if required.
-        if params.get('changes') and params['changes'].get('name'):
-            block.name = params['changes']['name']
-        if params.get('description'):
-            block.description = params.get('description')
+        if params.get("changes") and params["changes"].get("name"):
+            block.name = params["changes"]["name"]
+        if params.get("description"):
+            block.description = params.get("description")

     return block
@@ -428,25 +433,23 @@ def pubnub_event_handler(block, data):
    'None' will be returned in case if there is no handler with specified
    name and no request to create it.
    """
-    event_handler = block.event_handler(data['name'])
+    event_handler = block.event_handler(data["name"])

     # Prepare payload for event handler update.
-    changed_name = (data.pop('changes').get('name')
-                    if 'changes' in data else None)
-    name = data.get('name') or changed_name
-    channels = data.get('channels')
-    event = data.get('event')
-    code = _content_of_file_at_path(data.get('src'))
-    state = data.get('state') or 'present'
+    changed_name = data.pop("changes").get("name") if "changes" in data else None
+    name = data.get("name") or changed_name
+    channels = data.get("channels")
+    event = data.get("event")
+    code = _content_of_file_at_path(data.get("src"))
+    state = data.get("state") or "present"

     # Create event handler if required.
-    if event_handler is None and state == 'present':
-        event_handler = EventHandler(name=name, channels=channels, event=event,
-                                     code=code)
+    if event_handler is None and state == "present":
+        event_handler = EventHandler(name=name, channels=channels, event=event, code=code)
         block.add_event_handler(event_handler)

     # Update event handler if required.
-    if event_handler is not None and state == 'present':
+    if event_handler is not None and state == "present":
         if name is not None:
             event_handler.name = name
         if channels is not None:
@@ -471,37 +474,37 @@ def _failure_title_from_exception(exception):
     :return: Reference on error tile which should be shown on module
              failure.
     """
-    title = 'General REST API access error.'
+    title = "General REST API access error."
     if exception.code == exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS:
-        title = 'Authorization error: missing credentials.'
+        title = "Authorization error: missing credentials."
     elif exception.code == exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS:
-        title = 'Authorization error: wrong credentials.'
+        title = "Authorization error: wrong credentials."
     elif exception.code == exceptions.PN_USER_INSUFFICIENT_RIGHTS:
-        title = 'API access error: insufficient access rights.'
+        title = "API access error: insufficient access rights."
     elif exception.code == exceptions.PN_API_ACCESS_TOKEN_EXPIRED:
-        title = 'API access error: time token expired.'
+        title = "API access error: time token expired."
     elif exception.code == exceptions.PN_KEYSET_BLOCK_EXISTS:
-        title = 'Block create did fail: block with same name already exists).'
+        title = "Block create did fail: block with same name already exists)."
     elif exception.code == exceptions.PN_KEYSET_BLOCKS_FETCH_DID_FAIL:
-        title = 'Unable fetch list of blocks for keyset.'
+        title = "Unable fetch list of blocks for keyset."
     elif exception.code == exceptions.PN_BLOCK_CREATE_DID_FAIL:
-        title = 'Block creation did fail.'
+        title = "Block creation did fail."
     elif exception.code == exceptions.PN_BLOCK_UPDATE_DID_FAIL:
-        title = 'Block update did fail.'
+        title = "Block update did fail."
     elif exception.code == exceptions.PN_BLOCK_REMOVE_DID_FAIL:
-        title = 'Block removal did fail.'
+        title = "Block removal did fail."
     elif exception.code == exceptions.PN_BLOCK_START_STOP_DID_FAIL:
-        title = 'Block start/stop did fail.'
+        title = "Block start/stop did fail."
     elif exception.code == exceptions.PN_EVENT_HANDLER_MISSING_FIELDS:
-        title = 'Event handler creation did fail: missing fields.'
+        title = "Event handler creation did fail: missing fields."
     elif exception.code == exceptions.PN_BLOCK_EVENT_HANDLER_EXISTS:
-        title = 'Event handler creation did fail: missing fields.'
+        title = "Event handler creation did fail: missing fields."
     elif exception.code == exceptions.PN_EVENT_HANDLER_CREATE_DID_FAIL:
-        title = 'Event handler creation did fail.'
+        title = "Event handler creation did fail."
     elif exception.code == exceptions.PN_EVENT_HANDLER_UPDATE_DID_FAIL:
-        title = 'Event handler update did fail.'
+        title = "Event handler update did fail."
     elif exception.code == exceptions.PN_EVENT_HANDLER_REMOVE_DID_FAIL:
-        title = 'Event handler removal did fail.'
+        title = "Event handler removal did fail."

     return title
@@ -520,7 +523,7 @@ def _content_of_file_at_path(path):
     with open(path, mode="rt") as opened_file:
         b_content = opened_file.read()
         try:
-            content = to_text(b_content, errors='surrogate_or_strict')
+            content = to_text(b_content, errors="surrogate_or_strict")
         except UnicodeError:
             pass
@@ -529,22 +532,23 @@ def _content_of_file_at_path(path):

 def main():
     fields = dict(
-        email=dict(default='', type='str'),
-        password=dict(default='', type='str', no_log=True),
-        account=dict(default='', type='str'),
-        application=dict(required=True, type='str'),
-        keyset=dict(required=True, type='str', no_log=False),
-        state=dict(default='present', type='str',
-                   choices=['started', 'stopped', 'present', 'absent']),
-        name=dict(required=True, type='str'), description=dict(type='str'),
-        event_handlers=dict(default=list(), type='list', elements='dict'),
-        changes=dict(default=dict(), type='dict'),
-        cache=dict(default=dict(), type='dict'),
-        validate_certs=dict(default=True, type='bool'))
+        email=dict(default="", type="str"),
+        password=dict(default="", type="str", no_log=True),
+        account=dict(default="", type="str"),
+        application=dict(required=True, type="str"),
+        keyset=dict(required=True, type="str", no_log=False),
+        state=dict(default="present", type="str", choices=["started", "stopped", "present", "absent"]),
+        name=dict(required=True, type="str"),
+        description=dict(type="str"),
+        event_handlers=dict(default=list(), type="list", elements="dict"),
+        changes=dict(default=dict(), type="dict"),
+        cache=dict(default=dict(), type="dict"),
+        validate_certs=dict(default=True, type="bool"),
+    )

     module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
     if not HAS_PUBNUB_BLOCKS_CLIENT:
-        module.fail_json(msg='pubnub_blocks_client required for this module.')
module.fail_json(msg="pubnub_blocks_client required for this module.") params = module.params @@ -561,44 +565,45 @@ def main(): is_new_block = block is not None and block.uid == -1 # Check whether block should be removed or not. - if block is not None and params['state'] == 'absent': + if block is not None and params["state"] == "absent": keyset.remove_block(block) block = None if block is not None: # Update block information if required. - if params.get('changes') and params['changes'].get('name'): - block.name = params['changes']['name'] + if params.get("changes") and params["changes"].get("name"): + block.name = params["changes"]["name"] # Process event changes to event handlers. - for event_handler_data in params.get('event_handlers') or list(): - state = event_handler_data.get('state') or 'present' - event_handler = pubnub_event_handler(data=event_handler_data, - block=block) - if state == 'absent' and event_handler: + for event_handler_data in params.get("event_handlers") or list(): + state = event_handler_data.get("state") or "present" + event_handler = pubnub_event_handler(data=event_handler_data, block=block) + if state == "absent" and event_handler: block.delete_event_handler(event_handler) # Update block operation state if required. if block and not is_new_block: - if params['state'] == 'started': + if params["state"] == "started": block.start() - elif params['state'] == 'stopped': + elif params["state"] == "stopped": block.stop() # Save current account state. if not module.check_mode: try: account.save() - except (exceptions.APIAccessError, exceptions.KeysetError, - exceptions.BlockError, exceptions.EventHandlerError, - exceptions.GeneralPubNubError) as exc: + except ( + exceptions.APIAccessError, + exceptions.KeysetError, + exceptions.BlockError, + exceptions.EventHandlerError, + exceptions.GeneralPubNubError, + ) as exc: module_cache = dict(account) module_cache.update(dict(pnm_user=dict(user))) exc_msg = _failure_title_from_exception(exc) - exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0] - module.fail_json(msg=exc_msg, description=exc_descr, - changed=account.changed, - module_cache=module_cache) + exc_descr = exc.message if hasattr(exc, "message") else exc.args[0] + module.fail_json(msg=exc_msg, description=exc_descr, changed=account.changed, module_cache=module_cache) # Report module execution results. 
module_cache = dict(account) @@ -607,5 +612,5 @@ def main(): module.exit_json(changed=changed_will_change, module_cache=module_cache) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pulp_repo.py b/plugins/modules/pulp_repo.py index ffa78e0d688..18cacd5eb48 100644 --- a/plugins/modules/pulp_repo.py +++ b/plugins/modules/pulp_repo.py @@ -226,12 +226,12 @@ def check_repo_exists(self, repo_id): def compare_repo_distributor_config(self, repo_id, **kwargs): repo_config = self.get_repo_config_by_id(repo_id) - for distributor in repo_config['distributors']: + for distributor in repo_config["distributors"]: for key, value in kwargs.items(): - if key not in distributor['config'].keys(): + if key not in distributor["config"].keys(): return False - if not distributor['config'][key] == value: + if not distributor["config"][key] == value: return False return True @@ -239,13 +239,13 @@ def compare_repo_distributor_config(self, repo_id, **kwargs): def compare_repo_importer_config(self, repo_id, **kwargs): repo_config = self.get_repo_config_by_id(repo_id) - for importer in repo_config['importers']: + for importer in repo_config["importers"]: for key, value in kwargs.items(): if value is not None: - if key not in importer['config'].keys(): + if key not in importer["config"].keys(): return False - if not importer['config'][key] == value: + if not importer["config"][key] == value: return False return True @@ -266,95 +266,85 @@ def create_repo( ssl_ca_cert=None, ssl_client_cert=None, ssl_client_key=None, - add_export_distributor=False + add_export_distributor=False, ): url = f"{self.host}/pulp/api/v2/repositories/" data = dict() - data['id'] = repo_id - data['distributors'] = [] + data["id"] = repo_id + data["distributors"] = [] - if self.repo_type == 'rpm': + if self.repo_type == "rpm": yum_distributor = dict() - yum_distributor['distributor_id'] = "yum_distributor" - yum_distributor['distributor_type_id'] = "yum_distributor" - yum_distributor['auto_publish'] = True - yum_distributor['distributor_config'] = dict() - yum_distributor['distributor_config']['http'] = serve_http - yum_distributor['distributor_config']['https'] = serve_https - yum_distributor['distributor_config']['relative_url'] = relative_url - yum_distributor['distributor_config']['repoview'] = repoview - yum_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview - data['distributors'].append(yum_distributor) + yum_distributor["distributor_id"] = "yum_distributor" + yum_distributor["distributor_type_id"] = "yum_distributor" + yum_distributor["auto_publish"] = True + yum_distributor["distributor_config"] = dict() + yum_distributor["distributor_config"]["http"] = serve_http + yum_distributor["distributor_config"]["https"] = serve_https + yum_distributor["distributor_config"]["relative_url"] = relative_url + yum_distributor["distributor_config"]["repoview"] = repoview + yum_distributor["distributor_config"]["generate_sqlite"] = generate_sqlite or repoview + data["distributors"].append(yum_distributor) if add_export_distributor: export_distributor = dict() - export_distributor['distributor_id'] = "export_distributor" - export_distributor['distributor_type_id'] = "export_distributor" - export_distributor['auto_publish'] = False - export_distributor['distributor_config'] = dict() - export_distributor['distributor_config']['http'] = serve_http - export_distributor['distributor_config']['https'] = serve_https - export_distributor['distributor_config']['relative_url'] = relative_url 
- export_distributor['distributor_config']['repoview'] = repoview - export_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview - data['distributors'].append(export_distributor) - - data['importer_type_id'] = "yum_importer" - data['importer_config'] = dict() + export_distributor["distributor_id"] = "export_distributor" + export_distributor["distributor_type_id"] = "export_distributor" + export_distributor["auto_publish"] = False + export_distributor["distributor_config"] = dict() + export_distributor["distributor_config"]["http"] = serve_http + export_distributor["distributor_config"]["https"] = serve_https + export_distributor["distributor_config"]["relative_url"] = relative_url + export_distributor["distributor_config"]["repoview"] = repoview + export_distributor["distributor_config"]["generate_sqlite"] = generate_sqlite or repoview + data["distributors"].append(export_distributor) + + data["importer_type_id"] = "yum_importer" + data["importer_config"] = dict() if feed: - data['importer_config']['feed'] = feed + data["importer_config"]["feed"] = feed if proxy_host: - data['importer_config']['proxy_host'] = proxy_host + data["importer_config"]["proxy_host"] = proxy_host if proxy_port: - data['importer_config']['proxy_port'] = proxy_port + data["importer_config"]["proxy_port"] = proxy_port if proxy_username: - data['importer_config']['proxy_username'] = proxy_username + data["importer_config"]["proxy_username"] = proxy_username if proxy_password: - data['importer_config']['proxy_password'] = proxy_password + data["importer_config"]["proxy_password"] = proxy_password if ssl_ca_cert: - data['importer_config']['ssl_ca_cert'] = ssl_ca_cert + data["importer_config"]["ssl_ca_cert"] = ssl_ca_cert if ssl_client_cert: - data['importer_config']['ssl_client_cert'] = ssl_client_cert + data["importer_config"]["ssl_client_cert"] = ssl_client_cert if ssl_client_key: - data['importer_config']['ssl_client_key'] = ssl_client_key + data["importer_config"]["ssl_client_key"] = ssl_client_key - data['notes'] = { - "_repo-type": "rpm-repo" - } + data["notes"] = {"_repo-type": "rpm-repo"} - response, info = fetch_url( - self.module, - url, - data=json.dumps(data), - method='POST') + response, info = fetch_url(self.module, url, data=json.dumps(data), method="POST") - if info['status'] != 201: + if info["status"] != 201: self.module.fail_json( - msg="Failed to create repo.", - status_code=info['status'], - response=info['msg'], - url=url) + msg="Failed to create repo.", status_code=info["status"], response=info["msg"], url=url + ) else: return True def delete_repo(self, repo_id): url = f"{self.host}/pulp/api/v2/repositories/{repo_id}/" - response, info = fetch_url(self.module, url, data='', method='DELETE') + response, info = fetch_url(self.module, url, data="", method="DELETE") - if info['status'] != 202: + if info["status"] != 202: self.module.fail_json( - msg="Failed to delete repo.", - status_code=info['status'], - response=info['msg'], - url=url) + msg="Failed to delete repo.", status_code=info["status"], response=info["msg"], url=url + ) if self.wait_for_completion: self.verify_tasks_completed(json.load(response)) @@ -363,7 +353,7 @@ def delete_repo(self, repo_id): def get_repo_config_by_id(self, repo_id): if repo_id not in self.repo_cache.keys(): - repo_array = [x for x in self.repo_list if x['id'] == repo_id] + repo_array = [x for x in self.repo_list if x["id"] == repo_id] self.repo_cache[repo_id] = repo_array[0] return self.repo_cache[repo_id] @@ -375,38 +365,32 @@ def 
publish_repo(self, repo_id, publish_distributor): if publish_distributor is None: repo_config = self.get_repo_config_by_id(repo_id) - for distributor in repo_config['distributors']: + for distributor in repo_config["distributors"]: data = dict() - data['id'] = distributor['id'] - response, info = fetch_url( - self.module, - url, - data=json.dumps(data), - method='POST') - - if info['status'] != 202: + data["id"] = distributor["id"] + response, info = fetch_url(self.module, url, data=json.dumps(data), method="POST") + + if info["status"] != 202: self.module.fail_json( msg="Failed to publish the repo.", - status_code=info['status'], - response=info['msg'], + status_code=info["status"], + response=info["msg"], url=url, - distributor=distributor['id']) + distributor=distributor["id"], + ) else: data = dict() - data['id'] = publish_distributor - response, info = fetch_url( - self.module, - url, - data=json.dumps(data), - method='POST') - - if info['status'] != 202: + data["id"] = publish_distributor + response, info = fetch_url(self.module, url, data=json.dumps(data), method="POST") + + if info["status"] != 202: self.module.fail_json( msg="Failed to publish the repo", - status_code=info['status'], - response=info['msg'], + status_code=info["status"], + response=info["msg"], url=url, - distributor=publish_distributor) + distributor=publish_distributor, + ) if self.wait_for_completion: self.verify_tasks_completed(json.load(response)) @@ -415,14 +399,12 @@ def publish_repo(self, repo_id, publish_distributor): def sync_repo(self, repo_id): url = f"{self.host}/pulp/api/v2/repositories/{repo_id}/actions/sync/" - response, info = fetch_url(self.module, url, data='', method='POST') + response, info = fetch_url(self.module, url, data="", method="POST") - if info['status'] != 202: + if info["status"] != 202: self.module.fail_json( - msg="Failed to schedule a sync of the repo.", - status_code=info['status'], - response=info['msg'], - url=url) + msg="Failed to schedule a sync of the repo.", status_code=info["status"], response=info["msg"], url=url + ) if self.wait_for_completion: self.verify_tasks_completed(json.load(response)) @@ -433,26 +415,23 @@ def update_repo_distributor_config(self, repo_id, **kwargs): url = f"{self.host}/pulp/api/v2/repositories/{repo_id}/distributors/" repo_config = self.get_repo_config_by_id(repo_id) - for distributor in repo_config['distributors']: + for distributor in repo_config["distributors"]: distributor_url = f"{url}{distributor['id']}/" data = dict() - data['distributor_config'] = dict() + data["distributor_config"] = dict() for key, value in kwargs.items(): - data['distributor_config'][key] = value + data["distributor_config"][key] = value - response, info = fetch_url( - self.module, - distributor_url, - data=json.dumps(data), - method='PUT') + response, info = fetch_url(self.module, distributor_url, data=json.dumps(data), method="PUT") - if info['status'] != 202: + if info["status"] != 202: self.module.fail_json( msg="Failed to set the relative url for the repository.", - status_code=info['status'], - response=info['msg'], - url=url) + status_code=info["status"], + response=info["msg"], + url=url, + ) def update_repo_importer_config(self, repo_id, **kwargs): url = f"{self.host}/pulp/api/v2/repositories/{repo_id}/importers/" @@ -463,63 +442,53 @@ def update_repo_importer_config(self, repo_id, **kwargs): if value is not None: importer_config[key] = value - data['importer_config'] = importer_config + data["importer_config"] = importer_config - if self.repo_type == 'rpm': - 
data['importer_type_id'] = "yum_importer" + if self.repo_type == "rpm": + data["importer_type_id"] = "yum_importer" - response, info = fetch_url( - self.module, - url, - data=json.dumps(data), - method='POST') + response, info = fetch_url(self.module, url, data=json.dumps(data), method="POST") - if info['status'] != 202: + if info["status"] != 202: self.module.fail_json( msg="Failed to set the repo importer configuration", - status_code=info['status'], - response=info['msg'], + status_code=info["status"], + response=info["msg"], importer_config=importer_config, - url=url) + url=url, + ) def set_repo_list(self): url = f"{self.host}/pulp/api/v2/repositories/?details=true" - response, info = fetch_url(self.module, url, method='GET') + response, info = fetch_url(self.module, url, method="GET") - if info['status'] != 200: - self.module.fail_json( - msg="Request failed", - status_code=info['status'], - response=info['msg'], - url=url) + if info["status"] != 200: + self.module.fail_json(msg="Request failed", status_code=info["status"], response=info["msg"], url=url) self.repo_list = json.load(response) def verify_tasks_completed(self, response_dict): - for task in response_dict['spawned_tasks']: + for task in response_dict["spawned_tasks"]: task_url = f"{self.host}{task['_href']}" while True: - response, info = fetch_url( - self.module, - task_url, - data='', - method='GET') + response, info = fetch_url(self.module, task_url, data="", method="GET") - if info['status'] != 200: + if info["status"] != 200: self.module.fail_json( msg="Failed to check async task status.", - status_code=info['status'], - response=info['msg'], - url=task_url) + status_code=info["status"], + response=info["msg"], + url=task_url, + ) task_dict = json.load(response) - if task_dict['state'] == 'finished': + if task_dict["state"] == "finished": return True - if task_dict['state'] == 'error': - self.module.fail_json(msg="Asynchronous task failed to complete.", error=task_dict['error']) + if task_dict["state"] == "error": + self.module.fail_json(msg="Asynchronous task failed to complete.", error=task_dict["error"]) sleep(2) @@ -527,13 +496,13 @@ def verify_tasks_completed(self, response_dict): def main(): argument_spec = url_argument_spec() argument_spec.update( - add_export_distributor=dict(default=False, type='bool'), + add_export_distributor=dict(default=False, type="bool"), feed=dict(), - generate_sqlite=dict(default=False, type='bool'), - feed_ca_cert=dict(aliases=['importer_ssl_ca_cert']), - feed_client_cert=dict(aliases=['importer_ssl_client_cert']), - feed_client_key=dict(aliases=['importer_ssl_client_key'], no_log=True), - name=dict(required=True, aliases=['repo']), + generate_sqlite=dict(default=False, type="bool"), + feed_ca_cert=dict(aliases=["importer_ssl_ca_cert"]), + feed_client_cert=dict(aliases=["importer_ssl_client_cert"]), + feed_client_key=dict(aliases=["importer_ssl_client_key"], no_log=True), + name=dict(required=True, aliases=["repo"]), proxy_host=dict(), proxy_port=dict(), proxy_username=dict(), @@ -542,58 +511,55 @@ def main(): pulp_host=dict(default="https://127.0.0.1"), relative_url=dict(), repo_type=dict(default="rpm"), - repoview=dict(default=False, type='bool'), - serve_http=dict(default=False, type='bool'), - serve_https=dict(default=True, type='bool'), - state=dict( - default="present", - choices=['absent', 'present', 'sync', 'publish']), - wait_for_completion=dict(default=False, type="bool")) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True) - - 
add_export_distributor = module.params['add_export_distributor'] - feed = module.params['feed'] - generate_sqlite = module.params['generate_sqlite'] - importer_ssl_ca_cert = module.params['feed_ca_cert'] - importer_ssl_client_cert = module.params['feed_client_cert'] - importer_ssl_client_key = module.params['feed_client_key'] - proxy_host = module.params['proxy_host'] - proxy_port = module.params['proxy_port'] - proxy_username = module.params['proxy_username'] - proxy_password = module.params['proxy_password'] - publish_distributor = module.params['publish_distributor'] - pulp_host = module.params['pulp_host'] - relative_url = module.params['relative_url'] - repo = module.params['name'] - repo_type = module.params['repo_type'] - repoview = module.params['repoview'] - serve_http = module.params['serve_http'] - serve_https = module.params['serve_https'] - state = module.params['state'] - wait_for_completion = module.params['wait_for_completion'] - - if (state == 'present') and (not relative_url): + repoview=dict(default=False, type="bool"), + serve_http=dict(default=False, type="bool"), + serve_https=dict(default=True, type="bool"), + state=dict(default="present", choices=["absent", "present", "sync", "publish"]), + wait_for_completion=dict(default=False, type="bool"), + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + add_export_distributor = module.params["add_export_distributor"] + feed = module.params["feed"] + generate_sqlite = module.params["generate_sqlite"] + importer_ssl_ca_cert = module.params["feed_ca_cert"] + importer_ssl_client_cert = module.params["feed_client_cert"] + importer_ssl_client_key = module.params["feed_client_key"] + proxy_host = module.params["proxy_host"] + proxy_port = module.params["proxy_port"] + proxy_username = module.params["proxy_username"] + proxy_password = module.params["proxy_password"] + publish_distributor = module.params["publish_distributor"] + pulp_host = module.params["pulp_host"] + relative_url = module.params["relative_url"] + repo = module.params["name"] + repo_type = module.params["repo_type"] + repoview = module.params["repoview"] + serve_http = module.params["serve_http"] + serve_https = module.params["serve_https"] + state = module.params["state"] + wait_for_completion = module.params["wait_for_completion"] + + if (state == "present") and (not relative_url): module.fail_json(msg="When state is present, relative_url is required.") # Ensure that the importer_ssl_* is the content and not a file path if importer_ssl_ca_cert is not None: importer_ssl_ca_cert_file_path = os.path.abspath(importer_ssl_ca_cert) if os.path.isfile(importer_ssl_ca_cert_file_path): - with open(importer_ssl_ca_cert_file_path, 'r') as importer_ssl_ca_cert_file_object: + with open(importer_ssl_ca_cert_file_path, "r") as importer_ssl_ca_cert_file_object: importer_ssl_ca_cert = importer_ssl_ca_cert_file_object.read() if importer_ssl_client_cert is not None: importer_ssl_client_cert_file_path = os.path.abspath(importer_ssl_client_cert) if os.path.isfile(importer_ssl_client_cert_file_path): - with open(importer_ssl_client_cert_file_path, 'r') as importer_ssl_client_cert_file_object: + with open(importer_ssl_client_cert_file_path, "r") as importer_ssl_client_cert_file_object: importer_ssl_client_cert = importer_ssl_client_cert_file_object.read() if importer_ssl_client_key is not None: importer_ssl_client_key_file_path = os.path.abspath(importer_ssl_client_key) if os.path.isfile(importer_ssl_client_key_file_path): - with 
open(importer_ssl_client_key_file_path, 'r') as importer_ssl_client_key_file_object: + with open(importer_ssl_client_key_file_path, "r") as importer_ssl_client_key_file_object: importer_ssl_client_key = importer_ssl_client_key_file_object.read() server = pulp_server(module, pulp_host, repo_type, wait_for_completion=wait_for_completion) @@ -602,13 +568,13 @@ def main(): changed = False - if state == 'absent' and repo_exists: + if state == "absent" and repo_exists: if not module.check_mode: server.delete_repo(repo) changed = True - if state == 'sync': + if state == "sync": if not repo_exists: module.fail_json(msg="Repository was not found. The repository can not be synced.") @@ -617,7 +583,7 @@ def main(): changed = True - if state == 'publish': + if state == "publish": if not repo_exists: module.fail_json(msg="Repository was not found. The repository can not be published.") @@ -626,7 +592,7 @@ def main(): changed = True - if state == 'present': + if state == "present": if not repo_exists: if not module.check_mode: server.create_repo( @@ -644,7 +610,8 @@ def main(): ssl_ca_cert=importer_ssl_ca_cert, ssl_client_cert=importer_ssl_client_cert, ssl_client_key=importer_ssl_client_key, - add_export_distributor=add_export_distributor) + add_export_distributor=add_export_distributor, + ) changed = True @@ -661,7 +628,7 @@ def main(): proxy_password=proxy_password, ssl_ca_cert=importer_ssl_ca_cert, ssl_client_cert=importer_ssl_client_cert, - ssl_client_key=importer_ssl_client_key + ssl_client_key=importer_ssl_client_key, ): if not module.check_mode: server.update_repo_importer_config( @@ -673,19 +640,15 @@ def main(): proxy_password=proxy_password, ssl_ca_cert=importer_ssl_ca_cert, ssl_client_cert=importer_ssl_client_cert, - ssl_client_key=importer_ssl_client_key) + ssl_client_key=importer_ssl_client_key, + ) changed = True if relative_url is not None: - if not server.compare_repo_distributor_config( - repo, - relative_url=relative_url - ): + if not server.compare_repo_distributor_config(repo, relative_url=relative_url): if not module.check_mode: - server.update_repo_distributor_config( - repo, - relative_url=relative_url) + server.update_repo_distributor_config(repo, relative_url=relative_url) changed = True @@ -716,5 +679,5 @@ def main(): module.exit_json(changed=changed, repo=repo) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/puppet.py b/plugins/modules/puppet.py index b99d8c1e5d2..ddc2116fd2b 100644 --- a/plugins/modules/puppet.py +++ b/plugins/modules/puppet.py @@ -200,74 +200,67 @@ def _write_structured_data(basedir, basename, data): # This is more complex than you might normally expect because we want to # open the file with only u+rw set. 
Also, we use the stat constants # because ansible still supports python 2.4 and the octal syntax changed - out_file = os.fdopen( - os.open( - file_path, os.O_CREAT | os.O_WRONLY, - stat.S_IRUSR | stat.S_IWUSR), 'w') - out_file.write(json.dumps(data).encode('utf8')) + out_file = os.fdopen(os.open(file_path, os.O_CREAT | os.O_WRONLY, stat.S_IRUSR | stat.S_IWUSR), "wb") + out_file.write(json.dumps(data).encode("utf8")) out_file.close() def main(): module = AnsibleModule( argument_spec=dict( - timeout=dict(type='str', default='30m'), - puppetmaster=dict(type='str'), - modulepath=dict(type='str'), - manifest=dict(type='str'), - confdir=dict(type='str'), - noop=dict(type='bool'), - logdest=dict(type='str', default='stdout', choices=['all', 'stdout', 'syslog']), + timeout=dict(type="str", default="30m"), + puppetmaster=dict(type="str"), + modulepath=dict(type="str"), + manifest=dict(type="str"), + confdir=dict(type="str"), + noop=dict(type="bool"), + logdest=dict(type="str", default="stdout", choices=["all", "stdout", "syslog"]), # The following is not related to Ansible's diff; see https://github.com/ansible-collections/community.general/pull/3980#issuecomment-1005666154 - show_diff=dict(type='bool', default=False), - facts=dict(type='dict'), - facter_basename=dict(type='str', default='ansible'), - environment=dict(type='str'), - certname=dict(type='str'), - tags=dict(type='list', elements='str'), - skip_tags=dict(type='list', elements='str'), - execute=dict(type='str'), - summarize=dict(type='bool', default=False), - waitforlock=dict(type='str'), - debug=dict(type='bool', default=False), - verbose=dict(type='bool', default=False), - use_srv_records=dict(type='bool'), - environment_lang=dict(type='str', default='C'), + show_diff=dict(type="bool", default=False), + facts=dict(type="dict"), + facter_basename=dict(type="str", default="ansible"), + environment=dict(type="str"), + certname=dict(type="str"), + tags=dict(type="list", elements="str"), + skip_tags=dict(type="list", elements="str"), + execute=dict(type="str"), + summarize=dict(type="bool", default=False), + waitforlock=dict(type="str"), + debug=dict(type="bool", default=False), + verbose=dict(type="bool", default=False), + use_srv_records=dict(type="bool"), + environment_lang=dict(type="str", default="C"), ), supports_check_mode=True, mutually_exclusive=[ - ('puppetmaster', 'manifest'), - ('puppetmaster', 'manifest', 'execute'), - ('puppetmaster', 'modulepath'), + ("puppetmaster", "manifest"), + ("puppetmaster", "manifest", "execute"), + ("puppetmaster", "modulepath"), ], ) p = module.params - if p['manifest']: - if not os.path.exists(p['manifest']): - module.fail_json( - msg=f"Manifest file {dict(manifest=p['manifest'])['manifest']} not found.") + if p["manifest"]: + if not os.path.exists(p["manifest"]): + module.fail_json(msg=f"Manifest file {p['manifest']} not found.") # Check if puppet is disabled here - if not p['manifest']: + if not p["manifest"]: puppet_utils.ensure_agent_enabled(module) - if module.params['facts'] and not module.check_mode: - _write_structured_data( - puppet_utils.get_facter_dir(), - module.params['facter_basename'], - module.params['facts']) + if module.params["facts"] and not module.check_mode: + _write_structured_data(puppet_utils.get_facter_dir(), module.params["facter_basename"], module.params["facts"]) runner = puppet_utils.puppet_runner(module) - if not p['manifest'] and not p['execute']: + if not p["manifest"] and not p["execute"]: args_order = "_agent_fixed puppetmaster show_diff
confdir environment tags skip_tags certname noop use_srv_records waitforlock" with runner(args_order) as ctx: rc, stdout, stderr = ctx.run() else: args_order = "_apply_fixed logdest modulepath environment certname tags skip_tags noop _execute summarize debug verbose waitforlock" with runner(args_order) as ctx: - rc, stdout, stderr = ctx.run(_execute=[p['execute'], p['manifest']]) + rc, stdout, stderr = ctx.run(_execute=[p["execute"], p["manifest"]]) if rc == 0: # success @@ -280,22 +273,17 @@ def main(): msg = "puppet is disabled" else: msg = "puppet did not run" - module.exit_json( - rc=rc, disabled=disabled, msg=msg, - error=True, stdout=stdout, stderr=stderr) + module.exit_json(rc=rc, disabled=disabled, msg=msg, error=True, stdout=stdout, stderr=stderr) elif rc == 2: # success with changes module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr) elif rc == 124: # timeout - module.exit_json( - rc=rc, msg=f"{ctx.cmd} timed out", stdout=stdout, stderr=stderr) + module.exit_json(rc=rc, msg=f"{ctx.cmd} timed out", stdout=stdout, stderr=stderr) else: # failure - module.fail_json( - rc=rc, msg=f"{ctx.cmd} failed with return code: {rc}", - stdout=stdout, stderr=stderr) + module.fail_json(rc=rc, msg=f"{ctx.cmd} failed with return code: {rc}", stdout=stdout, stderr=stderr) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pushbullet.py b/plugins/modules/pushbullet.py index 259c1fd4c29..85679654eae 100644 --- a/plugins/modules/pushbullet.py +++ b/plugins/modules/pushbullet.py @@ -108,33 +108,32 @@ # Main # + def main(): module = AnsibleModule( argument_spec=dict( - api_key=dict(type='str', required=True, no_log=True), - channel=dict(type='str'), - device=dict(type='str'), - push_type=dict(type='str', default="note", choices=['note', 'link']), - title=dict(type='str', required=True), - body=dict(type='str'), - url=dict(type='str'), - ), - mutually_exclusive=( - ['channel', 'device'], + api_key=dict(type="str", required=True, no_log=True), + channel=dict(type="str"), + device=dict(type="str"), + push_type=dict(type="str", default="note", choices=["note", "link"]), + title=dict(type="str", required=True), + body=dict(type="str"), + url=dict(type="str"), ), - supports_check_mode=True + mutually_exclusive=(["channel", "device"],), + supports_check_mode=True, ) - api_key = module.params['api_key'] - channel = module.params['channel'] - device = module.params['device'] - push_type = module.params['push_type'] - title = module.params['title'] - body = module.params['body'] - url = module.params['url'] + api_key = module.params["api_key"] + channel = module.params["channel"] + device = module.params["device"] + push_type = module.params["push_type"] + title = module.params["title"] + body = module.params["body"] + url = module.params["url"] if not pushbullet_found: - module.fail_json(msg=missing_required_lib('pushbullet.py'), exception=PUSHBULLET_IMP_ERR) + module.fail_json(msg=missing_required_lib("pushbullet.py"), exception=PUSHBULLET_IMP_ERR) # Init pushbullet try: @@ -156,7 +155,9 @@ def main(): if device in devices_by_nickname: target = devices_by_nickname[device] else: - module.fail_json(msg="Device '%s' not found. Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys()))) + module.fail_json( + msg="Device '%s' not found. 
Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys())) + ) # Search for given channel if channel is not None: @@ -167,7 +168,9 @@ def main(): if channel in channels_by_tag: target = channels_by_tag[channel] else: - module.fail_json(msg="Channel '%s' not found. Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys()))) + module.fail_json( + msg="Channel '%s' not found. Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys())) + ) # If in check mode, exit saying that we succeeded if module.check_mode: @@ -186,5 +189,5 @@ def main(): module.fail_json(msg="An unknown error has occurred") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/pushover.py b/plugins/modules/pushover.py index 5d547800ce8..5d1df47b8ab 100644 --- a/plugins/modules/pushover.py +++ b/plugins/modules/pushover.py @@ -93,8 +93,9 @@ class Pushover: - ''' Instantiates a pushover object, use it to send notifications ''' - base_uri = 'https://api.pushover.net' + """Instantiates a pushover object, use it to send notifications""" + + base_uri = "https://api.pushover.net" def __init__(self, module, user, token): self.module = module @@ -102,55 +103,51 @@ def __init__(self, module, user, token): self.token = token def run(self, priority, msg, title, device): - ''' Do, whatever it is, we do. ''' + """Do, whatever it is, we do.""" - url = f'{self.base_uri}/1/messages.json' + url = f"{self.base_uri}/1/messages.json" # parse config - options = dict(user=self.user, - token=self.token, - priority=priority, - message=msg) + options = dict(user=self.user, token=self.token, priority=priority, message=msg) if title is not None: - options = dict(options, - title=title) + options = dict(options, title=title) if device is not None: - options = dict(options, - device=device) + options = dict(options, device=device) data = urlencode(options) headers = {"Content-type": "application/x-www-form-urlencoded"} - r, info = fetch_url(self.module, url, method='POST', data=data, headers=headers) - if info['status'] != 200: + r, info = fetch_url(self.module, url, method="POST", data=data, headers=headers) + if info["status"] != 200: raise Exception(info) return r.read() def main(): - module = AnsibleModule( argument_spec=dict( - title=dict(type='str'), + title=dict(type="str"), msg=dict(required=True), app_token=dict(required=True, no_log=True), user_key=dict(required=True, no_log=True), - pri=dict(default='0', choices=['-2', '-1', '0', '1', '2']), - device=dict(type='str'), + pri=dict(default="0", choices=["-2", "-1", "0", "1", "2"]), + device=dict(type="str"), ), ) - msg_object = Pushover(module, module.params['user_key'], module.params['app_token']) + msg_object = Pushover(module, module.params["user_key"], module.params["app_token"]) try: - response = msg_object.run(module.params['pri'], module.params['msg'], module.params['title'], module.params['device']) + response = msg_object.run( + module.params["pri"], module.params["msg"], module.params["title"], module.params["device"] + ) except Exception: - module.fail_json(msg='Unable to send msg via pushover') + module.fail_json(msg="Unable to send msg via pushover") - module.exit_json(msg=f'message sent successfully: {response}', changed=False) + module.exit_json(msg=f"message sent successfully: {response}", changed=False) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/python_requirements_info.py b/plugins/modules/python_requirements_info.py index 
ef8464e4776..d0497cc48b1 100644 --- a/plugins/modules/python_requirements_info.py +++ b/plugins/modules/python_requirements_info.py @@ -125,6 +125,7 @@ try: import pkg_resources from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + HAS_DISTUTILS = True except ImportError: pass @@ -132,11 +133,11 @@ from ansible.module_utils.basic import AnsibleModule operations = { - '<=': operator.le, - '>=': operator.ge, - '<': operator.lt, - '>': operator.gt, - '==': operator.eq, + "<=": operator.le, + ">=": operator.ge, + "<": operator.lt, + ">": operator.gt, + "==": operator.eq, } python_version_info = dict( @@ -150,9 +151,7 @@ def main(): module = AnsibleModule( - argument_spec=dict( - dependencies=dict(type='list', elements='str', default=[]) - ), + argument_spec=dict(dependencies=dict(type="list", elements="str", default=[])), supports_check_mode=True, ) if not HAS_DISTUTILS: @@ -163,7 +162,7 @@ def main(): python_version_info=python_version_info, python_system_path=sys.path, ) - pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(?:(==|[><]=?)([0-9.]+))?$') + pkg_dep_re = re.compile(r"(^[a-zA-Z][a-zA-Z0-9_-]+)(?:(==|[><]=?)([0-9.]+))?$") results = dict( not_found=[], @@ -171,33 +170,35 @@ def main(): valid={}, ) - for dep in module.params['dependencies']: + for dep in module.params["dependencies"]: match = pkg_dep_re.match(dep) if not match: module.fail_json(msg=f"Failed to parse version requirement '{dep}'. Must be formatted like 'ansible>2.6'") pkg, op, version = match.groups() if op is not None and op not in operations: - module.fail_json(msg=f"Failed to parse version requirement '{dep}'. Operator must be one of >, <, <=, >=, or ==") + module.fail_json( + msg=f"Failed to parse version requirement '{dep}'. Operator must be one of >, <, <=, >=, or ==" + ) try: existing = pkg_resources.get_distribution(pkg).version except pkg_resources.DistributionNotFound: # not there - results['not_found'].append(pkg) + results["not_found"].append(pkg) continue if op is None and version is None: - results['valid'][pkg] = { - 'installed': existing, - 'desired': None, + results["valid"][pkg] = { + "installed": existing, + "desired": None, } elif operations[op](LooseVersion(existing), LooseVersion(version)): - results['valid'][pkg] = { - 'installed': existing, - 'desired': dep, + results["valid"][pkg] = { + "installed": existing, + "desired": dep, } else: - results['mismatched'][pkg] = { - 'installed': existing, - 'desired': dep, + results["mismatched"][pkg] = { + "installed": existing, + "desired": dep, } module.exit_json( @@ -205,9 +206,9 @@ def main(): python_version=sys.version, python_version_info=python_version_info, python_system_path=sys.path, - **results + **results, ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/read_csv.py b/plugins/modules/read_csv.py index 8fd5bc4c500..cadc30d97c1 100644 --- a/plugins/modules/read_csv.py +++ b/plugins/modules/read_csv.py @@ -146,36 +146,40 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError, - DialectNotAvailableError, - CustomDialectFailureError) +from ansible_collections.community.general.plugins.module_utils.csv import ( + initialize_dialect, + read_csv, + CSVError, + DialectNotAvailableError, + CustomDialectFailureError, +) def main(): module = AnsibleModule( argument_spec=dict( - 
path=dict(type='path', required=True, aliases=['filename']), - dialect=dict(type='str', default='excel'), - key=dict(type='str', no_log=False), - fieldnames=dict(type='list', elements='str'), - unique=dict(type='bool', default=True), - delimiter=dict(type='str'), - skipinitialspace=dict(type='bool'), - strict=dict(type='bool'), + path=dict(type="path", required=True, aliases=["filename"]), + dialect=dict(type="str", default="excel"), + key=dict(type="str", no_log=False), + fieldnames=dict(type="list", elements="str"), + unique=dict(type="bool", default=True), + delimiter=dict(type="str"), + skipinitialspace=dict(type="bool"), + strict=dict(type="bool"), ), supports_check_mode=True, ) - path = module.params['path'] - dialect = module.params['dialect'] - key = module.params['key'] - fieldnames = module.params['fieldnames'] - unique = module.params['unique'] + path = module.params["path"] + dialect = module.params["dialect"] + key = module.params["key"] + fieldnames = module.params["fieldnames"] + unique = module.params["unique"] dialect_params = { - "delimiter": module.params['delimiter'], - "skipinitialspace": module.params['skipinitialspace'], - "strict": module.params['strict'], + "delimiter": module.params["delimiter"], + "skipinitialspace": module.params["skipinitialspace"], + "strict": module.params["strict"], } try: @@ -184,7 +188,7 @@ def main(): module.fail_json(msg=to_native(e)) try: - with open(path, 'rb') as f: + with open(path, "rb") as f: data = f.read() except (IOError, OSError) as e: module.fail_json(msg=f"Unable to open file: {e}") @@ -215,5 +219,5 @@ def main(): module.exit_json(dict=data_dict, list=data_list) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/redfish_command.py b/plugins/modules/redfish_command.py index 8531acb4fcc..58005d1bc53 100644 --- a/plugins/modules/redfish_command.py +++ b/plugins/modules/redfish_command.py @@ -841,26 +841,60 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC +from ansible_collections.community.general.plugins.module_utils.redfish_utils import ( + RedfishUtils, + REDFISH_COMMON_ARGUMENT_SPEC, +) from ansible.module_utils.common.text.converters import to_native # More will be added as module features are expanded CATEGORY_COMMANDS_ALL = { - "Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart", - "PowerGracefulShutdown", "PowerReboot", "PowerCycle", "PowerFullPowerCycle", - "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride", - "IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink", "VirtualMediaInsert", - "VirtualMediaEject", "VerifyBiosAttributes"], + "Systems": [ + "PowerOn", + "PowerForceOff", + "PowerForceRestart", + "PowerGracefulRestart", + "PowerGracefulShutdown", + "PowerReboot", + "PowerCycle", + "PowerFullPowerCycle", + "SetOneTimeBoot", + "EnableContinuousBootOverride", + "DisableBootOverride", + "IndicatorLedOn", + "IndicatorLedOff", + "IndicatorLedBlink", + "VirtualMediaInsert", + "VirtualMediaEject", + "VerifyBiosAttributes", + ], "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"], - "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser", - "UpdateUserRole", "UpdateUserPassword", "UpdateUserName", - "UpdateUserAccountTypes", "UpdateAccountServiceProperties"], + "Accounts": [ + "AddUser", + "EnableUser", + "DeleteUser", + "DisableUser", + "UpdateUserRole", + 
"UpdateUserPassword", + "UpdateUserName", + "UpdateUserAccountTypes", + "UpdateAccountServiceProperties", + ], "Sessions": ["ClearSessions", "CreateSession", "DeleteSession"], - "Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert", - "ResetToDefaults", - "VirtualMediaEject", "PowerOn", "PowerForceOff", "PowerForceRestart", - "PowerGracefulRestart", "PowerGracefulShutdown", "PowerReboot"], + "Manager": [ + "GracefulRestart", + "ClearLogs", + "VirtualMediaInsert", + "ResetToDefaults", + "VirtualMediaEject", + "PowerOn", + "PowerForceOff", + "PowerForceRestart", + "PowerGracefulRestart", + "PowerGracefulShutdown", + "PowerReboot", + ], "Update": ["SimpleUpdate", "MultipartHTTPPushUpdate", "PerformRequestedOperations"], } @@ -870,7 +904,7 @@ def main(): return_values = {} argument_spec = dict( category=dict(required=True), - command=dict(required=True, type='list', elements='str'), + command=dict(required=True, type="list", elements="str"), baseuri=dict(required=True), username=dict(), password=dict(no_log=True), @@ -880,142 +914,152 @@ def main(): new_username=dict(aliases=["account_username"]), new_password=dict(aliases=["account_password"], no_log=True), roleid=dict(aliases=["account_roleid"]), - account_types=dict(type='list', elements='str', aliases=["account_accounttypes"]), - oem_account_types=dict(type='list', elements='str', aliases=["account_oemaccounttypes"]), - update_username=dict(type='str', aliases=["account_updatename"]), - account_properties=dict(type='dict', default={}), + account_types=dict(type="list", elements="str", aliases=["account_accounttypes"]), + oem_account_types=dict(type="list", elements="str", aliases=["account_oemaccounttypes"]), + update_username=dict(type="str", aliases=["account_updatename"]), + account_properties=dict(type="dict", default={}), bootdevice=dict(), - timeout=dict(type='int', default=60), + timeout=dict(type="int", default=60), uefi_target=dict(), boot_next=dict(), - boot_override_mode=dict(choices=['Legacy', 'UEFI']), + boot_override_mode=dict(choices=["Legacy", "UEFI"]), resource_id=dict(), update_image_uri=dict(), - update_image_file=dict(type='path'), + update_image_file=dict(type="path"), update_protocol=dict(), - update_targets=dict(type='list', elements='str', default=[]), - update_oem_params=dict(type='dict'), - update_custom_oem_header=dict(type='str'), - update_custom_oem_mime_type=dict(type='str'), - update_custom_oem_params=dict(type='raw'), - update_creds=dict( - type='dict', - options=dict( - username=dict(), - password=dict(no_log=True) - ) + update_targets=dict(type="list", elements="str", default=[]), + update_oem_params=dict(type="dict"), + update_custom_oem_header=dict(type="str"), + update_custom_oem_mime_type=dict(type="str"), + update_custom_oem_params=dict(type="raw"), + update_creds=dict(type="dict", options=dict(username=dict(), password=dict(no_log=True))), + update_apply_time=dict( + choices=[ + "Immediate", + "OnReset", + "AtMaintenanceWindowStart", + "InMaintenanceWindowOnReset", + "OnStartUpdateRequest", + ] ), - update_apply_time=dict(choices=['Immediate', 'OnReset', 'AtMaintenanceWindowStart', - 'InMaintenanceWindowOnReset', 'OnStartUpdateRequest']), update_handle=dict(), virtual_media=dict( - type='dict', + type="dict", options=dict( - media_types=dict(type='list', elements='str', default=[]), + media_types=dict(type="list", elements="str", default=[]), image_url=dict(), - inserted=dict(type='bool', default=True), - write_protected=dict(type='bool', default=True), + inserted=dict(type="bool", 
default=True), + write_protected=dict(type="bool", default=True), username=dict(), password=dict(no_log=True), transfer_protocol_type=dict(), transfer_method=dict(), - ) + ), ), - strip_etag_quotes=dict(type='bool', default=False), - reset_to_defaults_mode=dict(choices=['ResetAll', 'PreserveNetworkAndUsers', 'PreserveNetwork']), + strip_etag_quotes=dict(type="bool", default=False), + reset_to_defaults_mode=dict(choices=["ResetAll", "PreserveNetworkAndUsers", "PreserveNetwork"]), bios_attributes=dict(type="dict"), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=120), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=120), ) argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( argument_spec, required_together=[ - ('username', 'password'), - ('update_custom_oem_header', 'update_custom_oem_params'), + ("username", "password"), + ("update_custom_oem_header", "update_custom_oem_params"), ], required_one_of=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], mutually_exclusive=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], - supports_check_mode=False + supports_check_mode=False, ) - category = module.params['category'] - command_list = module.params['command'] + category = module.params["category"] + command_list = module.params["command"] # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} + creds = {"user": module.params["username"], "pswd": module.params["password"], "token": module.params["auth_token"]} # user to add/modify/delete user = { - 'account_id': module.params['id'], - 'account_username': module.params['new_username'], - 'account_password': module.params['new_password'], - 'account_roleid': module.params['roleid'], - 'account_accounttypes': module.params['account_types'], - 'account_oemaccounttypes': module.params['oem_account_types'], - 'account_updatename': module.params['update_username'], - 'account_properties': module.params['account_properties'], - 'account_passwordchangerequired': None, + "account_id": module.params["id"], + "account_username": module.params["new_username"], + "account_password": module.params["new_password"], + "account_roleid": module.params["roleid"], + "account_accounttypes": module.params["account_types"], + "account_oemaccounttypes": module.params["oem_account_types"], + "account_updatename": module.params["update_username"], + "account_properties": module.params["account_properties"], + "account_passwordchangerequired": None, } # timeout - timeout = module.params['timeout'] + timeout = module.params["timeout"] # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] + resource_id = module.params["resource_id"] # update options update_opts = { - 'update_image_uri': module.params['update_image_uri'], - 'update_image_file': module.params['update_image_file'], - 'update_protocol': module.params['update_protocol'], - 'update_targets': module.params['update_targets'], - 'update_creds': module.params['update_creds'], - 'update_apply_time': module.params['update_apply_time'], - 'update_oem_params': module.params['update_oem_params'], - 'update_custom_oem_header': module.params['update_custom_oem_header'], - 'update_custom_oem_params': module.params['update_custom_oem_params'], - 'update_custom_oem_mime_type': module.params['update_custom_oem_mime_type'], - 'update_handle': module.params['update_handle'], 
+ "update_image_uri": module.params["update_image_uri"], + "update_image_file": module.params["update_image_file"], + "update_protocol": module.params["update_protocol"], + "update_targets": module.params["update_targets"], + "update_creds": module.params["update_creds"], + "update_apply_time": module.params["update_apply_time"], + "update_oem_params": module.params["update_oem_params"], + "update_custom_oem_header": module.params["update_custom_oem_header"], + "update_custom_oem_params": module.params["update_custom_oem_params"], + "update_custom_oem_mime_type": module.params["update_custom_oem_mime_type"], + "update_handle": module.params["update_handle"], } # Boot override options boot_opts = { - 'bootdevice': module.params['bootdevice'], - 'uefi_target': module.params['uefi_target'], - 'boot_next': module.params['boot_next'], - 'boot_override_mode': module.params['boot_override_mode'], + "bootdevice": module.params["bootdevice"], + "uefi_target": module.params["uefi_target"], + "boot_next": module.params["boot_next"], + "boot_override_mode": module.params["boot_override_mode"], } # VirtualMedia options - virtual_media = module.params['virtual_media'] + virtual_media = module.params["virtual_media"] # Etag options - strip_etag_quotes = module.params['strip_etag_quotes'] + strip_etag_quotes = module.params["strip_etag_quotes"] # BIOS Attributes options - bios_attributes = module.params['bios_attributes'] + bios_attributes = module.params["bios_attributes"] # Build root URI root_uri = f"https://{module.params['baseuri']}" - rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) + rf_utils = RedfishUtils( + creds, + root_uri, + timeout, + module, + resource_id=resource_id, + data_modification=True, + strip_etag_quotes=strip_etag_quotes, + ) # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}")) + module.fail_json( + msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}") + ) # Check that all commands are valid for cmd in command_list: # Fail if even one command given is invalid if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}")) + module.fail_json( + msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}") + ) # Organize by Categories / Commands if category == "Accounts": @@ -1028,19 +1072,23 @@ def main(): "UpdateUserPassword": rf_utils.update_user_password, "UpdateUserName": rf_utils.update_user_name, "UpdateUserAccountTypes": rf_utils.update_user_accounttypes, - "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties + "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties, } # execute only if we find an Account service resource result = rf_utils._find_accountservice_resource() - if result['ret'] is False: + if result["ret"] is False: # If a password change is required and the user is attempting to # modify their password, try to proceed. 
- user['account_passwordchangerequired'] = rf_utils.check_password_change_required(result) - if len(command_list) == 1 and command_list[0] == "UpdateUserPassword" and user['account_passwordchangerequired']: + user["account_passwordchangerequired"] = rf_utils.check_password_change_required(result) + if ( + len(command_list) == 1 + and command_list[0] == "UpdateUserPassword" + and user["account_passwordchangerequired"] + ): result = rf_utils.update_user_password(user) else: - module.fail_json(msg=to_native(result['msg'])) + module.fail_json(msg=to_native(result["msg"])) else: for command in command_list: result = ACCOUNTS_COMMANDS[command](user) @@ -1048,41 +1096,41 @@ def main(): elif category == "Systems": # execute only if we find a System resource result = rf_utils._find_systems_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) for command in command_list: - if command.startswith('Power'): + if command.startswith("Power"): result = rf_utils.manage_system_power(command) elif command == "SetOneTimeBoot": - boot_opts['override_enabled'] = 'Once' + boot_opts["override_enabled"] = "Once" result = rf_utils.set_boot_override(boot_opts) elif command == "EnableContinuousBootOverride": - boot_opts['override_enabled'] = 'Continuous' + boot_opts["override_enabled"] = "Continuous" result = rf_utils.set_boot_override(boot_opts) elif command == "DisableBootOverride": - boot_opts['override_enabled'] = 'Disabled' + boot_opts["override_enabled"] = "Disabled" result = rf_utils.set_boot_override(boot_opts) - elif command.startswith('IndicatorLed'): + elif command.startswith("IndicatorLed"): result = rf_utils.manage_system_indicator_led(command) - elif command == 'VirtualMediaInsert': + elif command == "VirtualMediaInsert": result = rf_utils.virtual_media_insert(virtual_media, category) - elif command == 'VirtualMediaEject': + elif command == "VirtualMediaEject": result = rf_utils.virtual_media_eject(virtual_media, category) - elif command == 'VerifyBiosAttributes': + elif command == "VerifyBiosAttributes": result = rf_utils.verify_bios_attributes(bios_attributes) elif category == "Chassis": result = rf_utils._find_chassis_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) led_commands = ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"] # Check if more than one led_command is present num_led_commands = sum([command in led_commands for command in command_list]) if num_led_commands > 1: - result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."} + result = {"ret": False, "msg": "Only one IndicatorLed command should be sent at a time."} else: for command in command_list: if command in led_commands: @@ -1091,8 +1139,8 @@ def main(): elif category == "Sessions": # execute only if we find SessionService resources resource = rf_utils._find_sessionservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) + if resource["ret"] is False: + module.fail_json(msg=resource["msg"]) for command in command_list: if command == "ClearSessions": @@ -1100,60 +1148,58 @@ def main(): elif command == "CreateSession": result = rf_utils.create_session() elif command == "DeleteSession": - result = rf_utils.delete_session(module.params['session_uri']) + result = rf_utils.delete_session(module.params["session_uri"]) elif category == 
"Manager": # execute only if we find a Manager service resource result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) for command in command_list: # standardize on the Power* commands, but allow the legacy # GracefulRestart command - if command == 'GracefulRestart': - command = 'PowerGracefulRestart' + if command == "GracefulRestart": + command = "PowerGracefulRestart" - if command.startswith('Power'): - result = rf_utils.manage_manager_power(command, module.params['wait'], module.params['wait_timeout']) - elif command == 'ClearLogs': + if command.startswith("Power"): + result = rf_utils.manage_manager_power(command, module.params["wait"], module.params["wait_timeout"]) + elif command == "ClearLogs": result = rf_utils.clear_logs() - elif command == 'VirtualMediaInsert': + elif command == "VirtualMediaInsert": result = rf_utils.virtual_media_insert(virtual_media, category) - elif command == 'VirtualMediaEject': + elif command == "VirtualMediaEject": result = rf_utils.virtual_media_eject(virtual_media, category) - elif command == 'ResetToDefaults': - result = rf_utils.manager_reset_to_defaults(module.params['reset_to_defaults_mode']) + elif command == "ResetToDefaults": + result = rf_utils.manager_reset_to_defaults(module.params["reset_to_defaults_mode"]) elif category == "Update": # execute only if we find UpdateService resources resource = rf_utils._find_updateservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) + if resource["ret"] is False: + module.fail_json(msg=resource["msg"]) for command in command_list: if command == "SimpleUpdate": result = rf_utils.simple_update(update_opts) - if 'update_status' in result: - return_values['update_status'] = result['update_status'] + if "update_status" in result: + return_values["update_status"] = result["update_status"] elif command == "MultipartHTTPPushUpdate": result = rf_utils.multipath_http_push_update(update_opts) - if 'update_status' in result: - return_values['update_status'] = result['update_status'] + if "update_status" in result: + return_values["update_status"] = result["update_status"] elif command == "PerformRequestedOperations": - result = rf_utils.perform_requested_update_operations(update_opts['update_handle']) + result = rf_utils.perform_requested_update_operations(update_opts["update_handle"]) # Return data back or fail with proper message - if result['ret'] is True: - del result['ret'] - changed = result.get('changed', True) - session = result.get('session', dict()) - module.exit_json(changed=changed, session=session, - msg='Action was successful', - return_values=return_values) + if result["ret"] is True: + del result["ret"] + changed = result.get("changed", True) + session = result.get("session", dict()) + module.exit_json(changed=changed, session=session, msg="Action was successful", return_values=return_values) else: - module.fail_json(msg=to_native(result['msg'])) + module.fail_json(msg=to_native(result["msg"])) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/redfish_config.py b/plugins/modules/redfish_config.py index 269bca25403..fac604618fd 100644 --- a/plugins/modules/redfish_config.py +++ b/plugins/modules/redfish_config.py @@ -389,15 +389,26 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import 
RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC +from ansible_collections.community.general.plugins.module_utils.redfish_utils import ( + RedfishUtils, + REDFISH_COMMON_ARGUMENT_SPEC, +) from ansible.module_utils.common.text.converters import to_native # More will be added as module features are expanded CATEGORY_COMMANDS_ALL = { - "Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder", - "SetDefaultBootOrder", "EnableSecureBoot", "SetSecureBoot", "DeleteVolumes", "CreateVolume", - "SetPowerRestorePolicy"], + "Systems": [ + "SetBiosDefaultSettings", + "SetBiosAttributes", + "SetBootOrder", + "SetDefaultBootOrder", + "EnableSecureBoot", + "SetSecureBoot", + "DeleteVolumes", + "CreateVolume", + "SetPowerRestorePolicy", + ], "Manager": ["SetNetworkProtocols", "SetManagerNic", "SetHostInterface", "SetServiceIdentification"], "Sessions": ["SetSessionService"], } @@ -407,126 +418,129 @@ def main(): result = {} argument_spec = dict( category=dict(required=True), - command=dict(required=True, type='list', elements='str'), + command=dict(required=True, type="list", elements="str"), baseuri=dict(required=True), username=dict(), password=dict(no_log=True), auth_token=dict(no_log=True), - bios_attributes=dict(type='dict', default={}), - timeout=dict(type='int', default=60), - boot_order=dict(type='list', elements='str', default=[]), - network_protocols=dict( - type='dict', - default={} - ), + bios_attributes=dict(type="dict", default={}), + timeout=dict(type="int", default=60), + boot_order=dict(type="list", elements="str", default=[]), + network_protocols=dict(type="dict", default={}), resource_id=dict(), service_id=dict(), - nic_addr=dict(default='null'), - nic_config=dict( - type='dict', - default={} - ), - strip_etag_quotes=dict(type='bool', default=False), - hostinterface_config=dict(type='dict', default={}), + nic_addr=dict(default="null"), + nic_config=dict(type="dict", default={}), + strip_etag_quotes=dict(type="bool", default=False), + hostinterface_config=dict(type="dict", default={}), hostinterface_id=dict(), - sessions_config=dict(type='dict', default={}), - storage_subsystem_id=dict(type='str', default=''), - storage_none_volume_deletion=dict(type='bool', default=False), - volume_ids=dict(type='list', default=[], elements='str'), - secure_boot_enable=dict(type='bool', default=True), - volume_details=dict(type='dict', default={}), - power_restore_policy=dict(choices=['AlwaysOn', 'AlwaysOff', 'LastState']), + sessions_config=dict(type="dict", default={}), + storage_subsystem_id=dict(type="str", default=""), + storage_none_volume_deletion=dict(type="bool", default=False), + volume_ids=dict(type="list", default=[], elements="str"), + secure_boot_enable=dict(type="bool", default=True), + volume_details=dict(type="dict", default={}), + power_restore_policy=dict(choices=["AlwaysOn", "AlwaysOff", "LastState"]), ) argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( argument_spec, required_together=[ - ('username', 'password'), + ("username", "password"), ], required_one_of=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], mutually_exclusive=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], - supports_check_mode=False + supports_check_mode=False, ) - category = module.params['category'] - command_list = module.params['command'] + category = module.params["category"] + command_list = module.params["command"] # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], 
- 'token': module.params['auth_token']} + creds = {"user": module.params["username"], "pswd": module.params["password"], "token": module.params["auth_token"]} # timeout - timeout = module.params['timeout'] + timeout = module.params["timeout"] # BIOS attributes to update - bios_attributes = module.params['bios_attributes'] + bios_attributes = module.params["bios_attributes"] # boot order - boot_order = module.params['boot_order'] + boot_order = module.params["boot_order"] # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] + resource_id = module.params["resource_id"] # manager nic - nic_addr = module.params['nic_addr'] - nic_config = module.params['nic_config'] + nic_addr = module.params["nic_addr"] + nic_config = module.params["nic_config"] # Etag options - strip_etag_quotes = module.params['strip_etag_quotes'] + strip_etag_quotes = module.params["strip_etag_quotes"] # HostInterface config options - hostinterface_config = module.params['hostinterface_config'] + hostinterface_config = module.params["hostinterface_config"] # HostInterface instance ID - hostinterface_id = module.params['hostinterface_id'] + hostinterface_id = module.params["hostinterface_id"] # Service Identification - service_id = module.params['service_id'] + service_id = module.params["service_id"] # Sessions config options - sessions_config = module.params['sessions_config'] + sessions_config = module.params["sessions_config"] # Volume deletion options - storage_subsystem_id = module.params['storage_subsystem_id'] - volume_ids = module.params['volume_ids'] + storage_subsystem_id = module.params["storage_subsystem_id"] + volume_ids = module.params["volume_ids"] # Set SecureBoot options - secure_boot_enable = module.params['secure_boot_enable'] + secure_boot_enable = module.params["secure_boot_enable"] # Volume creation options - volume_details = module.params['volume_details'] - storage_subsystem_id = module.params['storage_subsystem_id'] - storage_none_volume_deletion = module.params['storage_none_volume_deletion'] + volume_details = module.params["volume_details"] + storage_subsystem_id = module.params["storage_subsystem_id"] + storage_none_volume_deletion = module.params["storage_none_volume_deletion"] # Power Restore Policy - power_restore_policy = module.params['power_restore_policy'] + power_restore_policy = module.params["power_restore_policy"] # Build root URI root_uri = f"https://{module.params['baseuri']}" - rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) + rf_utils = RedfishUtils( + creds, + root_uri, + timeout, + module, + resource_id=resource_id, + data_modification=True, + strip_etag_quotes=strip_etag_quotes, + ) # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}")) + module.fail_json( + msg=to_native(f"Invalid Category '{category}'. Valid Categories = {list(CATEGORY_COMMANDS_ALL.keys())}") + ) # Check that all commands are valid for cmd in command_list: # Fail if even one command given is invalid if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}")) + module.fail_json( + msg=to_native(f"Invalid Command '{cmd}'. 
Valid Commands = {CATEGORY_COMMANDS_ALL[category]}") + ) # Organize by Categories / Commands if category == "Systems": # execute only if we find a System resource result = rf_utils._find_systems_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) for command in command_list: if command == "SetBiosDefaultSettings": @@ -551,12 +565,12 @@ def main(): elif category == "Manager": # execute only if we find a Manager service resource result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) for command in command_list: if command == "SetNetworkProtocols": - result = rf_utils.set_network_protocols(module.params['network_protocols']) + result = rf_utils.set_network_protocols(module.params["network_protocols"]) elif command == "SetManagerNic": result = rf_utils.set_manager_nic(nic_addr, nic_config) elif command == "SetHostInterface": @@ -567,22 +581,22 @@ def main(): elif category == "Sessions": # execute only if we find a Sessions resource result = rf_utils._find_sessionservice_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) for command in command_list: if command == "SetSessionService": result = rf_utils.set_session_service(sessions_config) # Return data back or fail with proper message - if result['ret'] is True: - if result.get('warning'): - module.warn(to_native(result['warning'])) + if result["ret"] is True: + if result.get("warning"): + module.warn(to_native(result["warning"])) - module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) + module.exit_json(changed=result["changed"], msg=to_native(result["msg"])) else: - module.fail_json(msg=to_native(result['msg'])) + module.fail_json(msg=to_native(result["msg"])) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/redfish_info.py b/plugins/modules/redfish_info.py index 6823a00aceb..361e7dfa997 100644 --- a/plugins/modules/redfish_info.py +++ b/plugins/modules/redfish_info.py @@ -398,22 +398,52 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC +from ansible_collections.community.general.plugins.module_utils.redfish_utils import ( + RedfishUtils, + REDFISH_COMMON_ARGUMENT_SPEC, +) CATEGORY_COMMANDS_ALL = { - "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory", - "GetMemoryInventory", "GetNicInventory", "GetHealthReport", - "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory", - "GetBiosAttributes", "GetBootOrder", "GetBootOverride", "GetVirtualMedia", "GetBiosRegistries", - "GetPowerRestorePolicy"], - "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower", - "GetChassisThermals", "GetChassisInventory", "GetHealthReport", "GetHPEThermalConfig", "GetHPEFanPercentMin"], + "Systems": [ + "GetSystemInventory", + "GetPsuInventory", + "GetCpuInventory", + "GetMemoryInventory", + "GetNicInventory", + "GetHealthReport", + "GetStorageControllerInventory", + "GetDiskInventory", + "GetVolumeInventory", + "GetBiosAttributes", + "GetBootOrder", + "GetBootOverride", + "GetVirtualMedia", + "GetBiosRegistries", + "GetPowerRestorePolicy", + ], + 
"Chassis": [ + "GetFanInventory", + "GetPsuInventory", + "GetChassisPower", + "GetChassisThermals", + "GetChassisInventory", + "GetHealthReport", + "GetHPEThermalConfig", + "GetHPEFanPercentMin", + ], "Accounts": ["ListUsers", "GetAccountServiceConfig"], "Sessions": ["GetSessions"], - "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory", - "GetUpdateStatus"], - "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols", - "GetHealthReport", "GetHostInterfaces", "GetManagerInventory", "GetServiceIdentification"], + "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory", "GetUpdateStatus"], + "Manager": [ + "GetManagerNicInventory", + "GetVirtualMedia", + "GetLogs", + "GetNetworkProtocols", + "GetHealthReport", + "GetHostInterfaces", + "GetManagerInventory", + "GetServiceIdentification", + ], "Service": ["CheckAvailability"], } @@ -432,13 +462,13 @@ def main(): result = {} category_list = [] argument_spec = dict( - category=dict(type='list', elements='str', default=['Systems']), - command=dict(type='list', elements='str'), + category=dict(type="list", elements="str", default=["Systems"]), + command=dict(type="list", elements="str"), baseuri=dict(required=True), username=dict(), password=dict(no_log=True), auth_token=dict(no_log=True), - timeout=dict(type='int', default=60), + timeout=dict(type="int", default=60), update_handle=dict(), manager=dict(), ) @@ -446,56 +476,54 @@ def main(): module = AnsibleModule( argument_spec, required_together=[ - ('username', 'password'), + ("username", "password"), ], required_one_of=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], mutually_exclusive=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], supports_check_mode=True, ) # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} + creds = {"user": module.params["username"], "pswd": module.params["password"], "token": module.params["auth_token"]} # timeout - timeout = module.params['timeout'] + timeout = module.params["timeout"] # update handle - update_handle = module.params['update_handle'] + update_handle = module.params["update_handle"] # manager - manager = module.params['manager'] + manager = module.params["manager"] # Build root URI root_uri = f"https://{module.params['baseuri']}" rf_utils = RedfishUtils(creds, root_uri, timeout, module) # Build Category list - if "all" in module.params['category']: + if "all" in module.params["category"]: for entry in CATEGORY_COMMANDS_ALL: category_list.append(entry) else: # one or more categories specified - category_list = module.params['category'] + category_list = module.params["category"] for category in category_list: command_list = [] # Build Command list for each Category if category in CATEGORY_COMMANDS_ALL: - if not module.params['command']: + if not module.params["command"]: # True if we don't specify a command --> use default command_list.append(CATEGORY_COMMANDS_DEFAULT[category]) - elif "all" in module.params['command']: + elif "all" in module.params["command"]: for entry in range(len(CATEGORY_COMMANDS_ALL[category])): command_list.append(CATEGORY_COMMANDS_ALL[category][entry]) # one or more commands else: - command_list = module.params['command'] + command_list = module.params["command"] # Verify that all commands are valid for cmd in command_list: # Fail if even one command given is invalid @@ -515,8 
+543,8 @@ def main(): elif category == "Systems": # execute only if we find a Systems resource resource = rf_utils._find_systems_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) + if resource["ret"] is False: + module.fail_json(msg=resource["msg"]) for command in command_list: if command == "GetSystemInventory": @@ -551,8 +579,8 @@ def main(): elif category == "Chassis": # execute only if we find Chassis resource resource = rf_utils._find_chassis_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) + if resource["ret"] is False: + module.fail_json(msg=resource["msg"]) for command in command_list: if command == "GetFanInventory": @@ -575,8 +603,8 @@ def main(): elif category == "Accounts": # execute only if we find an Account service resource resource = rf_utils._find_accountservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) + if resource["ret"] is False: + module.fail_json(msg=resource["msg"]) for command in command_list: if command == "ListUsers": @@ -587,8 +615,8 @@ def main(): elif category == "Update": # execute only if we find UpdateService resources resource = rf_utils._find_updateservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) + if resource["ret"] is False: + module.fail_json(msg=resource["msg"]) for command in command_list: if command == "GetFirmwareInventory": @@ -603,8 +631,8 @@ def main(): elif category == "Sessions": # execute only if we find SessionService resources resource = rf_utils._find_sessionservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) + if resource["ret"] is False: + module.fail_json(msg=resource["msg"]) for command in command_list: if command == "GetSessions": @@ -613,8 +641,8 @@ def main(): elif category == "Manager": # execute only if we find a Manager service resource resource = rf_utils._find_managers_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) + if resource["ret"] is False: + module.fail_json(msg=resource["msg"]) for command in command_list: if command == "GetManagerNicInventory": @@ -638,5 +666,5 @@ def main(): module.exit_json(redfish_facts=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/redhat_subscription.py b/plugins/modules/redhat_subscription.py index 71c382f0dae..b424131d538 100644 --- a/plugins/modules/redhat_subscription.py +++ b/plugins/modules/redhat_subscription.py @@ -285,14 +285,13 @@ class Rhsm: - REDHAT_REPO = "/etc/yum.repos.d/redhat.repo" def __init__(self, module): self.module = module def update_plugin_conf(self, plugin, enabled=True): - plugin_conf = f'/etc/yum/pluginconf.d/{plugin}.conf' + plugin_conf = f"/etc/yum/pluginconf.d/{plugin}.conf" if isfile(plugin_conf): tmpfd, tmpfile = tempfile.mkstemp() @@ -301,41 +300,41 @@ def update_plugin_conf(self, plugin, enabled=True): cfg.read([tmpfile]) if enabled: - cfg.set('main', 'enabled', '1') + cfg.set("main", "enabled", "1") else: - cfg.set('main', 'enabled', '0') + cfg.set("main", "enabled", "0") - with open(tmpfile, 'w+') as fd: + with open(tmpfile, "w+") as fd: cfg.write(fd) self.module.atomic_move(tmpfile, plugin_conf) def enable(self): - ''' - Enable the system to receive updates from subscription-manager. - This involves updating affected yum plugins and removing any - conflicting yum repositories. - ''' + """ + Enable the system to receive updates from subscription-manager. 
+ This involves updating affected yum plugins and removing any + conflicting yum repositories. + """ # Remove any existing redhat.repo if isfile(self.REDHAT_REPO): unlink(self.REDHAT_REPO) - self.update_plugin_conf('rhnplugin', False) - self.update_plugin_conf('subscription-manager', True) + self.update_plugin_conf("rhnplugin", False) + self.update_plugin_conf("subscription-manager", True) def configure(self, **kwargs): - ''' - Configure the system as directed for registration with RHSM - Raises: - * Exception - if error occurs while running command - ''' + """ + Configure the system as directed for registration with RHSM + Raises: + * Exception - if error occurs while running command + """ - args = [SUBMAN_CMD, 'config'] + args = [SUBMAN_CMD, "config"] # Pass supplied **kwargs as parameters to subscription-manager. Ignore # non-configuration parameters and replace '_' with '.'. For example, # 'server_hostname' becomes '--server.hostname'. options = [] for k, v in sorted(kwargs.items()): - if re.search(r'^(server|rhsm)_', k) and v is not None: + if re.search(r"^(server|rhsm)_", k) and v is not None: options.append(f"--{k.replace('_', '.', 1)}={v}") # When there is nothing to configure, then it is not necessary @@ -350,14 +349,14 @@ def configure(self, **kwargs): @property def is_registered(self): - ''' - Determine whether the current system - Returns: - * Boolean - whether the current system is currently registered to - RHSM. - ''' - - args = [SUBMAN_CMD, 'identity'] + """ + Determine whether the current system + Returns: + * Boolean - whether the current system is currently registered to + RHSM. + """ + + args = [SUBMAN_CMD, "identity"] rc, stdout, stderr = self.module.run_command(args, check_rc=False) if rc == 0: return True @@ -381,12 +380,11 @@ def str2int(s, default=0): distro_version = tuple(str2int(p) for p in distro.version_parts()) # subscription-manager in any supported Fedora version has the interface. - if distro_id == 'fedora': + if distro_id == "fedora": return True # Any other distro: assume it is EL; # the D-Bus interface was added to subscription-manager in RHEL 7.4. - return (distro_version[0] == 7 and distro_version[1] >= 4) or \ - distro_version[0] >= 8 + return (distro_version[0] == 7 and distro_version[1] >= 4) or distro_version[0] >= 8 def _can_connect_to_dbus(self): """ @@ -401,108 +399,164 @@ def _can_connect_to_dbus(self): # sorry, I guess... 
import dbus except ImportError: - self.module.debug('dbus Python module not available, will use CLI') + self.module.debug("dbus Python module not available, will use CLI") return False try: bus = dbus.SystemBus() - msg = dbus.lowlevel.SignalMessage('/', 'com.example', 'test') + msg = dbus.lowlevel.SignalMessage("/", "com.example", "test") bus.send_message(msg) bus.flush() except dbus.exceptions.DBusException as e: - self.module.debug(f'Failed to connect to system D-Bus bus, will use CLI: {e}') + self.module.debug(f"Failed to connect to system D-Bus bus, will use CLI: {e}") return False - self.module.debug('Verified system D-Bus bus as usable') + self.module.debug("Verified system D-Bus bus as usable") return True - def register(self, was_registered, username, password, token, auto_attach, activationkey, org_id, - consumer_type, consumer_name, consumer_id, force_register, environment, - release): - ''' - Register the current system to the provided RHSM or Red Hat Satellite - or Katello server + def register( + self, + was_registered, + username, + password, + token, + auto_attach, + activationkey, + org_id, + consumer_type, + consumer_name, + consumer_id, + force_register, + environment, + release, + ): + """ + Register the current system to the provided RHSM or Red Hat Satellite + or Katello server - Raises: - * Exception - if any error occurs during the registration - ''' + Raises: + * Exception - if any error occurs during the registration + """ # There is no support for token-based registration in the D-Bus API # of rhsm, so always use the CLI in that case; # also, since the specified environments are names, and the D-Bus APIs # require IDs for the environments, use the CLI also in that case - if (not token and not environment and self._has_dbus_interface() and - self._can_connect_to_dbus()): - self._register_using_dbus(was_registered, username, password, auto_attach, - activationkey, org_id, consumer_type, - consumer_name, consumer_id, - force_register, environment, release) + if not token and not environment and self._has_dbus_interface() and self._can_connect_to_dbus(): + self._register_using_dbus( + was_registered, + username, + password, + auto_attach, + activationkey, + org_id, + consumer_type, + consumer_name, + consumer_id, + force_register, + environment, + release, + ) return - self._register_using_cli(username, password, token, auto_attach, - activationkey, org_id, consumer_type, - consumer_name, consumer_id, - force_register, environment, release) - - def _register_using_cli(self, username, password, token, auto_attach, - activationkey, org_id, consumer_type, consumer_name, - consumer_id, force_register, environment, release): - ''' - Register using the 'subscription-manager' command - - Raises: - * Exception - if error occurs while running command - ''' - args = [SUBMAN_CMD, 'register'] + self._register_using_cli( + username, + password, + token, + auto_attach, + activationkey, + org_id, + consumer_type, + consumer_name, + consumer_id, + force_register, + environment, + release, + ) + + def _register_using_cli( + self, + username, + password, + token, + auto_attach, + activationkey, + org_id, + consumer_type, + consumer_name, + consumer_id, + force_register, + environment, + release, + ): + """ + Register using the 'subscription-manager' command + + Raises: + * Exception - if error occurs while running command + """ + args = [SUBMAN_CMD, "register"] # Generate command arguments if force_register: - args.extend(['--force']) + args.extend(["--force"]) if org_id: - 
args.extend(['--org', org_id]) + args.extend(["--org", org_id]) if auto_attach: - args.append('--auto-attach') + args.append("--auto-attach") if consumer_type: - args.extend(['--type', consumer_type]) + args.extend(["--type", consumer_type]) if consumer_name: - args.extend(['--name', consumer_name]) + args.extend(["--name", consumer_name]) if consumer_id: - args.extend(['--consumerid', consumer_id]) + args.extend(["--consumerid", consumer_id]) if environment: - args.extend(['--environment', environment]) + args.extend(["--environment", environment]) if activationkey: - args.extend(['--activationkey', activationkey]) + args.extend(["--activationkey", activationkey]) elif token: - args.extend(['--token', token]) + args.extend(["--token", token]) else: if username: - args.extend(['--username', username]) + args.extend(["--username", username]) if password: - args.extend(['--password', password]) + args.extend(["--password", password]) if release: - args.extend(['--release', release]) + args.extend(["--release", release]) rc, stderr, stdout = self.module.run_command(args, check_rc=True, expand_user_and_vars=False) - def _register_using_dbus(self, was_registered, username, password, auto_attach, - activationkey, org_id, consumer_type, consumer_name, - consumer_id, force_register, environment, release): - ''' - Register using D-Bus (connecting to the rhsm service) + def _register_using_dbus( + self, + was_registered, + username, + password, + auto_attach, + activationkey, + org_id, + consumer_type, + consumer_name, + consumer_id, + force_register, + environment, + release, + ): + """ + Register using D-Bus (connecting to the rhsm service) - Raises: - * Exception - if error occurs during the D-Bus communication - ''' + Raises: + * Exception - if error occurs during the D-Bus communication + """ import dbus - SUBSCRIPTION_MANAGER_LOCALE = 'C' + SUBSCRIPTION_MANAGER_LOCALE = "C" # Seconds to wait for Registration to complete over DBus; # 10 minutes should be a pretty generous timeout. REGISTRATION_TIMEOUT = 600 @@ -522,8 +576,8 @@ def str2int(s, default=0): # - with subscription-manager < 1.26.5-1 (in RHEL < 8.2); # fixed later by https://github.com/candlepin/subscription-manager/pull/2175 # - sporadically: https://bugzilla.redhat.com/show_bug.cgi?id=2049296 - if distro_id == 'fedora' or distro_version[0] >= 7: - cmd = ['systemctl', 'stop', 'rhsm'] + if distro_id == "fedora" or distro_version[0] >= 7: + cmd = ["systemctl", "stop", "rhsm"] self.module.run_command(cmd, check_rc=True, expand_user_and_vars=False) # While there is a 'force' options for the registration, it is actually @@ -535,10 +589,11 @@ def str2int(s, default=0): # Match it on RHEL, since we know about it; other distributions # will need their own logic. dbus_force_option_works = False - if (distro_id == 'rhel' and - ((distro_version[0] == 8 and distro_version[1] >= 8) or - (distro_version[0] == 9 and distro_version[1] >= 2) or - distro_version[0] > 9)): + if distro_id == "rhel" and ( + (distro_version[0] == 8 and distro_version[1] >= 8) + or (distro_version[0] == 9 and distro_version[1] >= 2) + or distro_version[0] > 9 + ): dbus_force_option_works = True # We need to use the 'enable_content' D-Bus option to ensure that # content is enabled; sadly the option is available depending on the @@ -546,38 +601,42 @@ def str2int(s, default=0): # for registration. dbus_has_enable_content_option = False if activationkey: + def supports_enable_content_for_activation_keys(): # subscription-manager in Fedora >= 41 has the new option. 
- if distro_id == 'fedora' and distro_version[0] >= 41: + if distro_id == "fedora" and distro_version[0] >= 41: return True # Assume EL distros here. if distro_version[0] >= 10: return True return False + dbus_has_enable_content_option = supports_enable_content_for_activation_keys() else: + def supports_enable_content_for_credentials(): # subscription-manager in any supported Fedora version # has the new option. - if distro_id == 'fedora': + if distro_id == "fedora": return True # Check for RHEL 8 >= 8.6, or RHEL >= 9. - if distro_id == 'rhel' and \ - ((distro_version[0] == 8 and distro_version[1] >= 6) or - distro_version[0] >= 9): + if distro_id == "rhel" and ( + (distro_version[0] == 8 and distro_version[1] >= 6) or distro_version[0] >= 9 + ): return True # CentOS: similar checks as for RHEL, with one extra bit: # if the 2nd part of the version is empty, it means it is # CentOS Stream, and thus we can assume it has the latest # version of subscription-manager. - if distro_id == 'centos' and \ - ((distro_version[0] == 8 and - (distro_version[1] >= 6 or distro_version_parts[1] == '')) or - distro_version[0] >= 9): + if distro_id == "centos" and ( + (distro_version[0] == 8 and (distro_version[1] >= 6 or distro_version_parts[1] == "")) + or distro_version[0] >= 9 + ): return True # Unknown or old distro: assume it does not support # the new option. return False + dbus_has_enable_content_option = supports_enable_content_for_credentials() if force_register and not dbus_force_option_works and was_registered: @@ -593,30 +652,30 @@ def supports_enable_content_for_credentials(): def supports_option_consumer_type(): # subscription-manager in any supported Fedora version # has the new option. - if distro_id == 'fedora': + if distro_id == "fedora": return True # Check for RHEL 9 >= 9.2, or RHEL >= 10. - if distro_id == 'rhel' and \ - ((distro_version[0] == 9 and distro_version[1] >= 2) or - distro_version[0] >= 10): + if distro_id == "rhel" and ( + (distro_version[0] == 9 and distro_version[1] >= 2) or distro_version[0] >= 10 + ): return True # CentOS: since the change was only done in EL 9, then there is # only CentOS Stream for 9, and thus we can assume it has the # latest version of subscription-manager. - if distro_id == 'centos' and distro_version[0] >= 9: + if distro_id == "centos" and distro_version[0] >= 9: return True # Unknown or old distro: assume it does not support # the new option. return False - consumer_type_key = 'type' + consumer_type_key = "type" if supports_option_consumer_type(): - consumer_type_key = 'consumer_type' + consumer_type_key = "consumer_type" register_opts[consumer_type_key] = consumer_type if consumer_name: - register_opts['name'] = consumer_name + register_opts["name"] = consumer_name if consumer_id: - register_opts['consumerid'] = consumer_id + register_opts["consumerid"] = consumer_id if environment: # The option for environments used to be 'environment' in versions # of RHEL before 8.6, and then it changed to 'environments'; since @@ -625,52 +684,51 @@ def supports_option_consumer_type(): def supports_option_environments(): # subscription-manager in any supported Fedora version # has the new option. - if distro_id == 'fedora': + if distro_id == "fedora": return True # Check for RHEL 8 >= 8.6, or RHEL >= 9. 
- if distro_id == 'rhel' and \ - ((distro_version[0] == 8 and distro_version[1] >= 6) or - distro_version[0] >= 9): + if distro_id == "rhel" and ( + (distro_version[0] == 8 and distro_version[1] >= 6) or distro_version[0] >= 9 + ): return True # CentOS: similar checks as for RHEL, with one extra bit: # if the 2nd part of the version is empty, it means it is # CentOS Stream, and thus we can assume it has the latest # version of subscription-manager. - if distro_id == 'centos' and \ - ((distro_version[0] == 8 and - (distro_version[1] >= 6 or distro_version_parts[1] == '')) or - distro_version[0] >= 9): + if distro_id == "centos" and ( + (distro_version[0] == 8 and (distro_version[1] >= 6 or distro_version_parts[1] == "")) + or distro_version[0] >= 9 + ): return True # Unknown or old distro: assume it does not support # the new option. return False - environment_key = 'environment' + environment_key = "environment" if supports_option_environments(): - environment_key = 'environments' + environment_key = "environments" register_opts[environment_key] = environment if force_register and dbus_force_option_works and was_registered: - register_opts['force'] = True + register_opts["force"] = True if dbus_has_enable_content_option: - register_opts['enable_content'] = "1" + register_opts["enable_content"] = "1" # Wrap it as proper D-Bus dict - register_opts = dbus.Dictionary(register_opts, signature='sv', variant_level=1) + register_opts = dbus.Dictionary(register_opts, signature="sv", variant_level=1) connection_opts = {} # Wrap it as proper D-Bus dict - connection_opts = dbus.Dictionary(connection_opts, signature='sv', variant_level=1) + connection_opts = dbus.Dictionary(connection_opts, signature="sv", variant_level=1) bus = dbus.SystemBus() - register_server = bus.get_object('com.redhat.RHSM1', - '/com/redhat/RHSM1/RegisterServer') + register_server = bus.get_object("com.redhat.RHSM1", "/com/redhat/RHSM1/RegisterServer") address = register_server.Start( SUBSCRIPTION_MANAGER_LOCALE, - dbus_interface='com.redhat.RHSM1.RegisterServer', + dbus_interface="com.redhat.RHSM1.RegisterServer", ) try: # Use the private bus to register the system - self.module.debug('Connecting to the private DBus') + self.module.debug("Connecting to the private DBus") private_bus = dbus.connection.Connection(address) try: @@ -683,17 +741,17 @@ def supports_option_environments(): SUBSCRIPTION_MANAGER_LOCALE, ) private_bus.call_blocking( - 'com.redhat.RHSM1', - '/com/redhat/RHSM1/Register', - 'com.redhat.RHSM1.Register', - 'RegisterWithActivationKeys', - 'sasa{sv}a{sv}s', + "com.redhat.RHSM1", + "/com/redhat/RHSM1/Register", + "com.redhat.RHSM1.Register", + "RegisterWithActivationKeys", + "sasa{sv}a{sv}s", args, timeout=REGISTRATION_TIMEOUT, ) else: args = ( - org_id or '', + org_id or "", username, password, register_opts, @@ -701,11 +759,11 @@ def supports_option_environments(): SUBSCRIPTION_MANAGER_LOCALE, ) private_bus.call_blocking( - 'com.redhat.RHSM1', - '/com/redhat/RHSM1/Register', - 'com.redhat.RHSM1.Register', - 'Register', - 'sssa{sv}a{sv}s', + "com.redhat.RHSM1", + "/com/redhat/RHSM1/Register", + "com.redhat.RHSM1.Register", + "Register", + "sssa{sv}a{sv}s", args, timeout=REGISTRATION_TIMEOUT, ) @@ -713,7 +771,7 @@ def supports_option_environments(): except dbus.exceptions.DBusException as e: # Sometimes we get NoReply but the registration has succeeded. # Check the registration status before deciding if this is an error. 
- if e.get_dbus_name() == 'org.freedesktop.DBus.Error.NoReply': + if e.get_dbus_name() == "org.freedesktop.DBus.Error.NoReply": if not self.is_registered(): # Host is not registered so re-raise the error raise @@ -722,37 +780,36 @@ def supports_option_environments(): # Host was registered so continue finally: # Always shut down the private bus - self.module.debug('Shutting down private DBus instance') + self.module.debug("Shutting down private DBus instance") register_server.Stop( SUBSCRIPTION_MANAGER_LOCALE, - dbus_interface='com.redhat.RHSM1.RegisterServer', + dbus_interface="com.redhat.RHSM1.RegisterServer", ) # Make sure to refresh all the local data: this will fetch all the # certificates, update redhat.repo, etc. - self.module.run_command([SUBMAN_CMD, 'refresh'], - check_rc=True, expand_user_and_vars=False) + self.module.run_command([SUBMAN_CMD, "refresh"], check_rc=True, expand_user_and_vars=False) if auto_attach: - args = [SUBMAN_CMD, 'attach', '--auto'] + args = [SUBMAN_CMD, "attach", "--auto"] self.module.run_command(args, check_rc=True, expand_user_and_vars=False) # There is no support for setting the release via D-Bus, so invoke # the CLI for this. if release: - args = [SUBMAN_CMD, 'release', '--set', release] + args = [SUBMAN_CMD, "release", "--set", release] self.module.run_command(args, check_rc=True, expand_user_and_vars=False) def unsubscribe(self, serials=None): - ''' - Unsubscribe a system from subscribed channels - Args: - serials(list or None): list of serials to unsubscribe. If - serials is none or an empty list, then - all subscribed channels will be removed. - Raises: - * Exception - if error occurs while running command - ''' + """ + Unsubscribe a system from subscribed channels + Args: + serials(list or None): list of serials to unsubscribe. If + serials is none or an empty list, then + all subscribed channels will be removed. 
+ Raises: + * Exception - if error occurs while running command + """ items = [] if serials is not None and serials: items = [f"--serial={s}" for s in serials] @@ -760,20 +817,20 @@ def unsubscribe(self, serials=None): items = ["--all"] if items: - args = [SUBMAN_CMD, 'remove'] + items + args = [SUBMAN_CMD, "remove"] + items rc, stderr, stdout = self.module.run_command(args, check_rc=True) return serials def unregister(self): - ''' - Unregister a currently registered system - Raises: - * Exception - if error occurs while running command - ''' - args = [SUBMAN_CMD, 'unregister'] + """ + Unregister a currently registered system + Raises: + * Exception - if error occurs while running command + """ + args = [SUBMAN_CMD, "unregister"] rc, stderr, stdout = self.module.run_command(args, check_rc=True) - self.update_plugin_conf('rhnplugin', False) - self.update_plugin_conf('subscription-manager', False) + self.update_plugin_conf("rhnplugin", False) + self.update_plugin_conf("subscription-manager", False) def subscribe_by_pool_ids(self, pool_ids): """ @@ -785,12 +842,12 @@ def subscribe_by_pool_ids(self, pool_ids): for pool_id, quantity in sorted(pool_ids.items()): if pool_id in available_pool_ids: - args = [SUBMAN_CMD, 'attach', '--pool', pool_id] + args = [SUBMAN_CMD, "attach", "--pool", pool_id] if quantity is not None: - args.extend(['--quantity', to_native(quantity)]) + args.extend(["--quantity", to_native(quantity)]) rc, stderr, stdout = self.module.run_command(args, check_rc=True) else: - self.module.fail_json(msg=f'Pool ID: {pool_id} not in list of available pools') + self.module.fail_json(msg=f"Pool ID: {pool_id} not in list of available pools") return pool_ids def update_subscriptions_by_pool_ids(self, pool_ids): @@ -820,21 +877,20 @@ def update_subscriptions_by_pool_ids(self, pool_ids): if missing_pools or serials: changed = True - return {'changed': changed, 'subscribed_pool_ids': list(missing_pools.keys()), - 'unsubscribed_serials': serials} + return {"changed": changed, "subscribed_pool_ids": list(missing_pools.keys()), "unsubscribed_serials": serials} def sync_syspurpose(self): """ Try to synchronize syspurpose attributes with server """ - args = [SUBMAN_CMD, 'status'] + args = [SUBMAN_CMD, "status"] rc, stdout, stderr = self.module.run_command(args, check_rc=False) class RhsmPool: - ''' - Convenience class for housing subscription information - ''' + """ + Convenience class for housing subscription information + """ def __init__(self, module, **kwargs): self.module = module @@ -842,13 +898,13 @@ def __init__(self, module, **kwargs): setattr(self, k, v) def __str__(self): - return str(self.__getattribute__('_name')) + return str(self.__getattribute__("_name")) def get_pool_id(self): - return getattr(self, 'PoolId', getattr(self, 'PoolID')) + return getattr(self, "PoolId", getattr(self, "PoolID")) def get_quantity_used(self): - return int(getattr(self, 'QuantityUsed')) + return int(getattr(self, "QuantityUsed")) def subscribe(self): args = f"subscription-manager attach --pool {self.get_pool_id()}" @@ -861,7 +917,7 @@ def subscribe(self): class RhsmPools: """ - This class is used for manipulating pools subscriptions with RHSM + This class is used for manipulating pools subscriptions with RHSM """ def __init__(self, module, consumed=False): @@ -873,32 +929,32 @@ def __iter__(self): def _load_product_list(self, consumed=False): """ - Loads list of all available or consumed pools for system in data structure + Loads list of all available or consumed pools for system in data structure - 
Args: - consumed(bool): if True list consumed pools, else list available pools (default False) + Args: + consumed(bool): if True list consumed pools, else list available pools (default False) """ args = "subscription-manager list" if consumed: args += " --consumed" else: args += " --available" - lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + lang_env = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C") rc, stdout, stderr = self.module.run_command(args, check_rc=True, environ_update=lang_env) products = [] - for line in stdout.split('\n'): + for line in stdout.split("\n"): # Remove leading+trailing whitespace line = line.strip() # An empty line implies the end of a output group if len(line) == 0: continue # If a colon ':' is found, parse - elif ':' in line: - (key, value) = line.split(':', 1) + elif ":" in line: + (key, value) = line.split(":", 1) key = key.strip().replace(" ", "") # To unify value = value.strip() - if key in ['ProductName', 'SubscriptionName']: + if key in ["ProductName", "SubscriptionName"]: # Remember the name for later processing products.append(RhsmPool(self.module, _name=value, key=value)) elif products: @@ -906,22 +962,22 @@ def _load_product_list(self, consumed=False): products[-1].__setattr__(key, value) # FIXME - log some warning? # else: - # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) + # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) return products - def filter_pools(self, regexp='^$'): - ''' - Return a list of RhsmPools whose pool id matches the provided regular expression - ''' + def filter_pools(self, regexp="^$"): + """ + Return a list of RhsmPools whose pool id matches the provided regular expression + """ r = re.compile(regexp) for product in self.products: if r.search(product.get_pool_id()): yield product - def filter_products(self, regexp='^$'): - ''' - Return a list of RhsmPools whose product name matches the provided regular expression - ''' + def filter_products(self, regexp="^$"): + """ + Return a list of RhsmPools whose product name matches the provided regular expression + """ r = re.compile(regexp) for product in self.products: if r.search(product._name): @@ -935,7 +991,7 @@ class SysPurpose: SYSPURPOSE_FILE_PATH = "/etc/rhsm/syspurpose/syspurpose.json" - ALLOWED_ATTRIBUTES = ['role', 'usage', 'service_level_agreement', 'addons'] + ALLOWED_ATTRIBUTES = ["role", "usage", "service_level_agreement", "addons"] def __init__(self, path=None): """ @@ -953,7 +1009,7 @@ def update_syspurpose(self, new_syspurpose): if key in self.ALLOWED_ATTRIBUTES: if value is not None: syspurpose[key] = value - elif key == 'sync': + elif key == "sync": pass else: raise KeyError(f"Attribute: {key} not in list of allowed attributes: {self.ALLOWED_ATTRIBUTES}") @@ -994,89 +1050,90 @@ def _read_syspurpose(self): def main(): - # Note: the default values for parameters are: # 'type': 'str', 'default': None, 'required': False # So there is no need to repeat these values for each parameter. 
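    # A minimal sketch of that shorthand (hypothetical option name; assumes
    # AnsibleModule's documented defaults for omitted fields, i.e.
    # type='str', default=None, required=False):
    #
    #     spec_short = {"org_id": {}}
    #     spec_full = {"org_id": {"type": "str", "default": None, "required": False}}
    #     # Both declare the same optional string parameter.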
module = AnsibleModule( argument_spec={ - 'state': {'default': 'present', 'choices': ['present', 'absent']}, - 'username': {}, - 'password': {'no_log': True}, - 'token': {'no_log': True}, - 'server_hostname': {}, - 'server_insecure': {}, - 'server_prefix': {}, - 'server_port': {}, - 'rhsm_baseurl': {}, - 'rhsm_repo_ca_cert': {}, - 'auto_attach': {'type': 'bool'}, - 'activationkey': {'no_log': True}, - 'org_id': {}, - 'environment': {}, - 'pool_ids': {'default': [], 'type': 'list', 'elements': 'raw'}, - 'consumer_type': {}, - 'consumer_name': {}, - 'consumer_id': {}, - 'force_register': {'default': False, 'type': 'bool'}, - 'server_proxy_hostname': {}, - 'server_proxy_scheme': {}, - 'server_proxy_port': {}, - 'server_proxy_user': {}, - 'server_proxy_password': {'no_log': True}, - 'release': {}, - 'syspurpose': { - 'type': 'dict', - 'options': { - 'role': {}, - 'usage': {}, - 'service_level_agreement': {}, - 'addons': {'type': 'list', 'elements': 'str'}, - 'sync': {'type': 'bool', 'default': False} - } - } + "state": {"default": "present", "choices": ["present", "absent"]}, + "username": {}, + "password": {"no_log": True}, + "token": {"no_log": True}, + "server_hostname": {}, + "server_insecure": {}, + "server_prefix": {}, + "server_port": {}, + "rhsm_baseurl": {}, + "rhsm_repo_ca_cert": {}, + "auto_attach": {"type": "bool"}, + "activationkey": {"no_log": True}, + "org_id": {}, + "environment": {}, + "pool_ids": {"default": [], "type": "list", "elements": "raw"}, + "consumer_type": {}, + "consumer_name": {}, + "consumer_id": {}, + "force_register": {"default": False, "type": "bool"}, + "server_proxy_hostname": {}, + "server_proxy_scheme": {}, + "server_proxy_port": {}, + "server_proxy_user": {}, + "server_proxy_password": {"no_log": True}, + "release": {}, + "syspurpose": { + "type": "dict", + "options": { + "role": {}, + "usage": {}, + "service_level_agreement": {}, + "addons": {"type": "list", "elements": "str"}, + "sync": {"type": "bool", "default": False}, + }, + }, }, - required_together=[['username', 'password'], - ['server_proxy_hostname', 'server_proxy_port'], - ['server_proxy_user', 'server_proxy_password']], - mutually_exclusive=[['activationkey', 'username'], - ['activationkey', 'token'], - ['token', 'username'], - ['activationkey', 'consumer_id'], - ['activationkey', 'environment'], - ['activationkey', 'auto_attach']], - required_if=[['force_register', True, ['username', 'activationkey', 'token'], True]], + required_together=[ + ["username", "password"], + ["server_proxy_hostname", "server_proxy_port"], + ["server_proxy_user", "server_proxy_password"], + ], + mutually_exclusive=[ + ["activationkey", "username"], + ["activationkey", "token"], + ["token", "username"], + ["activationkey", "consumer_id"], + ["activationkey", "environment"], + ["activationkey", "auto_attach"], + ], + required_if=[["force_register", True, ["username", "activationkey", "token"], True]], ) if getuid() != 0: - module.fail_json( - msg="Interacting with subscription-manager requires root permissions ('become: true')" - ) + module.fail_json(msg="Interacting with subscription-manager requires root permissions ('become: true')") # Load RHSM configuration from file rhsm = Rhsm(module) - state = module.params['state'] - username = module.params['username'] - password = module.params['password'] - token = module.params['token'] - server_hostname = module.params['server_hostname'] - server_insecure = module.params['server_insecure'] - server_prefix = module.params['server_prefix'] - server_port = 
module.params['server_port'] - rhsm_baseurl = module.params['rhsm_baseurl'] - rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert'] - auto_attach = module.params['auto_attach'] - activationkey = module.params['activationkey'] - org_id = module.params['org_id'] + state = module.params["state"] + username = module.params["username"] + password = module.params["password"] + token = module.params["token"] + server_hostname = module.params["server_hostname"] + server_insecure = module.params["server_insecure"] + server_prefix = module.params["server_prefix"] + server_port = module.params["server_port"] + rhsm_baseurl = module.params["rhsm_baseurl"] + rhsm_repo_ca_cert = module.params["rhsm_repo_ca_cert"] + auto_attach = module.params["auto_attach"] + activationkey = module.params["activationkey"] + org_id = module.params["org_id"] if activationkey and not org_id: - module.fail_json(msg='org_id is required when using activationkey') - environment = module.params['environment'] + module.fail_json(msg="org_id is required when using activationkey") + environment = module.params["environment"] pool_ids = {} - for value in module.params['pool_ids']: + for value in module.params["pool_ids"]: if isinstance(value, dict): if len(value) != 1: - module.fail_json(msg='Unable to parse pool_ids option.') + module.fail_json(msg="Unable to parse pool_ids option.") pool_id, quantity = list(value.items())[0] else: pool_id, quantity = value, None @@ -1085,15 +1142,15 @@ def main(): consumer_name = module.params["consumer_name"] consumer_id = module.params["consumer_id"] force_register = module.params["force_register"] - server_proxy_hostname = module.params['server_proxy_hostname'] - server_proxy_port = module.params['server_proxy_port'] - server_proxy_user = module.params['server_proxy_user'] - server_proxy_password = module.params['server_proxy_password'] - release = module.params['release'] - syspurpose = module.params['syspurpose'] + server_proxy_hostname = module.params["server_proxy_hostname"] + server_proxy_port = module.params["server_proxy_port"] + server_proxy_user = module.params["server_proxy_user"] + server_proxy_password = module.params["server_proxy_password"] + release = module.params["release"] + syspurpose = module.params["syspurpose"] global SUBMAN_CMD - SUBMAN_CMD = module.get_bin_path('subscription-manager', True) + SUBMAN_CMD = module.get_bin_path("subscription-manager", True) syspurpose_changed = False if syspurpose is not None: @@ -1103,14 +1160,13 @@ def main(): module.fail_json(msg=f"Failed to update syspurpose attributes: {to_native(err)}") # Ensure system is registered - if state == 'present': - + if state == "present": # Cache the status of the system before the changes was_registered = rhsm.is_registered # Register system if was_registered and not force_register: - if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True: + if syspurpose and "sync" in syspurpose and syspurpose["sync"] is True: try: rhsm.sync_syspurpose() except Exception as e: @@ -1129,14 +1185,28 @@ def main(): module.exit_json(changed=False, msg="System already registered.") else: if not username and not activationkey and not token: - module.fail_json(msg="state is present but any of the following are missing: username, activationkey, token") + module.fail_json( + msg="state is present but any of the following are missing: username, activationkey, token" + ) try: rhsm.enable() rhsm.configure(**module.params) - rhsm.register(was_registered, username, password, token, auto_attach, activationkey, org_id, 
- consumer_type, consumer_name, consumer_id, force_register, - environment, release) - if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True: + rhsm.register( + was_registered, + username, + password, + token, + auto_attach, + activationkey, + org_id, + consumer_type, + consumer_name, + consumer_id, + force_register, + environment, + release, + ) + if syspurpose and "sync" in syspurpose and syspurpose["sync"] is True: rhsm.sync_syspurpose() if pool_ids: subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids) @@ -1145,12 +1215,14 @@ def main(): except Exception as e: module.fail_json(msg=f"Failed to register with '{server_hostname}': {to_native(e)}") else: - module.exit_json(changed=True, - msg=f"System successfully registered to '{server_hostname}'.", - subscribed_pool_ids=subscribed_pool_ids) + module.exit_json( + changed=True, + msg=f"System successfully registered to '{server_hostname}'.", + subscribed_pool_ids=subscribed_pool_ids, + ) # Ensure system is *not* registered - if state == 'absent': + if state == "absent": if not rhsm.is_registered: module.exit_json(changed=False, msg="System already unregistered.") else: @@ -1162,5 +1234,5 @@ def main(): module.exit_json(changed=True, msg=f"System successfully unregistered from {server_hostname}.") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/redis.py b/plugins/modules/redis.py index 9ef6dc29f2a..eb3d3c2297f 100644 --- a/plugins/modules/redis.py +++ b/plugins/modules/redis.py @@ -151,7 +151,10 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.formatters import human_to_bytes from ansible_collections.community.general.plugins.module_utils.redis import ( - fail_imports, redis_auth_argument_spec, redis_auth_params) + fail_imports, + redis_auth_argument_spec, + redis_auth_params, +) import re @@ -185,15 +188,16 @@ def flush(client, db=None): def main(): redis_auth_args = redis_auth_argument_spec(tls_default=False) module_args = dict( - command=dict(type='str', choices=['config', 'flush', 'replica', 'slave']), - master_host=dict(type='str'), - master_port=dict(type='int'), - replica_mode=dict(type='str', default='replica', choices=['master', 'replica', 'slave'], - aliases=["slave_mode"]), - db=dict(type='int'), - flush_mode=dict(type='str', default='all', choices=['all', 'db']), - name=dict(type='str'), - value=dict(type='str'), + command=dict(type="str", choices=["config", "flush", "replica", "slave"]), + master_host=dict(type="str"), + master_port=dict(type="int"), + replica_mode=dict( + type="str", default="replica", choices=["master", "replica", "slave"], aliases=["slave_mode"] + ), + db=dict(type="int"), + flush_mode=dict(type="str", default="all", choices=["all", "db"]), + name=dict(type="str"), + value=dict(type="str"), ) module_args.update(redis_auth_args) module = AnsibleModule( @@ -201,29 +205,29 @@ def main(): supports_check_mode=True, ) - fail_imports(module, module.params['tls']) + fail_imports(module, module.params["tls"]) redis_params = redis_auth_params(module) - command = module.params['command'] + command = module.params["command"] if command == "slave": command = "replica" # Replica Command section ----------- if command == "replica": - master_host = module.params['master_host'] - master_port = module.params['master_port'] - mode = module.params['replica_mode'] + master_host = module.params["master_host"] + master_port = module.params["master_port"] + mode = module.params["replica_mode"] if mode == "slave": mode = 
"replica" # Check if we have all the data if mode == "replica": # Only need data if we want to be replica if not master_host: - module.fail_json(msg='In replica mode master host must be provided') + module.fail_json(msg="In replica mode master host must be provided") if not master_port: - module.fail_json(msg='In replica mode master port must be provided') + module.fail_json(msg="In replica mode master port must be provided") # Connect and check r = redis.StrictRedis(**redis_params) @@ -237,7 +241,12 @@ def main(): if mode == "master" and info["role"] == "master": module.exit_json(changed=False, mode=mode) - elif mode == "replica" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port: + elif ( + mode == "replica" + and info["role"] == "slave" + and info["master_host"] == master_host + and info["master_port"] == master_port + ): status = dict( status=mode, master_host=master_host, @@ -252,24 +261,24 @@ def main(): if module.check_mode or set_replica_mode(r, master_host, master_port): info = r.info() status = { - 'status': mode, - 'master_host': master_host, - 'master_port': master_port, + "status": mode, + "master_host": master_host, + "master_port": master_port, } module.exit_json(changed=True, mode=status) else: - module.fail_json(msg='Unable to set replica mode') + module.fail_json(msg="Unable to set replica mode") else: if module.check_mode or set_master_mode(r): module.exit_json(changed=True, mode=mode) else: - module.fail_json(msg='Unable to set master mode') + module.fail_json(msg="Unable to set master mode") # flush Command section ----------- elif command == "flush": - db = module.params['db'] - mode = module.params['flush_mode'] + db = module.params["db"] + mode = module.params["flush_mode"] # Check if we have all the data if mode == "db": @@ -297,16 +306,16 @@ def main(): module.exit_json(changed=True, flushed=True, db=db) else: # Flush never fails :) module.fail_json(msg=f"Unable to flush '{db}' database") - elif command == 'config': - name = module.params['name'] + elif command == "config": + name = module.params["name"] try: # try to parse the value as if it were the memory size - if re.match(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?\s*$', module.params['value'].upper()): - value = str(human_to_bytes(module.params['value'].upper())) + if re.match(r"^\s*(\d*\.?\d*)\s*([A-Za-z]+)?\s*$", module.params["value"].upper()): + value = str(human_to_bytes(module.params["value"].upper())) else: - value = module.params['value'] + value = module.params["value"] except ValueError: - value = module.params['value'] + value = module.params["value"] r = redis.StrictRedis(**redis_params) @@ -330,8 +339,8 @@ def main(): module.fail_json(msg=f"unable to write config: {e}", exception=traceback.format_exc()) module.exit_json(changed=changed, name=name, value=value) else: - module.fail_json(msg='A valid command must be provided') + module.fail_json(msg="A valid command must be provided") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/redis_data.py b/plugins/modules/redis_data.py index eb4a7e7ef08..aeb8a5da437 100644 --- a/plugins/modules/redis_data.py +++ b/plugins/modules/redis_data.py @@ -134,116 +134,117 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redis import ( - fail_imports, redis_auth_argument_spec, RedisAnsible) + fail_imports, + redis_auth_argument_spec, + RedisAnsible, +) def main(): redis_auth_args = redis_auth_argument_spec() 
module_args = dict( - key=dict(type='str', required=True, no_log=False), - value=dict(type='str'), - expiration=dict(type='int'), - non_existing=dict(type='bool'), - existing=dict(type='bool'), - keep_ttl=dict(type='bool'), - state=dict(type='str', default='present', - choices=['present', 'absent']), + key=dict(type="str", required=True, no_log=False), + value=dict(type="str"), + expiration=dict(type="int"), + non_existing=dict(type="bool"), + existing=dict(type="bool"), + keep_ttl=dict(type="bool"), + state=dict(type="str", default="present", choices=["present", "absent"]), ) module_args.update(redis_auth_args) module = AnsibleModule( argument_spec=module_args, supports_check_mode=True, - required_if=[('state', 'present', ('value',))], - mutually_exclusive=[['non_existing', 'existing'], - ['keep_ttl', 'expiration']],) + required_if=[("state", "present", ("value",))], + mutually_exclusive=[["non_existing", "existing"], ["keep_ttl", "expiration"]], + ) fail_imports(module) redis = RedisAnsible(module) - key = module.params['key'] - value = module.params['value'] - px = module.params['expiration'] - nx = module.params['non_existing'] - xx = module.params['existing'] - keepttl = module.params['keep_ttl'] - state = module.params['state'] - set_args = {'name': key, 'value': value, 'px': px, - 'nx': nx, 'xx': xx, 'keepttl': keepttl} + key = module.params["key"] + value = module.params["value"] + px = module.params["expiration"] + nx = module.params["non_existing"] + xx = module.params["existing"] + keepttl = module.params["keep_ttl"] + state = module.params["state"] + set_args = {"name": key, "value": value, "px": px, "nx": nx, "xx": xx, "keepttl": keepttl} - result = {'changed': False} + result = {"changed": False} old_value = None try: old_value = redis.connection.get(key) except Exception as e: - msg = f'Failed to get value of key: {key} with exception: {e}' - result['msg'] = msg + msg = f"Failed to get value of key: {key} with exception: {e}" + result["msg"] = msg module.fail_json(**result) - if state == 'absent': + if state == "absent": if module.check_mode: if old_value is None: - msg = f'Key: {key} not present' - result['msg'] = msg + msg = f"Key: {key} not present" + result["msg"] = msg module.exit_json(**result) else: - msg = f'Deleted key: {key}' - result['msg'] = msg + msg = f"Deleted key: {key}" + result["msg"] = msg module.exit_json(**result) try: ret = redis.connection.delete(key) if ret == 0: - msg = f'Key: {key} not present' - result['msg'] = msg + msg = f"Key: {key} not present" + result["msg"] = msg module.exit_json(**result) else: - msg = f'Deleted key: {key}' - result['msg'] = msg - result['changed'] = True + msg = f"Deleted key: {key}" + result["msg"] = msg + result["changed"] = True module.exit_json(**result) except Exception as e: - msg = f'Failed to delete key: {key} with exception: {e}' - result['msg'] = msg + msg = f"Failed to delete key: {key} with exception: {e}" + result["msg"] = msg module.fail_json(**result) old_value = None try: old_value = redis.connection.get(key) except Exception as e: - msg = f'Failed to get value of key: {key} with exception: {e}' - result['msg'] = msg + msg = f"Failed to get value of key: {key} with exception: {e}" + result["msg"] = msg module.fail_json(**result) - result['old_value'] = old_value + result["old_value"] = old_value if old_value == value and keepttl is not False and px is None: - msg = f'Key {key} already has desired value' - result['msg'] = msg - result['value'] = value + msg = f"Key {key} already has desired value" + 
result["msg"] = msg + result["value"] = value module.exit_json(**result) if module.check_mode: - result['msg'] = f'Set key: {key}' - result['value'] = value + result["msg"] = f"Set key: {key}" + result["value"] = value module.exit_json(**result) try: ret = redis.connection.set(**set_args) if ret is None: if nx: - msg = f'Could not set key: {key}. Key already present.' + msg = f"Could not set key: {key}. Key already present." else: - msg = f'Could not set key: {key}. Key not present.' - result['msg'] = msg + msg = f"Could not set key: {key}. Key not present." + result["msg"] = msg module.fail_json(**result) - msg = f'Set key: {key}' - result['msg'] = msg - result['changed'] = True - result['value'] = value + msg = f"Set key: {key}" + result["msg"] = msg + result["changed"] = True + result["value"] = value module.exit_json(**result) except Exception as e: - msg = f'Failed to set key: {key} with exception: {e}' - result['msg'] = msg + msg = f"Failed to set key: {key} with exception: {e}" + result["msg"] = msg module.fail_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/redis_data_incr.py b/plugins/modules/redis_data_incr.py index 3a1d4ed158f..45c863e7079 100644 --- a/plugins/modules/redis_data_incr.py +++ b/plugins/modules/redis_data_incr.py @@ -89,36 +89,39 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redis import ( - fail_imports, redis_auth_argument_spec, RedisAnsible) + fail_imports, + redis_auth_argument_spec, + RedisAnsible, +) def main(): redis_auth_args = redis_auth_argument_spec() module_args = dict( - key=dict(type='str', required=True, no_log=False), - increment_int=dict(type='int'), - increment_float=dict(type='float'), + key=dict(type="str", required=True, no_log=False), + increment_int=dict(type="int"), + increment_float=dict(type="float"), ) module_args.update(redis_auth_args) module = AnsibleModule( argument_spec=module_args, supports_check_mode=True, - mutually_exclusive=[['increment_int', 'increment_float']], + mutually_exclusive=[["increment_int", "increment_float"]], ) fail_imports(module) redis = RedisAnsible(module) - key = module.params['key'] - increment_float = module.params['increment_float'] - increment_int = module.params['increment_int'] + key = module.params["key"] + increment_float = module.params["increment_float"] + increment_int = module.params["increment_int"] increment = 1 if increment_float is not None: increment = increment_float elif increment_int is not None: increment = increment_int - result = {'changed': False} + result = {"changed": False} if module.check_mode: value = 0.0 try: @@ -126,55 +129,55 @@ def main(): if res is not None: value = float(res) except ValueError as e: - msg = f'Value: {res} of key: {key} is not incrementable(int or float)' - result['msg'] = msg + msg = f"Value: {res} of key: {key} is not incrementable(int or float)" + result["msg"] = msg module.fail_json(**result) except Exception as e: - msg = f'Failed to get value of key: {key} with exception: {e}' - result['msg'] = msg + msg = f"Failed to get value of key: {key} with exception: {e}" + result["msg"] = msg module.fail_json(**result) - msg = f'Incremented key: {key} by {increment} to {value + increment}' - result['msg'] = msg - result['value'] = float(value + increment) + msg = f"Incremented key: {key} by {increment} to {value + increment}" + result["msg"] = msg + result["value"] = float(value + increment) module.exit_json(**result) if 
increment_float is not None: try: value = redis.connection.incrbyfloat(key, increment) - msg = f'Incremented key: {key} by {increment} to {value}' - result['msg'] = msg - result['value'] = float(value) - result['changed'] = True + msg = f"Incremented key: {key} by {increment} to {value}" + result["msg"] = msg + result["value"] = float(value) + result["changed"] = True module.exit_json(**result) except Exception as e: - msg = f'Failed to increment key: {key} by {increment} with exception: {e}' - result['msg'] = msg + msg = f"Failed to increment key: {key} by {increment} with exception: {e}" + result["msg"] = msg module.fail_json(**result) elif increment_int is not None: try: value = redis.connection.incrby(key, increment) - msg = f'Incremented key: {key} by {increment} to {value}' - result['msg'] = msg - result['value'] = float(value) - result['changed'] = True + msg = f"Incremented key: {key} by {increment} to {value}" + result["msg"] = msg + result["value"] = float(value) + result["changed"] = True module.exit_json(**result) except Exception as e: - msg = f'Failed to increment key: {key} by {increment} with exception: {e}' - result['msg'] = msg + msg = f"Failed to increment key: {key} by {increment} with exception: {e}" + result["msg"] = msg module.fail_json(**result) else: try: value = redis.connection.incr(key) - msg = f'Incremented key: {key} to {value}' - result['msg'] = msg - result['value'] = float(value) - result['changed'] = True + msg = f"Incremented key: {key} to {value}" + result["msg"] = msg + result["value"] = float(value) + result["changed"] = True module.exit_json(**result) except Exception as e: - msg = f'Failed to increment key: {key} with exception: {e}' - result['msg'] = msg + msg = f"Failed to increment key: {key} with exception: {e}" + result["msg"] = msg module.fail_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/redis_data_info.py b/plugins/modules/redis_data_info.py index 5750e2c2a49..34b6858652b 100644 --- a/plugins/modules/redis_data_info.py +++ b/plugins/modules/redis_data_info.py @@ -68,13 +68,16 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redis import ( - fail_imports, redis_auth_argument_spec, RedisAnsible) + fail_imports, + redis_auth_argument_spec, + RedisAnsible, +) def main(): redis_auth_args = redis_auth_argument_spec() module_args = dict( - key=dict(type='str', required=True, no_log=False), + key=dict(type="str", required=True, no_log=False), ) module_args.update(redis_auth_args) @@ -86,27 +89,27 @@ def main(): redis = RedisAnsible(module) - key = module.params['key'] - result = {'changed': False} + key = module.params["key"] + result = {"changed": False} value = None try: value = redis.connection.get(key) except Exception as e: msg = f'Failed to get value of key "{key}" with exception: {e}' - result['msg'] = msg + result["msg"] = msg module.fail_json(**result) if value is None: msg = f'Key "{key}" does not exist in database' - result['exists'] = False + result["exists"] = False else: msg = f'Got key "{key}"' - result['value'] = value - result['exists'] = True - result['msg'] = msg + result["value"] = value + result["exists"] = True + result["msg"] = msg module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/redis_info.py b/plugins/modules/redis_info.py index fd02ebdeece..42638f5d86e 100644 --- a/plugins/modules/redis_info.py +++ 
b/plugins/modules/redis_info.py @@ -217,6 +217,7 @@ REDIS_IMP_ERR = None try: from redis import StrictRedis + HAS_REDIS_PACKAGE = True except ImportError: REDIS_IMP_ERR = traceback.format_exc() @@ -224,7 +225,10 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redis import ( - fail_imports, redis_auth_argument_spec, redis_auth_params) + fail_imports, + redis_auth_argument_spec, + redis_auth_params, +) def redis_client(**client_params): @@ -234,7 +238,7 @@ def redis_client(**client_params): # Module execution. def main(): module_args = dict( - cluster=dict(type='bool', default=False), + cluster=dict(type="bool", default=False), ) module_args.update(redis_auth_argument_spec(tls_default=False)) module = AnsibleModule( @@ -242,10 +246,10 @@ def main(): supports_check_mode=True, ) - fail_imports(module, module.params['tls']) + fail_imports(module, module.params["tls"]) redis_params = redis_auth_params(module) - cluster = module.params['cluster'] + cluster = module.params["cluster"] # Connect and check client = redis_client(**redis_params) @@ -259,10 +263,10 @@ def main(): result = dict(changed=False, info=info) if cluster: - result['cluster_info'] = client.execute_command('CLUSTER INFO') + result["cluster_info"] = client.execute_command("CLUSTER INFO") module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/rhevm.py b/plugins/modules/rhevm.py index ed5e738fbe5..6c7aa39e781 100644 --- a/plugins/modules/rhevm.py +++ b/plugins/modules/rhevm.py @@ -333,6 +333,7 @@ try: from ovirtsdk.api import API from ovirtsdk.xml import params + HAS_SDK = True except ImportError: HAS_SDK = False @@ -344,8 +345,8 @@ RHEV_SUCCESS = 0 RHEV_UNAVAILABLE = 2 -RHEV_TYPE_OPTS = ['desktop', 'host', 'server'] -STATE_OPTS = ['absent', 'cd', 'down', 'info', 'ping', 'present', 'restart', 'up'] +RHEV_TYPE_OPTS = ["desktop", "host", "server"] +STATE_OPTS = ["absent", "cd", "down", "info", "ping", "present", "restart", "up"] msg: list[str] = [] changed = False @@ -353,16 +354,16 @@ class RHEVConn: - 'Connection to RHEV-M' + "Connection to RHEV-M" def __init__(self, module): self.module = module - user = module.params.get('user') - password = module.params.get('password') - server = module.params.get('server') - port = module.params.get('port') - insecure_api = module.params.get('insecure_api') + user = module.params.get("user") + password = module.params.get("password") + server = module.params.get("server") + port = module.params.get("port") + insecure_api = module.params.get("insecure_api") url = f"https://{server}:{port}" @@ -382,7 +383,7 @@ def createVMimage(self, name, cluster, template): name=name, cluster=self.conn.clusters.get(name=cluster), template=self.conn.templates.get(name=template), - disks=params.Disks(clone=True) + disks=params.Disks(clone=True), ) self.conn.vms.add(vmparams) setMsg("VM is created") @@ -401,7 +402,7 @@ def createVM(self, name, cluster, os, actiontype): cluster=self.conn.clusters.get(name=cluster), os=params.OperatingSystem(type_=os), template=self.conn.templates.get(name="Blank"), - type_=actiontype + type_=actiontype, ) self.conn.vms.add(vmparams) setMsg("VM is created") @@ -413,7 +414,9 @@ def createVM(self, name, cluster, os, actiontype): setFailed() return False - def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot): + def createDisk( + self, vmname, diskname, disksize, diskdomain, diskinterface, 
diskformat, diskallocationtype, diskboot + ): VM = self.get_VM(vmname) newdisk = params.Disk( @@ -424,9 +427,7 @@ def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, disk interface=diskinterface, format=diskformat, bootable=diskboot, - storage_domains=params.StorageDomains( - storage_domain=[self.get_domain(diskdomain)] - ) + storage_domains=params.StorageDomains(storage_domain=[self.get_domain(diskdomain)]), ) try: @@ -443,7 +444,7 @@ def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, disk try: currentdisk = VM.disks.get(name=diskname) attempt = 1 - while currentdisk.status.state != 'ok': + while currentdisk.status.state != "ok": currentdisk = VM.disks.get(name=diskname) if attempt == 100: setMsg(f"Error, disk {diskname}, state {currentdisk.status.state}") @@ -463,11 +464,7 @@ def createNIC(self, vmname, nicname, vlan, interface): VM = self.get_VM(vmname) CLUSTER = self.get_cluster_byid(VM.cluster.id) DC = self.get_DC_byid(CLUSTER.data_center.id) - newnic = params.NIC( - name=nicname, - network=DC.networks.get(name=vlan), - interface=interface - ) + newnic = params.NIC(name=nicname, network=DC.networks.get(name=vlan), interface=interface) try: VM.nics.add(newnic) @@ -611,7 +608,7 @@ def set_Disk(self, diskname, disksize, diskinterface, diskboot): setMsg("The boot option of the disk is correct") if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)): try: - DISK.size = (1024 * 1024 * 1024 * int(disksize)) + DISK.size = 1024 * 1024 * 1024 * int(disksize) setMsg("Updated the size of the disk.") setChanged() except Exception as e: @@ -660,7 +657,7 @@ def set_NIC(self, vmname, nicname, newname, vlan, interface): setChanged() try: NIC.update() - setMsg('iface has successfully been updated.') + setMsg("iface has successfully been updated.") except Exception as e: setMsg("Failed to update the iface.") setMsg(str(e)) @@ -706,40 +703,40 @@ def set_Host(self, host_name, cluster, ifaces): setMsg("Host does not exist.") ifacelist = dict() networklist = [] - manageip = '' + manageip = "" try: for iface in ifaces: try: setMsg(f"creating host interface {iface['name']}") - if 'management' in iface: - manageip = iface['ip'] - if 'boot_protocol' not in iface: - if 'ip' in iface: - iface['boot_protocol'] = 'static' + if "management" in iface: + manageip = iface["ip"] + if "boot_protocol" not in iface: + if "ip" in iface: + iface["boot_protocol"] = "static" else: - iface['boot_protocol'] = 'none' - if 'ip' not in iface: - iface['ip'] = '' - if 'netmask' not in iface: - iface['netmask'] = '' - if 'gateway' not in iface: - iface['gateway'] = '' - - if 'network' in iface: - if 'bond' in iface: + iface["boot_protocol"] = "none" + if "ip" not in iface: + iface["ip"] = "" + if "netmask" not in iface: + iface["netmask"] = "" + if "gateway" not in iface: + iface["gateway"] = "" + + if "network" in iface: + if "bond" in iface: bond = [] - for slave in iface['bond']: + for slave in iface["bond"]: bond.append(ifacelist[slave]) try: tmpiface = params.Bonding( slaves=params.Slaves(host_nic=bond), options=params.Options( option=[ - params.Option(name='miimon', value='100'), - params.Option(name='mode', value='4') + params.Option(name="miimon", value="100"), + params.Option(name="mode", value="4"), ] - ) + ), ) except Exception as e: setMsg(f"Failed to create the bond for {iface['name']}") @@ -748,16 +745,15 @@ def set_Host(self, host_name, cluster, ifaces): return False try: tmpnetwork = params.HostNIC( - network=params.Network(name=iface['network']), - name=iface['name'], 
- boot_protocol=iface['boot_protocol'], + network=params.Network(name=iface["network"]), + name=iface["name"], + boot_protocol=iface["boot_protocol"], ip=params.IP( - address=iface['ip'], - netmask=iface['netmask'], - gateway=iface['gateway'] + address=iface["ip"], netmask=iface["netmask"], gateway=iface["gateway"] ), override_configuration=True, - bonding=tmpiface) + bonding=tmpiface, + ) networklist.append(tmpnetwork) setMsg(f"Applying network {iface['name']}") except Exception as e: @@ -767,94 +763,92 @@ def set_Host(self, host_name, cluster, ifaces): return False else: tmpnetwork = params.HostNIC( - network=params.Network(name=iface['network']), - name=iface['name'], - boot_protocol=iface['boot_protocol'], + network=params.Network(name=iface["network"]), + name=iface["name"], + boot_protocol=iface["boot_protocol"], ip=params.IP( - address=iface['ip'], - netmask=iface['netmask'], - gateway=iface['gateway'] - )) + address=iface["ip"], netmask=iface["netmask"], gateway=iface["gateway"] + ), + ) networklist.append(tmpnetwork) setMsg(f"Applying network {iface['name']}") else: tmpiface = params.HostNIC( - name=iface['name'], + name=iface["name"], network=params.Network(), - boot_protocol=iface['boot_protocol'], - ip=params.IP( - address=iface['ip'], - netmask=iface['netmask'], - gateway=iface['gateway'] - )) - ifacelist[iface['name']] = tmpiface + boot_protocol=iface["boot_protocol"], + ip=params.IP(address=iface["ip"], netmask=iface["netmask"], gateway=iface["gateway"]), + ) + ifacelist[iface["name"]] = tmpiface except Exception as e: setMsg(f"Failed to set {iface['name']}") setFailed() setMsg(str(e)) return False except Exception as e: - setMsg('Failed to set networks') + setMsg("Failed to set networks") setMsg(str(e)) setFailed() return False - if manageip == '': - setMsg('No management network is defined') + if manageip == "": + setMsg("No management network is defined") setFailed() return False try: - HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey')) + HOST = params.Host( + name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method="publickey") + ) if self.conn.hosts.add(HOST): setChanged() HOST = self.get_Host(host_name) state = HOST.status.state - while state != 'non_operational' and state != 'up': + while state != "non_operational" and state != "up": HOST = self.get_Host(host_name) state = HOST.status.state time.sleep(1) - if state == 'non_responsive': - setMsg('Failed to add host to RHEVM') + if state == "non_responsive": + setMsg("Failed to add host to RHEVM") setFailed() return False - setMsg('status host: up') + setMsg("status host: up") time.sleep(5) HOST = self.get_Host(host_name) state = HOST.status.state setMsg(f"State before setting to maintenance: {state}") HOST.deactivate() - while state != 'maintenance': + while state != "maintenance": HOST = self.get_Host(host_name) state = HOST.status.state time.sleep(1) - setMsg('status host: maintenance') + setMsg("status host: maintenance") try: - HOST.nics.setupnetworks(params.Action( - force=True, - check_connectivity=False, - host_nics=params.HostNics(host_nic=networklist) - )) - setMsg('nics are set') + HOST.nics.setupnetworks( + params.Action( + force=True, check_connectivity=False, host_nics=params.HostNics(host_nic=networklist) + ) + ) + setMsg("nics are set") except Exception as e: - setMsg('Failed to apply networkconfig') + setMsg("Failed to apply networkconfig") setFailed() setMsg(str(e)) return False try: 
HOST.commitnetconfig() - setMsg('Network config is saved') + setMsg("Network config is saved") except Exception as e: - setMsg('Failed to save networkconfig') + setMsg("Failed to save networkconfig") setFailed() setMsg(str(e)) return False except Exception as e: - if 'The Host name is already in use' in str(e): + if "The Host name is already in use" in str(e): setMsg("Host already exists") else: setMsg("Failed to add host") @@ -863,15 +857,15 @@ def set_Host(self, host_name, cluster, ifaces): return False HOST.activate() - while state != 'up': + while state != "up": HOST = self.get_Host(host_name) state = HOST.status.state time.sleep(1) - if state == 'non_responsive': - setMsg('Failed to apply networkconfig.') + if state == "non_responsive": + setMsg("Failed to apply networkconfig.") setFailed() return False - setMsg('status host: up') + setMsg("status host: up") else: setMsg("Host exists.") @@ -929,7 +923,7 @@ def stop_VM(self, vmname, timeout): def set_CD(self, vmname, cd_drive): VM = self.get_VM(vmname) try: - if str(VM.status.state) == 'down': + if str(VM.status.state) == "down": cdrom = params.CdRom(file=cd_drive) VM.cdroms.add(cdrom) setMsg("Attached the image.") @@ -1015,43 +1009,45 @@ def getVM(self, name): VM = self.conn.get_VM(name) if VM: vminfo = dict() - vminfo['uuid'] = VM.id - vminfo['name'] = VM.name - vminfo['status'] = VM.status.state - vminfo['cpu_cores'] = VM.cpu.topology.cores - vminfo['cpu_sockets'] = VM.cpu.topology.sockets - vminfo['cpu_shares'] = VM.cpu_shares - vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024) - vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024) - vminfo['os'] = VM.get_os().type_ - vminfo['del_prot'] = VM.delete_protected + vminfo["uuid"] = VM.id + vminfo["name"] = VM.name + vminfo["status"] = VM.status.state + vminfo["cpu_cores"] = VM.cpu.topology.cores + vminfo["cpu_sockets"] = VM.cpu.topology.sockets + vminfo["cpu_shares"] = VM.cpu_shares + vminfo["memory"] = int(VM.memory) // 1024 // 1024 // 1024 + vminfo["mem_pol"] = int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024 + vminfo["os"] = VM.get_os().type_ + vminfo["del_prot"] = VM.delete_protected try: - vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name) + vminfo["host"] = str(self.conn.get_Host_byid(str(VM.host.id)).name) except Exception: - vminfo['host'] = None - vminfo['boot_order'] = [] + vminfo["host"] = None + vminfo["boot_order"] = [] for boot_dev in VM.os.get_boot(): - vminfo['boot_order'].append(str(boot_dev.dev)) - vminfo['disks'] = [] + vminfo["boot_order"].append(str(boot_dev.dev)) + vminfo["disks"] = [] for DISK in VM.disks.list(): disk = dict() - disk['name'] = DISK.name - disk['size'] = (int(DISK.size) // 1024 // 1024 // 1024) - disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name) - disk['interface'] = DISK.interface - vminfo['disks'].append(disk) - vminfo['ifaces'] = [] + disk["name"] = DISK.name + disk["size"] = int(DISK.size) // 1024 // 1024 // 1024 + disk["domain"] = str( + (self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name + ) + disk["interface"] = DISK.interface + vminfo["disks"].append(disk) + vminfo["ifaces"] = [] for NIC in VM.nics.list(): iface = dict() - iface['name'] = str(NIC.name) - iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name) - iface['interface'] = NIC.interface - iface['mac'] = NIC.mac.address - vminfo['ifaces'].append(iface) + iface["name"] = str(NIC.name) + iface["vlan"] = 
str(self.conn.get_network_byid(NIC.get_network().id).name) + iface["interface"] = NIC.interface + iface["mac"] = NIC.mac.address + vminfo["ifaces"].append(iface) vminfo[str(NIC.name)] = NIC.mac.address CLUSTER = self.conn.get_cluster_byid(VM.cluster.id) if CLUSTER: - vminfo['cluster'] = CLUSTER.name + vminfo["cluster"] = CLUSTER.name else: vminfo = False return vminfo @@ -1085,22 +1081,22 @@ def setDisks(self, name, disks): counter = 0 bootselect = False for disk in disks: - if 'bootable' in disk: - if disk['bootable'] is True: + if "bootable" in disk: + if disk["bootable"] is True: bootselect = True for disk in disks: diskname = f"{name}_Disk{counter}_{disk.get('name', '').replace('/', '_')}" - disksize = disk.get('size', 1) - diskdomain = disk.get('domain', None) + disksize = disk.get("size", 1) + diskdomain = disk.get("domain", None) if diskdomain is None: setMsg("`domain` is a required disk key.") setFailed() return False - diskinterface = disk.get('interface', 'virtio') - diskformat = disk.get('format', 'raw') - diskallocationtype = disk.get('thin', False) - diskboot = disk.get('bootable', False) + diskinterface = disk.get("interface", "virtio") + diskformat = disk.get("format", "raw") + diskallocationtype = disk.get("thin", False) + diskboot = disk.get("bootable", False) if bootselect is False and counter == 0: diskboot = True @@ -1108,7 +1104,9 @@ def setDisks(self, name, disks): DISK = self.conn.get_disk(diskname) if DISK is None: - self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot) + self.conn.createDisk( + name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot + ) else: self.conn.set_Disk(diskname, disksize, diskinterface, diskboot) checkFail() @@ -1126,7 +1124,7 @@ def setNetworks(self, vmname, ifaces): for NIC in VM.nics.list(): if counter < length: iface = ifaces[counter] - name = iface.get('name', None) + name = iface.get("name", None) if name is None: setMsg("`name` is a required iface key.") setFailed() @@ -1137,12 +1135,12 @@ def setNetworks(self, vmname, ifaces): self.setNetworks(vmname, ifaces) checkFail() return True - vlan = iface.get('vlan', None) + vlan = iface.get("vlan", None) if vlan is None: setMsg("`vlan` is a required iface key.") setFailed() checkFail() - interface = iface.get('interface', 'virtio') + interface = iface.get("interface", "virtio") self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface) else: self.conn.del_NIC(vmname, NIC.name) @@ -1151,17 +1149,17 @@ def setNetworks(self, vmname, ifaces): while counter < length: iface = ifaces[counter] - name = iface.get('name', None) + name = iface.get("name", None) if name is None: setMsg("`name` is a required iface key.") setFailed() - vlan = iface.get('vlan', None) + vlan = iface.get("vlan", None) if vlan is None: setMsg("`vlan` is a required iface key.") setFailed() if failed is True: return False - interface = iface.get('interface', 'virtio') + interface = iface.get("interface", "virtio") self.conn.createNIC(vmname, name, vlan, interface) counter += 1 @@ -1188,9 +1186,9 @@ def setBootOrder(self, vmname, boot_order): if boot_order != bootorder: self.conn.set_BootOrder(vmname, boot_order) - setMsg('The boot order has been set') + setMsg("The boot order has been set") else: - setMsg('The boot order has already been set') + setMsg("The boot order has already been set") return True def removeVM(self, vmname): @@ -1263,41 +1261,40 @@ def setMsg(message: str) -> None: def core(module): - r = 
RHEV(module) - state = module.params.get('state') + state = module.params.get("state") - if state == 'ping': + if state == "ping": r.test() return RHEV_SUCCESS, {"ping": "pong"} - elif state == 'info': - name = module.params.get('name') + elif state == "info": + name = module.params.get("name") if not name: setMsg("`name` is a required argument.") return RHEV_FAILED, msg vminfo = r.getVM(name) - return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} - elif state == 'present': + return RHEV_SUCCESS, {"changed": changed, "msg": msg, "vm": vminfo} + elif state == "present": created = False - name = module.params.get('name') + name = module.params.get("name") if not name: setMsg("`name` is a required argument.") return RHEV_FAILED, msg - actiontype = module.params.get('type') - if actiontype == 'server' or actiontype == 'desktop': + actiontype = module.params.get("type") + if actiontype == "server" or actiontype == "desktop": vminfo = r.getVM(name) if vminfo: - setMsg('VM exists') + setMsg("VM exists") else: # Create VM - cluster = module.params.get('cluster') + cluster = module.params.get("cluster") if cluster is None: setMsg("cluster is a required argument.") setFailed() - template = module.params.get('image') + template = module.params.get("image") if template: - disks = module.params.get('disks') + disks = module.params.get("disks") if disks is None: setMsg("disks is a required argument.") setFailed() @@ -1305,7 +1302,7 @@ def core(module): if r.createVMimage(name, cluster, template, disks) is False: return RHEV_FAILED, vminfo else: - os = module.params.get('osver') + os = module.params.get("osver") if os is None: setMsg("osver is a required argument.") setFailed() @@ -1316,172 +1313,176 @@ def core(module): # Set MEMORY and MEMORY POLICY vminfo = r.getVM(name) - memory = module.params.get('vmmem') + memory = module.params.get("vmmem") if memory is not None: - memory_policy = module.params.get('mempol') + memory_policy = module.params.get("mempol") if memory_policy == 0: memory_policy = memory mem_pol_nok = True - if int(vminfo['mem_pol']) == memory_policy: + if int(vminfo["mem_pol"]) == memory_policy: setMsg("Memory is correct") mem_pol_nok = False mem_nok = True - if int(vminfo['memory']) == memory: + if int(vminfo["memory"]) == memory: setMsg("Memory is correct") mem_nok = False if memory_policy > memory: - setMsg('memory_policy cannot have a higher value than memory.') + setMsg("memory_policy cannot have a higher value than memory.") return RHEV_FAILED, msg if mem_nok and mem_pol_nok: - if memory_policy > int(vminfo['memory']): - r.setMemory(vminfo['name'], memory) - r.setMemoryPolicy(vminfo['name'], memory_policy) + if memory_policy > int(vminfo["memory"]): + r.setMemory(vminfo["name"], memory) + r.setMemoryPolicy(vminfo["name"], memory_policy) else: - r.setMemoryPolicy(vminfo['name'], memory_policy) - r.setMemory(vminfo['name'], memory) + r.setMemoryPolicy(vminfo["name"], memory_policy) + r.setMemory(vminfo["name"], memory) elif mem_nok: - r.setMemory(vminfo['name'], memory) + r.setMemory(vminfo["name"], memory) elif mem_pol_nok: - r.setMemoryPolicy(vminfo['name'], memory_policy) + r.setMemoryPolicy(vminfo["name"], memory_policy) checkFail() # Set CPU - cpu = module.params.get('vmcpu') - if int(vminfo['cpu_cores']) == cpu: + cpu = module.params.get("vmcpu") + if int(vminfo["cpu_cores"]) == cpu: setMsg("Number of CPUs is correct") else: - if r.setCPU(vminfo['name'], cpu) is False: + if r.setCPU(vminfo["name"], cpu) is False: return RHEV_FAILED, msg # Set CPU SHARE - 
cpu_share = module.params.get('cpu_share') + cpu_share = module.params.get("cpu_share") if cpu_share is not None: - if int(vminfo['cpu_shares']) == cpu_share: + if int(vminfo["cpu_shares"]) == cpu_share: setMsg("CPU share is correct.") else: - if r.setCPUShare(vminfo['name'], cpu_share) is False: + if r.setCPUShare(vminfo["name"], cpu_share) is False: return RHEV_FAILED, msg # Set DISKS - disks = module.params.get('disks') + disks = module.params.get("disks") if disks is not None: - if r.setDisks(vminfo['name'], disks) is False: + if r.setDisks(vminfo["name"], disks) is False: return RHEV_FAILED, msg # Set NETWORKS - ifaces = module.params.get('ifaces', None) + ifaces = module.params.get("ifaces", None) if ifaces is not None: - if r.setNetworks(vminfo['name'], ifaces) is False: + if r.setNetworks(vminfo["name"], ifaces) is False: return RHEV_FAILED, msg # Set Delete Protection - del_prot = module.params.get('del_prot') - if r.setDeleteProtection(vminfo['name'], del_prot) is False: + del_prot = module.params.get("del_prot") + if r.setDeleteProtection(vminfo["name"], del_prot) is False: return RHEV_FAILED, msg # Set Boot Order - boot_order = module.params.get('boot_order') - if r.setBootOrder(vminfo['name'], boot_order) is False: + boot_order = module.params.get("boot_order") + if r.setBootOrder(vminfo["name"], boot_order) is False: return RHEV_FAILED, msg # Set VM Host - vmhost = module.params.get('vmhost') + vmhost = module.params.get("vmhost") if vmhost: - if r.setVMHost(vminfo['name'], vmhost) is False: + if r.setVMHost(vminfo["name"], vmhost) is False: return RHEV_FAILED, msg vminfo = r.getVM(name) - vminfo['created'] = created - return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} + vminfo["created"] = created + return RHEV_SUCCESS, {"changed": changed, "msg": msg, "vm": vminfo} - if actiontype == 'host': - cluster = module.params.get('cluster') + if actiontype == "host": + cluster = module.params.get("cluster") if cluster is None: setMsg("cluster is a required argument.") setFailed() - ifaces = module.params.get('ifaces') + ifaces = module.params.get("ifaces") if ifaces is None: setMsg("ifaces is a required argument.") setFailed() if r.setHost(name, cluster, ifaces) is False: return RHEV_FAILED, msg - return RHEV_SUCCESS, {'changed': changed, 'msg': msg} + return RHEV_SUCCESS, {"changed": changed, "msg": msg} - elif state == 'absent': - name = module.params.get('name') + elif state == "absent": + name = module.params.get("name") if not name: setMsg("`name` is a required argument.") return RHEV_FAILED, msg - actiontype = module.params.get('type') - if actiontype == 'server' or actiontype == 'desktop': + actiontype = module.params.get("type") + if actiontype == "server" or actiontype == "desktop": vminfo = r.getVM(name) if vminfo: - setMsg('VM exists') + setMsg("VM exists") # Set Delete Protection - del_prot = module.params.get('del_prot') - if r.setDeleteProtection(vminfo['name'], del_prot) is False: + del_prot = module.params.get("del_prot") + if r.setDeleteProtection(vminfo["name"], del_prot) is False: return RHEV_FAILED, msg # Remove VM - if r.removeVM(vminfo['name']) is False: + if r.removeVM(vminfo["name"]) is False: return RHEV_FAILED, msg - setMsg('VM has been removed.') - vminfo['state'] = 'DELETED' + setMsg("VM has been removed.") + vminfo["state"] = "DELETED" else: - setMsg('VM was already removed.') - return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} + setMsg("VM was already removed.") + return RHEV_SUCCESS, {"changed": changed, "msg": msg, 
"vm": vminfo} - elif state == 'up' or state == 'down' or state == 'restarted': - name = module.params.get('name') + elif state == "up" or state == "down" or state == "restarted": + name = module.params.get("name") if not name: setMsg("`name` is a required argument.") return RHEV_FAILED, msg - timeout = module.params.get('timeout') + timeout = module.params.get("timeout") if r.setPower(name, state, timeout) is False: return RHEV_FAILED, msg vminfo = r.getVM(name) - return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} + return RHEV_SUCCESS, {"changed": changed, "msg": msg, "vm": vminfo} - elif state == 'cd': - name = module.params.get('name') - cd_drive = module.params.get('cd_drive') + elif state == "cd": + name = module.params.get("name") + cd_drive = module.params.get("cd_drive") if r.setCD(name, cd_drive) is False: return RHEV_FAILED, msg - return RHEV_SUCCESS, {'changed': changed, 'msg': msg} + return RHEV_SUCCESS, {"changed": changed, "msg": msg} def main(): global module module = AnsibleModule( argument_spec=dict( - state=dict(type='str', default='present', choices=['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']), - user=dict(type='str', default='admin@internal'), - password=dict(type='str', required=True, no_log=True), - server=dict(type='str', default='127.0.0.1'), - port=dict(type='int', default=443), - insecure_api=dict(type='bool', default=False), - name=dict(type='str'), - image=dict(type='str'), - datacenter=dict(type='str', default="Default"), - type=dict(type='str', default='server', choices=['desktop', 'host', 'server']), - cluster=dict(type='str', default=''), - vmhost=dict(type='str'), - vmcpu=dict(type='int', default=2), - vmmem=dict(type='int', default=1), - disks=dict(type='list', elements='str'), - osver=dict(type='str', default="rhel_6x64"), - ifaces=dict(type='list', elements='str', aliases=['interfaces', 'nics']), - timeout=dict(type='int'), - mempol=dict(type='int', default=1), - vm_ha=dict(type='bool', default=True), - cpu_share=dict(type='int', default=0), - boot_order=dict(type='list', elements='str', default=['hd', 'network']), - del_prot=dict(type='bool', default=True), - cd_drive=dict(type='str'), + state=dict( + type="str", + default="present", + choices=["absent", "cd", "down", "info", "ping", "present", "restarted", "up"], + ), + user=dict(type="str", default="admin@internal"), + password=dict(type="str", required=True, no_log=True), + server=dict(type="str", default="127.0.0.1"), + port=dict(type="int", default=443), + insecure_api=dict(type="bool", default=False), + name=dict(type="str"), + image=dict(type="str"), + datacenter=dict(type="str", default="Default"), + type=dict(type="str", default="server", choices=["desktop", "host", "server"]), + cluster=dict(type="str", default=""), + vmhost=dict(type="str"), + vmcpu=dict(type="int", default=2), + vmmem=dict(type="int", default=1), + disks=dict(type="list", elements="str"), + osver=dict(type="str", default="rhel_6x64"), + ifaces=dict(type="list", elements="str", aliases=["interfaces", "nics"]), + timeout=dict(type="int"), + mempol=dict(type="int", default=1), + vm_ha=dict(type="bool", default=True), + cpu_share=dict(type="int", default=0), + boot_order=dict(type="list", elements="str", default=["hd", "network"]), + del_prot=dict(type="bool", default=True), + cd_drive=dict(type="str"), ), ) @@ -1500,5 +1501,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/rhsm_release.py 
b/plugins/modules/rhsm_release.py index f42096698fd..4620bce7560 100644 --- a/plugins/modules/rhsm_release.py +++ b/plugins/modules/rhsm_release.py @@ -66,21 +66,21 @@ # Matches release-like values such as 7.2, 5.10, 6Server, 8 # but rejects unlikely values, like 100Server, 1.100, 7server etc. -release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server|Client|Workstation|)\b') +release_matcher = re.compile(r"\b\d{1,2}(?:\.\d{1,2}|Server|Client|Workstation|)\b") def _sm_release(module, *args): # pass args to s-m release, e.g. _sm_release(module, '--set', '0.1') becomes # "subscription-manager release --set 0.1" - sm_bin = module.get_bin_path('subscription-manager', required=True) - cmd = [sm_bin, 'release'] + list(args) + sm_bin = module.get_bin_path("subscription-manager", required=True) + cmd = [sm_bin, "release"] + list(args) # delegate nonzero rc handling to run_command return module.run_command(cmd, check_rc=True, expand_user_and_vars=False) def get_release(module): # Get the current release version, or None if release unset - rc, out, err = _sm_release(module, '--show') + rc, out, err = _sm_release(module, "--show") try: match = release_matcher.findall(out)[0] except IndexError: @@ -93,9 +93,9 @@ def get_release(module): def set_release(module, release): # Set current release version, or unset if release is None if release is None: - args = ('--unset',) + args = ("--unset",) else: - args = ('--set', release) + args = ("--set", release) return _sm_release(module, *args) @@ -103,17 +103,15 @@ def set_release(module, release): def main(): module = AnsibleModule( argument_spec=dict( - release=dict(type='str'), + release=dict(type="str"), ), - supports_check_mode=True + supports_check_mode=True, ) if os.getuid() != 0: - module.fail_json( - msg="Interacting with subscription-manager requires root permissions ('become: true')" - ) + module.fail_json(msg="Interacting with subscription-manager requires root permissions ('become: true')") - target_release = module.params['release'] + target_release = module.params["release"] # sanity check: the target release at least looks like a valid release if target_release and not release_matcher.findall(target_release): @@ -122,7 +120,7 @@ def main(): # Will fail with useful error from s-m if system not subscribed current_release = get_release(module) - changed = (target_release != current_release) + changed = target_release != current_release if not module.check_mode and changed: set_release(module, target_release) # If setting the release fails, then a fail_json would have exited with @@ -133,5 +131,5 @@ def main(): module.exit_json(current_release=current_release, changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/rhsm_repository.py b/plugins/modules/rhsm_repository.py index 1e8d4942c16..4f96ea48248 100644 --- a/plugins/modules/rhsm_repository.py +++ b/plugins/modules/rhsm_repository.py @@ -90,26 +90,23 @@ class Rhsm: def __init__(self, module): self.module = module - self.rhsm_bin = self.module.get_bin_path('subscription-manager', required=True) + self.rhsm_bin = self.module.get_bin_path("subscription-manager", required=True) self.rhsm_kwargs = { - 'environ_update': dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'), - 'expand_user_and_vars': False, - 'use_unsafe_shell': False, + "environ_update": dict(LANG="C", LC_ALL="C", LC_MESSAGES="C"), + "expand_user_and_vars": False, + "use_unsafe_shell": False, } def run_repos(self, arguments): """ Execute `subscription-manager repos` with arguments and 
manage common errors """ - rc, out, err = self.module.run_command( - [self.rhsm_bin, 'repos'] + arguments, - **self.rhsm_kwargs - ) + rc, out, err = self.module.run_command([self.rhsm_bin, "repos"] + arguments, **self.rhsm_kwargs) - if rc == 0 and out == 'This system has no repositories available through subscriptions.\n': - self.module.fail_json(msg='This system has no repositories available through subscriptions') + if rc == 0 and out == "This system has no repositories available through subscriptions.\n": + self.module.fail_json(msg="This system has no repositories available through subscriptions") elif rc == 1: - self.module.fail_json(msg=f'subscription-manager failed with the following error: {err}') + self.module.fail_json(msg=f"subscription-manager failed with the following error: {err}") else: return rc, out, err @@ -117,12 +114,12 @@ def list_repositories(self): """ Generate RHSM repository list and return a list of dict """ - rc, out, err = self.run_repos(['--list']) + rc, out, err = self.run_repos(["--list"]) - repo_id = '' - repo_name = '' - repo_url = '' - repo_enabled = '' + repo_id = "" + repo_name = "" + repo_url = "" + repo_enabled = "" repo_result = [] for line in out.splitlines(): @@ -130,29 +127,29 @@ def list_repositories(self): # - empty # - "+---------[...]" -- i.e. header # - " Available Repositories [...]" -- i.e. header - if line == '' or line[0] == '+' or line[0] == ' ': + if line == "" or line[0] == "+" or line[0] == " ": continue - if line.startswith('Repo ID: '): + if line.startswith("Repo ID: "): repo_id = line[9:].lstrip() continue - if line.startswith('Repo Name: '): + if line.startswith("Repo Name: "): repo_name = line[11:].lstrip() continue - if line.startswith('Repo URL: '): + if line.startswith("Repo URL: "): repo_url = line[10:].lstrip() continue - if line.startswith('Enabled: '): + if line.startswith("Enabled: "): repo_enabled = line[9:].lstrip() repo = { "id": repo_id, "name": repo_name, "url": repo_url, - "enabled": True if repo_enabled == '1' else False + "enabled": True if repo_enabled == "1" else False, } repo_result.append(repo) @@ -168,10 +165,10 @@ def repository_modify(module, rhsm, state, name, purge=False): for repoid in name: matched_existing_repo[repoid] = [] for idx, repo in enumerate(current_repo_list): - if fnmatch(repo['id'], repoid): + if fnmatch(repo["id"], repoid): matched_existing_repo[repoid].append(repo) # Update current_repo_list to return it as result variable - updated_repo_list[idx]['enabled'] = True if state == 'enabled' else False + updated_repo_list[idx]["enabled"] = True if state == "enabled" else False changed = False results = [] @@ -184,25 +181,25 @@ def repository_modify(module, rhsm, state, name, purge=False): results.append(f"{repoid} is not a valid repository ID") module.fail_json(results=results, msg=f"{repoid} is not a valid repository ID") for repo in matched_existing_repo[repoid]: - if state in ['disabled', 'absent']: - if repo['enabled']: + if state in ["disabled", "absent"]: + if repo["enabled"]: changed = True diff_before += f"Repository '{repo['id']}' is enabled for this system\n" diff_after += f"Repository '{repo['id']}' is disabled for this system\n" results.append(f"Repository '{repo['id']}' is disabled for this system") - rhsm_arguments += ['--disable', repo['id']] - elif state in ['enabled', 'present']: - if not repo['enabled']: + rhsm_arguments += ["--disable", repo["id"]] + elif state in ["enabled", "present"]: + if not repo["enabled"]: changed = True diff_before += f"Repository '{repo['id']}' is 
disabled for this system\n" diff_after += f"Repository '{repo['id']}' is enabled for this system\n" results.append(f"Repository '{repo['id']}' is enabled for this system") - rhsm_arguments += ['--enable', repo['id']] + rhsm_arguments += ["--enable", repo["id"]] # Disable all enabled repos on the system that are not in the task and not # marked as disabled by the task if purge: - enabled_repo_ids = set(repo['id'] for repo in updated_repo_list if repo['enabled']) + enabled_repo_ids = set(repo["id"] for repo in updated_repo_list if repo["enabled"]) matched_repoids_set = set(matched_existing_repo.keys()) difference = enabled_repo_ids.difference(matched_repoids_set) if len(difference) > 0: @@ -211,15 +208,17 @@ def repository_modify(module, rhsm, state, name, purge=False): diff_before.join(f"Repository '{repoid}'' is enabled for this system\n") diff_after.join(f"Repository '{repoid}' is disabled for this system\n") results.append(f"Repository '{repoid}' is disabled for this system") - rhsm_arguments.extend(['--disable', repoid]) + rhsm_arguments.extend(["--disable", repoid]) for updated_repo in updated_repo_list: - if updated_repo['id'] in difference: - updated_repo['enabled'] = False + if updated_repo["id"] in difference: + updated_repo["enabled"] = False - diff = {'before': diff_before, - 'after': diff_after, - 'before_header': "RHSM repositories", - 'after_header': "RHSM repositories"} + diff = { + "before": diff_before, + "after": diff_after, + "before_header": "RHSM repositories", + "after_header": "RHSM repositories", + } if not module.check_mode and changed: rc, out, err = rhsm.run_repos(rhsm_arguments) @@ -230,26 +229,24 @@ def repository_modify(module, rhsm, state, name, purge=False): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='list', elements='str', required=True), - state=dict(choices=['enabled', 'disabled'], default='enabled'), - purge=dict(type='bool', default=False), + name=dict(type="list", elements="str", required=True), + state=dict(choices=["enabled", "disabled"], default="enabled"), + purge=dict(type="bool", default=False), ), supports_check_mode=True, ) if os.getuid() != 0: - module.fail_json( - msg="Interacting with subscription-manager requires root permissions ('become: true')" - ) + module.fail_json(msg="Interacting with subscription-manager requires root permissions ('become: true')") rhsm = Rhsm(module) - name = module.params['name'] - state = module.params['state'] - purge = module.params['purge'] + name = module.params["name"] + state = module.params["state"] + purge = module.params["purge"] repository_modify(module, rhsm, state, name, purge) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/riak.py b/plugins/modules/riak.py index 7b1ce92b4fc..173e2e14f8e 100644 --- a/plugins/modules/riak.py +++ b/plugins/modules/riak.py @@ -89,46 +89,46 @@ def ring_check(module, riak_admin_bin): - cmd = riak_admin_bin + ['ringready'] + cmd = riak_admin_bin + ["ringready"] rc, out, err = module.run_command(cmd) - if rc == 0 and 'TRUE All nodes agree on the ring' in out: + if rc == 0 and "TRUE All nodes agree on the ring" in out: return True else: return False def main(): - module = AnsibleModule( argument_spec=dict( - command=dict(choices=['ping', 'kv_test', 'join', 'plan', 'commit']), - config_dir=dict(default='/etc/riak', type='path'), - http_conn=dict(default='127.0.0.1:8098'), - target_node=dict(default='riak@127.0.0.1'), - wait_for_handoffs=dict(default=0, type='int'), - wait_for_ring=dict(default=0, 
type='int'), - wait_for_service=dict(choices=['kv']), - validate_certs=dict(default=True, type='bool')) + command=dict(choices=["ping", "kv_test", "join", "plan", "commit"]), + config_dir=dict(default="/etc/riak", type="path"), + http_conn=dict(default="127.0.0.1:8098"), + target_node=dict(default="riak@127.0.0.1"), + wait_for_handoffs=dict(default=0, type="int"), + wait_for_ring=dict(default=0, type="int"), + wait_for_service=dict(choices=["kv"]), + validate_certs=dict(default=True, type="bool"), + ) ) - command = module.params.get('command') - http_conn = module.params.get('http_conn') - target_node = module.params.get('target_node') - wait_for_handoffs = module.params.get('wait_for_handoffs') - wait_for_ring = module.params.get('wait_for_ring') - wait_for_service = module.params.get('wait_for_service') + command = module.params.get("command") + http_conn = module.params.get("http_conn") + target_node = module.params.get("target_node") + wait_for_handoffs = module.params.get("wait_for_handoffs") + wait_for_ring = module.params.get("wait_for_ring") + wait_for_service = module.params.get("wait_for_service") # make sure riak commands are on the path - riak_bin = module.get_bin_path('riak') - riak_admin_bin = module.get_bin_path('riak-admin') - riak_admin_bin = [riak_admin_bin] if riak_admin_bin is not None else [riak_bin, 'admin'] + riak_bin = module.get_bin_path("riak") + riak_admin_bin = module.get_bin_path("riak-admin") + riak_admin_bin = [riak_admin_bin] if riak_admin_bin is not None else [riak_bin, "admin"] timeout = time.time() + 120 while True: if time.time() > timeout: - module.fail_json(msg='Timeout, could not fetch Riak stats.') - (response, info) = fetch_url(module, f'http://{http_conn}/stats', force=True, timeout=5) - if info['status'] == 200: + module.fail_json(msg="Timeout, could not fetch Riak stats.") + (response, info) = fetch_url(module, f"http://{http_conn}/stats", force=True, timeout=5) + if info["status"] == 200: stats_raw = response.read() break time.sleep(5) @@ -137,83 +137,80 @@ def main(): try: stats = json.loads(stats_raw) except Exception: - module.fail_json(msg='Could not parse Riak stats.') + module.fail_json(msg="Could not parse Riak stats.") - node_name = stats['nodename'] - nodes = stats['ring_members'] - ring_size = stats['ring_creation_size'] - rc, out, err = module.run_command([riak_bin, 'version']) + node_name = stats["nodename"] + nodes = stats["ring_members"] + ring_size = stats["ring_creation_size"] + rc, out, err = module.run_command([riak_bin, "version"]) version = out.strip() - result = dict(node_name=node_name, - nodes=nodes, - ring_size=ring_size, - version=version) + result = dict(node_name=node_name, nodes=nodes, ring_size=ring_size, version=version) - if command == 'ping': - cmd = [riak_bin, 'ping', target_node] + if command == "ping": + cmd = [riak_bin, "ping", target_node] rc, out, err = module.run_command(cmd) if rc == 0: - result['ping'] = out + result["ping"] = out else: module.fail_json(msg=out) - elif command == 'kv_test': - cmd = riak_admin_bin + ['test'] + elif command == "kv_test": + cmd = riak_admin_bin + ["test"] rc, out, err = module.run_command(cmd) if rc == 0: - result['kv_test'] = out + result["kv_test"] = out else: module.fail_json(msg=out) - elif command == 'join': + elif command == "join": if nodes.count(node_name) == 1 and len(nodes) > 1: - result['join'] = 'Node is already in cluster or staged to be in cluster.' + result["join"] = "Node is already in cluster or staged to be in cluster." 
else: - cmd = riak_admin_bin + ['cluster', 'join', target_node] + cmd = riak_admin_bin + ["cluster", "join", target_node] rc, out, err = module.run_command(cmd) if rc == 0: - result['join'] = out - result['changed'] = True + result["join"] = out + result["changed"] = True else: module.fail_json(msg=out) - elif command == 'plan': - cmd = riak_admin_bin + ['cluster', 'plan'] + elif command == "plan": + cmd = riak_admin_bin + ["cluster", "plan"] rc, out, err = module.run_command(cmd) if rc == 0: - result['plan'] = out - if 'Staged Changes' in out: - result['changed'] = True + result["plan"] = out + if "Staged Changes" in out: + result["changed"] = True else: module.fail_json(msg=out) - elif command == 'commit': - cmd = riak_admin_bin + ['cluster', 'commit'] + elif command == "commit": + cmd = riak_admin_bin + ["cluster", "commit"] rc, out, err = module.run_command(cmd) if rc == 0: - result['commit'] = out - result['changed'] = True + result["commit"] = out + result["changed"] = True else: module.fail_json(msg=out) -# this could take a while, recommend to run in async mode + # this could take a while; running it in async mode is recommended if wait_for_handoffs: timeout = time.time() + wait_for_handoffs while True: - cmd = riak_admin_bin + ['transfers'] + cmd = riak_admin_bin + ["transfers"] rc, out, err = module.run_command(cmd) - if 'No transfers active' in out: - result['handoffs'] = 'No transfers active.' + if "No transfers active" in out: + result["handoffs"] = "No transfers active." break time.sleep(10) if time.time() > timeout: - module.fail_json(msg='Timeout waiting for handoffs.') + module.fail_json(msg="Timeout waiting for handoffs.") if wait_for_service: - cmd = riak_admin_bin + ['wait_for_service', f'riak_{wait_for_service}', node_name] + cmd = riak_admin_bin + ["wait_for_service", f"riak_{wait_for_service}", node_name] rc, out, err = module.run_command(cmd) - result['service'] = out + result["service"] = out if wait_for_ring: timeout = time.time() + wait_for_ring @@ -222,12 +219,12 @@ def main(): break time.sleep(10) if time.time() > timeout: - module.fail_json(msg='Timeout waiting for nodes to agree on ring.') + module.fail_json(msg="Timeout waiting for nodes to agree on ring.") - result['ring_ready'] = ring_check(module, riak_admin_bin) + result["ring_ready"] = ring_check(module, riak_admin_bin) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/rocketchat.py b/plugins/modules/rocketchat.py index 0675db61b51..b884f440e24 100644 --- a/plugins/modules/rocketchat.py +++ b/plugins/modules/rocketchat.py @@ -161,38 +161,40 @@ from ansible.module_utils.urls import fetch_url -ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s' +ROCKETCHAT_INCOMING_WEBHOOK = "%s://%s/hooks/%s" -def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments, is_pre740): +def build_payload_for_rocketchat( + module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments, is_pre740 +): payload = {} if color == "normal" and text is not None: payload = dict(text=text) elif text is not None: payload = dict(attachments=[dict(text=text, color=color)]) if channel is not None: - if channel[0] == '#' or channel[0] == '@': - payload['channel'] = channel + if channel[0] == "#" or channel[0] == "@": + payload["channel"] = channel else: - payload['channel'] = f"#{channel}" + payload["channel"] = f"#{channel}" if username is not None: - payload['username'] = username + payload["username"]
= username if icon_emoji is not None: - payload['icon_emoji'] = icon_emoji + payload["icon_emoji"] = icon_emoji else: - payload['icon_url'] = icon_url + payload["icon_url"] = icon_url if link_names is not None: - payload['link_names'] = link_names + payload["link_names"] = link_names if attachments is not None: - if 'attachments' not in payload: - payload['attachments'] = [] + if "attachments" not in payload: + payload["attachments"] = [] if attachments is not None: for attachment in attachments: - if 'fallback' not in attachment: - attachment['fallback'] = attachment['text'] - payload['attachments'].append(attachment) + if "fallback" not in attachment: + attachment["fallback"] = attachment["text"] + payload["attachments"].append(attachment) payload = module.jsonify(payload) if is_pre740: @@ -201,51 +203,50 @@ def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon def do_notify_rocketchat(module, domain, token, protocol, payload, is_pre740): - - if token.count('/') < 1: + if token.count("/") < 1: module.fail_json(msg="Invalid Token specified, provide a valid token") rocketchat_incoming_webhook = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token) headers = None if not is_pre740: - headers = {'Content-type': 'application/json'} + headers = {"Content-type": "application/json"} response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload, headers=headers) - if info['status'] != 200: + if info["status"] != 200: module.fail_json(msg=f"failed to send message, return status={info['status']}") def main(): module = AnsibleModule( argument_spec=dict( - domain=dict(type='str', required=True), - token=dict(type='str', required=True, no_log=True), - protocol=dict(type='str', default='https', choices=['http', 'https']), - msg=dict(type='str'), - channel=dict(type='str'), - username=dict(type='str', default='Ansible'), - icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'), - icon_emoji=dict(type='str'), - link_names=dict(type='int', default=1, choices=[0, 1]), - validate_certs=dict(default=True, type='bool'), - color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']), - attachments=dict(type='list', elements='dict'), - is_pre740=dict(type='bool') + domain=dict(type="str", required=True), + token=dict(type="str", required=True, no_log=True), + protocol=dict(type="str", default="https", choices=["http", "https"]), + msg=dict(type="str"), + channel=dict(type="str"), + username=dict(type="str", default="Ansible"), + icon_url=dict(type="str", default="https://docs.ansible.com/favicon.ico"), + icon_emoji=dict(type="str"), + link_names=dict(type="int", default=1, choices=[0, 1]), + validate_certs=dict(default=True, type="bool"), + color=dict(type="str", default="normal", choices=["normal", "good", "warning", "danger"]), + attachments=dict(type="list", elements="dict"), + is_pre740=dict(type="bool"), ) ) - domain = module.params['domain'] - token = module.params['token'] - protocol = module.params['protocol'] - text = module.params['msg'] - channel = module.params['channel'] - username = module.params['username'] - icon_url = module.params['icon_url'] - icon_emoji = module.params['icon_emoji'] - link_names = module.params['link_names'] - color = module.params['color'] - attachments = module.params['attachments'] - is_pre740 = module.params['is_pre740'] + domain = module.params["domain"] + token = module.params["token"] + protocol = module.params["protocol"] + text = module.params["msg"] + channel = 
module.params["channel"] + username = module.params["username"] + icon_url = module.params["icon_url"] + icon_emoji = module.params["icon_emoji"] + link_names = module.params["link_names"] + color = module.params["color"] + attachments = module.params["attachments"] + is_pre740 = module.params["is_pre740"] if is_pre740 is None: module.deprecate( @@ -256,11 +257,13 @@ def main(): ) is_pre740 = True - payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments, is_pre740) + payload = build_payload_for_rocketchat( + module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments, is_pre740 + ) do_notify_rocketchat(module, domain, token, protocol, payload, is_pre740) module.exit_json(msg="OK") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/rollbar_deployment.py b/plugins/modules/rollbar_deployment.py index 60930af680b..4dab9dc42b6 100644 --- a/plugins/modules/rollbar_deployment.py +++ b/plugins/modules/rollbar_deployment.py @@ -92,7 +92,6 @@ def main(): - module = AnsibleModule( argument_spec=dict( token=dict(required=True, no_log=True), @@ -101,43 +100,43 @@ def main(): user=dict(), rollbar_user=dict(), comment=dict(), - url=dict(default='https://api.rollbar.com/api/1/deploy/'), - validate_certs=dict(default=True, type='bool'), + url=dict(default="https://api.rollbar.com/api/1/deploy/"), + validate_certs=dict(default=True, type="bool"), ), - supports_check_mode=True + supports_check_mode=True, ) if module.check_mode: module.exit_json(changed=True) params = dict( - access_token=module.params['token'], - environment=module.params['environment'], - revision=module.params['revision'] + access_token=module.params["token"], + environment=module.params["environment"], + revision=module.params["revision"], ) - if module.params['user']: - params['local_username'] = module.params['user'] + if module.params["user"]: + params["local_username"] = module.params["user"] - if module.params['rollbar_user']: - params['rollbar_username'] = module.params['rollbar_user'] + if module.params["rollbar_user"]: + params["rollbar_username"] = module.params["rollbar_user"] - if module.params['comment']: - params['comment'] = module.params['comment'] + if module.params["comment"]: + params["comment"] = module.params["comment"] - url = module.params.get('url') + url = module.params.get("url") try: data = urlencode(params) - response, info = fetch_url(module, url, data=data, method='POST') + response, info = fetch_url(module, url, data=data, method="POST") except Exception as e: - module.fail_json(msg=f'Unable to notify Rollbar: {e}', exception=traceback.format_exc()) + module.fail_json(msg=f"Unable to notify Rollbar: {e}", exception=traceback.format_exc()) else: - if info['status'] == 200: + if info["status"] == 200: module.exit_json(changed=True) else: module.fail_json(msg=f"HTTP result code: {info['status']} connecting to {url}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/rpm_ostree_pkg.py b/plugins/modules/rpm_ostree_pkg.py index a543986706e..cf5b9272351 100644 --- a/plugins/modules/rpm_ostree_pkg.py +++ b/plugins/modules/rpm_ostree_pkg.py @@ -110,65 +110,67 @@ class RpmOstreePkg: def __init__(self, module): self.module = module self.params = module.params - self.state = module.params['state'] + self.state = module.params["state"] def ensure(self): results = dict( rc=0, changed=False, - action='', + action="", packages=[], - stdout='', - 
stderr='', - cmd='', + stdout="", + stderr="", + cmd="", needs_reboot=False, ) # Ensure rpm-ostree command exists - cmd = [self.module.get_bin_path('rpm-ostree', required=True)] + cmd = [self.module.get_bin_path("rpm-ostree", required=True)] # Decide action to perform - if self.state == 'present': - results['action'] = 'install' - cmd.append('install') - elif self.state == 'absent': - results['action'] = 'uninstall' - cmd.append('uninstall') + if self.state == "present": + results["action"] = "install" + cmd.append("install") + elif self.state == "absent": + results["action"] = "uninstall" + cmd.append("uninstall") # Add the options to the command line - if self.params['apply_live'] and self.state == 'present': - cmd.extend(['--apply-live', '--assumeyes']) + if self.params["apply_live"] and self.state == "present": + cmd.extend(["--apply-live", "--assumeyes"]) # Additional parameters - cmd.extend(['--allow-inactive', '--idempotent', '--unchanged-exit-77']) - for pkg in self.params['name']: + cmd.extend(["--allow-inactive", "--idempotent", "--unchanged-exit-77"]) + for pkg in self.params["name"]: cmd.append(pkg) - results['packages'].append(pkg) + results["packages"].append(pkg) rc, out, err = self.module.run_command(cmd) # Determine if system needs a reboot to apply change if 'Changes queued for next boot. Run "systemctl reboot" to start a reboot' in out: - results['needs_reboot'] = True - - results.update(dict( - rc=rc, - cmd=' '.join(cmd), - stdout=out, - stderr=err, - )) + results["needs_reboot"] = True + + results.update( + dict( + rc=rc, + cmd=" ".join(cmd), + stdout=out, + stderr=err, + ) + ) # A few possible options: # - rc=0 - succeeded in making a change # - rc=77 - no change was needed # - rc=? - error if rc == 0: - results['changed'] = True + results["changed"] = True elif rc == 77: - results['changed'] = False - results['rc'] = 0 + results["changed"] = False + results["rc"] = 0 else: - self.module.fail_json(msg='non-zero return code', **results) + self.module.fail_json(msg="non-zero return code", **results) self.module.exit_json(**results) @@ -176,18 +178,15 @@ def ensure(self): def main(): module = AnsibleModule( argument_spec=dict( - state=dict( - default="present", - choices=['absent', 'present'] - ), + state=dict(default="present", choices=["absent", "present"]), name=dict( aliases=["pkg"], required=True, - type='list', - elements='str', + type="list", + elements="str", ), apply_live=dict( - type='bool', + type="bool", default=False, ), ), @@ -197,5 +196,5 @@ def main(): rpm_ostree_pkg.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/rundeck_acl_policy.py b/plugins/modules/rundeck_acl_policy.py index ed7dc2abbc1..1e676650ceb 100644 --- a/plugins/modules/rundeck_acl_policy.py +++ b/plugins/modules/rundeck_acl_policy.py @@ -157,12 +157,17 @@ def create_or_update_acl(self): if info["status"] == 201: self.module.exit_json(changed=True, before={}, after=self.get_acl()) elif info["status"] == 400: - self.module.fail_json(msg=f"Unable to validate acl {self.module.params['name']}. Please ensure it is a valid ACL") + self.module.fail_json( + msg=f"Unable to validate acl {self.module.params['name']}. 
Please ensure it is a valid ACL" + ) elif info["status"] == 409: self.module.fail_json(msg=f"ACL {self.module.params['name']} already exists") else: - self.module.fail_json(msg=f"Unhandled HTTP status {info['status']}, please report the bug", - before={}, after=self.get_acl()) + self.module.fail_json( + msg=f"Unhandled HTTP status {info['status']}, please report the bug", + before={}, + after=self.get_acl(), + ) else: if facts["contents"] == self.module.params["policy"]: self.module.exit_json(changed=False, before=facts, after=facts) @@ -180,7 +185,9 @@ def create_or_update_acl(self): if info["status"] == 200: self.module.exit_json(changed=True, before=facts, after=self.get_acl()) elif info["status"] == 400: - self.module.fail_json(msg=f"Unable to validate acl {self.module.params['name']}. Please ensure it is a valid ACL") + self.module.fail_json( + msg=f"Unable to validate acl {self.module.params['name']}. Please ensure it is a valid ACL" + ) elif info["status"] == 404: self.module.fail_json(msg=f"ACL {self.module.params['name']} doesn't exists. Cannot update.") @@ -204,35 +211,39 @@ def remove_acl(self): def main(): # Also allow the user to set values for fetch_url argument_spec = api_argument_spec() - argument_spec.update(dict( - state=dict(type='str', choices=['present', 'absent'], default='present'), - name=dict(required=True, type='str'), - policy=dict(type='str'), - project=dict(type='str'), - )) + argument_spec.update( + dict( + state=dict(type="str", choices=["present", "absent"], default="present"), + name=dict(required=True, type="str"), + policy=dict(type="str"), + project=dict(type="str"), + ) + ) - argument_spec['api_token']['aliases'] = ['token'] + argument_spec["api_token"]["aliases"] = ["token"] module = AnsibleModule( argument_spec=argument_spec, required_if=[ - ['state', 'present', ['policy']], + ["state", "present", ["policy"]], ], supports_check_mode=True, ) if not bool(re.match("[a-zA-Z0-9,.+_-]+", module.params["name"])): - module.fail_json(msg="Name contains forbidden characters. The policy can contain the characters: a-zA-Z0-9,.+_-") + module.fail_json( + msg="Name contains forbidden characters. 
The policy can contain the characters: a-zA-Z0-9,.+_-" + ) if module.params["api_version"] < 14: module.fail_json(msg="API version should be at least 14") rundeck = RundeckACLManager(module) - if module.params['state'] == 'present': + if module.params["state"] == "present": rundeck.create_or_update_acl() - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": rundeck.remove_acl() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/rundeck_job_executions_info.py b/plugins/modules/rundeck_job_executions_info.py index b067a5dfbcd..cc49221ceb6 100644 --- a/plugins/modules/rundeck_job_executions_info.py +++ b/plugins/modules/rundeck_job_executions_info.py @@ -130,10 +130,7 @@ from urllib.parse import quote from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rundeck import ( - api_argument_spec, - api_request -) +from ansible_collections.community.general.plugins.module_utils.rundeck import api_argument_spec, api_request class RundeckJobExecutionsInfo: @@ -150,35 +147,28 @@ def job_executions(self): response, info = api_request( module=self.module, endpoint=f"job/{quote(self.job_id)}/executions?offset={self.offset}&max={self.max}&status={self.status}", - method="GET" + method="GET", ) if info["status"] != 200: - self.module.fail_json( - msg=info["msg"], - executions=response - ) + self.module.fail_json(msg=info["msg"], executions=response) self.module.exit_json(msg="Executions info result", **response) def main(): argument_spec = api_argument_spec() - argument_spec.update(dict( - job_id=dict(required=True, type="str"), - offset=dict(type="int", default=0), - max=dict(type="int", default=20), - status=dict( - type="str", - choices=["succeeded", "failed", "aborted", "running"] + argument_spec.update( + dict( + job_id=dict(required=True, type="str"), + offset=dict(type="int", default=0), + max=dict(type="int", default=20), + status=dict(type="str", choices=["succeeded", "failed", "aborted", "running"]), ) - )) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if module.params["api_version"] < 14: module.fail_json(msg="API version should be at least 14") diff --git a/plugins/modules/rundeck_job_run.py b/plugins/modules/rundeck_job_run.py index 52909f54120..14921295a13 100644 --- a/plugins/modules/rundeck_job_run.py +++ b/plugins/modules/rundeck_job_run.py @@ -183,10 +183,7 @@ from urllib.parse import quote from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rundeck import ( - api_argument_spec, - api_request -) +from ansible_collections.community.general.plugins.module_utils.rundeck import api_argument_spec, api_request class RundeckJobRun: @@ -199,17 +196,14 @@ def __init__(self, module): self.filter_nodes = self.module.params["filter_nodes"] or "" self.run_at_time = self.module.params["run_at_time"] or "" self.loglevel = self.module.params["loglevel"].upper() - self.wait_execution = self.module.params['wait_execution'] - self.wait_execution_delay = self.module.params['wait_execution_delay'] - self.wait_execution_timeout = self.module.params['wait_execution_timeout'] - self.abort_on_timeout = self.module.params['abort_on_timeout'] + self.wait_execution = self.module.params["wait_execution"] + self.wait_execution_delay = self.module.params["wait_execution_delay"] + 
self.wait_execution_timeout = self.module.params["wait_execution_timeout"] + self.abort_on_timeout = self.module.params["abort_on_timeout"] for k, v in self.job_options.items(): if not isinstance(v, str): - self.module.exit_json( - msg=f"Job option '{k}' value must be a string", - execution_info={} - ) + self.module.exit_json(msg=f"Job option '{k}' value must be a string", execution_info={}) def job_status_check(self, execution_id): response = dict() @@ -219,23 +213,20 @@ def job_status_check(self, execution_id): while not timeout: endpoint = f"execution/{execution_id}" response = api_request(module=self.module, endpoint=endpoint)[0] - output = api_request(module=self.module, - endpoint=f"execution/{execution_id}/output") + output = api_request(module=self.module, endpoint=f"execution/{execution_id}/output") log_output = "\n".join([x["log"] for x in output[0]["entries"]]) response.update({"output": log_output}) if response["status"] == "aborted": break elif response["status"] == "scheduled": - self.module.exit_json(msg=f"Job scheduled to run at {self.run_at_time}", - execution_info=response, - changed=True) + self.module.exit_json( + msg=f"Job scheduled to run at {self.run_at_time}", execution_info=response, changed=True + ) elif response["status"] == "failed": - self.module.fail_json(msg="Job execution failed", - execution_info=response) + self.module.fail_json(msg="Job execution failed", execution_info=response) elif response["status"] == "succeeded": - self.module.exit_json(msg="Job execution succeeded!", - execution_info=response) + self.module.exit_json(msg="Job execution succeeded!", execution_info=response) if datetime.now() >= due: timeout = True @@ -256,59 +247,49 @@ def job_run(self): "loglevel": self.loglevel, "options": self.job_options, "runAtTime": self.run_at_time, - "filter": self.filter_nodes - } + "filter": self.filter_nodes, + }, ) if info["status"] != 200: self.module.fail_json(msg=info["msg"]) if not self.wait_execution: - self.module.exit_json(msg="Job run send successfully!", - execution_info=response) + self.module.exit_json(msg="Job run send successfully!", execution_info=response) job_status = self.job_status_check(response["id"]) if job_status["timed_out"]: if self.abort_on_timeout: - api_request( - module=self.module, - endpoint=f"execution/{response['id']}/abort", - method="GET" - ) + api_request(module=self.module, endpoint=f"execution/{response['id']}/abort", method="GET") abort_status = self.job_status_check(response["id"]) - self.module.fail_json(msg="Job execution aborted due the timeout specified", - execution_info=abort_status) + self.module.fail_json( + msg="Job execution aborted due the timeout specified", execution_info=abort_status + ) - self.module.fail_json(msg="Job execution timed out", - execution_info=job_status) + self.module.fail_json(msg="Job execution timed out", execution_info=job_status) def main(): argument_spec = api_argument_spec() - argument_spec.update(dict( - job_id=dict(required=True, type="str"), - job_options=dict(type="dict"), - filter_nodes=dict(type="str"), - run_at_time=dict(type="str"), - wait_execution=dict(type="bool", default=True), - wait_execution_delay=dict(type="int", default=5), - wait_execution_timeout=dict(type="int", default=120), - abort_on_timeout=dict(type="bool", default=False), - loglevel=dict( - type="str", - choices=["debug", "verbose", "info", "warn", "error"], - default="info" + argument_spec.update( + dict( + job_id=dict(required=True, type="str"), + job_options=dict(type="dict"), + 
filter_nodes=dict(type="str"), + run_at_time=dict(type="str"), + wait_execution=dict(type="bool", default=True), + wait_execution_delay=dict(type="int", default=5), + wait_execution_timeout=dict(type="int", default=120), + abort_on_timeout=dict(type="bool", default=False), + loglevel=dict(type="str", choices=["debug", "verbose", "info", "warn", "error"], default="info"), ) - )) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=False ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + if module.params["api_version"] < 14: module.fail_json(msg="API version should be at least 14") diff --git a/plugins/modules/rundeck_project.py b/plugins/modules/rundeck_project.py index 5ace492c6d9..76ceb91ba84 100644 --- a/plugins/modules/rundeck_project.py +++ b/plugins/modules/rundeck_project.py @@ -126,9 +126,7 @@ def create_or_update_project(self): self.module.exit_json( changed=True, before={}, - after={ - "name": self.module.params["name"] - }, + after={"name": self.module.params["name"]}, ) resp, info = api_request( @@ -138,14 +136,17 @@ def create_or_update_project(self): data={ "name": self.module.params["name"], "config": {}, - } + }, ) if info["status"] == 201: self.module.exit_json(changed=True, before={}, after=self.get_project_facts()) else: - self.module.fail_json(msg=f"Unhandled HTTP status {info['status']}, please report the bug", - before={}, after=self.get_project_facts()) + self.module.fail_json( + msg=f"Unhandled HTTP status {info['status']}, please report the bug", + before={}, + after=self.get_project_facts(), + ) else: self.module.exit_json(changed=False, before=facts, after=facts) @@ -168,27 +169,26 @@ def remove_project(self): def main(): # Also allow the user to set values for fetch_url argument_spec = api_argument_spec() - argument_spec.update(dict( - state=dict(type='str', choices=['present', 'absent'], default='present'), - name=dict(required=True, type='str'), - )) + argument_spec.update( + dict( + state=dict(type="str", choices=["present", "absent"], default="present"), + name=dict(required=True, type="str"), + ) + ) - argument_spec['api_token']['aliases'] = ['token'] + argument_spec["api_token"]["aliases"] = ["token"] - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) if module.params["api_version"] < 14: module.fail_json(msg="API version should be at least 14") rundeck = RundeckProjectManager(module) - if module.params['state'] == 'present': + if module.params["state"] == "present": rundeck.create_or_update_project() - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": rundeck.remove_project() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/runit.py b/plugins/modules/runit.py index 430862d5add..cda921d9e06 100644 --- a/plugins/modules/runit.py +++ b/plugins/modules/runit.py @@ -97,21 +97,21 @@ class Sv: def __init__(self, module): self.extra_paths = [] - self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state'] + self.report_vars = ["state", "enabled", "svc_full", "src_full", "pid", "duration", "full_state"] self.module = module - self.name = module.params['name'] - self.service_dir = module.params['service_dir'] - self.service_src = module.params['service_src'] + self.name = module.params["name"] + self.service_dir = module.params["service_dir"] + self.service_src = 
module.params["service_src"] self.enabled = None self.full_state = None self.state = None self.pid = None self.duration = None - self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths, required=True) - self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths) + self.svc_cmd = module.get_bin_path("sv", opt_dirs=self.extra_paths, required=True) + self.svstat_cmd = module.get_bin_path("sv", opt_dirs=self.extra_paths) self.svc_full = f"{self.service_dir}/{self.name}" self.src_full = f"{self.service_src}/{self.name}" @@ -119,26 +119,26 @@ def __init__(self, module): if self.enabled: self.get_status() else: - self.state = 'stopped' + self.state = "stopped" def enable(self): if os.path.exists(self.src_full): try: os.symlink(self.src_full, self.svc_full) except OSError as e: - self.module.fail_json(path=self.src_full, msg=f'Error while linking: {e}') + self.module.fail_json(path=self.src_full, msg=f"Error while linking: {e}") else: self.module.fail_json(msg=f"Could not find source for service to enable ({self.src_full}).") def disable(self): - self.execute_command([self.svc_cmd, 'force-stop', self.src_full]) + self.execute_command([self.svc_cmd, "force-stop", self.src_full]) try: os.unlink(self.svc_full) except OSError as e: - self.module.fail_json(path=self.svc_full, msg=f'Error while unlinking: {e}') + self.module.fail_json(path=self.svc_full, msg=f"Error while unlinking: {e}") def get_status(self): - (rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full]) + (rc, out, err) = self.execute_command([self.svstat_cmd, "status", self.svc_full]) if err is not None and err: self.full_state = self.state = err @@ -149,54 +149,54 @@ def get_status(self): # "down: /etc/service/updater: 127s, normally up; run: log: (pid 364) 263439s\n" full_state_no_logger = self.full_state.split("; ")[0] - m = re.search(r'\(pid (\d+)\)', full_state_no_logger) + m = re.search(r"\(pid (\d+)\)", full_state_no_logger) if m: self.pid = m.group(1) - m = re.search(r' (\d+)s', full_state_no_logger) + m = re.search(r" (\d+)s", full_state_no_logger) if m: self.duration = m.group(1) - if re.search(r'^run:', full_state_no_logger): - self.state = 'started' - elif re.search(r'^down:', full_state_no_logger): - self.state = 'stopped' + if re.search(r"^run:", full_state_no_logger): + self.state = "started" + elif re.search(r"^down:", full_state_no_logger): + self.state = "stopped" else: - self.state = 'unknown' + self.state = "unknown" return def started(self): return self.start() def start(self): - return self.execute_command([self.svc_cmd, 'start', self.svc_full]) + return self.execute_command([self.svc_cmd, "start", self.svc_full]) def stopped(self): return self.stop() def stop(self): - return self.execute_command([self.svc_cmd, 'stop', self.svc_full]) + return self.execute_command([self.svc_cmd, "stop", self.svc_full]) def once(self): - return self.execute_command([self.svc_cmd, 'once', self.svc_full]) + return self.execute_command([self.svc_cmd, "once", self.svc_full]) def reloaded(self): return self.reload() def reload(self): - return self.execute_command([self.svc_cmd, 'reload', self.svc_full]) + return self.execute_command([self.svc_cmd, "reload", self.svc_full]) def restarted(self): return self.restart() def restart(self): - return self.execute_command([self.svc_cmd, 'restart', self.svc_full]) + return self.execute_command([self.svc_cmd, "restart", self.svc_full]) def killed(self): return self.kill() def kill(self): - return self.execute_command([self.svc_cmd, 'force-stop', 
self.svc_full]) + return self.execute_command([self.svc_cmd, "force-stop", self.svc_full]) def execute_command(self, cmd): try: @@ -216,19 +216,19 @@ def report(self): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']), - enabled=dict(type='bool'), - service_dir=dict(type='str', default='/var/service'), - service_src=dict(type='str', default='/etc/sv'), + name=dict(type="str", required=True), + state=dict(type="str", choices=["killed", "once", "reloaded", "restarted", "started", "stopped"]), + enabled=dict(type="bool"), + service_dir=dict(type="str", default="/var/service"), + service_src=dict(type="str", default="/etc/sv"), ), supports_check_mode=True, ) - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C") - state = module.params['state'] - enabled = module.params['enabled'] + state = module.params["state"] + enabled = module.params["enabled"] sv = Sv(module) changed = False @@ -252,5 +252,5 @@ def main(): module.exit_json(changed=changed, sv=sv.report()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/say.py b/plugins/modules/say.py index b19e9212add..4a896038dd2 100644 --- a/plugins/modules/say.py +++ b/plugins/modules/say.py @@ -56,25 +56,24 @@ def say(module, executable, msg, voice): cmd = [executable, msg] if voice: - cmd.extend(('-v', voice)) + cmd.extend(("-v", voice)) module.run_command(cmd, check_rc=True) def main(): - module = AnsibleModule( argument_spec=dict( msg=dict(required=True), voice=dict(), ), - supports_check_mode=True + supports_check_mode=True, ) - msg = module.params['msg'] - voice = module.params['voice'] - possibles = ('say', 'espeak', 'espeak-ng') + msg = module.params["msg"] + voice = module.params["voice"] + possibles = ("say", "espeak", "espeak-ng") - if platform.system() != 'Darwin': + if platform.system() != "Darwin": # 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter voice = None @@ -93,5 +92,5 @@ def main(): module.exit_json(msg=msg, changed=True) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_compute.py b/plugins/modules/scaleway_compute.py index 8214a051171..0c1bdcb8059 100644 --- a/plugins/modules/scaleway_compute.py +++ b/plugins/modules/scaleway_compute.py @@ -187,21 +187,15 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.datetime import now -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway - -SCALEWAY_SERVER_STATES = ( - 'stopped', - 'stopping', - 'starting', - 'running', - 'locked' +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_LOCATION, + scaleway_argument_spec, + Scaleway, ) -SCALEWAY_TRANSITIONS_STATES = ( - "stopping", - "starting", - "pending" -) +SCALEWAY_SERVER_STATES = ("stopped", "stopping", "starting", "running", "locked") + +SCALEWAY_TRANSITIONS_STATES = ("stopping", "starting", "pending") def check_image_id(compute_api, image_id): @@ -220,7 +214,7 @@ def fetch_state(compute_api, server): return "absent" if not response.ok: - msg = f'Error during state fetching: ({response.status_code}) {response.json}' + msg = f"Error during 
state fetching: ({response.status_code}) {response.json}" compute_api.module.fail_json(msg=msg) try: @@ -264,7 +258,7 @@ def public_ip_payload(compute_api, public_ip): # We check that the IP we want to attach exists, if so its ID is returned response = compute_api.get("ips") if not response.ok: - msg = f'Error during public IP validation: ({response.status_code}) {response.json}' + msg = f"Error during public IP validation: ({response.status_code}) {response.json}" compute_api.module.fail_json(msg=msg) ip_list = [] @@ -281,13 +275,14 @@ def public_ip_payload(compute_api, public_ip): def create_server(compute_api, server): compute_api.module.debug("Starting a create_server") target_server = None - data = {"enable_ipv6": server["enable_ipv6"], - "tags": server["tags"], - "commercial_type": server["commercial_type"], - "image": server["image"], - "dynamic_ip_required": server["dynamic_ip_required"], - "name": server["name"] - } + data = { + "enable_ipv6": server["enable_ipv6"], + "tags": server["tags"], + "commercial_type": server["commercial_type"], + "image": server["image"], + "dynamic_ip_required": server["dynamic_ip_required"], + "name": server["name"], + } if server["project"]: data["project"] = server["project"] @@ -301,7 +296,7 @@ def create_server(compute_api, server): response = compute_api.post(path="servers", data=data) if not response.ok: - msg = f'Error during server creation: ({response.status_code}) {response.json}' + msg = f"Error during server creation: ({response.status_code}) {response.json}" compute_api.module.fail_json(msg=msg) try: @@ -327,10 +322,9 @@ def start_server(compute_api, server): def perform_action(compute_api, server, action): - response = compute_api.post(path=f"servers/{server['id']}/action", - data={"action": action}) + response = compute_api.post(path=f"servers/{server['id']}/action", data={"action": action}) if not response.ok: - msg = f'Error during server {action}: ({response.status_code}) {response.json}' + msg = f"Error during server {action}: ({response.status_code}) {response.json}" compute_api.module.fail_json(msg=msg) wait_to_complete_state_transition(compute_api=compute_api, server=server) @@ -342,7 +336,7 @@ def remove_server(compute_api, server): compute_api.module.debug("Starting remove server strategy") response = compute_api.delete(path=f"servers/{server['id']}") if not response.ok: - msg = f'Error during server deletion: ({response.status_code}) {response.json}' + msg = f"Error during server deletion: ({response.status_code}) {response.json}" compute_api.module.fail_json(msg=msg) wait_to_complete_state_transition(compute_api=compute_api, server=server) @@ -364,14 +358,17 @@ def present_strategy(compute_api, wished_server): else: target_server = query_results[0] - if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, - wished_server=wished_server): + if server_attributes_should_be_changed( + compute_api=compute_api, target_server=target_server, wished_server=wished_server + ): changed = True if compute_api.module.check_mode: return changed, {"status": f"Server {target_server['id']} attributes would be changed."} - target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + target_server = server_change_attributes( + compute_api=compute_api, target_server=target_server, wished_server=wished_server + ) return changed, target_server @@ -398,7 +395,7 @@ def absent_strategy(compute_api, wished_server): response = 
stop_server(compute_api=compute_api, server=target_server) if not response.ok: - err_msg = f'Error while stopping a server before removing it [{response.status_code}: {response.json}]' + err_msg = f"Error while stopping a server before removing it [{response.status_code}: {response.json}]" compute_api.module.fail_json(msg=err_msg) wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True) @@ -406,7 +403,7 @@ def absent_strategy(compute_api, wished_server): response = remove_server(compute_api=compute_api, server=target_server) if not response.ok: - err_msg = f'Error while removing server [{response.status_code}: {response.json}]' + err_msg = f"Error while removing server [{response.status_code}: {response.json}]" compute_api.module.fail_json(msg=err_msg) return changed, {"status": f"Server {target_server['id']} deleted"} @@ -426,14 +423,17 @@ def running_strategy(compute_api, wished_server): else: target_server = query_results[0] - if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, - wished_server=wished_server): + if server_attributes_should_be_changed( + compute_api=compute_api, target_server=target_server, wished_server=wished_server + ): changed = True if compute_api.module.check_mode: return changed, {"status": f"Server {target_server['id']} attributes would be changed before running it."} - target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + target_server = server_change_attributes( + compute_api=compute_api, target_server=target_server, wished_server=wished_server + ) current_state = fetch_state(compute_api=compute_api, server=target_server) if current_state not in ("running", "starting"): @@ -445,7 +445,7 @@ def running_strategy(compute_api, wished_server): response = start_server(compute_api=compute_api, server=target_server) if not response.ok: - msg = f'Error while running server [{response.status_code}: {response.json}]' + msg = f"Error while running server [{response.status_code}: {response.json}]" compute_api.module.fail_json(msg=msg) return changed, target_server @@ -458,7 +458,6 @@ def stop_strategy(compute_api, wished_server): changed = False if not query_results: - if compute_api.module.check_mode: return changed, {"status": "A server would be created before being stopped."} @@ -469,15 +468,17 @@ def stop_strategy(compute_api, wished_server): compute_api.module.debug("stop_strategy: Servers are found.") - if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, - wished_server=wished_server): + if server_attributes_should_be_changed( + compute_api=compute_api, target_server=target_server, wished_server=wished_server + ): changed = True if compute_api.module.check_mode: - return changed, { - "status": f"Server {target_server['id']} attributes would be changed before stopping it."} + return changed, {"status": f"Server {target_server['id']} attributes would be changed before stopping it."} - target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + target_server = server_change_attributes( + compute_api=compute_api, target_server=target_server, wished_server=wished_server + ) wait_to_complete_state_transition(compute_api=compute_api, server=target_server) @@ -495,7 +496,7 @@ def stop_strategy(compute_api, wished_server): compute_api.module.debug(response.ok) if not response.ok: - msg = f'Error while stopping server 
[{response.status_code}: {response.json}]' + msg = f"Error while stopping server [{response.status_code}: {response.json}]" compute_api.module.fail_json(msg=msg) return changed, target_server @@ -515,16 +516,17 @@ def restart_strategy(compute_api, wished_server): else: target_server = query_results[0] - if server_attributes_should_be_changed(compute_api=compute_api, - target_server=target_server, - wished_server=wished_server): + if server_attributes_should_be_changed( + compute_api=compute_api, target_server=target_server, wished_server=wished_server + ): changed = True if compute_api.module.check_mode: - return changed, { - "status": f"Server {target_server['id']} attributes would be changed before rebooting it."} + return changed, {"status": f"Server {target_server['id']} attributes would be changed before rebooting it."} - target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + target_server = server_change_attributes( + compute_api=compute_api, target_server=target_server, wished_server=wished_server + ) changed = True if compute_api.module.check_mode: @@ -536,14 +538,14 @@ def restart_strategy(compute_api, wished_server): response = restart_server(compute_api=compute_api, server=target_server) wait_to_complete_state_transition(compute_api=compute_api, server=target_server) if not response.ok: - msg = f'Error while restarting server that was running [{response.status_code}: {response.json}].' + msg = f"Error while restarting server that was running [{response.status_code}: {response.json}]." compute_api.module.fail_json(msg=msg) if fetch_state(compute_api=compute_api, server=target_server) in ("stopped",): response = restart_server(compute_api=compute_api, server=target_server) wait_to_complete_state_transition(compute_api=compute_api, server=target_server) if not response.ok: - msg = f'Error while restarting server that was stopped [{response.status_code}: {response.json}].' + msg = f"Error while restarting server that was stopped [{response.status_code}: {response.json}]." 
compute_api.module.fail_json(msg=msg) return changed, target_server @@ -554,18 +556,17 @@ def restart_strategy(compute_api, wished_server): "restarted": restart_strategy, "stopped": stop_strategy, "running": running_strategy, - "absent": absent_strategy + "absent": absent_strategy, } def find(compute_api, wished_server, per_page=1): compute_api.module.debug("Getting inside find") # Only the name attribute is accepted in the Compute query API - response = compute_api.get("servers", params={"name": wished_server["name"], - "per_page": per_page}) + response = compute_api.get("servers", params={"name": wished_server["name"], "per_page": per_page}) if not response.ok: - msg = f'Error during server search: ({response.status_code}) {response.json}' + msg = f"Error during server search: ({response.status_code}) {response.json}" compute_api.module.fail_json(msg=msg) search_results = response.json["servers"] @@ -596,8 +597,12 @@ def server_attributes_should_be_changed(compute_api, target_server, wished_serve for key in PATCH_MUTABLE_SERVER_ATTRIBUTES: if key in target_server and key in wished_server: # When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook - if isinstance(target_server[key], dict) and wished_server[key] and "id" in target_server[key].keys( - ) and target_server[key]["id"] != wished_server[key]: + if ( + isinstance(target_server[key], dict) + and wished_server[key] + and "id" in target_server[key].keys() + and target_server[key]["id"] != wished_server[key] + ): return True # Handling other structure compare simply the two objects content elif not isinstance(target_server[key], dict) and target_server[key] != wished_server[key]: @@ -623,10 +628,9 @@ def server_change_attributes(compute_api, target_server, wished_server): elif not isinstance(target_server[key], dict): patch_payload[key] = wished_server[key] - response = compute_api.patch(path=f"servers/{target_server['id']}", - data=patch_payload) + response = compute_api.patch(path=f"servers/{target_server['id']}", data=patch_payload) if not response.ok: - msg = f'Error during server attributes patching: ({response.status_code}) {response.json}' + msg = f"Error during server attributes patching: ({response.status_code}) {response.json}" compute_api.module.fail_json(msg=msg) try: @@ -650,9 +654,9 @@ def core(module): "tags": module.params["tags"], "organization": module.params["organization"], "project": module.params["project"], - "security_group": module.params["security_group"] + "security_group": module.params["security_group"], } - module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + module.params["api_url"] = SCALEWAY_LOCATION[region]["api_endpoint"] compute_api = Scaleway(module=module) @@ -668,35 +672,37 @@ def core(module): def main(): argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - image=dict(required=True), - name=dict(), - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - commercial_type=dict(required=True), - enable_ipv6=dict(default=False, type="bool"), - public_ip=dict(default="absent"), - state=dict(choices=list(state_strategy.keys()), default='present'), - tags=dict(type="list", elements="str", default=[]), - organization=dict(), - project=dict(), - wait=dict(type="bool", default=False), - wait_timeout=dict(type="int", default=300), - wait_sleep_time=dict(type="int", default=3), - security_group=dict(), - )) + argument_spec.update( + dict( + image=dict(required=True), + name=dict(), + 
region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + commercial_type=dict(required=True), + enable_ipv6=dict(default=False, type="bool"), + public_ip=dict(default="absent"), + state=dict(choices=list(state_strategy.keys()), default="present"), + tags=dict(type="list", elements="str", default=[]), + organization=dict(), + project=dict(), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=300), + wait_sleep_time=dict(type="int", default=3), + security_group=dict(), + ) + ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ('organization', 'project'), + ("organization", "project"), ], required_one_of=[ - ('organization', 'project'), + ("organization", "project"), ], ) core(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_compute_private_network.py b/plugins/modules/scaleway_compute_private_network.py index 79274757df2..7a0332a8e67 100644 --- a/plugins/modules/scaleway_compute_private_network.py +++ b/plugins/modules/scaleway_compute_private_network.py @@ -121,22 +121,25 @@ } """ -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_LOCATION, + scaleway_argument_spec, + Scaleway, +) from ansible.module_utils.basic import AnsibleModule def get_nics_info(api, compute_id, private_network_id): - response = api.get(f"servers/{compute_id}/private_nics") if not response.ok: msg = f"Error during get servers information: {response.info['msg']}: '{response.json['message']}' ({response.json})" api.module.fail_json(msg=msg) i = 0 - list_nics = response.json['private_nics'] + list_nics = response.json["private_nics"] while i < len(list_nics): - if list_nics[i]['private_network_id'] == private_network_id: + if list_nics[i]["private_network_id"] == private_network_id: return list_nics[i] i += 1 @@ -144,7 +147,6 @@ def get_nics_info(api, compute_id, private_network_id): def present_strategy(api, compute_id, private_network_id): - changed = False nic = get_nics_info(api, compute_id, private_network_id) if nic is not None: @@ -158,13 +160,14 @@ def present_strategy(api, compute_id, private_network_id): response = api.post(path=f"servers/{compute_id}/private_nics", data=data) if not response.ok: - api.module.fail_json(msg=f'Error when adding a private network to a server [{response.status_code}: {response.json}]') + api.module.fail_json( + msg=f"Error when adding a private network to a server [{response.status_code}: {response.json}]" + ) return changed, response.json def absent_strategy(api, compute_id, private_network_id): - changed = False nic = get_nics_info(api, compute_id, private_network_id) if nic is None: @@ -177,18 +180,19 @@ def absent_strategy(api, compute_id, private_network_id): response = api.delete(f"servers/{compute_id}/private_nics/{nic['id']}") if not response.ok: - api.module.fail_json(msg=f'Error deleting private network from server [{response.status_code}: {response.json}]') + api.module.fail_json( + msg=f"Error deleting private network from server [{response.status_code}: {response.json}]" + ) return changed, response.json def core(module): - - compute_id = module.params['compute_id'] - pn_id = module.params['private_network_id'] + compute_id = module.params["compute_id"] + pn_id = module.params["private_network_id"] region = module.params["region"] - 
module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + module.params["api_url"] = SCALEWAY_LOCATION[region]["api_endpoint"] api = Scaleway(module=module) if module.params["state"] == "absent": @@ -200,13 +204,15 @@ def core(module): def main(): argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - state=dict(default='present', choices=['absent', 'present']), - project=dict(required=True), - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - compute_id=dict(required=True), - private_network_id=dict(required=True) - )) + argument_spec.update( + dict( + state=dict(default="present", choices=["absent", "present"]), + project=dict(required=True), + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + compute_id=dict(required=True), + private_network_id=dict(required=True), + ) + ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, @@ -215,5 +221,5 @@ def main(): core(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_container.py b/plugins/modules/scaleway_container.py index 21c58823991..d759d12fab9 100644 --- a/plugins/modules/scaleway_container.py +++ b/plugins/modules/scaleway_container.py @@ -216,17 +216,16 @@ from copy import deepcopy from ansible_collections.community.general.plugins.module_utils.scaleway import ( - SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, - scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed, - SecretVariables + SCALEWAY_REGIONS, + scaleway_argument_spec, + Scaleway, + scaleway_waitable_resource_argument_spec, + resource_attributes_should_be_changed, + SecretVariables, ) from ansible.module_utils.basic import AnsibleModule -STABLE_STATES = ( - "ready", - "created", - "absent" -) +STABLE_STATES = ("ready", "created", "absent") MUTABLE_ATTRIBUTES = ( "description", @@ -241,7 +240,7 @@ "max_concurrency", "protocol", "port", - "secret_environment_variables" + "secret_environment_variables", ) @@ -262,7 +261,7 @@ def payload_from_wished_cn(wished_cn): "max_concurrency": wished_cn["max_concurrency"], "protocol": wished_cn["protocol"], "port": wished_cn["port"], - "redeploy": wished_cn["redeploy"] + "redeploy": wished_cn["redeploy"], } return payload @@ -285,7 +284,7 @@ def absent_strategy(api, wished_cn): api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES, force_wait=True) response = api.delete(path=f"{api.api_path}/{target_cn['id']}") if not response.ok: - api.module.fail_json(msg=f'Error deleting container [{response.status_code}: {response.json}]') + api.module.fail_json(msg=f"Error deleting container [{response.status_code}: {response.json}]") api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES) return changed, response.json @@ -309,8 +308,7 @@ def present_strategy(api, wished_cn): # Create container api.warn(payload_cn) - creation_response = api.post(path=api.api_path, - data=payload_cn) + creation_response = api.post(path=api.api_path, data=payload_cn) if not creation_response.ok: msg = f"Error during container creation: {creation_response.info['msg']}: '{creation_response.json['message']}' ({creation_response.json})" @@ -322,12 +320,15 @@ def present_strategy(api, wished_cn): target_cn = cn_lookup[wished_cn["name"]] decoded_target_cn = deepcopy(target_cn) - decoded_target_cn["secret_environment_variables"] = SecretVariables.decode(decoded_target_cn["secret_environment_variables"], - 
payload_cn["secret_environment_variables"]) - patch_payload = resource_attributes_should_be_changed(target=decoded_target_cn, - wished=payload_cn, - verifiable_mutable_attributes=MUTABLE_ATTRIBUTES, - mutable_attributes=MUTABLE_ATTRIBUTES) + decoded_target_cn["secret_environment_variables"] = SecretVariables.decode( + decoded_target_cn["secret_environment_variables"], payload_cn["secret_environment_variables"] + ) + patch_payload = resource_attributes_should_be_changed( + target=decoded_target_cn, + wished=payload_cn, + verifiable_mutable_attributes=MUTABLE_ATTRIBUTES, + mutable_attributes=MUTABLE_ATTRIBUTES, + ) if not patch_payload: return changed, target_cn @@ -336,21 +337,19 @@ def present_strategy(api, wished_cn): if api.module.check_mode: return changed, {"status": "Container attributes would be changed."} - cn_patch_response = api.patch(path=f"{api.api_path}/{target_cn['id']}", - data=patch_payload) + cn_patch_response = api.patch(path=f"{api.api_path}/{target_cn['id']}", data=patch_payload) if not cn_patch_response.ok: - api.module.fail_json(msg=f"Error during container attributes update: [{cn_patch_response.status_code}: {cn_patch_response.json['message']}]") + api.module.fail_json( + msg=f"Error during container attributes update: [{cn_patch_response.status_code}: {cn_patch_response.json['message']}]" + ) api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES) response = api.get(path=f"{api.api_path}/{target_cn['id']}") return changed, response.json -state_strategy = { - "present": present_strategy, - "absent": absent_strategy -} +state_strategy = {"present": present_strategy, "absent": absent_strategy} def core(module): @@ -361,11 +360,11 @@ def core(module): "state": module.params["state"], "namespace_id": module.params["namespace_id"], "name": module.params["name"], - "description": module.params['description'], + "description": module.params["description"], "min_scale": module.params["min_scale"], "max_scale": module.params["max_scale"], - "environment_variables": module.params['environment_variables'], - "secret_environment_variables": module.params['secret_environment_variables'], + "environment_variables": module.params["environment_variables"], + "secret_environment_variables": module.params["secret_environment_variables"], "cpu_limit": module.params["cpu_limit"], "memory_limit": module.params["memory_limit"], "timeout": module.params["container_timeout"], @@ -374,7 +373,7 @@ def core(module): "max_concurrency": module.params["max_concurrency"], "protocol": module.params["protocol"], "port": module.params["port"], - "redeploy": module.params["redeploy"] + "redeploy": module.params["redeploy"], } api = Scaleway(module=module) @@ -388,26 +387,28 @@ def core(module): def main(): argument_spec = scaleway_argument_spec() argument_spec.update(scaleway_waitable_resource_argument_spec()) - argument_spec.update(dict( - state=dict(type='str', default='present', choices=['absent', 'present']), - namespace_id=dict(type='str', required=True), - region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), - name=dict(type='str', required=True), - description=dict(type='str', default=''), - min_scale=dict(type='int'), - max_scale=dict(type='int'), - cpu_limit=dict(type='int'), - memory_limit=dict(type='int'), - container_timeout=dict(type='str'), - privacy=dict(type='str', default='public', choices=['public', 'private']), - registry_image=dict(type='str', required=True), - max_concurrency=dict(type='int'), - protocol=dict(type='str', default='http1', 
choices=['http1', 'h2c']), - port=dict(type='int'), - redeploy=dict(type='bool', default=False), - environment_variables=dict(type='dict', default={}), - secret_environment_variables=dict(type='dict', default={}, no_log=True) - )) + argument_spec.update( + dict( + state=dict(type="str", default="present", choices=["absent", "present"]), + namespace_id=dict(type="str", required=True), + region=dict(type="str", required=True, choices=SCALEWAY_REGIONS), + name=dict(type="str", required=True), + description=dict(type="str", default=""), + min_scale=dict(type="int"), + max_scale=dict(type="int"), + cpu_limit=dict(type="int"), + memory_limit=dict(type="int"), + container_timeout=dict(type="str"), + privacy=dict(type="str", default="public", choices=["public", "private"]), + registry_image=dict(type="str", required=True), + max_concurrency=dict(type="int"), + protocol=dict(type="str", default="http1", choices=["http1", "h2c"]), + port=dict(type="int"), + redeploy=dict(type="bool", default=False), + environment_variables=dict(type="dict", default={}), + secret_environment_variables=dict(type="dict", default={}, no_log=True), + ) + ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, @@ -416,5 +417,5 @@ def main(): core(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_container_info.py b/plugins/modules/scaleway_container_info.py index 7ae30a4dd64..986b2719d68 100644 --- a/plugins/modules/scaleway_container_info.py +++ b/plugins/modules/scaleway_container_info.py @@ -92,7 +92,9 @@ """ from ansible_collections.community.general.plugins.module_utils.scaleway import ( - SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + SCALEWAY_REGIONS, + scaleway_argument_spec, + Scaleway, ) from ansible.module_utils.basic import AnsibleModule @@ -118,10 +120,7 @@ def info_strategy(api, wished_cn): def core(module): region = module.params["region"] - wished_container = { - "namespace_id": module.params["namespace_id"], - "name": module.params["name"] - } + wished_container = {"namespace_id": module.params["namespace_id"], "name": module.params["name"]} api = Scaleway(module=module) api.api_path = f"containers/v1beta1/regions/{region}/containers" @@ -133,11 +132,13 @@ def core(module): def main(): argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - namespace_id=dict(type='str', required=True), - region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), - name=dict(type='str', required=True) - )) + argument_spec.update( + dict( + namespace_id=dict(type="str", required=True), + region=dict(type="str", required=True, choices=SCALEWAY_REGIONS), + name=dict(type="str", required=True), + ) + ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, @@ -146,5 +147,5 @@ def main(): core(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_container_namespace.py b/plugins/modules/scaleway_container_namespace.py index e5305077a60..b2b2ce2e229 100644 --- a/plugins/modules/scaleway_container_namespace.py +++ b/plugins/modules/scaleway_container_namespace.py @@ -133,22 +133,18 @@ from copy import deepcopy from ansible_collections.community.general.plugins.module_utils.scaleway import ( - SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + SCALEWAY_REGIONS, + scaleway_argument_spec, + Scaleway, scaleway_waitable_resource_argument_spec, - resource_attributes_should_be_changed, SecretVariables + resource_attributes_should_be_changed, + 
SecretVariables, ) from ansible.module_utils.basic import AnsibleModule -STABLE_STATES = ( - "ready", - "absent" -) +STABLE_STATES = ("ready", "absent") -MUTABLE_ATTRIBUTES = ( - "description", - "environment_variables", - "secret_environment_variables" -) +MUTABLE_ATTRIBUTES = ("description", "environment_variables", "secret_environment_variables") def payload_from_wished_cn(wished_cn): @@ -157,7 +153,7 @@ def payload_from_wished_cn(wished_cn): "name": wished_cn["name"], "description": wished_cn["description"], "environment_variables": wished_cn["environment_variables"], - "secret_environment_variables": SecretVariables.dict_to_list(wished_cn["secret_environment_variables"]) + "secret_environment_variables": SecretVariables.dict_to_list(wished_cn["secret_environment_variables"]), } return payload @@ -180,7 +176,7 @@ def absent_strategy(api, wished_cn): api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES, force_wait=True) response = api.delete(path=f"{api.api_path}/{target_cn['id']}") if not response.ok: - api.module.fail_json(msg=f'Error deleting container namespace [{response.status_code}: {response.json}]') + api.module.fail_json(msg=f"Error deleting container namespace [{response.status_code}: {response.json}]") api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES) return changed, response.json @@ -201,12 +197,13 @@ def present_strategy(api, wished_cn): # Create container namespace api.warn(payload_cn) - creation_response = api.post(path=api.api_path, - data=payload_cn) + creation_response = api.post(path=api.api_path, data=payload_cn) if not creation_response.ok: - msg = (f"Error during container namespace creation: {creation_response.info['msg']}: " - f"'{creation_response.json['message']}' ({creation_response.json})") + msg = ( + f"Error during container namespace creation: {creation_response.info['msg']}: " + f"'{creation_response.json['message']}' ({creation_response.json})" + ) api.module.fail_json(msg=msg) api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) @@ -215,12 +212,15 @@ def present_strategy(api, wished_cn): target_cn = cn_lookup[wished_cn["name"]] decoded_target_cn = deepcopy(target_cn) - decoded_target_cn["secret_environment_variables"] = SecretVariables.decode(decoded_target_cn["secret_environment_variables"], - payload_cn["secret_environment_variables"]) - patch_payload = resource_attributes_should_be_changed(target=decoded_target_cn, - wished=payload_cn, - verifiable_mutable_attributes=MUTABLE_ATTRIBUTES, - mutable_attributes=MUTABLE_ATTRIBUTES) + decoded_target_cn["secret_environment_variables"] = SecretVariables.decode( + decoded_target_cn["secret_environment_variables"], payload_cn["secret_environment_variables"] + ) + patch_payload = resource_attributes_should_be_changed( + target=decoded_target_cn, + wished=payload_cn, + verifiable_mutable_attributes=MUTABLE_ATTRIBUTES, + mutable_attributes=MUTABLE_ATTRIBUTES, + ) if not patch_payload: return changed, target_cn @@ -229,21 +229,19 @@ def present_strategy(api, wished_cn): if api.module.check_mode: return changed, {"status": "Container namespace attributes would be changed."} - cn_patch_response = api.patch(path=f"{api.api_path}/{target_cn['id']}", - data=patch_payload) + cn_patch_response = api.patch(path=f"{api.api_path}/{target_cn['id']}", data=patch_payload) if not cn_patch_response.ok: - api.module.fail_json(msg=f"Error during container namespace attributes update: 
[{cn_patch_response.status_code}: {cn_patch_response.json['message']}]") + api.module.fail_json( + msg=f"Error during container namespace attributes update: [{cn_patch_response.status_code}: {cn_patch_response.json['message']}]" + ) api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES) response = api.get(path=f"{api.api_path}/{target_cn['id']}") return changed, cn_patch_response.json -state_strategy = { - "present": present_strategy, - "absent": absent_strategy -} +state_strategy = {"present": present_strategy, "absent": absent_strategy} def core(module): @@ -254,15 +252,17 @@ def core(module): "state": module.params["state"], "project_id": module.params["project_id"], "name": module.params["name"], - "description": module.params['description'], - "environment_variables": module.params['environment_variables'], - "secret_environment_variables": module.params['secret_environment_variables'] + "description": module.params["description"], + "environment_variables": module.params["environment_variables"], + "secret_environment_variables": module.params["secret_environment_variables"], } api = Scaleway(module=module) api.api_path = f"containers/v1beta1/regions/{region}/namespaces" - changed, summary = state_strategy[wished_container_namespace["state"]](api=api, wished_cn=wished_container_namespace) + changed, summary = state_strategy[wished_container_namespace["state"]]( + api=api, wished_cn=wished_container_namespace + ) module.exit_json(changed=changed, container_namespace=summary) @@ -270,15 +270,17 @@ def core(module): def main(): argument_spec = scaleway_argument_spec() argument_spec.update(scaleway_waitable_resource_argument_spec()) - argument_spec.update(dict( - state=dict(type='str', default='present', choices=['absent', 'present']), - project_id=dict(type='str', required=True), - region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), - name=dict(type='str', required=True), - description=dict(type='str', default=''), - environment_variables=dict(type='dict', default={}), - secret_environment_variables=dict(type='dict', default={}, no_log=True) - )) + argument_spec.update( + dict( + state=dict(type="str", default="present", choices=["absent", "present"]), + project_id=dict(type="str", required=True), + region=dict(type="str", required=True, choices=SCALEWAY_REGIONS), + name=dict(type="str", required=True), + description=dict(type="str", default=""), + environment_variables=dict(type="dict", default={}), + secret_environment_variables=dict(type="dict", default={}, no_log=True), + ) + ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, @@ -287,5 +289,5 @@ def main(): core(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_container_namespace_info.py b/plugins/modules/scaleway_container_namespace_info.py index 0f974cb9737..3b6a1ad085f 100644 --- a/plugins/modules/scaleway_container_namespace_info.py +++ b/plugins/modules/scaleway_container_namespace_info.py @@ -83,7 +83,9 @@ """ from ansible_collections.community.general.plugins.module_utils.scaleway import ( - SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + SCALEWAY_REGIONS, + scaleway_argument_spec, + Scaleway, ) from ansible.module_utils.basic import AnsibleModule @@ -109,10 +111,7 @@ def info_strategy(api, wished_cn): def core(module): region = module.params["region"] - wished_container_namespace = { - "project_id": module.params["project_id"], - "name": module.params["name"] - } + 
diff --git a/plugins/modules/scaleway_container_namespace_info.py b/plugins/modules/scaleway_container_namespace_info.py
index 0f974cb9737..3b6a1ad085f 100644
--- a/plugins/modules/scaleway_container_namespace_info.py
+++ b/plugins/modules/scaleway_container_namespace_info.py
@@ -83,7 +83,9 @@
 """
 
 from ansible_collections.community.general.plugins.module_utils.scaleway import (
-    SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway,
+    SCALEWAY_REGIONS,
+    scaleway_argument_spec,
+    Scaleway,
 )
 from ansible.module_utils.basic import AnsibleModule
 
@@ -109,10 +111,7 @@ def info_strategy(api, wished_cn):
 
 def core(module):
     region = module.params["region"]
-    wished_container_namespace = {
-        "project_id": module.params["project_id"],
-        "name": module.params["name"]
-    }
+    wished_container_namespace = {"project_id": module.params["project_id"], "name": module.params["name"]}
 
     api = Scaleway(module=module)
     api.api_path = f"containers/v1beta1/regions/{region}/namespaces"
@@ -124,11 +123,13 @@ def core(module):
 
 def main():
     argument_spec = scaleway_argument_spec()
-    argument_spec.update(dict(
-        project_id=dict(type='str', required=True),
-        region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
-        name=dict(type='str', required=True)
-    ))
+    argument_spec.update(
+        dict(
+            project_id=dict(type="str", required=True),
+            region=dict(type="str", required=True, choices=SCALEWAY_REGIONS),
+            name=dict(type="str", required=True),
+        )
+    )
     module = AnsibleModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
@@ -137,5 +138,5 @@ def main():
     core(module)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/scaleway_container_registry.py b/plugins/modules/scaleway_container_registry.py
index eb9665bc8b8..7cacc2c7137 100644
--- a/plugins/modules/scaleway_container_registry.py
+++ b/plugins/modules/scaleway_container_registry.py
@@ -119,20 +119,17 @@
 """
 
 from ansible_collections.community.general.plugins.module_utils.scaleway import (
-    SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway,
-    scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed
+    SCALEWAY_REGIONS,
+    scaleway_argument_spec,
+    Scaleway,
+    scaleway_waitable_resource_argument_spec,
+    resource_attributes_should_be_changed,
 )
 from ansible.module_utils.basic import AnsibleModule
 
-STABLE_STATES = (
-    "ready",
-    "absent"
-)
+STABLE_STATES = ("ready", "absent")
 
-MUTABLE_ATTRIBUTES = (
-    "description",
-    "is_public"
-)
+MUTABLE_ATTRIBUTES = ("description", "is_public")
 
 
 def payload_from_wished_cr(wished_cr):
@@ -140,7 +137,7 @@ def payload_from_wished_cr(wished_cr):
         "project_id": wished_cr["project_id"],
         "name": wished_cr["name"],
         "description": wished_cr["description"],
-        "is_public": wished_cr["privacy_policy"] == "public"
+        "is_public": wished_cr["privacy_policy"] == "public",
     }
 
     return payload
@@ -163,7 +160,7 @@ def absent_strategy(api, wished_cr):
     api.wait_to_complete_state_transition(resource=target_cr, stable_states=STABLE_STATES, force_wait=True)
     response = api.delete(path=f"{api.api_path}/{target_cr['id']}")
     if not response.ok:
-        api.module.fail_json(msg=f'Error deleting container registry [{response.status_code}: {response.json}]')
+        api.module.fail_json(msg=f"Error deleting container registry [{response.status_code}: {response.json}]")
 
     api.wait_to_complete_state_transition(resource=target_cr, stable_states=STABLE_STATES)
     return changed, response.json
@@ -184,8 +181,7 @@ def present_strategy(api, wished_cr):
 
         # Create container registry
         api.warn(payload_cr)
-        creation_response = api.post(path=api.api_path,
-                                     data=payload_cr)
+        creation_response = api.post(path=api.api_path, data=payload_cr)
 
         if not creation_response.ok:
             msg = f"Error during container registry creation: {creation_response.info['msg']}: '{creation_response.json['message']}' ({creation_response.json})"
@@ -196,10 +192,12 @@ def present_strategy(api, wished_cr):
         return changed, response.json
 
     target_cr = cr_lookup[wished_cr["name"]]
-    patch_payload = resource_attributes_should_be_changed(target=target_cr,
-                                                          wished=payload_cr,
-                                                          verifiable_mutable_attributes=MUTABLE_ATTRIBUTES,
-                                                          mutable_attributes=MUTABLE_ATTRIBUTES)
+    patch_payload = resource_attributes_should_be_changed(
+        target=target_cr,
+        wished=payload_cr,
+        verifiable_mutable_attributes=MUTABLE_ATTRIBUTES,
+        mutable_attributes=MUTABLE_ATTRIBUTES,
+    )
 
     if not patch_payload:
         return changed, target_cr
@@ -208,21 +206,19 @@ def present_strategy(api, wished_cr):
     if api.module.check_mode:
         return changed, {"status": "Container registry attributes would be changed."}
 
-    cr_patch_response = api.patch(path=f"{api.api_path}/{target_cr['id']}",
-                                  data=patch_payload)
+    cr_patch_response = api.patch(path=f"{api.api_path}/{target_cr['id']}", data=patch_payload)
 
     if not cr_patch_response.ok:
-        api.module.fail_json(msg=f"Error during container registry attributes update: [{cr_patch_response.status_code}: {cr_patch_response.json['message']}]")
+        api.module.fail_json(
+            msg=f"Error during container registry attributes update: [{cr_patch_response.status_code}: {cr_patch_response.json['message']}]"
+        )
 
     api.wait_to_complete_state_transition(resource=target_cr, stable_states=STABLE_STATES)
     response = api.get(path=f"{api.api_path}/{target_cr['id']}")
     return changed, response.json
 
 
-state_strategy = {
-    "present": present_strategy,
-    "absent": absent_strategy
-}
+state_strategy = {"present": present_strategy, "absent": absent_strategy}
 
 
 def core(module):
@@ -231,8 +227,8 @@ def core(module):
         "state": module.params["state"],
         "project_id": module.params["project_id"],
         "name": module.params["name"],
-        "description": module.params['description'],
-        "privacy_policy": module.params['privacy_policy']
+        "description": module.params["description"],
+        "privacy_policy": module.params["privacy_policy"],
     }
 
     api = Scaleway(module=module)
@@ -246,14 +242,16 @@ def core(module):
 
 def main():
     argument_spec = scaleway_argument_spec()
     argument_spec.update(scaleway_waitable_resource_argument_spec())
-    argument_spec.update(dict(
-        state=dict(type='str', default='present', choices=['absent', 'present']),
-        project_id=dict(type='str', required=True),
-        region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
-        name=dict(type='str', required=True),
-        description=dict(type='str', default=''),
-        privacy_policy=dict(type='str', default='private', choices=['public', 'private'])
-    ))
+    argument_spec.update(
+        dict(
+            state=dict(type="str", default="present", choices=["absent", "present"]),
+            project_id=dict(type="str", required=True),
+            region=dict(type="str", required=True, choices=SCALEWAY_REGIONS),
+            name=dict(type="str", required=True),
+            description=dict(type="str", default=""),
+            privacy_policy=dict(type="str", default="private", choices=["public", "private"]),
+        )
+    )
     module = AnsibleModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
@@ -262,5 +260,5 @@ def main():
     core(module)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
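The patch decision above hinges on resource_attributes_should_be_changed from module_utils/scaleway.py. A rough reimplementation of its apparent semantics, inferred from the call sites in this diff rather than copied from the helper itself: if any verifiable attribute drifted from the wished value, every mutable attribute goes into the PATCH payload; otherwise nothing is sent.

def attributes_to_patch(target, wished, verifiable, mutable):
    # Attributes whose wished value is set and differs from the live resource.
    drift = {a: wished[a] for a in verifiable if wished[a] is not None and target[a] != wished[a]}
    # Any drift triggers a patch carrying all mutable attributes.
    return {a: wished[a] for a in mutable} if drift else {}

attrs = ("description", "is_public")
print(attributes_to_patch({"description": "old", "is_public": False},
                          {"description": "new", "is_public": False}, attrs, attrs))
# {'description': 'new', 'is_public': False}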
diff --git a/plugins/modules/scaleway_container_registry_info.py b/plugins/modules/scaleway_container_registry_info.py
index ce72bfc801d..34fb3b499fa 100644
--- a/plugins/modules/scaleway_container_registry_info.py
+++ b/plugins/modules/scaleway_container_registry_info.py
@@ -82,7 +82,9 @@
 """
 
 from ansible_collections.community.general.plugins.module_utils.scaleway import (
-    SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway,
+    SCALEWAY_REGIONS,
+    scaleway_argument_spec,
+    Scaleway,
 )
 from ansible.module_utils.basic import AnsibleModule
 
@@ -108,10 +110,7 @@ def info_strategy(api, wished_cn):
 
 def core(module):
     region = module.params["region"]
-    wished_container_namespace = {
-        "project_id": module.params["project_id"],
-        "name": module.params["name"]
-    }
+    wished_container_namespace = {"project_id": module.params["project_id"], "name": module.params["name"]}
 
     api = Scaleway(module=module)
     api.api_path = f"registry/v1/regions/{region}/namespaces"
@@ -123,11 +122,13 @@ def core(module):
 
 def main():
     argument_spec = scaleway_argument_spec()
-    argument_spec.update(dict(
-        project_id=dict(type='str', required=True),
-        region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
-        name=dict(type='str', required=True)
-    ))
+    argument_spec.update(
+        dict(
+            project_id=dict(type="str", required=True),
+            region=dict(type="str", required=True, choices=SCALEWAY_REGIONS),
+            name=dict(type="str", required=True),
+        )
+    )
     module = AnsibleModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
@@ -136,5 +137,5 @@ def main():
     core(module)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/scaleway_database_backup.py b/plugins/modules/scaleway_database_backup.py
index 7c07577dbbc..03cd75cac9b 100644
--- a/plugins/modules/scaleway_database_backup.py
+++ b/plugins/modules/scaleway_database_backup.py
@@ -182,47 +182,48 @@
 )
 
 stable_states = (
-    'ready',
-    'deleting',
+    "ready",
+    "deleting",
 )
 
 
 def wait_to_complete_state_transition(module, account_api, backup=None):
-    wait_timeout = module.params['wait_timeout']
-    wait_sleep_time = module.params['wait_sleep_time']
+    wait_timeout = module.params["wait_timeout"]
+    wait_sleep_time = module.params["wait_sleep_time"]
 
-    if backup is None or backup['status'] in stable_states:
+    if backup is None or backup["status"] in stable_states:
         return backup
 
     start = now()
     end = start + datetime.timedelta(seconds=wait_timeout)
     while now() < end:
-        module.debug('We are going to wait for the backup to finish its transition')
+        module.debug("We are going to wait for the backup to finish its transition")
 
         response = account_api.get(f"/rdb/v1/regions/{module.params.get('region')}/backups/{backup['id']}")
         if not response.ok:
-            module.fail_json(msg=f'Error getting backup [{response.status_code}: {response.json}]')
+            module.fail_json(msg=f"Error getting backup [{response.status_code}: {response.json}]")
             break
         response_json = response.json
 
-        if response_json['status'] in stable_states:
-            module.debug('It seems that the backup is not in transition anymore.')
+        if response_json["status"] in stable_states:
+            module.debug("It seems that the backup is not in transition anymore.")
             module.debug(f"Backup in state: {response_json['status']}")
             return response_json
 
         time.sleep(wait_sleep_time)
     else:
-        module.fail_json(msg='Backup takes too long to finish its transition')
+        module.fail_json(msg="Backup takes too long to finish its transition")
 
 
 def present_strategy(module, account_api, backup):
-    name = module.params['name']
-    database_name = module.params['database_name']
-    instance_id = module.params['instance_id']
-    expiration_date = module.params['expires_at']
+    name = module.params["name"]
+    database_name = module.params["database_name"]
+    instance_id = module.params["instance_id"]
+    expiration_date = module.params["expires_at"]
 
     if backup is not None:
-        if (backup['name'] == name or name is None) and (
-                backup['expires_at'] == expiration_date or expiration_date is None):
+        if (backup["name"] == name or name is None) and (
+            backup["expires_at"] == expiration_date or expiration_date is None
+        ):
             wait_to_complete_state_transition(module, account_api, backup)
             module.exit_json(changed=False)
 
@@ -231,24 +232,23 @@ def present_strategy(module, account_api, backup):
 
         payload = {}
         if name is not None:
-            payload['name'] = name
+            payload["name"] = name
        if expiration_date is not None:
-            payload['expires_at'] = expiration_date
+            payload["expires_at"] = expiration_date
 
-        response = account_api.patch(f"/rdb/v1/regions/{module.params.get('region')}/backups/{backup['id']}",
-                                     payload)
+        response = account_api.patch(f"/rdb/v1/regions/{module.params.get('region')}/backups/{backup['id']}", payload)
 
         if response.ok:
             result = wait_to_complete_state_transition(module, account_api, response.json)
             module.exit_json(changed=True, metadata=result)
 
-        module.fail_json(msg=f'Error modifying backup [{response.status_code}: {response.json}]')
+        module.fail_json(msg=f"Error modifying backup [{response.status_code}: {response.json}]")
 
     if module.check_mode:
         module.exit_json(changed=True)
 
-    payload = {'name': name, 'database_name': database_name, 'instance_id': instance_id}
+    payload = {"name": name, "database_name": database_name, "instance_id": instance_id}
     if expiration_date is not None:
-        payload['expires_at'] = expiration_date
+        payload["expires_at"] = expiration_date
 
     response = account_api.post(f"/rdb/v1/regions/{module.params.get('region')}/backups", payload)
 
@@ -256,7 +256,7 @@ def present_strategy(module, account_api, backup):
         result = wait_to_complete_state_transition(module, account_api, response.json)
         module.exit_json(changed=True, metadata=result)
 
-    module.fail_json(msg=f'Error creating backup [{response.status_code}: {response.json}]')
+    module.fail_json(msg=f"Error creating backup [{response.status_code}: {response.json}]")
 
 
 def absent_strategy(module, account_api, backup):
@@ -271,64 +271,64 @@ def absent_strategy(module, account_api, backup):
         result = wait_to_complete_state_transition(module, account_api, response.json)
         module.exit_json(changed=True, metadata=result)
 
-    module.fail_json(msg=f'Error deleting backup [{response.status_code}: {response.json}]')
+    module.fail_json(msg=f"Error deleting backup [{response.status_code}: {response.json}]")
 
 
 def exported_strategy(module, account_api, backup):
     if backup is None:
         module.fail_json(msg=f'Backup "{module.params["id"]}" not found')
 
-    if backup['download_url'] is not None:
+    if backup["download_url"] is not None:
         module.exit_json(changed=False, metadata=backup)
 
     if module.check_mode:
         module.exit_json(changed=True)
 
     backup = wait_to_complete_state_transition(module, account_api, backup)
-    response = account_api.post(
-        f"/rdb/v1/regions/{module.params.get('region')}/backups/{backup['id']}/export", {})
+    response = account_api.post(f"/rdb/v1/regions/{module.params.get('region')}/backups/{backup['id']}/export", {})
 
     if response.ok:
         result = wait_to_complete_state_transition(module, account_api, response.json)
         module.exit_json(changed=True, metadata=result)
 
-    module.fail_json(msg=f'Error exporting backup [{response.status_code}: {response.json}]')
+    module.fail_json(msg=f"Error exporting backup [{response.status_code}: {response.json}]")
 
 
 def restored_strategy(module, account_api, backup):
     if backup is None:
         module.fail_json(msg=f'Backup "{module.params["id"]}" not found')
 
-    database_name = module.params['database_name']
-    instance_id = module.params['instance_id']
+    database_name = module.params["database_name"]
+    instance_id = module.params["instance_id"]
 
     if module.check_mode:
         module.exit_json(changed=True)
 
     backup = wait_to_complete_state_transition(module, account_api, backup)
 
-    payload = {'database_name': database_name, 'instance_id': instance_id}
-    response = account_api.post(f"/rdb/v1/regions/{module.params.get('region')}/backups/{backup['id']}/restore",
-                                payload)
+    payload = {"database_name": database_name, "instance_id": instance_id}
+    response = account_api.post(
+        f"/rdb/v1/regions/{module.params.get('region')}/backups/{backup['id']}/restore", payload
+    )
 
     if response.ok:
         result = wait_to_complete_state_transition(module, account_api, response.json)
         module.exit_json(changed=True, metadata=result)
 
-    module.fail_json(msg=f'Error restoring backup [{response.status_code}: {response.json}]')
+    module.fail_json(msg=f"Error restoring backup [{response.status_code}: {response.json}]")
 
 
 state_strategy = {
-    'present': present_strategy,
-    'absent': absent_strategy,
-    'exported': exported_strategy,
-    'restored': restored_strategy,
+    "present": present_strategy,
+    "absent": absent_strategy,
+    "exported": exported_strategy,
+    "restored": restored_strategy,
 }
 
 
 def core(module):
-    state = module.params['state']
-    backup_id = module.params['id']
+    state = module.params["state"]
+    backup_id = module.params["id"]
 
     account_api = Scaleway(module)
 
@@ -351,34 +351,36 @@ def core(module):
 
 def main():
     argument_spec = scaleway_argument_spec()
-    argument_spec.update(dict(
-        state=dict(default='present', choices=['absent', 'present', 'exported', 'restored']),
-        region=dict(required=True, choices=SCALEWAY_REGIONS),
-        id=dict(),
-        name=dict(type='str'),
-        database_name=dict(),
-        instance_id=dict(),
-        expires_at=dict(),
-        wait=dict(type='bool', default=False),
-        wait_timeout=dict(type='int', default=300),
-        wait_sleep_time=dict(type='int', default=3),
-    ))
+    argument_spec.update(
+        dict(
+            state=dict(default="present", choices=["absent", "present", "exported", "restored"]),
+            region=dict(required=True, choices=SCALEWAY_REGIONS),
+            id=dict(),
+            name=dict(type="str"),
+            database_name=dict(),
+            instance_id=dict(),
+            expires_at=dict(),
+            wait=dict(type="bool", default=False),
+            wait_timeout=dict(type="int", default=300),
+            wait_sleep_time=dict(type="int", default=3),
+        )
+    )
     module = AnsibleModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
         required_together=[
-            ['database_name', 'instance_id'],
+            ["database_name", "instance_id"],
         ],
         required_if=[
-            ['state', 'present', ['name', 'database_name', 'instance_id']],
-            ['state', 'absent', ['id']],
-            ['state', 'exported', ['id']],
-            ['state', 'restored', ['id', 'database_name', 'instance_id']],
+            ["state", "present", ["name", "database_name", "instance_id"]],
+            ["state", "absent", ["id"]],
+            ["state", "exported", ["id"]],
+            ["state", "restored", ["id", "database_name", "instance_id"]],
         ],
     )
 
     core(module)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/scaleway_function.py b/plugins/modules/scaleway_function.py
index 27e404ba10e..c6e998f51e3 100644
--- a/plugins/modules/scaleway_function.py
+++ b/plugins/modules/scaleway_function.py
@@ -193,17 +193,16 @@
 from copy import deepcopy
 
 from ansible_collections.community.general.plugins.module_utils.scaleway import (
-    SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway,
-    scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed,
-    SecretVariables
+    SCALEWAY_REGIONS,
+    scaleway_argument_spec,
+    Scaleway,
+    scaleway_waitable_resource_argument_spec,
+    resource_attributes_should_be_changed,
+    SecretVariables,
 )
 from ansible.module_utils.basic import AnsibleModule
 
-STABLE_STATES = (
-    "ready",
-    "created",
-    "absent"
-)
+STABLE_STATES = ("ready", "created", "absent")
 
 VERIFIABLE_MUTABLE_ATTRIBUTES = (
     "description",
@@ -215,12 +214,10 @@
     "timeout",
     "handler",
     "privacy",
-    "secret_environment_variables"
+    "secret_environment_variables",
 )
 
-MUTABLE_ATTRIBUTES = VERIFIABLE_MUTABLE_ATTRIBUTES + (
-    "redeploy",
-)
+MUTABLE_ATTRIBUTES = VERIFIABLE_MUTABLE_ATTRIBUTES + ("redeploy",)
 
 
 def payload_from_wished_fn(wished_fn):
@@ -237,7 +234,7 @@ def payload_from_wished_fn(wished_fn):
         "privacy": wished_fn["privacy"],
         "redeploy": wished_fn["redeploy"],
         "environment_variables": wished_fn["environment_variables"],
-        "secret_environment_variables": SecretVariables.dict_to_list(wished_fn["secret_environment_variables"])
+        "secret_environment_variables": SecretVariables.dict_to_list(wished_fn["secret_environment_variables"]),
     }
 
     return payload
@@ -260,7 +257,7 @@ def absent_strategy(api, wished_fn):
     api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES, force_wait=True)
     response = api.delete(path=f"{api.api_path}/{target_fn['id']}")
     if not response.ok:
-        api.module.fail_json(msg=f'Error deleting function [{response.status_code}: {response.json}]')
+        api.module.fail_json(msg=f"Error deleting function [{response.status_code}: {response.json}]")
 
     api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES)
     return changed, response.json
@@ -284,8 +281,7 @@ def present_strategy(api, wished_fn):
 
         # Create function
         api.warn(payload_fn)
-        creation_response = api.post(path=api.api_path,
-                                     data=payload_fn)
+        creation_response = api.post(path=api.api_path, data=payload_fn)
 
         if not creation_response.ok:
             msg = f"Error during function creation: {creation_response.info['msg']}: '{creation_response.json['message']}' ({creation_response.json})"
@@ -297,13 +293,16 @@ def present_strategy(api, wished_fn):
 
     target_fn = fn_lookup[wished_fn["name"]]
     decoded_target_fn = deepcopy(target_fn)
-    decoded_target_fn["secret_environment_variables"] = SecretVariables.decode(decoded_target_fn["secret_environment_variables"],
-                                                                               payload_fn["secret_environment_variables"])
+    decoded_target_fn["secret_environment_variables"] = SecretVariables.decode(
+        decoded_target_fn["secret_environment_variables"], payload_fn["secret_environment_variables"]
+    )
 
-    patch_payload = resource_attributes_should_be_changed(target=decoded_target_fn,
-                                                          wished=payload_fn,
-                                                          verifiable_mutable_attributes=VERIFIABLE_MUTABLE_ATTRIBUTES,
-                                                          mutable_attributes=MUTABLE_ATTRIBUTES)
+    patch_payload = resource_attributes_should_be_changed(
+        target=decoded_target_fn,
+        wished=payload_fn,
+        verifiable_mutable_attributes=VERIFIABLE_MUTABLE_ATTRIBUTES,
+        mutable_attributes=MUTABLE_ATTRIBUTES,
+    )
 
     if not patch_payload:
         return changed, target_fn
@@ -312,21 +311,19 @@ def present_strategy(api, wished_fn):
     if api.module.check_mode:
         return changed, {"status": "Function attributes would be changed."}
 
-    fn_patch_response = api.patch(path=f"{api.api_path}/{target_fn['id']}",
-                                  data=patch_payload)
+    fn_patch_response = api.patch(path=f"{api.api_path}/{target_fn['id']}", data=patch_payload)
 
     if not fn_patch_response.ok:
-        api.module.fail_json(msg=f"Error during function attributes update: [{fn_patch_response.status_code}: {fn_patch_response.json['message']}]")
+        api.module.fail_json(
+            msg=f"Error during function attributes update: [{fn_patch_response.status_code}: {fn_patch_response.json['message']}]"
+        )
 
     api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES)
     response = api.get(path=f"{api.api_path}/{target_fn['id']}")
     return changed, response.json
 
 
-state_strategy = {
-    "present": present_strategy,
-    "absent": absent_strategy
-}
+state_strategy = {"present": present_strategy, "absent": absent_strategy}
 
 
 def core(module):
@@ -337,17 +334,17 @@ def core(module):
         "state": module.params["state"],
         "namespace_id": module.params["namespace_id"],
         "name": module.params["name"],
-        "description": module.params['description'],
-        "min_scale": module.params['min_scale'],
-        "max_scale": module.params['max_scale'],
+        "description": module.params["description"],
+        "min_scale": module.params["min_scale"],
+        "max_scale": module.params["max_scale"],
         "runtime": module.params["runtime"],
         "memory_limit": module.params["memory_limit"],
         "timeout": module.params["function_timeout"],
         "handler": module.params["handler"],
         "privacy": module.params["privacy"],
         "redeploy": module.params["redeploy"],
-        "environment_variables": module.params['environment_variables'],
-        "secret_environment_variables": module.params['secret_environment_variables']
+        "environment_variables": module.params["environment_variables"],
+        "secret_environment_variables": module.params["secret_environment_variables"],
     }
 
     api = Scaleway(module=module)
@@ -361,23 +358,25 @@ def core(module):
 
 def main():
     argument_spec = scaleway_argument_spec()
     argument_spec.update(scaleway_waitable_resource_argument_spec())
-    argument_spec.update(dict(
-        state=dict(type='str', default='present', choices=['absent', 'present']),
-        namespace_id=dict(type='str', required=True),
-        region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
-        name=dict(type='str', required=True),
-        description=dict(type='str', default=''),
-        min_scale=dict(type='int'),
-        max_scale=dict(type='int'),
-        runtime=dict(type='str', required=True),
-        memory_limit=dict(type='int'),
-        function_timeout=dict(type='str'),
-        handler=dict(type='str'),
-        privacy=dict(type='str', default='public', choices=['public', 'private']),
-        redeploy=dict(type='bool', default=False),
-        environment_variables=dict(type='dict', default={}),
-        secret_environment_variables=dict(type='dict', default={}, no_log=True)
-    ))
+    argument_spec.update(
+        dict(
+            state=dict(type="str", default="present", choices=["absent", "present"]),
+            namespace_id=dict(type="str", required=True),
+            region=dict(type="str", required=True, choices=SCALEWAY_REGIONS),
+            name=dict(type="str", required=True),
+            description=dict(type="str", default=""),
+            min_scale=dict(type="int"),
+            max_scale=dict(type="int"),
+            runtime=dict(type="str", required=True),
+            memory_limit=dict(type="int"),
+            function_timeout=dict(type="str"),
+            handler=dict(type="str"),
+            privacy=dict(type="str", default="public", choices=["public", "private"]),
+            redeploy=dict(type="bool", default=False),
+            environment_variables=dict(type="dict", default={}),
+            secret_environment_variables=dict(type="dict", default={}, no_log=True),
+        )
+    )
     module = AnsibleModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
@@ -386,5 +385,5 @@ def main():
     core(module)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
module.params["namespace_id"], "name": module.params["name"]} api = Scaleway(module=module) api.api_path = f"functions/v1beta1/regions/{region}/functions" @@ -132,11 +131,13 @@ def core(module): def main(): argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - namespace_id=dict(type='str', required=True), - region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), - name=dict(type='str', required=True) - )) + argument_spec.update( + dict( + namespace_id=dict(type="str", required=True), + region=dict(type="str", required=True, choices=SCALEWAY_REGIONS), + name=dict(type="str", required=True), + ) + ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, @@ -145,5 +146,5 @@ def main(): core(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_function_namespace.py b/plugins/modules/scaleway_function_namespace.py index cdaf0bdc43d..2f981ca1549 100644 --- a/plugins/modules/scaleway_function_namespace.py +++ b/plugins/modules/scaleway_function_namespace.py @@ -133,17 +133,17 @@ from copy import deepcopy from ansible_collections.community.general.plugins.module_utils.scaleway import ( - SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, - scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed, - SecretVariables + SCALEWAY_REGIONS, + scaleway_argument_spec, + Scaleway, + scaleway_waitable_resource_argument_spec, + resource_attributes_should_be_changed, + SecretVariables, ) from ansible.module_utils.basic import AnsibleModule -STABLE_STATES = ( - "ready", - "absent" -) +STABLE_STATES = ("ready", "absent") MUTABLE_ATTRIBUTES = ( "description", @@ -158,7 +158,7 @@ def payload_from_wished_fn(wished_fn): "name": wished_fn["name"], "description": wished_fn["description"], "environment_variables": wished_fn["environment_variables"], - "secret_environment_variables": SecretVariables.dict_to_list(wished_fn["secret_environment_variables"]) + "secret_environment_variables": SecretVariables.dict_to_list(wished_fn["secret_environment_variables"]), } return payload @@ -181,7 +181,7 @@ def absent_strategy(api, wished_fn): api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES, force_wait=True) response = api.delete(path=f"{api.api_path}/{target_fn['id']}") if not response.ok: - api.module.fail_json(msg=f'Error deleting function namespace [{response.status_code}: {response.json}]') + api.module.fail_json(msg=f"Error deleting function namespace [{response.status_code}: {response.json}]") api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES) return changed, response.json @@ -202,8 +202,7 @@ def present_strategy(api, wished_fn): # Create function namespace api.warn(payload_fn) - creation_response = api.post(path=api.api_path, - data=payload_fn) + creation_response = api.post(path=api.api_path, data=payload_fn) if not creation_response.ok: msg = f"Error during function namespace creation: {creation_response.info['msg']}: '{creation_response.json['message']}' ({creation_response.json})" @@ -215,13 +214,16 @@ def present_strategy(api, wished_fn): target_fn = fn_lookup[wished_fn["name"]] decoded_target_fn = deepcopy(target_fn) - decoded_target_fn["secret_environment_variables"] = SecretVariables.decode(decoded_target_fn["secret_environment_variables"], - payload_fn["secret_environment_variables"]) + decoded_target_fn["secret_environment_variables"] = SecretVariables.decode( + decoded_target_fn["secret_environment_variables"], 
payload_fn["secret_environment_variables"] + ) - patch_payload = resource_attributes_should_be_changed(target=decoded_target_fn, - wished=payload_fn, - verifiable_mutable_attributes=MUTABLE_ATTRIBUTES, - mutable_attributes=MUTABLE_ATTRIBUTES) + patch_payload = resource_attributes_should_be_changed( + target=decoded_target_fn, + wished=payload_fn, + verifiable_mutable_attributes=MUTABLE_ATTRIBUTES, + mutable_attributes=MUTABLE_ATTRIBUTES, + ) if not patch_payload: return changed, target_fn @@ -230,21 +232,19 @@ def present_strategy(api, wished_fn): if api.module.check_mode: return changed, {"status": "Function namespace attributes would be changed."} - fn_patch_response = api.patch(path=f"{api.api_path}/{target_fn['id']}", - data=patch_payload) + fn_patch_response = api.patch(path=f"{api.api_path}/{target_fn['id']}", data=patch_payload) if not fn_patch_response.ok: - api.module.fail_json(msg=f"Error during function namespace attributes update: [{fn_patch_response.status_code}: {fn_patch_response.json['message']}]") + api.module.fail_json( + msg=f"Error during function namespace attributes update: [{fn_patch_response.status_code}: {fn_patch_response.json['message']}]" + ) api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES) response = api.get(path=f"{api.api_path}/{target_fn['id']}") return changed, response.json -state_strategy = { - "present": present_strategy, - "absent": absent_strategy -} +state_strategy = {"present": present_strategy, "absent": absent_strategy} def core(module): @@ -255,9 +255,9 @@ def core(module): "state": module.params["state"], "project_id": module.params["project_id"], "name": module.params["name"], - "description": module.params['description'], - "environment_variables": module.params['environment_variables'], - "secret_environment_variables": module.params['secret_environment_variables'] + "description": module.params["description"], + "environment_variables": module.params["environment_variables"], + "secret_environment_variables": module.params["secret_environment_variables"], } api = Scaleway(module=module) @@ -271,15 +271,17 @@ def core(module): def main(): argument_spec = scaleway_argument_spec() argument_spec.update(scaleway_waitable_resource_argument_spec()) - argument_spec.update(dict( - state=dict(type='str', default='present', choices=['absent', 'present']), - project_id=dict(type='str', required=True), - region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), - name=dict(type='str', required=True), - description=dict(type='str', default=''), - environment_variables=dict(type='dict', default={}), - secret_environment_variables=dict(type='dict', default={}, no_log=True) - )) + argument_spec.update( + dict( + state=dict(type="str", default="present", choices=["absent", "present"]), + project_id=dict(type="str", required=True), + region=dict(type="str", required=True, choices=SCALEWAY_REGIONS), + name=dict(type="str", required=True), + description=dict(type="str", default=""), + environment_variables=dict(type="dict", default={}), + secret_environment_variables=dict(type="dict", default={}, no_log=True), + ) + ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, @@ -288,5 +290,5 @@ def main(): core(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_function_namespace_info.py b/plugins/modules/scaleway_function_namespace_info.py index e58fd7e0a40..86cfd814b2d 100644 --- a/plugins/modules/scaleway_function_namespace_info.py +++ 
b/plugins/modules/scaleway_function_namespace_info.py @@ -83,7 +83,9 @@ """ from ansible_collections.community.general.plugins.module_utils.scaleway import ( - SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + SCALEWAY_REGIONS, + scaleway_argument_spec, + Scaleway, ) from ansible.module_utils.basic import AnsibleModule @@ -109,10 +111,7 @@ def info_strategy(api, wished_fn): def core(module): region = module.params["region"] - wished_function_namespace = { - "project_id": module.params["project_id"], - "name": module.params["name"] - } + wished_function_namespace = {"project_id": module.params["project_id"], "name": module.params["name"]} api = Scaleway(module=module) api.api_path = f"functions/v1beta1/regions/{region}/namespaces" @@ -124,11 +123,13 @@ def core(module): def main(): argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - project_id=dict(type='str', required=True), - region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), - name=dict(type='str', required=True) - )) + argument_spec.update( + dict( + project_id=dict(type="str", required=True), + region=dict(type="str", required=True, choices=SCALEWAY_REGIONS), + name=dict(type="str", required=True), + ) + ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, @@ -137,5 +138,5 @@ def main(): core(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_image_info.py b/plugins/modules/scaleway_image_info.py index d8761abef4f..fa0edd5eb67 100644 --- a/plugins/modules/scaleway_image_info.py +++ b/plugins/modules/scaleway_image_info.py @@ -101,36 +101,39 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION) + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION, +) class ScalewayImageInfo(Scaleway): - def __init__(self, module): super().__init__(module) - self.name = 'images' + self.name = "images" region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + self.module.params["api_url"] = SCALEWAY_LOCATION[region]["api_endpoint"] def main(): argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) + argument_spec.update( + dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + ) + ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, ) try: - module.exit_json( - scaleway_image_info=ScalewayImageInfo(module).get_resources() - ) + module.exit_json(scaleway_image_info=ScalewayImageInfo(module).get_resources()) except ScalewayException as exc: module.fail_json(msg=exc.message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_ip.py b/plugins/modules/scaleway_ip.py index d1e8840ef5e..2ee9cfa5431 100644 --- a/plugins/modules/scaleway_ip.py +++ b/plugins/modules/scaleway_ip.py @@ -116,7 +116,11 @@ } """ -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_LOCATION, + scaleway_argument_spec, + Scaleway, +) from ansible.module_utils.basic import AnsibleModule @@ -148,17 +152,13 @@ def ip_attributes_should_be_changed(api, target_ip, wished_ip): def 
diff --git a/plugins/modules/scaleway_ip.py b/plugins/modules/scaleway_ip.py
index d1e8840ef5e..2ee9cfa5431 100644
--- a/plugins/modules/scaleway_ip.py
+++ b/plugins/modules/scaleway_ip.py
@@ -116,7 +116,11 @@
 }
 """
 
-from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+    SCALEWAY_LOCATION,
+    scaleway_argument_spec,
+    Scaleway,
+)
 from ansible.module_utils.basic import AnsibleModule
 
@@ -148,17 +152,13 @@ def ip_attributes_should_be_changed(api, target_ip, wished_ip):
 
 def payload_from_wished_ip(wished_ip):
-    return {
-        k: v
-        for k, v in wished_ip.items()
-        if k != 'id' and v is not None
-    }
+    return {k: v for k, v in wished_ip.items() if k != "id" and v is not None}
 
 
 def present_strategy(api, wished_ip):
     changed = False
 
-    response = api.get('ips')
+    response = api.get("ips")
     if not response.ok:
         api.module.fail_json(msg=f"Error getting IPs [{response.status_code}: {response.json['message']}]")
 
@@ -171,8 +171,7 @@ def present_strategy(api, wished_ip):
             return changed, {"status": "An IP would be created."}
 
         # Create IP
-        creation_response = api.post('/ips',
-                                     data=payload_from_wished_ip(wished_ip))
+        creation_response = api.post("/ips", data=payload_from_wished_ip(wished_ip))
 
         if not creation_response.ok:
             msg = f"Error during ip creation: {creation_response.info['msg']}: '{creation_response.json['message']}' ({creation_response.json})"
@@ -189,17 +188,18 @@ def present_strategy(api, wished_ip):
     if api.module.check_mode:
         return changed, {"status": "IP attributes would be changed."}
 
-    ip_patch_response = api.patch(path=f"ips/{target_ip['id']}",
-                                  data=patch_payload)
+    ip_patch_response = api.patch(path=f"ips/{target_ip['id']}", data=patch_payload)
 
     if not ip_patch_response.ok:
-        api.module.fail_json(msg=f"Error during IP attributes update: [{ip_patch_response.status_code}: {ip_patch_response.json['message']}]")
+        api.module.fail_json(
+            msg=f"Error during IP attributes update: [{ip_patch_response.status_code}: {ip_patch_response.json['message']}]"
+        )
 
     return changed, ip_patch_response.json["ip"]
 
 
 def absent_strategy(api, wished_ip):
-    response = api.get('ips')
+    response = api.get("ips")
     changed = False
 
     status_code = response.status_code
@@ -219,21 +219,21 @@ def absent_strategy(api, wished_ip):
 
     response = api.delete(f"/ips/{wished_ip['id']}")
     if not response.ok:
-        api.module.fail_json(msg=f'Error deleting IP [{response.status_code}: {response.json}]')
+        api.module.fail_json(msg=f"Error deleting IP [{response.status_code}: {response.json}]")
 
     return changed, response.json
 
 
 def core(module):
     wished_ip = {
-        "organization": module.params['organization'],
+        "organization": module.params["organization"],
         "reverse": module.params["reverse"],
         "id": module.params["id"],
-        "server": module.params["server"]
+        "server": module.params["server"],
     }
 
     region = module.params["region"]
-    module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+    module.params["api_url"] = SCALEWAY_LOCATION[region]["api_endpoint"]
 
     api = Scaleway(module=module)
     if module.params["state"] == "absent":
@@ -245,14 +245,16 @@ def core(module):
 
 def main():
     argument_spec = scaleway_argument_spec()
-    argument_spec.update(dict(
-        state=dict(default='present', choices=['absent', 'present']),
-        organization=dict(required=True),
-        server=dict(),
-        reverse=dict(),
-        region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
-        id=dict()
-    ))
+    argument_spec.update(
+        dict(
+            state=dict(default="present", choices=["absent", "present"]),
+            organization=dict(required=True),
+            server=dict(),
+            reverse=dict(),
+            region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+            id=dict(),
+        )
+    )
     module = AnsibleModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
@@ -261,5 +263,5 @@ def main():
     core(module)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/scaleway_ip_info.py b/plugins/modules/scaleway_ip_info.py
index 60dd8da62d2..315420d7f04 100644
--- a/plugins/modules/scaleway_ip_info.py
+++ b/plugins/modules/scaleway_ip_info.py
@@ -89,32 +89,31 @@
 
 
 class ScalewayIpInfo(Scaleway):
-
     def __init__(self, module):
         super().__init__(module)
-        self.name = 'ips'
+        self.name = "ips"
 
         region = module.params["region"]
-        self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+        self.module.params["api_url"] = SCALEWAY_LOCATION[region]["api_endpoint"]
 
 
 def main():
     argument_spec = scaleway_argument_spec()
-    argument_spec.update(dict(
-        region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
-    ))
+    argument_spec.update(
+        dict(
+            region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+        )
+    )
 
     module = AnsibleModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
     )
 
     try:
-        module.exit_json(
-            scaleway_ip_info=ScalewayIpInfo(module).get_resources()
-        )
+        module.exit_json(scaleway_ip_info=ScalewayIpInfo(module).get_resources())
     except ScalewayException as exc:
         module.fail_json(msg=exc.message)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/scaleway_lb.py b/plugins/modules/scaleway_lb.py
index e1c90ba3ebc..f90c2811f8b 100644
--- a/plugins/modules/scaleway_lb.py
+++ b/plugins/modules/scaleway_lb.py
@@ -164,17 +164,16 @@
 import time
 from ansible.module_utils.basic import AnsibleModule
 from ansible_collections.community.general.plugins.module_utils.datetime import now
-from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_REGIONS, SCALEWAY_ENDPOINT, scaleway_argument_spec, Scaleway
-
-STABLE_STATES = (
-    "ready",
-    "absent"
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+    SCALEWAY_REGIONS,
+    SCALEWAY_ENDPOINT,
+    scaleway_argument_spec,
+    Scaleway,
 )
 
-MUTABLE_ATTRIBUTES = (
-    "name",
-    "description"
-)
+STABLE_STATES = ("ready", "absent")
+
+MUTABLE_ATTRIBUTES = ("name", "description")
 
 
 def payload_from_wished_lb(wished_lb):
@@ -182,7 +181,7 @@ def payload_from_wished_lb(wished_lb):
         "organization_id": wished_lb["organization_id"],
         "name": wished_lb["name"],
         "tags": wished_lb["tags"],
-        "description": wished_lb["description"]
+        "description": wished_lb["description"],
     }
 
 
@@ -194,7 +193,7 @@ def fetch_state(api, lb):
         return "absent"
 
     if not response.ok:
-        msg = f'Error during state fetching: ({response.status_code}) {response.json}'
+        msg = f"Error during state fetching: ({response.status_code}) {response.json}"
         api.module.fail_json(msg=msg)
 
     try:
@@ -251,8 +250,7 @@ def present_strategy(api, wished_lb):
 
         # Create Load-balancer
         api.warn(payload_from_wished_lb(wished_lb))
-        creation_response = api.post(path=api.api_path,
-                                     data=payload_from_wished_lb(wished_lb))
+        creation_response = api.post(path=api.api_path, data=payload_from_wished_lb(wished_lb))
 
         if not creation_response.ok:
             msg = f"Error during lb creation: {creation_response.info['msg']}: '{creation_response.json['message']}' ({creation_response.json})"
@@ -263,8 +261,7 @@ def present_strategy(api, wished_lb):
         return changed, response.json
 
     target_lb = lb_lookup[wished_lb["name"]]
-    patch_payload = lb_attributes_should_be_changed(target_lb=target_lb,
-                                                    wished_lb=wished_lb)
+    patch_payload = lb_attributes_should_be_changed(target_lb=target_lb, wished_lb=wished_lb)
 
     if not patch_payload:
         return changed, target_lb
@@ -273,11 +270,12 @@ def present_strategy(api, wished_lb):
     if api.module.check_mode:
         return changed, {"status": "Load-balancer attributes would be changed."}
 
-    lb_patch_response = api.put(path=f"{api.api_path}/{target_lb['id']}",
-                                data=patch_payload)
+    lb_patch_response = api.put(path=f"{api.api_path}/{target_lb['id']}", data=patch_payload)
 
     if not lb_patch_response.ok:
-        api.module.fail_json(msg=f"Error during load-balancer attributes update: [{lb_patch_response.status_code}: {lb_patch_response.json['message']}]")
+        api.module.fail_json(
+            msg=f"Error during load-balancer attributes update: [{lb_patch_response.status_code}: {lb_patch_response.json['message']}]"
+        )
 
     wait_to_complete_state_transition(api=api, lb=target_lb)
     return changed, lb_patch_response.json
@@ -306,16 +304,13 @@ def absent_strategy(api, wished_lb):
     wait_to_complete_state_transition(api=api, lb=target_lb, force_wait=True)
     response = api.delete(path=f"{api.api_path}/{target_lb['id']}")
     if not response.ok:
-        api.module.fail_json(msg=f'Error deleting load-balancer [{response.status_code}: {response.json}]')
+        api.module.fail_json(msg=f"Error deleting load-balancer [{response.status_code}: {response.json}]")
 
     wait_to_complete_state_transition(api=api, lb=target_lb)
     return changed, response.json
 
 
-state_strategy = {
-    "present": present_strategy,
-    "absent": absent_strategy
-}
+state_strategy = {"present": present_strategy, "absent": absent_strategy}
 
 
 def core(module):
@@ -325,30 +320,31 @@
         "name": module.params["name"],
         "description": module.params["description"],
         "tags": module.params["tags"],
-        "organization_id": module.params["organization_id"]
+        "organization_id": module.params["organization_id"],
     }
 
-    module.params['api_url'] = SCALEWAY_ENDPOINT
+    module.params["api_url"] = SCALEWAY_ENDPOINT
     api = Scaleway(module=module)
     api.api_path = f"lb/v1/regions/{region}/lbs"
 
-    changed, summary = state_strategy[wished_load_balancer["state"]](api=api,
-                                                                     wished_lb=wished_load_balancer)
+    changed, summary = state_strategy[wished_load_balancer["state"]](api=api, wished_lb=wished_load_balancer)
     module.exit_json(changed=changed, scaleway_lb=summary)
 
 
 def main():
     argument_spec = scaleway_argument_spec()
-    argument_spec.update(dict(
-        name=dict(required=True),
-        description=dict(required=True),
-        region=dict(required=True, choices=SCALEWAY_REGIONS),
-        state=dict(choices=list(state_strategy.keys()), default='present'),
-        tags=dict(type="list", elements="str", default=[]),
-        organization_id=dict(required=True),
-        wait=dict(type="bool", default=False),
-        wait_timeout=dict(type="int", default=300),
-        wait_sleep_time=dict(type="int", default=3),
-    ))
+    argument_spec.update(
+        dict(
+            name=dict(required=True),
+            description=dict(required=True),
+            region=dict(required=True, choices=SCALEWAY_REGIONS),
+            state=dict(choices=list(state_strategy.keys()), default="present"),
+            tags=dict(type="list", elements="str", default=[]),
+            organization_id=dict(required=True),
+            wait=dict(type="bool", default=False),
+            wait_timeout=dict(type="int", default=300),
+            wait_sleep_time=dict(type="int", default=3),
+        )
+    )
     module = AnsibleModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
@@ -357,5 +353,5 @@ def main():
     core(module)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/scaleway_organization_info.py b/plugins/modules/scaleway_organization_info.py
index 57717f4c6ca..35ff636f5e2 100644
--- a/plugins/modules/scaleway_organization_info.py
+++ b/plugins/modules/scaleway_organization_info.py
@@ -75,22 +75,27 @@
 from ansible.module_utils.basic import AnsibleModule, env_fallback
 from ansible_collections.community.general.plugins.module_utils.scaleway import (
-    Scaleway, ScalewayException, scaleway_argument_spec
+    Scaleway,
+    ScalewayException,
+    scaleway_argument_spec,
 )
 
 
 class ScalewayOrganizationInfo(Scaleway):
-
     def __init__(self, module):
         super().__init__(module)
-        self.name = 'organizations'
+        self.name = "organizations"
 
 
 def main():
     argument_spec = scaleway_argument_spec()
-    argument_spec.update(dict(
-        api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
-    ))
+    argument_spec.update(
+        dict(
+            api_url=dict(
+                fallback=(env_fallback, ["SCW_API_URL"]), default="https://account.scaleway.com", aliases=["base_url"]
+            ),
+        )
+    )
 
     module = AnsibleModule(
         argument_spec=argument_spec,
@@ -98,12 +103,10 @@ def main():
     )
 
     try:
-        module.exit_json(
-            scaleway_organization_info=ScalewayOrganizationInfo(module).get_resources()
-        )
+        module.exit_json(scaleway_organization_info=ScalewayOrganizationInfo(module).get_resources())
     except ScalewayException as exc:
         module.fail_json(msg=exc.message)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/scaleway_private_network.py b/plugins/modules/scaleway_private_network.py
index bac81ebd551..74bdf27232d 100644
--- a/plugins/modules/scaleway_private_network.py
+++ b/plugins/modules/scaleway_private_network.py
@@ -117,52 +117,55 @@
 }
 """
 
-from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+    SCALEWAY_LOCATION,
+    scaleway_argument_spec,
+    Scaleway,
+)
 from ansible.module_utils.basic import AnsibleModule
 
 
 def get_private_network(api, name, page=1):
     page_size = 10
-    response = api.get('private-networks', params={'name': name, 'order_by': 'name_asc', 'page': page, 'page_size': page_size})
+    response = api.get(
+        "private-networks", params={"name": name, "order_by": "name_asc", "page": page, "page_size": page_size}
+    )
     if not response.ok:
         msg = f"Error during get private network creation: {response.info['msg']}: '{response.json['message']}' ({response.json})"
         api.module.fail_json(msg=msg)
 
-    if response.json['total_count'] == 0:
+    if response.json["total_count"] == 0:
         return None
 
     i = 0
-    while i < len(response.json['private_networks']):
-        if response.json['private_networks'][i]['name'] == name:
-            return response.json['private_networks'][i]
+    while i < len(response.json["private_networks"]):
+        if response.json["private_networks"][i]["name"] == name:
+            return response.json["private_networks"][i]
         i += 1
 
     # search on next page if needed
-    if (page * page_size) < response.json['total_count']:
+    if (page * page_size) < response.json["total_count"]:
         return get_private_network(api, name, page + 1)
 
     return None
 
 
 def present_strategy(api, wished_private_network):
-
     changed = False
-    private_network = get_private_network(api, wished_private_network['name'])
+    private_network = get_private_network(api, wished_private_network["name"])
     if private_network is not None:
-        if set(wished_private_network['tags']) == set(private_network['tags']):
+        if set(wished_private_network["tags"]) == set(private_network["tags"]):
             return changed, private_network
         else:
             # private network need to be updated
-            data = {'name': wished_private_network['name'],
-                    'tags': wished_private_network['tags']
-                    }
+            data = {"name": wished_private_network["name"], "tags": wished_private_network["tags"]}
             changed = True
             if api.module.check_mode:
                 return changed, {"status": "private network would be updated"}
 
             response = api.patch(path=f"private-networks/{private_network['id']}", data=data)
             if not response.ok:
-                api.module.fail_json(msg=f'Error updating private network [{response.status_code}: {response.json}]')
+                api.module.fail_json(msg=f"Error updating private network [{response.status_code}: {response.json}]")
 
             return changed, response.json
 
@@ -171,23 +174,23 @@ def present_strategy(api, wished_private_network):
     if api.module.check_mode:
         return changed, {"status": "private network would be created"}
 
-    data = {'name': wished_private_network['name'],
-            'project_id': wished_private_network['project'],
-            'tags': wished_private_network['tags']
-            }
+    data = {
+        "name": wished_private_network["name"],
+        "project_id": wished_private_network["project"],
+        "tags": wished_private_network["tags"],
+    }
 
-    response = api.post(path='private-networks/', data=data)
+    response = api.post(path="private-networks/", data=data)
 
     if not response.ok:
-        api.module.fail_json(msg=f'Error creating private network [{response.status_code}: {response.json}]')
+        api.module.fail_json(msg=f"Error creating private network [{response.status_code}: {response.json}]")
 
     return changed, response.json
 
 
 def absent_strategy(api, wished_private_network):
-
     changed = False
-    private_network = get_private_network(api, wished_private_network['name'])
+    private_network = get_private_network(api, wished_private_network["name"])
     if private_network is None:
         return changed, {}
 
@@ -198,21 +201,20 @@ def absent_strategy(api, wished_private_network):
 
     response = api.delete(f"private-networks/{private_network['id']}")
     if not response.ok:
-        api.module.fail_json(msg=f'Error deleting private network [{response.status_code}: {response.json}]')
+        api.module.fail_json(msg=f"Error deleting private network [{response.status_code}: {response.json}]")
 
     return changed, response.json
 
 
 def core(module):
-
     wished_private_network = {
-        "project": module.params['project'],
-        "tags": module.params['tags'],
-        "name": module.params['name']
+        "project": module.params["project"],
+        "tags": module.params["tags"],
+        "name": module.params["name"],
    }
 
     region = module.params["region"]
-    module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint_vpc"]
+    module.params["api_url"] = SCALEWAY_LOCATION[region]["api_endpoint_vpc"]
 
     api = Scaleway(module=module)
     if module.params["state"] == "absent":
@@ -224,13 +226,15 @@ def core(module):
 
 def main():
     argument_spec = scaleway_argument_spec()
-    argument_spec.update(dict(
-        state=dict(default='present', choices=['absent', 'present']),
-        project=dict(required=True),
-        region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
-        tags=dict(type="list", elements="str", default=[]),
-        name=dict()
-    ))
+    argument_spec.update(
+        dict(
+            state=dict(default="present", choices=["absent", "present"]),
+            project=dict(required=True),
+            region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+            tags=dict(type="list", elements="str", default=[]),
+            name=dict(),
+        )
+    )
     module = AnsibleModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
@@ -239,5 +243,5 @@ def main():
     core(module)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
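get_private_network above pages through results ten at a time, recursing until the name matches or total_count is exhausted. The same lookup written iteratively as a sketch, with the GET stubbed out (fetch_page stands in for api.get("private-networks", params=...) and is not part of the module):

def find_by_name(fetch_page, name, page_size=10):
    page = 1
    while True:
        body = fetch_page(page=page, page_size=page_size)
        for network in body["private_networks"]:
            if network["name"] == name:
                return network
        if page * page_size >= body["total_count"]:
            return None  # every page seen, no match
        page += 1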
diff --git a/plugins/modules/scaleway_security_group.py b/plugins/modules/scaleway_security_group.py
index 70980ce4a01..d614a53a64c 100644
--- a/plugins/modules/scaleway_security_group.py
+++ b/plugins/modules/scaleway_security_group.py
@@ -135,88 +135,93 @@
 }
 """
 
-from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+    SCALEWAY_LOCATION,
+    scaleway_argument_spec,
+    Scaleway,
+)
 from ansible.module_utils.basic import AnsibleModule
 from uuid import uuid4
 
 
 def payload_from_security_group(security_group):
-    return {
-        k: v
-        for k, v in security_group.items()
-        if k != 'id' and v is not None
-    }
+    return {k: v for k, v in security_group.items() if k != "id" and v is not None}
 
 
 def present_strategy(api, security_group):
-    ret = {'changed': False}
+    ret = {"changed": False}
 
-    response = api.get('security_groups')
+    response = api.get("security_groups")
     if not response.ok:
-        api.module.fail_json(msg=f'Error getting security groups "{response.info["msg"]}": "{response.json["message"]}" ({response.json})')
+        api.module.fail_json(
+            msg=f'Error getting security groups "{response.info["msg"]}": "{response.json["message"]}" ({response.json})'
+        )
 
-    security_group_lookup = {sg['name']: sg for sg in response.json['security_groups']}
+    security_group_lookup = {sg["name"]: sg for sg in response.json["security_groups"]}
 
-    if security_group['name'] not in security_group_lookup.keys():
-        ret['changed'] = True
+    if security_group["name"] not in security_group_lookup.keys():
+        ret["changed"] = True
         if api.module.check_mode:
             # Help user when check mode is enabled by defining id key
-            ret['scaleway_security_group'] = {'id': str(uuid4())}
+            ret["scaleway_security_group"] = {"id": str(uuid4())}
             return ret
 
         # Create Security Group
-        response = api.post('/security_groups',
-                            data=payload_from_security_group(security_group))
+        response = api.post("/security_groups", data=payload_from_security_group(security_group))
 
         if not response.ok:
             msg = f'Error during security group creation: "{response.info["msg"]}": "{response.json["message"]}" ({response.json})'
             api.module.fail_json(msg=msg)
-        ret['scaleway_security_group'] = response.json['security_group']
+        ret["scaleway_security_group"] = response.json["security_group"]
 
     else:
-        ret['scaleway_security_group'] = security_group_lookup[security_group['name']]
+        ret["scaleway_security_group"] = security_group_lookup[security_group["name"]]
 
     return ret
 
 
 def absent_strategy(api, security_group):
-    response = api.get('security_groups')
-    ret = {'changed': False}
+    response = api.get("security_groups")
+    ret = {"changed": False}
 
     if not response.ok:
-        api.module.fail_json(msg=f'Error getting security groups "{response.info["msg"]}": "{response.json["message"]}" ({response.json})')
+        api.module.fail_json(
+            msg=f'Error getting security groups "{response.info["msg"]}": "{response.json["message"]}" ({response.json})'
+        )
 
-    security_group_lookup = {sg['name']: sg for sg in response.json['security_groups']}
-    if security_group['name'] not in security_group_lookup.keys():
+    security_group_lookup = {sg["name"]: sg for sg in response.json["security_groups"]}
+    if security_group["name"] not in security_group_lookup.keys():
         return ret
 
-    ret['changed'] = True
+    ret["changed"] = True
     if api.module.check_mode:
         return ret
 
     response = api.delete(f"/security_groups/{security_group_lookup[security_group['name']]['id']}")
     if not response.ok:
-        api.module.fail_json(msg=f'Error deleting security group "{response.info["msg"]}": "{response.json["message"]}" ({response.json})')
+        api.module.fail_json(
+            msg=f'Error deleting security group "{response.info["msg"]}": "{response.json["message"]}" ({response.json})'
+        )
 
     return ret
 
 
 def core(module):
     security_group = {
-        'organization': module.params['organization'],
-        'name': module.params['name'],
-        'description': module.params['description'],
-        'stateful': module.params['stateful'],
-        'inbound_default_policy': module.params['inbound_default_policy'],
-        'outbound_default_policy': module.params['outbound_default_policy'],
-        'organization_default': module.params['organization_default'],
+        "organization": module.params["organization"],
+        "name": module.params["name"],
+        "description": module.params["description"],
+        "stateful": module.params["stateful"],
+        "inbound_default_policy": module.params["inbound_default_policy"],
+        "outbound_default_policy": module.params["outbound_default_policy"],
+        "organization_default": module.params["organization_default"],
     }
 
-    region = module.params['region']
-    module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']
+    region = module.params["region"]
+    module.params["api_url"] = SCALEWAY_LOCATION[region]["api_endpoint"]
 
     api = Scaleway(module=module)
 
-    if module.params['state'] == 'present':
+    if module.params["state"] == "present":
         summary = present_strategy(api=api, security_group=security_group)
     else:
         summary = absent_strategy(api=api, security_group=security_group)
@@ -225,25 +230,27 @@ def core(module):
 
 def main():
     argument_spec = scaleway_argument_spec()
-    argument_spec.update(dict(
-        state=dict(type='str', default='present', choices=['absent', 'present']),
-        organization=dict(type='str', required=True),
-        name=dict(type='str', required=True),
-        description=dict(type='str'),
-        region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())),
-        stateful=dict(type='bool', required=True),
-        inbound_default_policy=dict(type='str', choices=['accept', 'drop']),
-        outbound_default_policy=dict(type='str', choices=['accept', 'drop']),
-        organization_default=dict(type='bool'),
-    ))
+    argument_spec.update(
+        dict(
+            state=dict(type="str", default="present", choices=["absent", "present"]),
+            organization=dict(type="str", required=True),
+            name=dict(type="str", required=True),
+            description=dict(type="str"),
+            region=dict(type="str", required=True, choices=list(SCALEWAY_LOCATION.keys())),
+            stateful=dict(type="bool", required=True),
+            inbound_default_policy=dict(type="str", choices=["accept", "drop"]),
+            outbound_default_policy=dict(type="str", choices=["accept", "drop"]),
+            organization_default=dict(type="bool"),
+        )
+    )
     module = AnsibleModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
-        required_if=[['stateful', True, ['inbound_default_policy', 'outbound_default_policy']]]
+        required_if=[["stateful", True, ["inbound_default_policy", "outbound_default_policy"]]],
     )
 
     core(module)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/scaleway_security_group_info.py b/plugins/modules/scaleway_security_group_info.py
index 6bbbd9b62a6..55a8b4afb9d 100644
--- a/plugins/modules/scaleway_security_group_info.py
+++ b/plugins/modules/scaleway_security_group_info.py
@@ -93,32 +93,31 @@
 
 
 class ScalewaySecurityGroupInfo(Scaleway):
-
     def __init__(self, module):
         super().__init__(module)
-        self.name = 'security_groups'
+        self.name = "security_groups"
 
         region = module.params["region"]
-        self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+        self.module.params["api_url"] = SCALEWAY_LOCATION[region]["api_endpoint"]
 
 
 def main():
     argument_spec = scaleway_argument_spec()
-    argument_spec.update(dict(
-        region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
-    ))
+    argument_spec.update(
+        dict(
+            region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+        )
+    )
 
     module = AnsibleModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
     )
 
     try:
-        module.exit_json(
-            scaleway_security_group_info=ScalewaySecurityGroupInfo(module).get_resources()
-        )
+        module.exit_json(scaleway_security_group_info=ScalewaySecurityGroupInfo(module).get_resources())
     except ScalewayException as exc:
         module.fail_json(msg=exc.message)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/scaleway_security_group_rule.py b/plugins/modules/scaleway_security_group_rule.py
index 9146f983d9c..ccd4e40ffb6 100644
--- a/plugins/modules/scaleway_security_group_rule.py
+++ b/plugins/modules/scaleway_security_group_rule.py
@@ -141,78 +141,89 @@
 }
 """
 
-from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+    SCALEWAY_LOCATION,
+    scaleway_argument_spec,
+    Scaleway,
+    payload_from_object,
+)
 from ansible.module_utils.basic import AnsibleModule
 
 
 def get_sgr_from_api(security_group_rules, security_group_rule):
-    """ Check if a security_group_rule specs are present in security_group_rules
-        Return None if no rules match the specs
-        Return the rule if found
+    """Check if a security_group_rule specs are present in security_group_rules
+    Return None if no rules match the specs
+    Return the rule if found
     """
     for sgr in security_group_rules:
-        if (sgr['ip_range'] == security_group_rule['ip_range'] and sgr['dest_port_from'] == security_group_rule['dest_port_from'] and
-                sgr['direction'] == security_group_rule['direction'] and sgr['action'] == security_group_rule['action'] and
-                sgr['protocol'] == security_group_rule['protocol']):
+        if (
+            sgr["ip_range"] == security_group_rule["ip_range"]
+            and sgr["dest_port_from"] == security_group_rule["dest_port_from"]
+            and sgr["direction"] == security_group_rule["direction"]
+            and sgr["action"] == security_group_rule["action"]
+            and sgr["protocol"] == security_group_rule["protocol"]
+        ):
             return sgr
 
     return None
 
 
 def present_strategy(api, security_group_id, security_group_rule):
-    ret = {'changed': False}
+    ret = {"changed": False}
 
-    response = api.get(f'security_groups/{security_group_id}/rules')
+    response = api.get(f"security_groups/{security_group_id}/rules")
     if not response.ok:
         api.module.fail_json(
-            msg=f'Error getting security group rules "{response.info["msg"]}": "{response.json["message"]}" ({response.json})')
+            msg=f'Error getting security group rules "{response.info["msg"]}": "{response.json["message"]}" ({response.json})'
+        )
 
-    existing_rule = get_sgr_from_api(
-        response.json['rules'], security_group_rule)
+    existing_rule = get_sgr_from_api(response.json["rules"], security_group_rule)
 
     if not existing_rule:
-        ret['changed'] = True
+        ret["changed"] = True
         if api.module.check_mode:
             return ret
 
         # Create Security Group Rule
-        response = api.post(f'/security_groups/{security_group_id}/rules',
-                            data=payload_from_object(security_group_rule))
+        response = api.post(
+            f"/security_groups/{security_group_id}/rules", data=payload_from_object(security_group_rule)
+        )
 
         if not response.ok:
             api.module.fail_json(
-                msg=f'Error during security group rule creation: "{response.info["msg"]}": "{response.json["message"]}" ({response.json})')
-        ret['scaleway_security_group_rule'] = response.json['rule']
+                msg=f'Error during security group rule creation: "{response.info["msg"]}": "{response.json["message"]}" ({response.json})'
+            )
+        ret["scaleway_security_group_rule"] = response.json["rule"]
 
     else:
-        ret['scaleway_security_group_rule'] = existing_rule
+        ret["scaleway_security_group_rule"] = existing_rule
 
     return ret
 
 
 def absent_strategy(api, security_group_id, security_group_rule):
-    ret = {'changed': False}
+    ret = {"changed": False}
 
-    response = api.get(f'security_groups/{security_group_id}/rules')
+    response = api.get(f"security_groups/{security_group_id}/rules")
     if not response.ok:
         api.module.fail_json(
-            msg=f'Error getting security group rules "{response.info["msg"]}": "{response.json["message"]}" ({response.json})')
+            msg=f'Error getting security group rules "{response.info["msg"]}": "{response.json["message"]}" ({response.json})'
+        )
 
-    existing_rule = get_sgr_from_api(
-        response.json['rules'], security_group_rule)
+    existing_rule = get_sgr_from_api(response.json["rules"], security_group_rule)
 
     if not existing_rule:
         return ret
 
-    ret['changed'] = True
+    ret["changed"] = True
     if api.module.check_mode:
         return ret
 
-    response = api.delete(
-        f"/security_groups/{security_group_id}/rules/{existing_rule['id']}")
+    response = api.delete(f"/security_groups/{security_group_id}/rules/{existing_rule['id']}")
     if not response.ok:
         api.module.fail_json(
-            msg=f'Error deleting security group rule "{response.info["msg"]}": "{response.json["message"]}" ({response.json})')
+            msg=f'Error deleting security group rule "{response.info["msg"]}": "{response.json["message"]}" ({response.json})'
+        )
 
     return ret
 
@@ -221,40 +232,38 @@ def core(module):
     api = Scaleway(module=module)
 
     security_group_rule = {
-        'protocol': module.params['protocol'],
-        'dest_port_from': module.params['port'],
-        'ip_range': module.params['ip_range'],
-        'direction': module.params['direction'],
-        'action': module.params['action'],
+        "protocol": module.params["protocol"],
+        "dest_port_from": module.params["port"],
+        "ip_range": module.params["ip_range"],
+        "direction": module.params["direction"],
+        "action": module.params["action"],
     }
 
-    region = module.params['region']
-    module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']
+    region = module.params["region"]
+    module.params["api_url"] = SCALEWAY_LOCATION[region]["api_endpoint"]
 
-    if module.params['state'] == 'present':
+    if module.params["state"] == "present":
         summary = present_strategy(
-            api=api,
-            security_group_id=module.params['security_group'],
-            security_group_rule=security_group_rule)
+            api=api, security_group_id=module.params["security_group"], security_group_rule=security_group_rule
+        )
     else:
         summary = absent_strategy(
-            api=api,
-            security_group_id=module.params['security_group'],
-            security_group_rule=security_group_rule)
+            api=api, security_group_id=module.params["security_group"], security_group_rule=security_group_rule
+        )
     module.exit_json(**summary)
 
 
 def main():
     argument_spec = scaleway_argument_spec()
     argument_spec.update(
-        state=dict(type='str', default='present', choices=['absent', 'present']),
-        region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())),
-        protocol=dict(type='str', required=True, choices=['TCP', 'UDP', 'ICMP']),
-        port=dict(type='int', required=True),
-        ip_range=dict(type='str', default='0.0.0.0/0'),
-        direction=dict(type='str', required=True, choices=['inbound', 'outbound']),
-        action=dict(type='str', required=True, choices=['accept', 'drop']),
-        security_group=dict(type='str', required=True),
+        state=dict(type="str", default="present", choices=["absent", "present"]),
+        region=dict(type="str", required=True, choices=list(SCALEWAY_LOCATION.keys())),
+        protocol=dict(type="str", required=True, choices=["TCP", "UDP", "ICMP"]),
+        port=dict(type="int", required=True),
+        ip_range=dict(type="str", default="0.0.0.0/0"),
+        direction=dict(type="str", required=True, choices=["inbound", "outbound"]),
+        action=dict(type="str", required=True, choices=["accept", "drop"]),
+        security_group=dict(type="str", required=True),
     )
     module = AnsibleModule(
         argument_spec=argument_spec,
@@ -264,5 +273,5 @@ def main():
     core(module)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
ip_range=dict(type="str", default="0.0.0.0/0"), + direction=dict(type="str", required=True, choices=["inbound", "outbound"]), + action=dict(type="str", required=True, choices=["accept", "drop"]), + security_group=dict(type="str", required=True), ) module = AnsibleModule( argument_spec=argument_spec, @@ -264,5 +273,5 @@ def main(): core(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_server_info.py b/plugins/modules/scaleway_server_info.py index 85f23ee77e6..91eb42b2d23 100644 --- a/plugins/modules/scaleway_server_info.py +++ b/plugins/modules/scaleway_server_info.py @@ -175,20 +175,21 @@ class ScalewayServerInfo(Scaleway): - def __init__(self, module): super().__init__(module) - self.name = 'servers' + self.name = "servers" region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + self.module.params["api_url"] = SCALEWAY_LOCATION[region]["api_endpoint"] def main(): argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) + argument_spec.update( + dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + ) + ) module = AnsibleModule( argument_spec=argument_spec, @@ -196,12 +197,10 @@ def main(): ) try: - module.exit_json( - scaleway_server_info=ScalewayServerInfo(module).get_resources() - ) + module.exit_json(scaleway_server_info=ScalewayServerInfo(module).get_resources()) except ScalewayException as exc: module.fail_json(msg=exc.message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_snapshot_info.py b/plugins/modules/scaleway_snapshot_info.py index df932ee6d13..a38f05b2271 100644 --- a/plugins/modules/scaleway_snapshot_info.py +++ b/plugins/modules/scaleway_snapshot_info.py @@ -88,25 +88,26 @@ Scaleway, ScalewayException, scaleway_argument_spec, - SCALEWAY_LOCATION + SCALEWAY_LOCATION, ) class ScalewaySnapshotInfo(Scaleway): - def __init__(self, module): super().__init__(module) - self.name = 'snapshots' + self.name = "snapshots" region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + self.module.params["api_url"] = SCALEWAY_LOCATION[region]["api_endpoint"] def main(): argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) + argument_spec.update( + dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + ) + ) module = AnsibleModule( argument_spec=argument_spec, @@ -114,12 +115,10 @@ def main(): ) try: - module.exit_json( - scaleway_snapshot_info=ScalewaySnapshotInfo(module).get_resources() - ) + module.exit_json(scaleway_snapshot_info=ScalewaySnapshotInfo(module).get_resources()) except ScalewayException as exc: module.fail_json(msg=exc.message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_sshkey.py b/plugins/modules/scaleway_sshkey.py index f1e07cd8f29..d0707d8ca37 100644 --- a/plugins/modules/scaleway_sshkey.py +++ b/plugins/modules/scaleway_sshkey.py @@ -100,16 +100,15 @@ def extract_user_id(raw_organization_dict): def sshkey_user_patch(ssh_lookup): - ssh_list = {"ssh_public_keys": [{"key": key} - for key in ssh_lookup]} + ssh_list = {"ssh_public_keys": [{"key": key} for key in ssh_lookup]} return ssh_list def core(module): - ssh_pub_key = module.params['ssh_pub_key'] + ssh_pub_key = 
module.params["ssh_pub_key"] state = module.params["state"] account_api = Scaleway(module) - response = account_api.get('organizations') + response = account_api.get("organizations") status_code = response.status_code organization_json = response.json @@ -124,7 +123,7 @@ def core(module): except (KeyError, IndexError) as e: module.fail_json(changed=False, data="Error while extracting present SSH keys from API") - if state in ('present',): + if state in ("present",): if ssh_pub_key in present_sshkeys: module.exit_json(changed=False) @@ -135,14 +134,14 @@ def core(module): present_sshkeys.append(ssh_pub_key) payload = sshkey_user_patch(present_sshkeys) - response = account_api.patch(f'/users/{user_id}', data=payload) + response = account_api.patch(f"/users/{user_id}", data=payload) if response.ok: module.exit_json(changed=True, data=response.json) - module.fail_json(msg=f'Error creating ssh key [{response.status_code}: {response.json}]') + module.fail_json(msg=f"Error creating ssh key [{response.status_code}: {response.json}]") - elif state in ('absent',): + elif state in ("absent",): if ssh_pub_key not in present_sshkeys: module.exit_json(changed=False) @@ -152,21 +151,25 @@ def core(module): present_sshkeys.remove(ssh_pub_key) payload = sshkey_user_patch(present_sshkeys) - response = account_api.patch(f'/users/{user_id}', data=payload) + response = account_api.patch(f"/users/{user_id}", data=payload) if response.ok: module.exit_json(changed=True, data=response.json) - module.fail_json(msg=f'Error deleting ssh key [{response.status_code}: {response.json}]') + module.fail_json(msg=f"Error deleting ssh key [{response.status_code}: {response.json}]") def main(): argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - state=dict(default='present', choices=['absent', 'present']), - ssh_pub_key=dict(required=True), - api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']), - )) + argument_spec.update( + dict( + state=dict(default="present", choices=["absent", "present"]), + ssh_pub_key=dict(required=True), + api_url=dict( + fallback=(env_fallback, ["SCW_API_URL"]), default="https://account.scaleway.com", aliases=["base_url"] + ), + ) + ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, @@ -175,5 +178,5 @@ def main(): core(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_user_data.py b/plugins/modules/scaleway_user_data.py index fa42370b0e5..674ac649d83 100644 --- a/plugins/modules/scaleway_user_data.py +++ b/plugins/modules/scaleway_user_data.py @@ -80,7 +80,11 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_LOCATION, + scaleway_argument_spec, + Scaleway, +) def patch_user_data(compute_api, server_id, key, value): @@ -89,7 +93,7 @@ def patch_user_data(compute_api, server_id, key, value): path = f"servers/{server_id}/user_data/{key}" response = compute_api.patch(path=path, data=value, headers={"Content-Type": "text/plain"}) if not response.ok: - msg = f'Error during user_data patching: {response.status_code} {response.body}' + msg = f"Error during user_data patching: {response.status_code} {response.body}" compute_api.module.fail_json(msg=msg) return response @@ -101,7 +105,7 @@ def 
delete_user_data(compute_api, server_id, key): response = compute_api.delete(path=f"servers/{server_id}/user_data/{key}") if not response.ok: - msg = 'Error during user_data deleting: (%s) %s' % response.status_code, response.body + msg = "Error during user_data deleting: (%s) %s" % (response.status_code, response.body) compute_api.module.fail_json(msg=msg) return response @@ -113,7 +117,7 @@ def get_user_data(compute_api, server_id, key): path = f"servers/{server_id}/user_data/{key}" response = compute_api.get(path=path) if not response.ok: - msg = f'Error during user_data patching: {response.status_code} {response.body}' + msg = f"Error during user_data fetching: {response.status_code} {response.body}" compute_api.module.fail_json(msg=msg) return response.json @@ -125,18 +129,17 @@ def core(module): user_data = module.params["user_data"] changed = False - module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + module.params["api_url"] = SCALEWAY_LOCATION[region]["api_endpoint"] compute_api = Scaleway(module=module) user_data_list = compute_api.get(path=f"servers/{server_id}/user_data") if not user_data_list.ok: - msg = 'Error during user_data fetching: %s %s' % user_data_list.status_code, user_data_list.body + msg = "Error during user_data fetching: %s %s" % (user_data_list.status_code, user_data_list.body) compute_api.module.fail_json(msg=msg) present_user_data_keys = user_data_list.json["user_data"] present_user_data = { - key: get_user_data(compute_api=compute_api, server_id=server_id, key=key) - for key in present_user_data_keys + key: get_user_data(compute_api=compute_api, server_id=server_id, key=key) for key in present_user_data_keys } if present_user_data == user_data: @@ -145,7 +148,6 @@ def core(module): # First we remove keys that are not defined in the wished user_data for key in present_user_data: if key not in user_data: - changed = True if compute_api.module.check_mode: module.exit_json(changed=changed, msg={"status": f"User-data of {server_id} would be patched."}) @@ -155,7 +157,6 @@ def core(module): # Then we patch keys that are different for key, value in user_data.items(): if key not in present_user_data or value != present_user_data[key]: - changed = True if compute_api.module.check_mode: module.exit_json(changed=changed, msg={"status": f"User-data of {server_id} would be patched."}) @@ -167,11 +168,13 @@ def main(): argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - user_data=dict(type="dict"), - server_id=dict(required=True), - )) + argument_spec.update( + dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + user_data=dict(type="dict"), + server_id=dict(required=True), + ) + ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, @@ -180,5 +183,5 @@ def main(): core(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_volume.py b/plugins/modules/scaleway_volume.py index 83f2e4e7319..91be7342814 100644 --- a/plugins/modules/scaleway_volume.py +++ b/plugins/modules/scaleway_volume.py @@ -118,22 +118,26 @@ } """ -from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_LOCATION, + scaleway_argument_spec, + Scaleway, +) from ansible.module_utils.basic import AnsibleModule def 
core(module): region = module.params["region"] - state = module.params['state'] - name = module.params['name'] - organization = module.params['organization'] - project = module.params['project'] - size = module.params['size'] - volume_type = module.params['volume_type'] - module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + state = module.params["state"] + name = module.params["name"] + organization = module.params["organization"] + project = module.params["project"] + size = module.params["size"] + volume_type = module.params["volume_type"] + module.params["api_url"] = SCALEWAY_LOCATION[region]["api_endpoint"] account_api = Scaleway(module) - response = account_api.get('volumes') + response = account_api.get("volumes") status_code = response.status_code volumes_json = response.json @@ -144,24 +148,24 @@ def core(module): module.fail_json(msg=f"Error getting volume [{status_code}: {response.json['message']}]") volumeByName = None - for volume in volumes_json['volumes']: - if volume['project'] == project and volume['name'] == name: + for volume in volumes_json["volumes"]: + if volume["project"] == project and volume["name"] == name: volumeByName = volume - if state in ('present',): + if state in ("present",): if volumeByName is not None: module.exit_json(changed=False) - payload = {'name': name, 'project': project, 'size': size, 'volume_type': volume_type} + payload = {"name": name, "project": project, "size": size, "volume_type": volume_type} - response = account_api.post('/volumes', payload) + response = account_api.post("/volumes", payload) if response.ok: module.exit_json(changed=True, data=response.json) - module.fail_json(msg=f'Error creating volume [{response.status_code}: {response.json}]') + module.fail_json(msg=f"Error creating volume [{response.status_code}: {response.json}]") - elif state in ('absent',): + elif state in ("absent",): if volumeByName is None: module.exit_json(changed=False) @@ -172,33 +176,35 @@ def core(module): if response.status_code == 204: module.exit_json(changed=True, data=response.json) - module.fail_json(msg=f'Error deleting volume [{response.status_code}: {response.json}]') + module.fail_json(msg=f"Error deleting volume [{response.status_code}: {response.json}]") def main(): argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - state=dict(default='present', choices=['absent', 'present']), - name=dict(required=True), - size=dict(type='int'), - project=dict(), - organization=dict(), - volume_type=dict(), - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) + argument_spec.update( + dict( + state=dict(default="present", choices=["absent", "present"]), + name=dict(required=True), + size=dict(type="int"), + project=dict(), + organization=dict(), + volume_type=dict(), + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + ) + ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ('organization', 'project'), + ("organization", "project"), ], required_one_of=[ - ('organization', 'project'), + ("organization", "project"), ], ) core(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/scaleway_volume_info.py b/plugins/modules/scaleway_volume_info.py index 8e418968fa0..1293bec265f 100644 --- a/plugins/modules/scaleway_volume_info.py +++ b/plugins/modules/scaleway_volume_info.py @@ -83,25 +83,29 @@ from ansible.module_utils.basic import AnsibleModule from 
ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, ScalewayException, scaleway_argument_spec, - SCALEWAY_LOCATION) + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION, +) class ScalewayVolumeInfo(Scaleway): - def __init__(self, module): super().__init__(module) - self.name = 'volumes' + self.name = "volumes" region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + self.module.params["api_url"] = SCALEWAY_LOCATION[region]["api_endpoint"] def main(): argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) + argument_spec.update( + dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + ) + ) module = AnsibleModule( argument_spec=argument_spec, @@ -109,12 +113,10 @@ def main(): ) try: - module.exit_json( - scaleway_volume_info=ScalewayVolumeInfo(module).get_resources() - ) + module.exit_json(scaleway_volume_info=ScalewayVolumeInfo(module).get_resources()) except ScalewayException as exc: module.fail_json(msg=exc.message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sefcontext.py b/plugins/modules/sefcontext.py index d1d399f8da9..9014471cb04 100644 --- a/plugins/modules/sefcontext.py +++ b/plugins/modules/sefcontext.py @@ -142,6 +142,7 @@ SELINUX_IMP_ERR = None try: import selinux + HAVE_SELINUX = True except ImportError: SELINUX_IMP_ERR = traceback.format_exc() @@ -150,6 +151,7 @@ SEOBJECT_IMP_ERR = None try: import seobject + HAVE_SEOBJECT = True except ImportError: SEOBJECT_IMP_ERR = traceback.format_exc() @@ -170,14 +172,14 @@ # Make backward compatible option_to_file_type_str = dict( - a='all files', - b='block device', - c='character device', - d='directory', - f='regular file', - l='symbolic link', - p='named pipe', - s='socket', + a="all files", + b="block device", + c="character device", + d="directory", + f="regular file", + l="symbolic link", + p="named pipe", + s="socket", ) @@ -186,7 +188,7 @@ def get_runtime_status(ignore_selinux_state=False): def semanage_fcontext_exists(sefcontext, target, ftype): - ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. ''' + """Get the SELinux file context mapping definition from policy. Return None if it does not exist.""" # Beware that records comprise of a string representation of the file_type record = (target, option_to_file_type_str[ftype]) @@ -198,16 +200,16 @@ def semanage_fcontext_exists(sefcontext, target, ftype): def semanage_fcontext_substitute_exists(sefcontext, target): - ''' Get the SELinux file context path substitution definition from policy. Return None if it does not exist. ''' + """Get the SELinux file context path substitution definition from policy. Return None if it does not exist.""" return sefcontext.equiv_dist.get(target, sefcontext.equiv.get(target)) -def semanage_fcontext_modify(module, result, target, ftype, setype, substitute, do_reload, serange, seuser, sestore=''): - ''' Add or modify SELinux file context mapping definition to the policy. 
''' +def semanage_fcontext_modify(module, result, target, ftype, setype, substitute, do_reload, serange, seuser, sestore=""): + """Add or modify SELinux file context mapping definition to the policy.""" changed = False - prepared_diff = '' + prepared_diff = "" try: sefcontext = seobject.fcontextRecords(sestore) @@ -229,23 +231,25 @@ def semanage_fcontext_modify(module, result, target, ftype, setype, substitute, changed = True if module._diff: - prepared_diff += '# Change to semanage file context mappings\n' - prepared_diff += f'-{target} {ftype} {orig_seuser}:{orig_serole}:{orig_setype}:{orig_serange}\n' - prepared_diff += f'+{target} {ftype} {seuser}:{orig_serole}:{setype}:{serange}\n' + prepared_diff += "# Change to semanage file context mappings\n" + prepared_diff += ( + f"-{target} {ftype} {orig_seuser}:{orig_serole}:{orig_setype}:{orig_serange}\n" + ) + prepared_diff += f"+{target} {ftype} {seuser}:{orig_serole}:{setype}:{serange}\n" else: # Add missing entry if seuser is None: - seuser = 'system_u' + seuser = "system_u" if serange is None: - serange = 's0' + serange = "s0" if not module.check_mode: sefcontext.add(target, setype, ftype, serange, seuser) changed = True if module._diff: - prepared_diff += '# Addition to semanage file context mappings\n' - prepared_diff += f'+{target} {ftype} {seuser}:object_r:{setype}:{serange}\n' + prepared_diff += "# Addition to semanage file context mappings\n" + prepared_diff += f"+{target} {ftype} {seuser}:object_r:{setype}:{serange}\n" else: exists = semanage_fcontext_substitute_exists(sefcontext, target) if exists: @@ -258,32 +262,32 @@ def semanage_fcontext_modify(module, result, target, ftype, setype, substitute, changed = True if module._diff: - prepared_diff += '# Change to semanage file context path substitutions\n' - prepared_diff += f'-{target} = {orig_substitute}\n' - prepared_diff += f'+{target} = {substitute}\n' + prepared_diff += "# Change to semanage file context path substitutions\n" + prepared_diff += f"-{target} = {orig_substitute}\n" + prepared_diff += f"+{target} = {substitute}\n" else: # Add missing path substitution entry if not module.check_mode: sefcontext.add_equal(target, substitute) changed = True if module._diff: - prepared_diff += '# Addition to semanage file context path substitutions\n' - prepared_diff += f'+{target} = {substitute}\n' + prepared_diff += "# Addition to semanage file context path substitutions\n" + prepared_diff += f"+{target} = {substitute}\n" except Exception as e: module.fail_json(msg=f"{e.__class__.__name__}: {e}\n") if module._diff and prepared_diff: - result['diff'] = dict(prepared=prepared_diff) + result["diff"] = dict(prepared=prepared_diff) module.exit_json(changed=changed, seuser=seuser, serange=serange, **result) -def semanage_fcontext_delete(module, result, target, ftype, setype, substitute, do_reload, sestore=''): - ''' Delete SELinux file context mapping definition from the policy. 
''' +def semanage_fcontext_delete(module, result, target, ftype, setype, substitute, do_reload, sestore=""): + """Delete SELinux file context mapping definition from the policy.""" changed = False - prepared_diff = '' + prepared_diff = "" try: sefcontext = seobject.fcontextRecords(sestore) @@ -299,9 +303,13 @@ def semanage_fcontext_delete(module, result, target, ftype, setype, substitute, changed = True if module._diff: - prepared_diff += '# Deletion to semanage file context mappings\n' - prepared_diff += f'-{target} {ftype} {exists[0]}:{exists[1]}:{exists[2]}:{exists[3]}\n' - if substitute_exists and setype is None and ((substitute is not None and substitute_exists == substitute) or substitute is None): + prepared_diff += "# Deletion from semanage file context mappings\n" + prepared_diff += f"-{target} {ftype} {exists[0]}:{exists[1]}:{exists[2]}:{exists[3]}\n" + if ( + substitute_exists + and setype is None + and ((substitute is not None and substitute_exists == substitute) or substitute is None) + ): # Remove existing path substitution entry orig_substitute = substitute_exists @@ -310,14 +318,14 @@ def semanage_fcontext_delete(module, result, target, ftype, setype, substitute, changed = True if module._diff: - prepared_diff += '# Deletion to semanage file context path substitutions\n' - prepared_diff += f'-{target} = {orig_substitute}\n' + prepared_diff += "# Deletion from semanage file context path substitutions\n" + prepared_diff += f"-{target} = {orig_substitute}\n" except Exception as e: module.fail_json(msg=f"{e.__class__.__name__}: {e}\n") if module._diff and prepared_diff: - result['diff'] = dict(prepared=prepared_diff) + result["diff"] = dict(prepared=prepared_diff) module.exit_json(changed=changed, **result) @@ -325,26 +333,26 @@ def semanage_fcontext_delete(module, result, target, ftype, setype, substitute, def main(): module = AnsibleModule( argument_spec=dict( - ignore_selinux_state=dict(type='bool', default=False), - target=dict(type='str', required=True, aliases=['path']), - ftype=dict(type='str', default='a', choices=list(option_to_file_type_str.keys())), - setype=dict(type='str'), - substitute=dict(type='str', aliases=['equal']), - seuser=dict(type='str'), - selevel=dict(type='str', aliases=['serange']), - state=dict(type='str', default='present', choices=['absent', 'present']), - reload=dict(type='bool', default=True), + ignore_selinux_state=dict(type="bool", default=False), + target=dict(type="str", required=True, aliases=["path"]), + ftype=dict(type="str", default="a", choices=list(option_to_file_type_str.keys())), + setype=dict(type="str"), + substitute=dict(type="str", aliases=["equal"]), + seuser=dict(type="str"), + selevel=dict(type="str", aliases=["serange"]), + state=dict(type="str", default="present", choices=["absent", "present"]), + reload=dict(type="bool", default=True), ), mutually_exclusive=[ - ('setype', 'substitute'), - ('substitute', 'ftype'), - ('substitute', 'seuser'), - ('substitute', 'selevel'), + ("setype", "substitute"), + ("substitute", "ftype"), + ("substitute", "seuser"), + ("substitute", "selevel"), ], required_if=[ - ('state', 'present', ('setype', 'substitute'), True), + ("state", "present", ("setype", "substitute"), True), ], supports_check_mode=True, ) if not HAVE_SELINUX: @@ -353,29 +360,29 @@ def main(): if not HAVE_SEOBJECT: module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR) - ignore_selinux_state = 
module.params["ignore_selinux_state"] if not get_runtime_status(ignore_selinux_state): module.fail_json(msg="SELinux is disabled on this host.") - target = module.params['target'] - ftype = module.params['ftype'] - setype = module.params['setype'] - substitute = module.params['substitute'] - seuser = module.params['seuser'] - serange = module.params['selevel'] - state = module.params['state'] - do_reload = module.params['reload'] + target = module.params["target"] + ftype = module.params["ftype"] + setype = module.params["setype"] + substitute = module.params["substitute"] + seuser = module.params["seuser"] + serange = module.params["selevel"] + state = module.params["state"] + do_reload = module.params["reload"] result = dict(target=target, ftype=ftype, setype=setype, substitute=substitute, state=state) - if state == 'present': + if state == "present": semanage_fcontext_modify(module, result, target, ftype, setype, substitute, do_reload, serange, seuser) - elif state == 'absent': + elif state == "absent": semanage_fcontext_delete(module, result, target, ftype, setype, substitute, do_reload) else: module.fail_json(msg=f'Invalid value of argument "state": {state}') -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/selinux_permissive.py b/plugins/modules/selinux_permissive.py index 64d77e33cf0..2e743d17917 100644 --- a/plugins/modules/selinux_permissive.py +++ b/plugins/modules/selinux_permissive.py @@ -63,6 +63,7 @@ SEOBJECT_IMP_ERR = None try: import seobject + HAVE_SEOBJECT = True except ImportError: SEOBJECT_IMP_ERR = traceback.format_exc() @@ -74,24 +75,23 @@ def main(): module = AnsibleModule( argument_spec=dict( - domain=dict(type='str', required=True, aliases=['name']), - store=dict(type='str', default=''), - permissive=dict(type='bool', required=True), - no_reload=dict(type='bool', default=False), + domain=dict(type="str", required=True, aliases=["name"]), + store=dict(type="str", default=""), + permissive=dict(type="bool", required=True), + no_reload=dict(type="bool", default=False), ), supports_check_mode=True, ) # global vars changed = False - store = module.params['store'] - permissive = module.params['permissive'] - domain = module.params['domain'] - no_reload = module.params['no_reload'] + store = module.params["store"] + permissive = module.params["permissive"] + domain = module.params["domain"] + no_reload = module.params["no_reload"] if not HAVE_SEOBJECT: - module.fail_json(changed=False, msg=missing_required_lib("policycoreutils-python"), - exception=SEOBJECT_IMP_ERR) + module.fail_json(changed=False, msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR) try: permissive_domains = seobject.permissiveRecords(store) @@ -99,7 +99,7 @@ def main(): module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc()) # not supported on EL 6 - if 'set_reload' in dir(permissive_domains): + if "set_reload" in dir(permissive_domains): permissive_domains.set_reload(not no_reload) try: @@ -124,9 +124,8 @@ def main(): module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc()) changed = True - module.exit_json(changed=changed, store=store, - permissive=permissive, domain=domain) + module.exit_json(changed=changed, store=store, permissive=permissive, domain=domain) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/selogin.py b/plugins/modules/selogin.py index 7b0144893e1..fcd24f0a06a 100644 --- a/plugins/modules/selogin.py +++ 
b/plugins/modules/selogin.py @@ -92,6 +92,7 @@ SELINUX_IMP_ERR = None try: import selinux + HAVE_SELINUX = True except ImportError: SELINUX_IMP_ERR = traceback.format_exc() @@ -100,6 +101,7 @@ SEOBJECT_IMP_ERR = None try: import seobject + HAVE_SEOBJECT = True except ImportError: SEOBJECT_IMP_ERR = traceback.format_exc() @@ -109,8 +111,8 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib -def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''): - """ Add linux user to SELinux user mapping +def semanage_login_add(module, login, seuser, do_reload, serange="s0", sestore=""): + """Add linux user to SELinux user mapping :type module: AnsibleModule :param module: Ansible module @@ -156,8 +158,8 @@ def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=' return change -def semanage_login_del(module, login, seuser, do_reload, sestore=''): - """ Delete linux user to SELinux user mapping +def semanage_login_del(module, login, seuser, do_reload, sestore=""): + """Delete linux user to SELinux user mapping :type module: AnsibleModule :param module: Ansible module @@ -201,17 +203,15 @@ def get_runtime_status(ignore_selinux_state=False): def main(): module = AnsibleModule( argument_spec=dict( - ignore_selinux_state=dict(type='bool', default=False), - login=dict(type='str', required=True), - seuser=dict(type='str'), - selevel=dict(type='str', aliases=['serange'], default='s0'), - state=dict(type='str', default='present', choices=['absent', 'present']), - reload=dict(type='bool', default=True), + ignore_selinux_state=dict(type="bool", default=False), + login=dict(type="str", required=True), + seuser=dict(type="str"), + selevel=dict(type="str", aliases=["serange"], default="s0"), + state=dict(type="str", default="present", choices=["absent", "present"]), + reload=dict(type="bool", default=True), ), - required_if=[ - ["state", "present", ["seuser"]] - ], - supports_check_mode=True + required_if=[["state", "present", ["seuser"]]], + supports_check_mode=True, ) if not HAVE_SELINUX: module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR) @@ -219,33 +219,33 @@ def main(): if not HAVE_SEOBJECT: module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR) - ignore_selinux_state = module.params['ignore_selinux_state'] + ignore_selinux_state = module.params["ignore_selinux_state"] if not get_runtime_status(ignore_selinux_state): module.fail_json(msg="SELinux is disabled on this host.") - login = module.params['login'] - seuser = module.params['seuser'] - serange = module.params['selevel'] - state = module.params['state'] - do_reload = module.params['reload'] + login = module.params["login"] + seuser = module.params["seuser"] + serange = module.params["selevel"] + state = module.params["state"] + do_reload = module.params["reload"] result = { - 'login': login, - 'seuser': seuser, - 'serange': serange, - 'state': state, + "login": login, + "seuser": seuser, + "serange": serange, + "state": state, } - if state == 'present': - result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange) - elif state == 'absent': - result['changed'] = semanage_login_del(module, login, seuser, do_reload) + if state == "present": + result["changed"] = semanage_login_add(module, login, seuser, do_reload, serange) + elif state == "absent": + result["changed"] = semanage_login_del(module, login, seuser, do_reload) else: module.fail_json(msg=f'Invalid value of argument 
"state": {state}') module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sendgrid.py b/plugins/modules/sendgrid.py index cb67f3ad5f5..e6676a49365 100644 --- a/plugins/modules/sendgrid.py +++ b/plugins/modules/sendgrid.py @@ -132,6 +132,7 @@ SENDGRID_IMP_ERR = None try: import sendgrid + HAS_SENDGRID = True except ImportError: SENDGRID_IMP_ERR = traceback.format_exc() @@ -142,26 +143,39 @@ from ansible.module_utils.urls import fetch_url -def post_sendgrid_api(module, username, password, from_address, to_addresses, - subject, body, api_key=None, cc=None, bcc=None, attachments=None, - html_body=False, from_name=None, headers=None): - +def post_sendgrid_api( + module, + username, + password, + from_address, + to_addresses, + subject, + body, + api_key=None, + cc=None, + bcc=None, + attachments=None, + html_body=False, + from_name=None, + headers=None, +): if not HAS_SENDGRID: SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json" AGENT = "Ansible" - data = {'api_user': username, 'api_key': password, - 'from': from_address, 'subject': subject, 'text': body} + data = {"api_user": username, "api_key": password, "from": from_address, "subject": subject, "text": body} encoded_data = urlencode(data) - to_addresses_api = '' + to_addresses_api = "" for recipient in to_addresses: - recipient = to_bytes(recipient, errors='surrogate_or_strict') - to_addresses_api += f'&to[]={recipient}' + recipient = to_bytes(recipient, errors="surrogate_or_strict") + to_addresses_api += f"&to[]={recipient}" encoded_data += to_addresses_api - headers = {'User-Agent': AGENT, - 'Content-type': 'application/x-www-form-urlencoded', - 'Accept': 'application/json'} - return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST') + headers = { + "User-Agent": AGENT, + "Content-type": "application/x-www-form-urlencoded", + "Accept": "application/json", + } + return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method="POST") else: # Remove this check when adding Sendgrid API v3 support if LooseVersion(sendgrid.version.__version__) > LooseVersion("1.6.22"): @@ -194,7 +208,7 @@ def post_sendgrid_api(module, username, password, from_address, to_addresses, message.add_attachment(name, f) if from_name: - message.set_from(f'{from_name} <{from_address}.') + message.set_from(f"{from_name} <{from_address}.") else: message.set_from(from_address) @@ -204,6 +218,8 @@ def post_sendgrid_api(module, username, password, from_address, to_addresses, message.set_text(body) return sg.send(message) + + # ======================================= # Main # @@ -215,53 +231,62 @@ def main(): username=dict(), password=dict(no_log=True), api_key=dict(no_log=True), - bcc=dict(type='list', elements='str'), - cc=dict(type='list', elements='str'), - headers=dict(type='dict'), + bcc=dict(type="list", elements="str"), + cc=dict(type="list", elements="str"), + headers=dict(type="dict"), from_address=dict(required=True), from_name=dict(), - to_addresses=dict(required=True, type='list', elements='str'), + to_addresses=dict(required=True, type="list", elements="str"), subject=dict(required=True), body=dict(required=True), - html_body=dict(default=False, type='bool'), - attachments=dict(type='list', elements='path') + html_body=dict(default=False, type="bool"), + attachments=dict(type="list", elements="path"), ), supports_check_mode=True, - mutually_exclusive=[ - ['api_key', 'password'], - ['api_key', 'username'] - ], - 
required_together=[['username', 'password']], + mutually_exclusive=[["api_key", "password"], ["api_key", "username"]], + required_together=[["username", "password"]], ) - username = module.params['username'] - password = module.params['password'] - api_key = module.params['api_key'] - bcc = module.params['bcc'] - cc = module.params['cc'] - headers = module.params['headers'] - from_name = module.params['from_name'] - from_address = module.params['from_address'] - to_addresses = module.params['to_addresses'] - subject = module.params['subject'] - body = module.params['body'] - html_body = module.params['html_body'] - attachments = module.params['attachments'] + username = module.params["username"] + password = module.params["password"] + api_key = module.params["api_key"] + bcc = module.params["bcc"] + cc = module.params["cc"] + headers = module.params["headers"] + from_name = module.params["from_name"] + from_address = module.params["from_address"] + to_addresses = module.params["to_addresses"] + subject = module.params["subject"] + body = module.params["body"] + html_body = module.params["html_body"] + attachments = module.params["attachments"] sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, html_body, attachments] if any(lib_arg is not None for lib_arg in sendgrid_lib_args) and not HAS_SENDGRID: - reason = 'when using any of the following arguments: ' \ - 'api_key, bcc, cc, headers, from_name, html_body, attachments' - module.fail_json(msg=missing_required_lib('sendgrid', reason=reason), - exception=SENDGRID_IMP_ERR) - - response, info = post_sendgrid_api(module, username, password, - from_address, to_addresses, subject, body, attachments=attachments, - bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key) + reason = ( + "when using any of the following arguments: api_key, bcc, cc, headers, from_name, html_body, attachments" + ) + module.fail_json(msg=missing_required_lib("sendgrid", reason=reason), exception=SENDGRID_IMP_ERR) + + response, info = post_sendgrid_api( + module, + username, + password, + from_address, + to_addresses, + subject, + body, + attachments=attachments, + bcc=bcc, + cc=cc, + headers=headers, + html_body=html_body, + api_key=api_key, + ) if not HAS_SENDGRID: - if info['status'] != 200: + if info["status"] != 200: module.fail_json(msg=f"unable to send email through SendGrid API: {info['msg']}") else: if response != 200: @@ -270,5 +295,5 @@ def main(): module.exit_json(msg=subject, changed=False) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sensu_check.py b/plugins/modules/sensu_check.py index 2c85af87657..0432fd93a20 100644 --- a/plugins/modules/sensu_check.py +++ b/plugins/modules/sensu_check.py @@ -187,137 +187,143 @@ from ansible.module_utils.common.text.converters import to_native -def sensu_check(module, path, name, state='present', backup=False): +def sensu_check(module, path, name, state="present", backup=False): changed = False reasons = [] stream = None try: try: - stream = open(path, 'r') + stream = open(path, "r") config = json.load(stream) except IOError as e: if e.errno == 2: # File not found, non-fatal - if state == 'absent': - reasons.append('file did not exist and state is `absent\'') + if state == "absent": + reasons.append("file did not exist and state is `absent'") return changed, reasons config = {} else: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) except ValueError: - msg = f'{path} contains invalid JSON' + msg = f"{path} contains invalid JSON" 
module.fail_json(msg=msg) finally: if stream: stream.close() - if 'checks' not in config: - if state == 'absent': - reasons.append('`checks\' section did not exist and state is `absent\'') + if "checks" not in config: + if state == "absent": + reasons.append("`checks' section did not exist and state is `absent'") return changed, reasons - config['checks'] = {} + config["checks"] = {} changed = True - reasons.append('`checks\' section did not exist') + reasons.append("`checks' section did not exist") - if state == 'absent': - if name in config['checks']: - del config['checks'][name] + if state == "absent": + if name in config["checks"]: + del config["checks"][name] changed = True - reasons.append('check was present and state is `absent\'') + reasons.append("check was present and state is `absent'") - if state == 'present': - if name not in config['checks']: + if state == "present": + if name not in config["checks"]: check = {} - config['checks'][name] = check + config["checks"][name] = check changed = True - reasons.append('check was absent and state is `present\'') + reasons.append("check was absent and state is `present'") else: - check = config['checks'][name] - simple_opts = ['command', - 'handlers', - 'subscribers', - 'interval', - 'timeout', - 'ttl', - 'handle', - 'dependencies', - 'standalone', - 'publish', - 'occurrences', - 'refresh', - 'aggregate', - 'low_flap_threshold', - 'high_flap_threshold', - 'source', - ] + check = config["checks"][name] + simple_opts = [ + "command", + "handlers", + "subscribers", + "interval", + "timeout", + "ttl", + "handle", + "dependencies", + "standalone", + "publish", + "occurrences", + "refresh", + "aggregate", + "low_flap_threshold", + "high_flap_threshold", + "source", + ] for opt in simple_opts: if module.params[opt] is not None: if opt not in check or check[opt] != module.params[opt]: check[opt] = module.params[opt] changed = True - reasons.append(f'`{opt}\' did not exist or was different') + reasons.append(f"`{opt}' did not exist or was different") else: if opt in check: del check[opt] changed = True - reasons.append(f'`{opt}\' was removed') + reasons.append(f"`{opt}' was removed") - if module.params['custom']: + if module.params["custom"]: # Convert to json - custom_params = module.params['custom'] - overwrited_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']) + custom_params = module.params["custom"] + overwrited_fields = set(custom_params.keys()) & set( + simple_opts + ["type", "subdue", "subdue_begin", "subdue_end"] + ) if overwrited_fields: msg = f'You can\'t overwriting standard module parameters via "custom". 
You are trying overwrite: {list(overwrited_fields)}' module.fail_json(msg=msg) for k, v in custom_params.items(): - if k in config['checks'][name]: - if not config['checks'][name][k] == v: + if k in config["checks"][name]: + if not config["checks"][name][k] == v: changed = True - reasons.append(f'`custom param {k}\' was changed') + reasons.append(f"`custom param {k}' was changed") else: changed = True - reasons.append(f'`custom param {k}\' was added') + reasons.append(f"`custom param {k}' was added") check[k] = v simple_opts += custom_params.keys() # Remove obsolete custom params - for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']): + for opt in set(config["checks"][name].keys()) - set( + simple_opts + ["type", "subdue", "subdue_begin", "subdue_end"] + ): changed = True - reasons.append(f'`custom param {opt}\' was deleted') + reasons.append(f"`custom param {opt}' was deleted") del check[opt] - if module.params['metric']: - if 'type' not in check or check['type'] != 'metric': - check['type'] = 'metric' + if module.params["metric"]: + if "type" not in check or check["type"] != "metric": + check["type"] = "metric" changed = True - reasons.append('`type\' was not defined or not `metric\'') - if not module.params['metric'] and 'type' in check: - del check['type'] + reasons.append("`type' was not defined or not `metric'") + if not module.params["metric"] and "type" in check: + del check["type"] changed = True - reasons.append('`type\' was defined') - - if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None: - subdue = {'begin': module.params['subdue_begin'], - 'end': module.params['subdue_end'], - } - if 'subdue' not in check or check['subdue'] != subdue: - check['subdue'] = subdue + reasons.append("`type' was defined") + + if module.params["subdue_begin"] is not None and module.params["subdue_end"] is not None: + subdue = { + "begin": module.params["subdue_begin"], + "end": module.params["subdue_end"], + } + if "subdue" not in check or check["subdue"] != subdue: + check["subdue"] = subdue changed = True - reasons.append('`subdue\' did not exist or was different') + reasons.append("`subdue' did not exist or was different") else: - if 'subdue' in check: - del check['subdue'] + if "subdue" in check: + del check["subdue"] changed = True - reasons.append('`subdue\' was removed') + reasons.append("`subdue' was removed") if changed and not module.check_mode: if backup: module.backup_local(path) try: try: - stream = open(path, 'w') - stream.write(json.dumps(config, indent=2) + '\n') + stream = open(path, "w") + stream.write(json.dumps(config, indent=2) + "\n") except IOError as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) finally: @@ -328,50 +334,48 @@ def sensu_check(module, path, name, state='present', backup=False): def main(): - - arg_spec = {'name': {'type': 'str', 'required': True}, - 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'}, - 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, - 'backup': {'type': 'bool', 'default': False}, - 'command': {'type': 'str'}, - 'handlers': {'type': 'list', 'elements': 'str'}, - 'subscribers': {'type': 'list', 'elements': 'str'}, - 'interval': {'type': 'int'}, - 'timeout': {'type': 'int'}, - 'ttl': {'type': 'int'}, - 'handle': {'type': 'bool'}, - 'subdue_begin': {'type': 'str'}, - 'subdue_end': {'type': 'str'}, - 'dependencies': {'type': 'list', 'elements': 'str'}, - 'metric': {'type': 
'bool', 'default': False}, - 'standalone': {'type': 'bool'}, - 'publish': {'type': 'bool'}, - 'occurrences': {'type': 'int'}, - 'refresh': {'type': 'int'}, - 'aggregate': {'type': 'bool'}, - 'low_flap_threshold': {'type': 'int'}, - 'high_flap_threshold': {'type': 'int'}, - 'custom': {'type': 'dict'}, - 'source': {'type': 'str'}, - } - - required_together = [['subdue_begin', 'subdue_end']] - - module = AnsibleModule(argument_spec=arg_spec, - required_together=required_together, - supports_check_mode=True) - if module.params['state'] != 'absent' and module.params['command'] is None: - module.fail_json(msg="missing required arguments: %s" % ",".join(['command'])) - - path = module.params['path'] - name = module.params['name'] - state = module.params['state'] - backup = module.params['backup'] + arg_spec = { + "name": {"type": "str", "required": True}, + "path": {"type": "str", "default": "/etc/sensu/conf.d/checks.json"}, + "state": {"type": "str", "default": "present", "choices": ["present", "absent"]}, + "backup": {"type": "bool", "default": False}, + "command": {"type": "str"}, + "handlers": {"type": "list", "elements": "str"}, + "subscribers": {"type": "list", "elements": "str"}, + "interval": {"type": "int"}, + "timeout": {"type": "int"}, + "ttl": {"type": "int"}, + "handle": {"type": "bool"}, + "subdue_begin": {"type": "str"}, + "subdue_end": {"type": "str"}, + "dependencies": {"type": "list", "elements": "str"}, + "metric": {"type": "bool", "default": False}, + "standalone": {"type": "bool"}, + "publish": {"type": "bool"}, + "occurrences": {"type": "int"}, + "refresh": {"type": "int"}, + "aggregate": {"type": "bool"}, + "low_flap_threshold": {"type": "int"}, + "high_flap_threshold": {"type": "int"}, + "custom": {"type": "dict"}, + "source": {"type": "str"}, + } + + required_together = [["subdue_begin", "subdue_end"]] + + module = AnsibleModule(argument_spec=arg_spec, required_together=required_together, supports_check_mode=True) + if module.params["state"] != "absent" and module.params["command"] is None: + module.fail_json(msg="missing required arguments: %s" % ",".join(["command"])) + + path = module.params["path"] + name = module.params["name"] + state = module.params["state"] + backup = module.params["backup"] changed, reasons = sensu_check(module, path, name, state, backup) - module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons) + module.exit_json(path=path, changed=changed, msg="OK", name=name, reasons=reasons) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sensu_client.py b/plugins/modules/sensu_client.py index b54bdc864d9..c469c3682eb 100644 --- a/plugins/modules/sensu_client.py +++ b/plugins/modules/sensu_client.py @@ -180,97 +180,106 @@ def main(): module = AnsibleModule( supports_check_mode=True, argument_spec=dict( - state=dict(type='str', choices=['present', 'absent'], default='present'), - name=dict(type='str', ), - address=dict(type='str', ), - subscriptions=dict(type='list', elements="str"), - safe_mode=dict(type='bool', default=False), - redact=dict(type='list', elements="str"), - socket=dict(type='dict'), - keepalives=dict(type='bool', default=True), - keepalive=dict(type='dict'), - registration=dict(type='dict'), - deregister=dict(type='bool'), - deregistration=dict(type='dict'), - ec2=dict(type='dict'), - chef=dict(type='dict'), - puppet=dict(type='dict'), - servicenow=dict(type='dict') + state=dict(type="str", choices=["present", "absent"], default="present"), + name=dict( + type="str", + ), + 
address=dict( + type="str", + ), + subscriptions=dict(type="list", elements="str"), + safe_mode=dict(type="bool", default=False), + redact=dict(type="list", elements="str"), + socket=dict(type="dict"), + keepalives=dict(type="bool", default=True), + keepalive=dict(type="dict"), + registration=dict(type="dict"), + deregister=dict(type="bool"), + deregistration=dict(type="dict"), + ec2=dict(type="dict"), + chef=dict(type="dict"), + puppet=dict(type="dict"), + servicenow=dict(type="dict"), ), - required_if=[ - ['state', 'present', ['subscriptions']] - ] + required_if=[["state", "present", ["subscriptions"]]], ) - state = module.params['state'] + state = module.params["state"] path = "/etc/sensu/conf.d/client.json" - if state == 'absent': + if state == "absent": if os.path.exists(path): if module.check_mode: - msg = f'{path} would have been deleted' + msg = f"{path} would have been deleted" module.exit_json(msg=msg, changed=True) else: try: os.remove(path) - msg = f'{path} deleted successfully' + msg = f"{path} deleted successfully" module.exit_json(msg=msg, changed=True) except OSError as e: - msg = 'Exception when trying to delete {path}: {exception}' - module.fail_json( - msg=msg.format(path=path, exception=str(e))) + msg = "Exception when trying to delete {path}: {exception}" + module.fail_json(msg=msg.format(path=path, exception=str(e))) else: # Idempotency: it is okay if the file doesn't exist - msg = f'{path} already does not exist' + msg = f"{path} already does not exist" module.exit_json(msg=msg) # Build client configuration from module arguments - config = {'client': {}} - args = ['name', 'address', 'subscriptions', 'safe_mode', 'redact', - 'socket', 'keepalives', 'keepalive', 'registration', 'deregister', - 'deregistration', 'ec2', 'chef', 'puppet', 'servicenow'] + config = {"client": {}} + args = [ + "name", + "address", + "subscriptions", + "safe_mode", + "redact", + "socket", + "keepalives", + "keepalive", + "registration", + "deregister", + "deregistration", + "ec2", + "chef", + "puppet", + "servicenow", + ] for arg in args: if arg in module.params and module.params[arg] is not None: - config['client'][arg] = module.params[arg] + config["client"][arg] = module.params[arg] # Load the current config, if there is one, so we can compare current_config = None try: - current_config = json.load(open(path, 'r')) + current_config = json.load(open(path, "r")) except (IOError, ValueError): # File either doesn't exist or it is invalid JSON pass if current_config is not None and current_config == config: # Config is the same, let's not change anything - module.exit_json(msg='Client configuration is already up to date', - config=config['client'], - file=path) + module.exit_json(msg="Client configuration is already up to date", config=config["client"], file=path) # Validate that directory exists before trying to write to it if not module.check_mode and not os.path.exists(os.path.dirname(path)): try: os.makedirs(os.path.dirname(path)) except OSError as e: - module.fail_json(msg=f'Unable to create {os.path.dirname(path)}: {e}') + module.fail_json(msg=f"Unable to create {os.path.dirname(path)}: {e}") if module.check_mode: - module.exit_json(msg='Client configuration would have been updated', - changed=True, - config=config['client'], - file=path) + module.exit_json( + msg="Client configuration would have been updated", changed=True, config=config["client"], file=path + ) try: - with open(path, 'w') as client: + with open(path, "w") as client: client.write(json.dumps(config, indent=4)) - 
module.exit_json(msg='Client configuration updated', - changed=True, - config=config['client'], - file=path) + module.exit_json(msg="Client configuration updated", changed=True, config=config["client"], file=path) except (OSError, IOError) as e: - module.fail_json(msg=f'Unable to write file {path}: {e}') + module.fail_json(msg=f"Unable to write file {path}: {e}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sensu_handler.py b/plugins/modules/sensu_handler.py index 272b2e72bde..6a2b044116d 100644 --- a/plugins/modules/sensu_handler.py +++ b/plugins/modules/sensu_handler.py @@ -181,104 +181,113 @@ def main(): module = AnsibleModule( supports_check_mode=True, argument_spec=dict( - state=dict(type='str', choices=['present', 'absent'], default='present'), - name=dict(type='str', required=True), - type=dict(type='str', choices=['pipe', 'tcp', 'udp', 'transport', 'set']), - filter=dict(type='str'), - filters=dict(type='list', elements='str'), - severities=dict(type='list', elements='str'), - mutator=dict(type='str'), - timeout=dict(type='int', default=10), - handle_silenced=dict(type='bool', default=False), - handle_flapping=dict(type='bool', default=False), - command=dict(type='str'), - socket=dict(type='dict'), - pipe=dict(type='dict'), - handlers=dict(type='list', elements='str'), + state=dict(type="str", choices=["present", "absent"], default="present"), + name=dict(type="str", required=True), + type=dict(type="str", choices=["pipe", "tcp", "udp", "transport", "set"]), + filter=dict(type="str"), + filters=dict(type="list", elements="str"), + severities=dict(type="list", elements="str"), + mutator=dict(type="str"), + timeout=dict(type="int", default=10), + handle_silenced=dict(type="bool", default=False), + handle_flapping=dict(type="bool", default=False), + command=dict(type="str"), + socket=dict(type="dict"), + pipe=dict(type="dict"), + handlers=dict(type="list", elements="str"), ), required_if=[ - ['state', 'present', ['type']], - ['type', 'pipe', ['command']], - ['type', 'tcp', ['socket']], - ['type', 'udp', ['socket']], - ['type', 'transport', ['pipe']], - ['type', 'set', ['handlers']] - ] + ["state", "present", ["type"]], + ["type", "pipe", ["command"]], + ["type", "tcp", ["socket"]], + ["type", "udp", ["socket"]], + ["type", "transport", ["pipe"]], + ["type", "set", ["handlers"]], + ], ) - state = module.params['state'] - name = module.params['name'] - path = f'/etc/sensu/conf.d/handlers/{name}.json' + state = module.params["state"] + name = module.params["name"] + path = f"/etc/sensu/conf.d/handlers/{name}.json" - if state == 'absent': + if state == "absent": if os.path.exists(path): if module.check_mode: - msg = f'{path} would have been deleted' + msg = f"{path} would have been deleted" module.exit_json(msg=msg, changed=True) else: try: os.remove(path) - msg = f'{path} deleted successfully' + msg = f"{path} deleted successfully" module.exit_json(msg=msg, changed=True) except OSError as e: - msg = 'Exception when trying to delete {path}: {exception}' - module.fail_json( - msg=msg.format(path=path, exception=str(e))) + msg = "Exception when trying to delete {path}: {exception}" + module.fail_json(msg=msg.format(path=path, exception=str(e))) else: # Idempotency: it is okay if the file doesn't exist - msg = f'{path} already does not exist' + msg = f"{path} already does not exist" module.exit_json(msg=msg) # Build handler configuration from module arguments - config = {'handlers': {name: {}}} - args = ['type', 'filter', 'filters', 
'severities', 'mutator', 'timeout', - 'handle_silenced', 'handle_flapping', 'command', 'socket', - 'pipe', 'handlers'] + config = {"handlers": {name: {}}} + args = [ + "type", + "filter", + "filters", + "severities", + "mutator", + "timeout", + "handle_silenced", + "handle_flapping", + "command", + "socket", + "pipe", + "handlers", + ] for arg in args: if arg in module.params and module.params[arg] is not None: - config['handlers'][name][arg] = module.params[arg] + config["handlers"][name][arg] = module.params[arg] # Load the current config, if there is one, so we can compare current_config = None try: - current_config = json.load(open(path, 'r')) + current_config = json.load(open(path, "r")) except (IOError, ValueError): # File either doesn't exist or it is invalid JSON pass if current_config is not None and current_config == config: # Config is the same, let's not change anything - module.exit_json(msg='Handler configuration is already up to date', - config=config['handlers'][name], - file=path, - name=name) + module.exit_json( + msg="Handler configuration is already up to date", config=config["handlers"][name], file=path, name=name + ) # Validate that directory exists before trying to write to it if not module.check_mode and not os.path.exists(os.path.dirname(path)): try: os.makedirs(os.path.dirname(path)) except OSError as e: - module.fail_json(msg=f'Unable to create {os.path.dirname(path)}: {e}') + module.fail_json(msg=f"Unable to create {os.path.dirname(path)}: {e}") if module.check_mode: - module.exit_json(msg='Handler configuration would have been updated', - changed=True, - config=config['handlers'][name], - file=path, - name=name) + module.exit_json( + msg="Handler configuration would have been updated", + changed=True, + config=config["handlers"][name], + file=path, + name=name, + ) try: - with open(path, 'w') as handler: + with open(path, "w") as handler: handler.write(json.dumps(config, indent=4)) - module.exit_json(msg='Handler configuration updated', - changed=True, - config=config['handlers'][name], - file=path, - name=name) + module.exit_json( + msg="Handler configuration updated", changed=True, config=config["handlers"][name], file=path, name=name + ) except (OSError, IOError) as e: - module.fail_json(msg=f'Unable to write file {path}: {e}') + module.fail_json(msg=f"Unable to write file {path}: {e}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sensu_silence.py b/plugins/modules/sensu_silence.py index f3270ab506a..9e4fa7e1af9 100644 --- a/plugins/modules/sensu_silence.py +++ b/plugins/modules/sensu_silence.py @@ -109,14 +109,14 @@ def query(module, url, check, subscription): headers = { - 'Content-Type': 'application/json', + "Content-Type": "application/json", } - url = url + '/silenced' + url = url + "/silenced" request_data = { - 'check': check, - 'subscription': subscription, + "check": check, + "subscription": subscription, } # Remove keys with None value @@ -124,15 +124,10 @@ def query(module, url, check, subscription): if v is None: del request_data[k] - response, info = fetch_url( - module, url, method='GET', - headers=headers, data=json.dumps(request_data) - ) + response, info = fetch_url(module, url, method="GET", headers=headers, data=json.dumps(request_data)) - if info['status'] == 500: - module.fail_json( - msg="Failed to query silence %s. Reason: %s" % (subscription, info) - ) + if info["status"] == 500: + module.fail_json(msg="Failed to query silence %s. 
Reason: %s" % (subscription, info)) try: json_out = json.loads(to_native(response.read())) @@ -146,10 +141,10 @@ def clear(module, url, check, subscription): # Test if silence exists before clearing (rc, out, changed) = query(module, url, check, subscription) - d = {i['subscription']: i['check'] for i in out} + d = {i["subscription"]: i["check"] for i in out} subscription_exists = subscription in d if check and subscription_exists: - exists = (check == d[subscription]) + exists = check == d[subscription] else: exists = subscription_exists @@ -161,14 +156,14 @@ def clear(module, url, check, subscription): # module.check_mode is inherited from the AnsibleMOdule class if not module.check_mode: headers = { - 'Content-Type': 'application/json', + "Content-Type": "application/json", } - url = url + '/silenced/clear' + url = url + "/silenced/clear" request_data = { - 'check': check, - 'subscription': subscription, + "check": check, + "subscription": subscription, } # Remove keys with None value @@ -176,15 +171,10 @@ def clear(module, url, check, subscription): if v is None: del request_data[k] - response, info = fetch_url( - module, url, method='POST', - headers=headers, data=json.dumps(request_data) - ) + response, info = fetch_url(module, url, method="POST", headers=headers, data=json.dumps(request_data)) - if info['status'] != 204: - module.fail_json( - msg="Failed to silence %s. Reason: %s" % (subscription, info) - ) + if info["status"] != 204: + module.fail_json(msg="Failed to silence %s. Reason: %s" % (subscription, info)) try: json_out = json.loads(to_native(response.read())) @@ -195,44 +185,34 @@ def clear(module, url, check, subscription): return False, out, True -def create( - module, url, check, creator, expire, - expire_on_resolve, reason, subscription): +def create(module, url, check, creator, expire, expire_on_resolve, reason, subscription): (rc, out, changed) = query(module, url, check, subscription) for i in out: - if i['subscription'] == subscription: + if i["subscription"] == subscription: if ( - (check is None or check == i['check']) and - ( - creator == '' or - creator == i['creator']) and - ( - reason == '' or - reason == i['reason']) and - ( - expire is None or expire == i['expire']) and - ( - expire_on_resolve is None or - expire_on_resolve == i['expire_on_resolve'] - ) + (check is None or check == i["check"]) + and (creator == "" or creator == i["creator"]) + and (reason == "" or reason == i["reason"]) + and (expire is None or expire == i["expire"]) + and (expire_on_resolve is None or expire_on_resolve == i["expire_on_resolve"]) ): return False, out, False # module.check_mode is inherited from the AnsibleMOdule class if not module.check_mode: headers = { - 'Content-Type': 'application/json', + "Content-Type": "application/json", } - url = url + '/silenced' + url = url + "/silenced" request_data = { - 'check': check, - 'creator': creator, - 'expire': expire, - 'expire_on_resolve': expire_on_resolve, - 'reason': reason, - 'subscription': subscription, + "check": check, + "creator": creator, + "expire": expire, + "expire_on_resolve": expire_on_resolve, + "reason": reason, + "subscription": subscription, } # Remove keys with None value @@ -240,16 +220,10 @@ def create( if v is None: del request_data[k] - response, info = fetch_url( - module, url, method='POST', - headers=headers, data=json.dumps(request_data) - ) + response, info = fetch_url(module, url, method="POST", headers=headers, data=json.dumps(request_data)) - if info['status'] != 201: - module.fail_json( - msg="Failed 
to silence %s. Reason: %s" % - (subscription, info['msg']) - ) + if info["status"] != 201: + module.fail_json(msg="Failed to silence %s. Reason: %s" % (subscription, info["msg"])) try: json_out = json.loads(to_native(response.read())) @@ -265,32 +239,29 @@ def main(): argument_spec=dict( check=dict(), creator=dict(), - expire=dict(type='int'), - expire_on_resolve=dict(type='bool'), + expire=dict(type="int"), + expire_on_resolve=dict(type="bool"), reason=dict(), - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), subscription=dict(required=True), - url=dict(default='http://127.0.01:4567'), + url=dict(default="http://127.0.01:4567"), ), - supports_check_mode=True + supports_check_mode=True, ) - url = module.params['url'] - check = module.params['check'] - creator = module.params['creator'] - expire = module.params['expire'] - expire_on_resolve = module.params['expire_on_resolve'] - reason = module.params['reason'] - subscription = module.params['subscription'] - state = module.params['state'] - - if state == 'present': - (rc, out, changed) = create( - module, url, check, creator, - expire, expire_on_resolve, reason, subscription - ) - - if state == 'absent': + url = module.params["url"] + check = module.params["check"] + creator = module.params["creator"] + expire = module.params["expire"] + expire_on_resolve = module.params["expire_on_resolve"] + reason = module.params["reason"] + subscription = module.params["subscription"] + state = module.params["state"] + + if state == "present": + (rc, out, changed) = create(module, url, check, creator, expire, expire_on_resolve, reason, subscription) + + if state == "absent": (rc, out, changed) = clear(module, url, check, subscription) if rc != 0: @@ -298,5 +269,5 @@ def main(): module.exit_json(msg="success", result=out, changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sensu_subscription.py b/plugins/modules/sensu_subscription.py index 9117fdb7b51..3ac22bced1f 100644 --- a/plugins/modules/sensu_subscription.py +++ b/plugins/modules/sensu_subscription.py @@ -78,7 +78,7 @@ from ansible.module_utils.common.text.converters import to_native -def sensu_subscription(module, path, name, state='present', backup=False): +def sensu_subscription(module, path, name, state="present", backup=False): changed = False reasons = [] @@ -86,42 +86,42 @@ def sensu_subscription(module, path, name, state='present', backup=False): config = json.load(open(path)) except IOError as e: if e.errno == 2: # File not found, non-fatal - if state == 'absent': + if state == "absent": reasons.append("file did not exist and state is 'absent'") return changed, reasons config = {} else: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) except ValueError: - msg = f'{path} contains invalid JSON' + msg = f"{path} contains invalid JSON" module.fail_json(msg=msg) - if 'client' not in config: - if state == 'absent': + if "client" not in config: + if state == "absent": reasons.append("'client' did not exist and state is 'absent'") return changed, reasons - config['client'] = {} + config["client"] = {} changed = True reasons.append("'client' did not exist") - if 'subscriptions' not in config['client']: - if state == 'absent': + if "subscriptions" not in config["client"]: + if state == "absent": reasons.append("'client.subscriptions' did not exist and state is 'absent'") return changed, reasons - config['client']['subscriptions'] = [] + 
config["client"]["subscriptions"] = [] changed = True reasons.append("'client.subscriptions' did not exist") - if name not in config['client']['subscriptions']: - if state == 'absent': + if name not in config["client"]["subscriptions"]: + if state == "absent": reasons.append("channel subscription was absent") return changed, reasons - config['client']['subscriptions'].append(name) + config["client"]["subscriptions"].append(name) changed = True reasons.append("channel subscription was absent and state is 'present'") else: - if state == 'absent': - config['client']['subscriptions'].remove(name) + if state == "absent": + config["client"]["subscriptions"].remove(name) changed = True reasons.append("channel subscription was present and state is 'absent'") @@ -129,33 +129,34 @@ def sensu_subscription(module, path, name, state='present', backup=False): if backup: module.backup_local(path) try: - open(path, 'w').write(json.dumps(config, indent=2) + '\n') + open(path, "w").write(json.dumps(config, indent=2) + "\n") except IOError as e: - module.fail_json(msg='Failed to write to file %s: %s' % (path, to_native(e)), - exception=traceback.format_exc()) + module.fail_json( + msg="Failed to write to file %s: %s" % (path, to_native(e)), exception=traceback.format_exc() + ) return changed, reasons def main(): - arg_spec = {'name': {'type': 'str', 'required': True}, - 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'}, - 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, - 'backup': {'type': 'bool', 'default': False}, - } + arg_spec = { + "name": {"type": "str", "required": True}, + "path": {"type": "str", "default": "/etc/sensu/conf.d/subscriptions.json"}, + "state": {"type": "str", "default": "present", "choices": ["present", "absent"]}, + "backup": {"type": "bool", "default": False}, + } - module = AnsibleModule(argument_spec=arg_spec, - supports_check_mode=True) + module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) - path = module.params['path'] - name = module.params['name'] - state = module.params['state'] - backup = module.params['backup'] + path = module.params["path"] + name = module.params["name"] + state = module.params["state"] + backup = module.params["backup"] changed, reasons = sensu_subscription(module, path, name, state, backup) - module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons) + module.exit_json(path=path, name=name, changed=changed, msg="OK", reasons=reasons) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/seport.py b/plugins/modules/seport.py index 5f03d65e2c1..28c4ffae1de 100644 --- a/plugins/modules/seport.py +++ b/plugins/modules/seport.py @@ -114,6 +114,7 @@ SELINUX_IMP_ERR = None try: import selinux + HAVE_SELINUX = True except ImportError: SELINUX_IMP_ERR = traceback.format_exc() @@ -122,6 +123,7 @@ SEOBJECT_IMP_ERR = None try: import seobject + HAVE_SEOBJECT = True except ImportError: SEOBJECT_IMP_ERR = traceback.format_exc() @@ -135,7 +137,7 @@ def get_runtime_status(ignore_selinux_state=False): def semanage_port_get_ports(seport, setype, proto, local): - """ Get the list of ports that have the specified type definition. + """Get the list of ports that have the specified type definition. :param community.general.seport: Instance of seobject.portRecords @@ -156,7 +158,7 @@ def semanage_port_get_ports(seport, setype, proto, local): def semanage_port_get_type(seport, port, proto): - """ Get the SELinux type of the specified port. 
+ """Get the SELinux type of the specified port. :param community.general.seport: Instance of seobject.portRecords @@ -170,7 +172,7 @@ def semanage_port_get_type(seport, port, proto): :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found. """ if isinstance(port, str): - ports = port.split('-', 1) + ports = port.split("-", 1) if len(ports) == 1: ports.extend(ports) else: @@ -182,8 +184,8 @@ def semanage_port_get_type(seport, port, proto): return records.get(key) -def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore='', local=False): - """ Add SELinux port type definition to the policy. +def semanage_port_add(module, ports, proto, setype, do_reload, serange="s0", sestore="", local=False): + """Add SELinux port type definition to the policy. :type module: AnsibleModule :param module: Ansible module @@ -233,8 +235,8 @@ def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', ses return change -def semanage_port_del(module, ports, proto, setype, do_reload, sestore='', local=False): - """ Delete SELinux port type definition from the policy. +def semanage_port_del(module, ports, proto, setype, do_reload, sestore="", local=False): + """Delete SELinux port type definition from the policy. :type module: AnsibleModule :param module: Ansible module @@ -277,13 +279,13 @@ def semanage_port_del(module, ports, proto, setype, do_reload, sestore='', local def main(): module = AnsibleModule( argument_spec=dict( - ignore_selinux_state=dict(type='bool', default=False), - ports=dict(type='list', elements='str', required=True), - proto=dict(type='str', required=True, choices=['tcp', 'udp']), - setype=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), - reload=dict(type='bool', default=True), - local=dict(type='bool', default=False) + ignore_selinux_state=dict(type="bool", default=False), + ports=dict(type="list", elements="str", required=True), + proto=dict(type="str", required=True, choices=["tcp", "udp"]), + setype=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["absent", "present"]), + reload=dict(type="bool", default=True), + local=dict(type="bool", default=False), ), supports_check_mode=True, ) @@ -294,34 +296,34 @@ def main(): if not HAVE_SEOBJECT: module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR) - ignore_selinux_state = module.params['ignore_selinux_state'] + ignore_selinux_state = module.params["ignore_selinux_state"] if not get_runtime_status(ignore_selinux_state): module.fail_json(msg="SELinux is disabled on this host.") - ports = module.params['ports'] - proto = module.params['proto'] - setype = module.params['setype'] - state = module.params['state'] - do_reload = module.params['reload'] - local = module.params['local'] + ports = module.params["ports"] + proto = module.params["proto"] + setype = module.params["setype"] + state = module.params["state"] + do_reload = module.params["reload"] + local = module.params["local"] result = { - 'ports': ports, - 'proto': proto, - 'setype': setype, - 'state': state, + "ports": ports, + "proto": proto, + "setype": setype, + "state": state, } - if state == 'present': - result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload, local=local) - elif state == 'absent': - result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload, local=local) + if state == "present": + result["changed"] = 
semanage_port_add(module, ports, proto, setype, do_reload, local=local) + elif state == "absent": + result["changed"] = semanage_port_del(module, ports, proto, setype, do_reload, local=local) else: module.fail_json(msg=f'Invalid value of argument "state": {state}') module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/serverless.py b/plugins/modules/serverless.py index 5f3572e0222..92766d29f5f 100644 --- a/plugins/modules/serverless.py +++ b/plugins/modules/serverless.py @@ -122,6 +122,7 @@ try: import yaml + HAS_YAML = True except ImportError: HAS_YAML = False @@ -130,8 +131,8 @@ def read_serverless_config(module): - path = module.params.get('service_path') - full_path = os.path.join(path, 'serverless.yml') + path = module.params.get("service_path") + full_path = os.path.join(path, "serverless.yml") try: with open(full_path) as sls_config: @@ -143,7 +144,7 @@ def read_serverless_config(module): def get_service_name(module, stage): config = read_serverless_config(module) - if config.get('service') is None: + if config.get("service") is None: module.fail_json(msg="Could not read `service` key from serverless.yml file") if stage: @@ -155,66 +156,68 @@ def get_service_name(module, stage): def main(): module = AnsibleModule( argument_spec=dict( - service_path=dict(type='path', required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), - region=dict(type='str', default=''), - stage=dict(type='str', default=''), - deploy=dict(type='bool', default=True), - serverless_bin_path=dict(type='path'), - force=dict(type='bool', default=False), - verbose=dict(type='bool', default=False), + service_path=dict(type="path", required=True), + state=dict(type="str", default="present", choices=["absent", "present"]), + region=dict(type="str", default=""), + stage=dict(type="str", default=""), + deploy=dict(type="bool", default=True), + serverless_bin_path=dict(type="path"), + force=dict(type="bool", default=False), + verbose=dict(type="bool", default=False), ), ) if not HAS_YAML: - module.fail_json(msg='yaml is required for this module') + module.fail_json(msg="yaml is required for this module") - service_path = module.params.get('service_path') - state = module.params.get('state') - region = module.params.get('region') - stage = module.params.get('stage') - deploy = module.params.get('deploy', True) - force = module.params.get('force', False) - verbose = module.params.get('verbose', False) - serverless_bin_path = module.params.get('serverless_bin_path') + service_path = module.params.get("service_path") + state = module.params.get("state") + region = module.params.get("region") + stage = module.params.get("stage") + deploy = module.params.get("deploy", True) + force = module.params.get("force", False) + verbose = module.params.get("verbose", False) + serverless_bin_path = module.params.get("serverless_bin_path") if serverless_bin_path is not None: command = f"{serverless_bin_path} " else: command = f"{module.get_bin_path('serverless')} " - if state == 'present': - command += 'deploy ' - elif state == 'absent': - command += 'remove ' + if state == "present": + command += "deploy " + elif state == "absent": + command += "remove " else: module.fail_json(msg=f"State must either be 'present' or 'absent'. 
Received: {state}") - if state == 'present': + if state == "present": if not deploy: - command += '--noDeploy ' + command += "--noDeploy " elif force: - command += '--force ' + command += "--force " if region: - command += f'--region {region} ' + command += f"--region {region} " if stage: - command += f'--stage {stage} ' + command += f"--stage {stage} " if verbose: - command += '--verbose ' + command += "--verbose " rc, out, err = module.run_command(command, cwd=service_path) if rc != 0: - if state == 'absent' and f"-{stage}' does not exist" in out: - module.exit_json(changed=False, state='absent', command=command, - out=out, service_name=get_service_name(module, stage)) + if state == "absent" and f"-{stage}' does not exist" in out: + module.exit_json( + changed=False, state="absent", command=command, out=out, service_name=get_service_name(module, stage) + ) module.fail_json(msg=f"Failure when executing Serverless command. Exited {rc}.\nstdout: {out}\nstderr: {err}") # gather some facts about the deployment - module.exit_json(changed=True, state='present', out=out, command=command, - service_name=get_service_name(module, stage)) + module.exit_json( + changed=True, state="present", out=out, command=command, service_name=get_service_name(module, stage) + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/simpleinit_msb.py b/plugins/modules/simpleinit_msb.py index 642bcc0797d..9b8c49eb6a3 100644 --- a/plugins/modules/simpleinit_msb.py +++ b/plugins/modules/simpleinit_msb.py @@ -86,9 +86,9 @@ class SimpleinitMSB: def __init__(self, module): self.module = module - self.name = module.params['name'] - self.state = module.params['state'] - self.enable = module.params['enabled'] + self.name = module.params["name"] + self.state = module.params["state"] + self.enable = module.params["enabled"] self.changed = False self.running = None self.action = None @@ -112,54 +112,54 @@ def check_service_changed(self): elif self.state == "restarted": self.svc_change = True if self.module.check_mode and self.svc_change: - self.module.exit_json(changed=True, msg='service state changed') + self.module.exit_json(changed=True, msg="service state changed") def modify_service_state(self): # Only do something if state will change if self.svc_change: # Control service - if self.state in ['started', 'running']: + if self.state in ["started", "running"]: self.action = "start" - elif not self.running and self.state == 'reloaded': + elif not self.running and self.state == "reloaded": self.action = "start" - elif self.state == 'stopped': + elif self.state == "stopped": self.action = "stop" - elif self.state == 'reloaded': + elif self.state == "reloaded": self.action = "reload" - elif self.state == 'restarted': + elif self.state == "restarted": self.action = "restart" if self.module.check_mode: - self.module.exit_json(changed=True, msg='changing service state') + self.module.exit_json(changed=True, msg="changing service state") return self.service_control() else: # If nothing needs to change just say all is well rc = 0 - err = '' - out = '' + err = "" + out = "" return rc, out, err def get_service_tools(self): - paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin'] - binaries = ['telinit'] + paths = ["/sbin", "/usr/sbin", "/bin", "/usr/bin"] + binaries = ["telinit"] location = dict() for binary in binaries: location[binary] = self.module.get_bin_path(binary, opt_dirs=paths) - if location.get('telinit', False) and os.path.exists("/etc/init.d/smgl_init"): - self.telinit_cmd = 
location['telinit'] + if location.get("telinit", False) and os.path.exists("/etc/init.d/smgl_init"): + self.telinit_cmd = location["telinit"] if self.telinit_cmd is None: - self.module.fail_json(msg='cannot find telinit script for simpleinit-msb, aborting...') + self.module.fail_json(msg="cannot find telinit script for simpleinit-msb, aborting...") def get_service_status(self): self.action = "status" rc, status_stdout, status_stderr = self.service_control() - if self.running is None and status_stdout.count('\n') <= 1: - cleanout = status_stdout.lower().replace(self.name.lower(), '') + if self.running is None and status_stdout.count("\n") <= 1: + cleanout = status_stdout.lower().replace(self.name.lower(), "") if "is not running" in cleanout: self.running = False @@ -180,10 +180,10 @@ def service_enable(self): self.changed = True for line in err.splitlines(): - if self.enable and line.find('already enabled') != -1: + if self.enable and line.find("already enabled") != -1: self.changed = False break - if not self.enable and line.find('already disabled') != -1: + if not self.enable and line.find("already disabled") != -1: self.changed = False break @@ -199,7 +199,7 @@ def service_enabled(self): service_enabled = False if self.enable else True - rex = re.compile(rf'^{self.name}$') + rex = re.compile(rf"^{self.name}$") for line in out.splitlines(): if rex.match(line): @@ -213,7 +213,7 @@ def service_exists(self): service_exists = False - rex = re.compile(rf'^\w+\s+{self.name}$') + rex = re.compile(rf"^\w+\s+{self.name}$") for line in out.splitlines(): if rex.match(line): @@ -221,7 +221,7 @@ def service_exists(self): break if not service_exists: - self.module.fail_json(msg=f'telinit could not find the requested service: {self.name}') + self.module.fail_json(msg=f"telinit could not find the requested service: {self.name}") def service_control(self): self.service_exists() @@ -236,12 +236,12 @@ def service_control(self): def build_module(): return AnsibleModule( argument_spec=dict( - name=dict(required=True, aliases=['service']), - state=dict(choices=['running', 'started', 'stopped', 'restarted', 'reloaded']), - enabled=dict(type='bool'), + name=dict(required=True, aliases=["service"]), + state=dict(choices=["running", "started", "stopped", "restarted", "reloaded"]), + enabled=dict(type="bool"), ), supports_check_mode=True, - required_one_of=[['state', 'enabled']], + required_one_of=[["state", "enabled"]], ) @@ -251,25 +251,25 @@ def main(): service = SimpleinitMSB(module) rc = 0 - out = '' - err = '' + out = "" + err = "" result = {} - result['name'] = service.name + result["name"] = service.name # Find service management tools service.get_service_tools() # Enable/disable service startup at boot if requested - if service.module.params['enabled'] is not None: + if service.module.params["enabled"] is not None: service.service_enable() - result['enabled'] = service.enable + result["enabled"] = service.enable - if module.params['state'] is None: + if module.params["state"] is None: # Not changing the running state, so bail out now. 
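get_service_status above has to infer run state by scraping telinit's human-oriented output: blank out the service name so it cannot confuse the match, then look for 'is not running'. A condensed sketch; the 'is running' counterpart branch is elided in the hunk, so treat it as an assumption:

```python
def parse_status(service_name, status_stdout):
    """Scrape a one-line telinit status message into True/False/None
    (running / not running / could not tell)."""
    if status_stdout.count("\n") > 1:
        return None  # multi-line output: this parser does not apply

    # Remove the service name so e.g. a service literally called
    # "running" cannot fool the substring checks below.
    cleanout = status_stdout.lower().replace(service_name.lower(), "")

    if "is not running" in cleanout:
        return False
    if "is running" in cleanout:  # assumed counterpart branch
        return True
    return None


assert parse_status("smgl-suspend", "smgl-suspend is not running") is False
assert parse_status("sshd", "sshd is running") is True
```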
- result['changed'] = service.changed + result["changed"] = service.changed module.exit_json(**result) - result['state'] = service.state + result["state"] = service.state service.get_service_status() @@ -285,28 +285,28 @@ def main(): else: module.fail_json(msg=out) - result['changed'] = service.changed | service.svc_change - if service.module.params['enabled'] is not None: - result['enabled'] = service.module.params['enabled'] + result["changed"] = service.changed | service.svc_change + if service.module.params["enabled"] is not None: + result["enabled"] = service.module.params["enabled"] - if not service.module.params['state']: + if not service.module.params["state"]: status = service.get_service_status() if status is None: - result['state'] = 'absent' + result["state"] = "absent" elif status is False: - result['state'] = 'started' + result["state"] = "started" else: - result['state'] = 'stopped' + result["state"] = "stopped" else: # as we may have just bounced the service the service command may not # report accurate state at this moment so just show what we ran - if service.module.params['state'] in ['started', 'restarted', 'running', 'reloaded']: - result['state'] = 'started' + if service.module.params["state"] in ["started", "restarted", "running", "reloaded"]: + result["state"] = "started" else: - result['state'] = 'stopped' + result["state"] = "stopped" module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sl_vm.py b/plugins/modules/sl_vm.py index 9ae0def5c47..0861ab1a5df 100644 --- a/plugins/modules/sl_vm.py +++ b/plugins/modules/sl_vm.py @@ -289,11 +289,48 @@ # TODO: get this info from API -STATES = ['present', 'absent'] -DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'dal12', 'dal13', 'fra02', - 'fra04', 'fra05', 'hkg02', 'hou02', 'lon02', 'lon04', 'lon06', 'mel01', 'mex01', 'mil01', 'mon01', - 'osl01', 'par01', 'sao01', 'sea01', 'seo01', 'sjc01', 'sjc03', 'sjc04', 'sng01', 'syd01', 'syd04', - 'tok02', 'tor01', 'wdc01', 'wdc04', 'wdc06', 'wdc07'] +STATES = ["present", "absent"] +DATACENTERS = [ + "ams01", + "ams03", + "che01", + "dal01", + "dal05", + "dal06", + "dal09", + "dal10", + "dal12", + "dal13", + "fra02", + "fra04", + "fra05", + "hkg02", + "hou02", + "lon02", + "lon04", + "lon06", + "mel01", + "mex01", + "mil01", + "mon01", + "osl01", + "par01", + "sao01", + "sea01", + "seo01", + "sjc01", + "sjc03", + "sjc04", + "sng01", + "syd01", + "syd04", + "tok02", + "tor01", + "wdc01", + "wdc04", + "wdc06", + "wdc07", +] CPU_SIZES = [1, 2, 4, 8, 16, 32, 56] MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808] INITIALDISK_SIZES = [25, 100] @@ -303,52 +340,51 @@ def create_virtual_instance(module): - instances = vsManager.list_instances( - hostname=module.params.get('hostname'), - domain=module.params.get('domain'), - datacenter=module.params.get('datacenter') + hostname=module.params.get("hostname"), + domain=module.params.get("domain"), + datacenter=module.params.get("datacenter"), ) if instances: return False, None # Check if OS or Image Template is provided (Can't be both, defaults to OS) - if module.params.get('os_code') is not None and module.params.get('os_code') != '': - module.params['image_id'] = '' - elif module.params.get('image_id') is not None and module.params.get('image_id') != '': - module.params['os_code'] = '' - module.params['disks'] = [] # Blank out disks since it will use the template + if 
module.params.get("os_code") is not None and module.params.get("os_code") != "": + module.params["image_id"] = "" + elif module.params.get("image_id") is not None and module.params.get("image_id") != "": + module.params["os_code"] = "" + module.params["disks"] = [] # Blank out disks since it will use the template else: return False, None - tags = module.params.get('tags') + tags = module.params.get("tags") if isinstance(tags, list): - tags = ','.join(map(str, module.params.get('tags'))) + tags = ",".join(map(str, module.params.get("tags"))) instance = vsManager.create_instance( - hostname=module.params.get('hostname'), - domain=module.params.get('domain'), - cpus=module.params.get('cpus'), - memory=module.params.get('memory'), - flavor=module.params.get('flavor'), - hourly=module.params.get('hourly'), - datacenter=module.params.get('datacenter'), - os_code=module.params.get('os_code'), - image_id=module.params.get('image_id'), - local_disk=module.params.get('local_disk'), - disks=module.params.get('disks'), - ssh_keys=module.params.get('ssh_keys'), - nic_speed=module.params.get('nic_speed'), - private=module.params.get('private'), - public_vlan=module.params.get('public_vlan'), - private_vlan=module.params.get('private_vlan'), - dedicated=module.params.get('dedicated'), - post_uri=module.params.get('post_uri'), + hostname=module.params.get("hostname"), + domain=module.params.get("domain"), + cpus=module.params.get("cpus"), + memory=module.params.get("memory"), + flavor=module.params.get("flavor"), + hourly=module.params.get("hourly"), + datacenter=module.params.get("datacenter"), + os_code=module.params.get("os_code"), + image_id=module.params.get("image_id"), + local_disk=module.params.get("local_disk"), + disks=module.params.get("disks"), + ssh_keys=module.params.get("ssh_keys"), + nic_speed=module.params.get("nic_speed"), + private=module.params.get("private"), + public_vlan=module.params.get("public_vlan"), + private_vlan=module.params.get("private_vlan"), + dedicated=module.params.get("dedicated"), + post_uri=module.params.get("post_uri"), tags=tags, ) - if instance is not None and instance['id'] > 0: + if instance is not None and instance["id"] > 0: return True, instance else: return False, None @@ -357,7 +393,7 @@ def create_virtual_instance(module): def wait_for_instance(module, id): instance = None completed = False - wait_timeout = time.time() + module.params.get('wait_time') + wait_timeout = time.time() + module.params.get("wait_time") while not completed and wait_timeout > time.time(): try: completed = vsManager.wait_for_ready(id, 10, 2) @@ -371,19 +407,23 @@ def wait_for_instance(module, id): def cancel_instance(module): canceled = True - if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')): - tags = module.params.get('tags') + if module.params.get("instance_id") is None and ( + module.params.get("tags") or module.params.get("hostname") or module.params.get("domain") + ): + tags = module.params.get("tags") if isinstance(tags, str): - tags = [module.params.get('tags')] - instances = vsManager.list_instances(tags=tags, hostname=module.params.get('hostname'), domain=module.params.get('domain')) + tags = [module.params.get("tags")] + instances = vsManager.list_instances( + tags=tags, hostname=module.params.get("hostname"), domain=module.params.get("domain") + ) for instance in instances: try: - vsManager.cancel_instance(instance['id']) + vsManager.cancel_instance(instance["id"]) except 
Exception: canceled = False - elif module.params.get('instance_id') and module.params.get('instance_id') != 0: + elif module.params.get("instance_id") and module.params.get("instance_id") != 0: try: - vsManager.cancel_instance(instance['id']) + vsManager.cancel_instance(instance["id"]) except Exception: canceled = False else: @@ -393,48 +433,47 @@ def cancel_instance(module): def main(): - module = AnsibleModule( argument_spec=dict( - instance_id=dict(type='str'), - hostname=dict(type='str'), - domain=dict(type='str'), - datacenter=dict(type='str', choices=DATACENTERS), - tags=dict(type='str'), - hourly=dict(type='bool', default=True), - private=dict(type='bool', default=False), - dedicated=dict(type='bool', default=False), - local_disk=dict(type='bool', default=True), - cpus=dict(type='int', choices=CPU_SIZES), - memory=dict(type='int', choices=MEMORY_SIZES), - flavor=dict(type='str'), - disks=dict(type='list', elements='int', default=[25]), - os_code=dict(type='str'), - image_id=dict(type='str'), - nic_speed=dict(type='int', choices=NIC_SPEEDS), - public_vlan=dict(type='str'), - private_vlan=dict(type='str'), - ssh_keys=dict(type='list', elements='str', default=[], no_log=False), - post_uri=dict(type='str'), - state=dict(type='str', default='present', choices=STATES), - wait=dict(type='bool', default=True), - wait_time=dict(type='int', default=600), + instance_id=dict(type="str"), + hostname=dict(type="str"), + domain=dict(type="str"), + datacenter=dict(type="str", choices=DATACENTERS), + tags=dict(type="str"), + hourly=dict(type="bool", default=True), + private=dict(type="bool", default=False), + dedicated=dict(type="bool", default=False), + local_disk=dict(type="bool", default=True), + cpus=dict(type="int", choices=CPU_SIZES), + memory=dict(type="int", choices=MEMORY_SIZES), + flavor=dict(type="str"), + disks=dict(type="list", elements="int", default=[25]), + os_code=dict(type="str"), + image_id=dict(type="str"), + nic_speed=dict(type="int", choices=NIC_SPEEDS), + public_vlan=dict(type="str"), + private_vlan=dict(type="str"), + ssh_keys=dict(type="list", elements="str", default=[], no_log=False), + post_uri=dict(type="str"), + state=dict(type="str", default="present", choices=STATES), + wait=dict(type="bool", default=True), + wait_time=dict(type="int", default=600), ) ) if not HAS_SL: - module.fail_json(msg='softlayer python library required for this module') + module.fail_json(msg="softlayer python library required for this module") - if module.params.get('state') == 'absent': + if module.params.get("state") == "absent": (changed, instance) = cancel_instance(module) - elif module.params.get('state') == 'present': + elif module.params.get("state") == "present": (changed, instance) = create_virtual_instance(module) - if module.params.get('wait') is True and instance: - (changed, instance) = wait_for_instance(module, instance['id']) + if module.params.get("wait") is True and instance: + (changed, instance) = wait_for_instance(module, instance["id"]) module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__))) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/slack.py b/plugins/modules/slack.py index fb96c783cab..78eb6c4f842 100644 --- a/plugins/modules/slack.py +++ b/plugins/modules/slack.py @@ -269,22 +269,22 @@ from ansible.module_utils.urls import fetch_url from urllib.parse import urlencode -OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' -SLACK_INCOMING_WEBHOOK 
= 'https://hooks.%s/services/%s' -SLACK_POSTMESSAGE_WEBAPI = 'https://%s/api/chat.postMessage' -SLACK_UPDATEMESSAGE_WEBAPI = 'https://%s/api/chat.update' -SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://%s/api/conversations.history' +OLD_SLACK_INCOMING_WEBHOOK = "https://%s/services/hooks/incoming-webhook?token=%s" +SLACK_INCOMING_WEBHOOK = "https://hooks.%s/services/%s" +SLACK_POSTMESSAGE_WEBAPI = "https://%s/api/chat.postMessage" +SLACK_UPDATEMESSAGE_WEBAPI = "https://%s/api/chat.update" +SLACK_CONVERSATIONS_HISTORY_WEBAPI = "https://%s/api/conversations.history" # Escaping quotes and apostrophes to avoid ending string prematurely in ansible call. # We do not escape other characters used as Slack metacharacters (e.g. &, <, >). escape_table = { - '"': "\"", - "'": "\'", + '"': '"', + "'": "'", } def is_valid_hex_color(color_choice): - if re.match(r'^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$', color_choice): + if re.match(r"^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$", color_choice): return True return False @@ -310,8 +310,21 @@ def recursive_escape_quotes(obj, keys): return escaped -def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names, - parse, color, attachments, blocks, message_id, prepend_hash): +def build_payload_for_slack( + text, + channel, + thread_id, + username, + icon_url, + icon_emoji, + link_names, + parse, + color, + attachments, + blocks, + message_id, + prepend_hash, +): payload = {} if color == "normal" and text is not None: payload = dict(text=escape_quotes(text)) @@ -319,175 +332,178 @@ def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_e # With a custom color we have to set the message as attachment, and explicitly turn markdown parsing on for it. payload = dict(attachments=[dict(text=escape_quotes(text), color=color, mrkdwn_in=["text"])]) if channel is not None: - if prepend_hash == 'auto': - if channel.startswith(('#', '@', 'C0', 'GF', 'G0', 'CP')): - payload['channel'] = channel + if prepend_hash == "auto": + if channel.startswith(("#", "@", "C0", "GF", "G0", "CP")): + payload["channel"] = channel else: - payload['channel'] = f"#{channel}" - elif prepend_hash == 'always': - payload['channel'] = f"#{channel}" - elif prepend_hash == 'never': - payload['channel'] = channel + payload["channel"] = f"#{channel}" + elif prepend_hash == "always": + payload["channel"] = f"#{channel}" + elif prepend_hash == "never": + payload["channel"] = channel if thread_id is not None: - payload['thread_ts'] = thread_id + payload["thread_ts"] = thread_id if username is not None: - payload['username'] = username + payload["username"] = username if icon_emoji is not None: - payload['icon_emoji'] = icon_emoji + payload["icon_emoji"] = icon_emoji else: - payload['icon_url'] = icon_url + payload["icon_url"] = icon_url if link_names is not None: - payload['link_names'] = link_names + payload["link_names"] = link_names if parse is not None: - payload['parse'] = parse + payload["parse"] = parse if message_id is not None: - payload['ts'] = message_id + payload["ts"] = message_id if attachments is not None: - if 'attachments' not in payload: - payload['attachments'] = [] + if "attachments" not in payload: + payload["attachments"] = [] if attachments is not None: attachment_keys_to_escape = [ - 'title', - 'text', - 'author_name', - 'pretext', - 'fallback', + "title", + "text", + "author_name", + "pretext", + "fallback", ] for attachment in attachments: for key in attachment_keys_to_escape: if key in attachment: attachment[key] = 
escape_quotes(attachment[key]) - if 'fallback' not in attachment: - attachment['fallback'] = attachment['text'] + if "fallback" not in attachment: + attachment["fallback"] = attachment["text"] - payload['attachments'].append(attachment) + payload["attachments"].append(attachment) if blocks is not None: - block_keys_to_escape = [ - 'text', - 'alt_text' - ] - payload['blocks'] = recursive_escape_quotes(blocks, block_keys_to_escape) + block_keys_to_escape = ["text", "alt_text"] + payload["blocks"] = recursive_escape_quotes(blocks, block_keys_to_escape) return payload def validate_slack_domain(domain): - return (domain if domain in ('slack.com', 'slack-gov.com') else 'slack.com') + return domain if domain in ("slack.com", "slack-gov.com") else "slack.com" def get_slack_message(module, domain, token, channel, ts): headers = { - 'Content-Type': 'application/json; charset=UTF-8', - 'Accept': 'application/json', - 'Authorization': f"Bearer {token}" + "Content-Type": "application/json; charset=UTF-8", + "Accept": "application/json", + "Authorization": f"Bearer {token}", } - qs = urlencode({ - 'channel': channel, - 'ts': ts, - 'limit': 1, - 'inclusive': 'true', - }) + qs = urlencode( + { + "channel": channel, + "ts": ts, + "limit": 1, + "inclusive": "true", + } + ) domain = validate_slack_domain(domain) url = f"{SLACK_CONVERSATIONS_HISTORY_WEBAPI % domain}?{qs}" - response, info = fetch_url(module=module, url=url, headers=headers, method='GET') - if info['status'] != 200: + response, info = fetch_url(module=module, url=url, headers=headers, method="GET") + if info["status"] != 200: module.fail_json(msg="failed to get slack message") data = module.from_json(response.read()) - if data.get('ok') is False: + if data.get("ok") is False: module.fail_json(msg=f"failed to get slack message: {data}") - if len(data['messages']) < 1: + if len(data["messages"]) < 1: module.fail_json(msg=f"no messages matching ts: {ts}") - if len(data['messages']) > 1: + if len(data["messages"]) > 1: module.fail_json(msg=f"more than 1 message matching ts: {ts}") - return data['messages'][0] + return data["messages"][0] def do_notify_slack(module, domain, token, payload): use_webapi = False - if token.count('/') >= 2: + if token.count("/") >= 2: # New style webhook token domain = validate_slack_domain(domain) slack_uri = SLACK_INCOMING_WEBHOOK % (domain, token) - elif re.match(r'^xox[abp]-\S+$', token): + elif re.match(r"^xox[abp]-\S+$", token): domain = validate_slack_domain(domain) - slack_uri = (SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI) % domain + slack_uri = (SLACK_UPDATEMESSAGE_WEBAPI if "ts" in payload else SLACK_POSTMESSAGE_WEBAPI) % domain use_webapi = True else: if not domain: - module.fail_json(msg="Slack has updated its webhook API. You need to specify a token of the form " - "XXXX/YYYY/ZZZZ in your playbook") + module.fail_json( + msg="Slack has updated its webhook API. 
You need to specify a token of the form " + "XXXX/YYYY/ZZZZ in your playbook" + ) slack_uri = OLD_SLACK_INCOMING_WEBHOOK % (domain, token) headers = { - 'Content-Type': 'application/json; charset=UTF-8', - 'Accept': 'application/json', + "Content-Type": "application/json; charset=UTF-8", + "Accept": "application/json", } if use_webapi: - headers['Authorization'] = f"Bearer {token}" + headers["Authorization"] = f"Bearer {token}" data = module.jsonify(payload) - response, info = fetch_url(module=module, url=slack_uri, headers=headers, method='POST', data=data) + response, info = fetch_url(module=module, url=slack_uri, headers=headers, method="POST", data=data) - if info['status'] != 200: + if info["status"] != 200: if use_webapi: obscured_incoming_webhook = slack_uri else: - obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, '[obscured]') + obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, "[obscured]") module.fail_json(msg=f" failed to send {data} to {obscured_incoming_webhook}: {info['msg']}") # each API requires different handling if use_webapi: return module.from_json(response.read()) else: - return {'webhook': 'ok'} + return {"webhook": "ok"} def main(): module = AnsibleModule( argument_spec=dict( - domain=dict(type='str'), - token=dict(type='str', required=True, no_log=True), - msg=dict(type='str'), - channel=dict(type='str'), - thread_id=dict(type='str'), - username=dict(type='str', default='Ansible'), - icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'), - icon_emoji=dict(type='str'), - link_names=dict(type='int', default=1, choices=[0, 1]), - parse=dict(type='str', choices=['none', 'full']), - validate_certs=dict(default=True, type='bool'), - color=dict(type='str', default='normal'), - attachments=dict(type='list', elements='dict'), - blocks=dict(type='list', elements='dict'), - message_id=dict(type='str'), - prepend_hash=dict(type='str', choices=['always', 'never', 'auto'], default='never'), + domain=dict(type="str"), + token=dict(type="str", required=True, no_log=True), + msg=dict(type="str"), + channel=dict(type="str"), + thread_id=dict(type="str"), + username=dict(type="str", default="Ansible"), + icon_url=dict(type="str", default="https://docs.ansible.com/favicon.ico"), + icon_emoji=dict(type="str"), + link_names=dict(type="int", default=1, choices=[0, 1]), + parse=dict(type="str", choices=["none", "full"]), + validate_certs=dict(default=True, type="bool"), + color=dict(type="str", default="normal"), + attachments=dict(type="list", elements="dict"), + blocks=dict(type="list", elements="dict"), + message_id=dict(type="str"), + prepend_hash=dict(type="str", choices=["always", "never", "auto"], default="never"), ), supports_check_mode=True, ) - domain = module.params['domain'] - token = module.params['token'] - text = module.params['msg'] - channel = module.params['channel'] - thread_id = module.params['thread_id'] - username = module.params['username'] - icon_url = module.params['icon_url'] - icon_emoji = module.params['icon_emoji'] - link_names = module.params['link_names'] - parse = module.params['parse'] - color = module.params['color'] - attachments = module.params['attachments'] - blocks = module.params['blocks'] - message_id = module.params['message_id'] - prepend_hash = module.params['prepend_hash'] - - color_choices = ['normal', 'good', 'warning', 'danger'] + domain = module.params["domain"] + token = module.params["token"] + text = module.params["msg"] + channel = module.params["channel"] + thread_id = 
module.params["thread_id"] + username = module.params["username"] + icon_url = module.params["icon_url"] + icon_emoji = module.params["icon_emoji"] + link_names = module.params["link_names"] + parse = module.params["parse"] + color = module.params["color"] + attachments = module.params["attachments"] + blocks = module.params["blocks"] + message_id = module.params["message_id"] + prepend_hash = module.params["prepend_hash"] + + color_choices = ["normal", "good", "warning", "danger"] if color not in color_choices and not is_valid_hex_color(color): - module.fail_json(msg=f"Color value specified should be either one of {color_choices} or any valid hex value with length 3 or 6.") + module.fail_json( + msg=f"Color value specified should be either one of {color_choices} or any valid hex value with length 3 or 6." + ) changed = True @@ -495,35 +511,53 @@ def main(): if message_id is not None: changed = False msg = get_slack_message(module, domain, token, channel, message_id) - for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'): + for key in ("icon_url", "icon_emoji", "link_names", "color", "attachments", "blocks"): if msg.get(key) != module.params.get(key): changed = True break # if check mode is active, we shouldn't do anything regardless. # if changed=False, we don't need to do anything, so don't do it. if module.check_mode or not changed: - module.exit_json(changed=changed, ts=msg['ts'], channel=msg['channel']) + module.exit_json(changed=changed, ts=msg["ts"], channel=msg["channel"]) elif module.check_mode: module.exit_json(changed=changed) - payload = build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names, - parse, color, attachments, blocks, message_id, prepend_hash) + payload = build_payload_for_slack( + text, + channel, + thread_id, + username, + icon_url, + icon_emoji, + link_names, + parse, + color, + attachments, + blocks, + message_id, + prepend_hash, + ) slack_response = do_notify_slack(module, domain, token, payload) - if 'ok' in slack_response: + if "ok" in slack_response: # Evaluate WebAPI response - if slack_response['ok']: + if slack_response["ok"]: # return payload as a string for backwards compatibility payload_json = module.jsonify(payload) - module.exit_json(changed=changed, ts=slack_response['ts'], channel=slack_response['channel'], - api=slack_response, payload=payload_json) + module.exit_json( + changed=changed, + ts=slack_response["ts"], + channel=slack_response["channel"], + api=slack_response, + payload=payload_json, + ) else: - module.fail_json(msg="Slack API error", error=slack_response['error']) + module.fail_json(msg="Slack API error", error=slack_response["error"]) else: # Exit with plain OK from WebHook, since we don't have more information # If we get 200 from webhook, the only answer is OK module.exit_json(msg="OK") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/slackpkg.py b/plugins/modules/slackpkg.py index 9b2b913c62d..a2c14742906 100644 --- a/plugins/modules/slackpkg.py +++ b/plugins/modules/slackpkg.py @@ -74,17 +74,16 @@ def query_package(module, slackpkg_path, name): - import platform import os import re machine = platform.machine() # Exception for kernel-headers package on x86_64 - if name == 'kernel-headers' and machine == 'x86_64': - machine = 'x86' - pattern = re.compile(f'^{re.escape(name)}-[^-]+-({re.escape(machine)}|noarch|fw)-[^-]+$') - packages = [f for f in os.listdir('/var/log/packages') if pattern.match(f)] + if name == 
"kernel-headers" and machine == "x86_64": + machine = "x86" + pattern = re.compile(f"^{re.escape(name)}-[^-]+-({re.escape(machine)}|noarch|fw)-[^-]+$") + packages = [f for f in os.listdir("/var/log/packages") if pattern.match(f)] if len(packages) > 0: return True @@ -93,7 +92,6 @@ def query_package(module, slackpkg_path, name): def remove_packages(module, slackpkg_path, packages): - remove_c = 0 # Using a for loop in case of error, we can report the package that failed for package in packages: @@ -102,24 +100,20 @@ def remove_packages(module, slackpkg_path, packages): continue if not module.check_mode: - rc, out, err = module.run_command( - [slackpkg_path, "-default_answer=y", "-batch=on", "remove", package]) + rc, out, err = module.run_command([slackpkg_path, "-default_answer=y", "-batch=on", "remove", package]) - if not module.check_mode and query_package(module, slackpkg_path, - package): + if not module.check_mode and query_package(module, slackpkg_path, package): module.fail_json(msg=f"failed to remove {package}: {out}") remove_c += 1 if remove_c > 0: - module.exit_json(changed=True, msg=f"removed {remove_c} package(s)") module.exit_json(changed=False, msg="package(s) already absent") def install_packages(module, slackpkg_path, packages): - install_c = 0 for package in packages: @@ -127,13 +121,10 @@ def install_packages(module, slackpkg_path, packages): continue if not module.check_mode: - rc, out, err = module.run_command( - [slackpkg_path, "-default_answer=y", "-batch=on", "install", package]) + rc, out, err = module.run_command([slackpkg_path, "-default_answer=y", "-batch=on", "install", package]) - if not module.check_mode and not query_package(module, slackpkg_path, - package): - module.fail_json(msg=f"failed to install {package}: {out}", - stderr=err) + if not module.check_mode and not query_package(module, slackpkg_path, package): + module.fail_json(msg=f"failed to install {package}: {out}", stderr=err) install_c += 1 @@ -148,13 +139,10 @@ def upgrade_packages(module, slackpkg_path, packages): for package in packages: if not module.check_mode: - rc, out, err = module.run_command( - [slackpkg_path, "-default_answer=y", "-batch=on", "upgrade", package]) + rc, out, err = module.run_command([slackpkg_path, "-default_answer=y", "-batch=on", "upgrade", package]) - if not module.check_mode and not query_package(module, slackpkg_path, - package): - module.fail_json(msg=f"failed to install {package}: {out}", - stderr=err) + if not module.check_mode and not query_package(module, slackpkg_path, package): + module.fail_json(msg=f"failed to install {package}: {out}", stderr=err) install_c += 1 @@ -165,8 +153,7 @@ def upgrade_packages(module, slackpkg_path, packages): def update_cache(module, slackpkg_path): - rc, out, err = module.run_command( - [slackpkg_path, "-batch=on", "update"]) + rc, out, err = module.run_command([slackpkg_path, "-batch=on", "update"]) if rc != 0: module.fail_json(msg="Could not update package cache") @@ -174,30 +161,31 @@ def update_cache(module, slackpkg_path): def main(): module = AnsibleModule( argument_spec=dict( - state=dict(default="present", choices=['installed', 'removed', 'absent', 'present', 'latest']), - name=dict(aliases=["pkg"], required=True, type='list', elements='str'), - update_cache=dict(default=False, type='bool'), + state=dict(default="present", choices=["installed", "removed", "absent", "present", "latest"]), + name=dict(aliases=["pkg"], required=True, type="list", elements="str"), + update_cache=dict(default=False, type="bool"), ), - 
supports_check_mode=True) + supports_check_mode=True, + ) - slackpkg_path = module.get_bin_path('slackpkg', True) + slackpkg_path = module.get_bin_path("slackpkg", True) p = module.params - pkgs = p['name'] + pkgs = p["name"] if p["update_cache"]: update_cache(module, slackpkg_path) - if p['state'] == 'latest': + if p["state"] == "latest": upgrade_packages(module, slackpkg_path, pkgs) - elif p['state'] in ['present', 'installed']: + elif p["state"] in ["present", "installed"]: install_packages(module, slackpkg_path, pkgs) - elif p["state"] in ['removed', 'absent']: + elif p["state"] in ["removed", "absent"]: remove_packages(module, slackpkg_path, pkgs) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/smartos_image_info.py b/plugins/modules/smartos_image_info.py index 5ac18c98f38..2ccbc4c32fb 100644 --- a/plugins/modules/smartos_image_info.py +++ b/plugins/modules/smartos_image_info.py @@ -70,14 +70,13 @@ class ImageFacts: - def __init__(self, module): self.module = module - self.filters = module.params['filters'] + self.filters = module.params["filters"] def return_all_installed_images(self): - cmd = [self.module.get_bin_path('imgadm'), 'list', '-j'] + cmd = [self.module.get_bin_path("imgadm"), "list", "-j"] if self.filters: cmd.append(self.filters) @@ -85,17 +84,16 @@ def return_all_installed_images(self): (rc, out, err) = self.module.run_command(cmd) if rc != 0: - self.module.exit_json( - msg='Failed to get all installed images', stderr=err) + self.module.exit_json(msg="Failed to get all installed images", stderr=err) images = json.loads(out) result = {} for image in images: - result[image['manifest']['uuid']] = image['manifest'] + result[image["manifest"]["uuid"]] = image["manifest"] # Merge additional attributes with the image manifest. 
-        for attrib in ['clones', 'source', 'zpool']:
-            result[image['manifest']['uuid']][attrib] = image[attrib]
+        for attrib in ["clones", "source", "zpool"]:
+            result[image["manifest"]["uuid"]][attrib] = image[attrib]
 
         return result
 
@@ -115,5 +113,5 @@ def main():
     module.exit_json(**data)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/snap.py b/plugins/modules/snap.py
index 737fd73d3d7..c957351091e 100644
--- a/plugins/modules/snap.py
+++ b/plugins/modules/snap.py
@@ -187,17 +187,17 @@ class Snap(StateModuleHelper):
     CHANNEL_MISMATCH = 1
     INSTALLED = 2
 
-    __disable_re = re.compile(r'(?:\S+\s+){5}(?P<notes>\S+)')
-    __set_param_re = re.compile(r'(?P<snap_prefix>\S+:)?(?P<key>\S+)\s*=\s*(?P<value>.+)')
-    __list_re = re.compile(r'^(?P<name>\S+)\s+\S+\s+\S+\s+(?P<channel>\S+)')
+    __disable_re = re.compile(r"(?:\S+\s+){5}(?P<notes>\S+)")
+    __set_param_re = re.compile(r"(?P<snap_prefix>\S+:)?(?P<key>\S+)\s*=\s*(?P<value>.+)")
+    __list_re = re.compile(r"^(?P<name>\S+)\s+\S+\s+\S+\s+(?P<channel>\S+)")
     module = dict(
         argument_spec={
-            'name': dict(type='list', elements='str', required=True),
-            'state': dict(type='str', default='present', choices=['absent', 'present', 'enabled', 'disabled']),
-            'classic': dict(type='bool', default=False),
-            'channel': dict(type='str'),
-            'options': dict(type='list', elements='str'),
-            'dangerous': dict(type='bool', default=False),
+            "name": dict(type="list", elements="str", required=True),
+            "state": dict(type="str", default="present", choices=["absent", "present", "enabled", "disabled"]),
+            "classic": dict(type="bool", default=False),
+            "channel": dict(type="str"),
+            "options": dict(type="list", elements="str"),
+            "dangerous": dict(type="bool", default=False),
         },
         supports_check_mode=True,
     )
@@ -227,7 +227,12 @@ def __init_module__(self):
         else:
             status_var = "name"
         self.vars.set("status_var", status_var, output=False)
-        self.vars.set("snap_status", self.snap_status(self.vars[self.vars.status_var], self.vars.channel), output=False, change=True)
+        self.vars.set(
+            "snap_status",
+            self.snap_status(self.vars[self.vars.status_var], self.vars.channel),
+            output=False,
+            change=True,
+        )
         self.vars.set("snap_status_map", dict(zip(self.vars.name, self.vars.snap_status)), output=False, change=True)
 
     def __quit_module__(self):
@@ -262,10 +267,10 @@ def _run_multiple_commands(self, commands, actionable_names, bundle=True, refres
             results_run_info.append(ctx.run_info)
 
         return (
-            '; '.join([to_native(x) for x in results_cmd]),
+            "; ".join([to_native(x) for x in results_cmd]),
             self._first_non_zero(results_rc),
-            '\n'.join(results_out),
-            '\n'.join(results_err),
+            "\n".join(results_out),
+            "\n".join(results_err),
             results_run_info,
         )
 
@@ -273,8 +278,10 @@ def convert_json_subtree_to_map(self, json_subtree, prefix=None):
         option_map = {}
 
         if not isinstance(json_subtree, dict):
-            self.do_raise("Non-dict non-leaf element encountered while parsing option map. "
-                          "The output format of 'snap set' may have changed. Aborting!")
+            self.do_raise(
+                "Non-dict non-leaf element encountered while parsing option map. "
+                "The output format of 'snap set' may have changed. Aborting!"
+            )
 
         for key, value in json_subtree.items():
             full_key = key if prefix is None else f"{prefix}.{key}"
@@ -307,7 +314,8 @@ def retrieve_option_map(self, snap_name):
             return option_map
         except Exception as e:
             self.do_raise(
-                msg=f"Parsing option map returned by 'snap get {snap_name}' triggers exception '{e}', output:\n'{out}'")
+                msg=f"Parsing option map returned by 'snap get {snap_name}' triggers exception '{e}', output:\n'{out}'"
+            )
 
     def names_from_snaps(self, snaps):
         def process_one(rc, out, err):
@@ -334,9 +342,11 @@ def process(rc, out, err):
                 process_ = process_many
 
             if "warning: no snap found" in check_error:
-                self.do_raise("Snaps not found: {0}.".format([x.split()[-1]
-                                                              for x in out.split('\n')
-                                                              if x.startswith("warning: no snap found")]))
+                self.do_raise(
+                    "Snaps not found: {0}.".format(
+                        [x.split()[-1] for x in out.split("\n") if x.startswith("warning: no snap found")]
+                    )
+                )
             return process_(rc, out, err)
 
         names = []
@@ -360,9 +370,9 @@ def _status_check(name, channel, installed):
         with self.runner("_list") as ctx:
             rc, out, err = ctx.run(check_rc=True)
-        list_out = out.split('\n')[1:]
+        list_out = out.split("\n")[1:]
         list_out = [self.__list_re.match(x) for x in list_out]
-        list_out = [(m.group('name'), m.group('channel')) for m in list_out if m]
+        list_out = [(m.group("name"), m.group("channel")) for m in list_out if m]
         self.vars.status_out = list_out
         self.vars.status_run_info = ctx.run_info
 
@@ -377,8 +387,8 @@ def is_snap_enabled(self, snap_name):
         match = self.__disable_re.match(result)
         if not match:
             self.do_raise(msg=f"Unable to parse 'snap list {snap_name}' output:\n{out}")
-        notes = match.group('notes')
-        return "disabled" not in notes.split(',')
+        notes = match.group("notes")
+        return "disabled" not in notes.split(",")
 
     def _present(self, actionable_snaps, refresh=False):
         self.changed = True
@@ -387,35 +397,42 @@ def _present(self, actionable_snaps, refresh=False):
         if self.check_mode:
             return
 
-        params = ['state', 'classic', 'channel', 'dangerous']  # get base cmd parts
-        has_one_pkg_params = bool(self.vars.classic) or self.vars.channel != 'stable'
+        params = ["state", "classic", "channel", "dangerous"]  # get base cmd parts
+        has_one_pkg_params = bool(self.vars.classic) or self.vars.channel != "stable"
         has_multiple_snaps = len(actionable_snaps) > 1
 
         if has_one_pkg_params and has_multiple_snaps:
-            self.vars.cmd, rc, out, err, run_info = self._run_multiple_commands(params, actionable_snaps, bundle=False, refresh=refresh)
+            self.vars.cmd, rc, out, err, run_info = self._run_multiple_commands(
+                params, actionable_snaps, bundle=False, refresh=refresh
+            )
         else:
-            self.vars.cmd, rc, out, err, run_info = self._run_multiple_commands(params, actionable_snaps, refresh=refresh)
+            self.vars.cmd, rc, out, err, run_info = self._run_multiple_commands(
+                params, actionable_snaps, refresh=refresh
+            )
         self.vars.run_info = run_info
 
         if rc == 0:
             return
 
-        classic_snap_pattern = re.compile(r'^error: This revision of snap "(?P<package_name>\w+)"'
-                                          r' was published using classic confinement')
+        classic_snap_pattern = re.compile(
+            r'^error: This revision of snap "(?P<package_name>\w+)"'
+            r" was published using classic confinement"
+        )
         match = classic_snap_pattern.match(err)
         if match:
-            err_pkg = match.group('package_name')
+            err_pkg = match.group("package_name")
             msg = f"Couldn't install {err_pkg} because it requires classic confinement"
         else:
             msg = f"Ooops! Snap installation failed while executing '{self.vars.cmd}', please examine logs and error output for more details."
self.do_raise(msg=msg) def state_present(self): + self.vars.set_meta("classic", output=True) + self.vars.set_meta("channel", output=True) - self.vars.set_meta('classic', output=True) - self.vars.set_meta('channel', output=True) - - actionable_refresh = [snap for snap in self.vars.name if self.vars.snap_status_map[snap] == Snap.CHANNEL_MISMATCH] + actionable_refresh = [ + snap for snap in self.vars.name if self.vars.snap_status_map[snap] == Snap.CHANNEL_MISMATCH + ] if actionable_refresh: self._present(actionable_refresh, refresh=True) actionable_install = [snap for snap in self.vars.name if self.vars.snap_status_map[snap] == Snap.NOT_INSTALLED] @@ -456,7 +473,9 @@ def set_options(self): if key not in option_map or key in option_map and option_map[key] != value: option_without_prefix = f"{key}={value}" - option_with_prefix = option_string if selected_snap_name is not None else f"{snap_name}:{option_string}" + option_with_prefix = ( + option_string if selected_snap_name is not None else f"{snap_name}:{option_string}" + ) options_changed.append(option_without_prefix) overall_options_changed.append(option_with_prefix) @@ -493,18 +512,24 @@ def _generic_state_action(self, actionable_func, actionable_var, params): self.do_raise(msg=msg) def state_absent(self): - self._generic_state_action(lambda s: self.vars.snap_status_map[s] != Snap.NOT_INSTALLED, "snaps_removed", ['classic', 'channel', 'state']) + self._generic_state_action( + lambda s: self.vars.snap_status_map[s] != Snap.NOT_INSTALLED, + "snaps_removed", + ["classic", "channel", "state"], + ) def state_enabled(self): - self._generic_state_action(lambda s: not self.is_snap_enabled(s), "snaps_enabled", ['classic', 'channel', 'state']) + self._generic_state_action( + lambda s: not self.is_snap_enabled(s), "snaps_enabled", ["classic", "channel", "state"] + ) def state_disabled(self): - self._generic_state_action(self.is_snap_enabled, "snaps_disabled", ['classic', 'channel', 'state']) + self._generic_state_action(self.is_snap_enabled, "snaps_disabled", ["classic", "channel", "state"]) def main(): Snap.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/snap_alias.py b/plugins/modules/snap_alias.py index 4a68671a063..e1447095d88 100644 --- a/plugins/modules/snap_alias.py +++ b/plugins/modules/snap_alias.py @@ -97,13 +97,13 @@ class SnapAlias(StateModuleHelper): module = dict( argument_spec={ - 'state': dict(type='str', choices=['absent', 'present'], default='present'), - 'name': dict(type='str'), - 'alias': dict(type='list', elements='str', aliases=['aliases']), + "state": dict(type="str", choices=["absent", "present"], default="present"), + "name": dict(type="str"), + "alias": dict(type="list", elements="str", aliases=["aliases"]), }, required_if=[ - ('state', 'present', ['name', 'alias']), - ('state', 'absent', ['name', 'alias'], True), + ("state", "present", ["name", "alias"]), + ("state", "absent", ["name", "alias"], True), ], supports_check_mode=True, ) @@ -181,5 +181,5 @@ def main(): SnapAlias.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/snmp_facts.py b/plugins/modules/snmp_facts.py index c1c39e89145..e7d730f9bda 100644 --- a/plugins/modules/snmp_facts.py +++ b/plugins/modules/snmp_facts.py @@ -196,7 +196,6 @@ class DefineOid: - def __init__(self, dotprefix=False): if dotprefix: dp = "." 
@@ -228,7 +227,6 @@ def __init__(self, dotprefix=False): def decode_hex(hexstring): - if len(hexstring) < 3: return hexstring if hexstring[:2] == "0x": @@ -237,7 +235,6 @@ def decode_hex(hexstring): def decode_mac(hexstring): - if len(hexstring) != 14: return hexstring if hexstring[:2] == "0x": @@ -246,11 +243,7 @@ def decode_mac(hexstring): def lookup_adminstatus(int_adminstatus): - adminstatus_options = { - 1: 'up', - 2: 'down', - 3: 'testing' - } + adminstatus_options = {1: "up", 2: "down", 3: "testing"} if int_adminstatus in adminstatus_options: return adminstatus_options[int_adminstatus] return "" @@ -258,13 +251,13 @@ def lookup_adminstatus(int_adminstatus): def lookup_operstatus(int_operstatus): operstatus_options = { - 1: 'up', - 2: 'down', - 3: 'testing', - 4: 'unknown', - 5: 'dormant', - 6: 'notPresent', - 7: 'lowerLayerDown' + 1: "up", + 2: "down", + 3: "testing", + 4: "unknown", + 5: "dormant", + 6: "notPresent", + 7: "lowerLayerDown", } if int_operstatus in operstatus_options: return operstatus_options[int_operstatus] @@ -274,21 +267,21 @@ def lookup_operstatus(int_operstatus): def main(): module = AnsibleModule( argument_spec=dict( - host=dict(type='str', required=True), - version=dict(type='str', required=True, choices=['v2', 'v2c', 'v3']), - community=dict(type='str'), - username=dict(type='str'), - level=dict(type='str', choices=['authNoPriv', 'authPriv']), - integrity=dict(type='str', choices=['md5', 'sha']), - privacy=dict(type='str', choices=['aes', 'des']), - authkey=dict(type='str', no_log=True), - privkey=dict(type='str', no_log=True), - timeout=dict(type='int'), - retries=dict(type='int'), + host=dict(type="str", required=True), + version=dict(type="str", required=True, choices=["v2", "v2c", "v3"]), + community=dict(type="str"), + username=dict(type="str"), + level=dict(type="str", choices=["authNoPriv", "authPriv"]), + integrity=dict(type="str", choices=["md5", "sha"]), + privacy=dict(type="str", choices=["aes", "des"]), + authkey=dict(type="str", no_log=True), + privkey=dict(type="str", no_log=True), + timeout=dict(type="int"), + retries=dict(type="int"), ), required_together=( - ['username', 'level', 'integrity', 'authkey'], - ['privacy', 'privkey'], + ["username", "level", "integrity", "authkey"], + ["privacy", "privkey"], ), supports_check_mode=True, ) @@ -298,48 +291,49 @@ def main(): deps.validate(module) cmdGen = cmdgen.CommandGenerator() - transport_opts = { - k: m_args[k] - for k in ('timeout', 'retries') - if m_args[k] is not None - } + transport_opts = {k: m_args[k] for k in ("timeout", "retries") if m_args[k] is not None} # Verify that we receive a community when using snmp v2 - if m_args['version'] in ("v2", "v2c"): - if m_args['community'] is None: - module.fail_json(msg='Community not set when using snmp version 2') + if m_args["version"] in ("v2", "v2c"): + if m_args["community"] is None: + module.fail_json(msg="Community not set when using snmp version 2") integrity_proto = None privacy_proto = None - if m_args['version'] == "v3": - if m_args['username'] is None: - module.fail_json(msg='Username not set when using snmp version 3') + if m_args["version"] == "v3": + if m_args["username"] is None: + module.fail_json(msg="Username not set when using snmp version 3") - if m_args['level'] == "authPriv" and m_args['privacy'] is None: - module.fail_json(msg='Privacy algorithm not set when using authPriv') + if m_args["level"] == "authPriv" and m_args["privacy"] is None: + module.fail_json(msg="Privacy algorithm not set when using authPriv") - if 
m_args['integrity'] == "sha": + if m_args["integrity"] == "sha": integrity_proto = cmdgen.usmHMACSHAAuthProtocol - elif m_args['integrity'] == "md5": + elif m_args["integrity"] == "md5": integrity_proto = cmdgen.usmHMACMD5AuthProtocol - if m_args['privacy'] == "aes": + if m_args["privacy"] == "aes": privacy_proto = cmdgen.usmAesCfb128Protocol - elif m_args['privacy'] == "des": + elif m_args["privacy"] == "des": privacy_proto = cmdgen.usmDESPrivProtocol # Use SNMP Version 2 - if m_args['version'] in ("v2", "v2c"): - snmp_auth = cmdgen.CommunityData(m_args['community']) + if m_args["version"] in ("v2", "v2c"): + snmp_auth = cmdgen.CommunityData(m_args["community"]) # Use SNMP Version 3 with authNoPriv - elif m_args['level'] == "authNoPriv": - snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto) + elif m_args["level"] == "authNoPriv": + snmp_auth = cmdgen.UsmUserData(m_args["username"], authKey=m_args["authkey"], authProtocol=integrity_proto) # Use SNMP Version 3 with authPriv else: - snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto, - privProtocol=privacy_proto) + snmp_auth = cmdgen.UsmUserData( + m_args["username"], + authKey=m_args["authkey"], + privKey=m_args["privkey"], + authProtocol=integrity_proto, + privProtocol=privacy_proto, + ) # Use p to prefix OIDs with a dot for polling p = DefineOid(dotprefix=True) @@ -353,14 +347,26 @@ def Tree(): errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd( snmp_auth, - cmdgen.UdpTransportTarget((m_args['host'], 161), **transport_opts), - cmdgen.MibVariable(p.sysDescr,), - cmdgen.MibVariable(p.sysObjectId,), - cmdgen.MibVariable(p.sysUpTime,), - cmdgen.MibVariable(p.sysContact,), - cmdgen.MibVariable(p.sysName,), - cmdgen.MibVariable(p.sysLocation,), - lookupMib=False + cmdgen.UdpTransportTarget((m_args["host"], 161), **transport_opts), + cmdgen.MibVariable( + p.sysDescr, + ), + cmdgen.MibVariable( + p.sysObjectId, + ), + cmdgen.MibVariable( + p.sysUpTime, + ), + cmdgen.MibVariable( + p.sysContact, + ), + cmdgen.MibVariable( + p.sysName, + ), + cmdgen.MibVariable( + p.sysLocation, + ), + lookupMib=False, ) if errorIndication: @@ -370,34 +376,55 @@ def Tree(): current_oid = oid.prettyPrint() current_val = val.prettyPrint() if current_oid == v.sysDescr: - results['ansible_sysdescr'] = decode_hex(current_val) + results["ansible_sysdescr"] = decode_hex(current_val) elif current_oid == v.sysObjectId: - results['ansible_sysobjectid'] = current_val + results["ansible_sysobjectid"] = current_val elif current_oid == v.sysUpTime: - results['ansible_sysuptime'] = current_val + results["ansible_sysuptime"] = current_val elif current_oid == v.sysContact: - results['ansible_syscontact'] = current_val + results["ansible_syscontact"] = current_val elif current_oid == v.sysName: - results['ansible_sysname'] = current_val + results["ansible_sysname"] = current_val elif current_oid == v.sysLocation: - results['ansible_syslocation'] = current_val + results["ansible_syslocation"] = current_val errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( snmp_auth, - cmdgen.UdpTransportTarget((m_args['host'], 161), **transport_opts), - cmdgen.MibVariable(p.ifIndex,), - cmdgen.MibVariable(p.ifDescr,), - cmdgen.MibVariable(p.ifMtu,), - cmdgen.MibVariable(p.ifSpeed,), - cmdgen.MibVariable(p.ifPhysAddress,), - cmdgen.MibVariable(p.ifAdminStatus,), - cmdgen.MibVariable(p.ifOperStatus,), - 
cmdgen.MibVariable(p.ipAdEntAddr,), - cmdgen.MibVariable(p.ipAdEntIfIndex,), - cmdgen.MibVariable(p.ipAdEntNetMask,), - - cmdgen.MibVariable(p.ifAlias,), - lookupMib=False + cmdgen.UdpTransportTarget((m_args["host"], 161), **transport_opts), + cmdgen.MibVariable( + p.ifIndex, + ), + cmdgen.MibVariable( + p.ifDescr, + ), + cmdgen.MibVariable( + p.ifMtu, + ), + cmdgen.MibVariable( + p.ifSpeed, + ), + cmdgen.MibVariable( + p.ifPhysAddress, + ), + cmdgen.MibVariable( + p.ifAdminStatus, + ), + cmdgen.MibVariable( + p.ifOperStatus, + ), + cmdgen.MibVariable( + p.ipAdEntAddr, + ), + cmdgen.MibVariable( + p.ipAdEntIfIndex, + ), + cmdgen.MibVariable( + p.ipAdEntNetMask, + ), + cmdgen.MibVariable( + p.ifAlias, + ), + lookupMib=False, ) if errorIndication: @@ -415,51 +442,51 @@ def Tree(): current_oid = oid.prettyPrint() current_val = val.prettyPrint() if v.ifIndex in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['ifindex'] = current_val + ifIndex = int(current_oid.rsplit(".", 1)[-1]) + results["ansible_interfaces"][ifIndex]["ifindex"] = current_val interface_indexes.append(ifIndex) if v.ifDescr in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['name'] = current_val + ifIndex = int(current_oid.rsplit(".", 1)[-1]) + results["ansible_interfaces"][ifIndex]["name"] = current_val if v.ifMtu in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['mtu'] = current_val + ifIndex = int(current_oid.rsplit(".", 1)[-1]) + results["ansible_interfaces"][ifIndex]["mtu"] = current_val if v.ifSpeed in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['speed'] = current_val + ifIndex = int(current_oid.rsplit(".", 1)[-1]) + results["ansible_interfaces"][ifIndex]["speed"] = current_val if v.ifPhysAddress in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val) + ifIndex = int(current_oid.rsplit(".", 1)[-1]) + results["ansible_interfaces"][ifIndex]["mac"] = decode_mac(current_val) if v.ifAdminStatus in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val)) + ifIndex = int(current_oid.rsplit(".", 1)[-1]) + results["ansible_interfaces"][ifIndex]["adminstatus"] = lookup_adminstatus(int(current_val)) if v.ifOperStatus in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val)) + ifIndex = int(current_oid.rsplit(".", 1)[-1]) + results["ansible_interfaces"][ifIndex]["operstatus"] = lookup_operstatus(int(current_val)) if v.ipAdEntAddr in current_oid: - curIPList = current_oid.rsplit('.', 4)[-4:] + curIPList = current_oid.rsplit(".", 4)[-4:] curIP = ".".join(curIPList) - ipv4_networks[curIP]['address'] = current_val + ipv4_networks[curIP]["address"] = current_val all_ipv4_addresses.append(current_val) if v.ipAdEntIfIndex in current_oid: - curIPList = current_oid.rsplit('.', 4)[-4:] + curIPList = current_oid.rsplit(".", 4)[-4:] curIP = ".".join(curIPList) - ipv4_networks[curIP]['interface'] = current_val + ipv4_networks[curIP]["interface"] = current_val if v.ipAdEntNetMask in current_oid: - curIPList = current_oid.rsplit('.', 4)[-4:] + curIPList = current_oid.rsplit(".", 4)[-4:] curIP = ".".join(curIPList) - ipv4_networks[curIP]['netmask'] = 
current_val + ipv4_networks[curIP]["netmask"] = current_val if v.ifAlias in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['ansible_interfaces'][ifIndex]['description'] = current_val + ifIndex = int(current_oid.rsplit(".", 1)[-1]) + results["ansible_interfaces"][ifIndex]["description"] = current_val interface_to_ipv4 = {} for ipv4_network in ipv4_networks: - current_interface = ipv4_networks[ipv4_network]['interface'] + current_interface = ipv4_networks[ipv4_network]["interface"] current_network = { - 'address': ipv4_networks[ipv4_network]['address'], - 'netmask': ipv4_networks[ipv4_network]['netmask'] + "address": ipv4_networks[ipv4_network]["address"], + "netmask": ipv4_networks[ipv4_network]["netmask"], } if current_interface not in interface_to_ipv4: interface_to_ipv4[current_interface] = [] @@ -468,12 +495,12 @@ def Tree(): interface_to_ipv4[current_interface].append(current_network) for interface in interface_to_ipv4: - results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface] + results["ansible_interfaces"][int(interface)]["ipv4"] = interface_to_ipv4[interface] - results['ansible_all_ipv4_addresses'] = all_ipv4_addresses + results["ansible_all_ipv4_addresses"] = all_ipv4_addresses module.exit_json(ansible_facts=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/solaris_zone.py b/plugins/modules/solaris_zone.py index 2390d3268da..beaa5635c2d 100644 --- a/plugins/modules/solaris_zone.py +++ b/plugins/modules/solaris_zone.py @@ -159,146 +159,148 @@ def __init__(self, module): self.msg = [] self.module = module - self.path = self.module.params['path'] - self.name = self.module.params['name'] - self.sparse = self.module.params['sparse'] - self.root_password = self.module.params['root_password'] - self.timeout = self.module.params['timeout'] - self.config = self.module.params['config'] - self.create_options = self.module.params['create_options'] - self.install_options = self.module.params['install_options'] - self.attach_options = self.module.params['attach_options'] - - self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True) - self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True) - self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True) + self.path = self.module.params["path"] + self.name = self.module.params["name"] + self.sparse = self.module.params["sparse"] + self.root_password = self.module.params["root_password"] + self.timeout = self.module.params["timeout"] + self.config = self.module.params["config"] + self.create_options = self.module.params["create_options"] + self.install_options = self.module.params["install_options"] + self.attach_options = self.module.params["attach_options"] + + self.zoneadm_cmd = self.module.get_bin_path("zoneadm", True) + self.zonecfg_cmd = self.module.get_bin_path("zonecfg", True) + self.ssh_keygen_cmd = self.module.get_bin_path("ssh-keygen", True) if self.module.check_mode: - self.msg.append('Running in check mode') + self.msg.append("Running in check mode") - if platform.system() != 'SunOS': - self.module.fail_json(msg='This module requires Solaris') + if platform.system() != "SunOS": + self.module.fail_json(msg="This module requires Solaris") - (self.os_major, self.os_minor) = platform.release().split('.') + (self.os_major, self.os_minor) = platform.release().split(".") if int(self.os_minor) < 10: - self.module.fail_json(msg='This module requires Solaris 10 or later') + self.module.fail_json(msg="This module requires Solaris 10 
or later") - match = re.match('^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$', self.name) + match = re.match("^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$", self.name) if not match: - self.module.fail_json(msg="Provided zone name is not a valid zone name. " - "Please refer documentation for correct zone name specifications.") + self.module.fail_json( + msg="Provided zone name is not a valid zone name. " + "Please refer documentation for correct zone name specifications." + ) def configure(self): if not self.path: - self.module.fail_json(msg='Missing required argument: path') + self.module.fail_json(msg="Missing required argument: path") if not self.module.check_mode: - t = tempfile.NamedTemporaryFile(delete=False, mode='wt') + t = tempfile.NamedTemporaryFile(delete=False, mode="wt") if self.sparse: - t.write(f'create {self.create_options}\n') - self.msg.append('creating sparse-root zone') + t.write(f"create {self.create_options}\n") + self.msg.append("creating sparse-root zone") else: - t.write(f'create -b {self.create_options}\n') - self.msg.append('creating whole-root zone') + t.write(f"create -b {self.create_options}\n") + self.msg.append("creating whole-root zone") - t.write(f'set zonepath={self.path}\n') - t.write(f'{self.config}\n') + t.write(f"set zonepath={self.path}\n") + t.write(f"{self.config}\n") t.close() - cmd = [self.zonecfg_cmd, '-z', self.name, '-f', t.name] + cmd = [self.zonecfg_cmd, "-z", self.name, "-f", t.name] (rc, out, err) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Failed to create zone. {out + err}') + self.module.fail_json(msg=f"Failed to create zone. {out + err}") os.unlink(t.name) self.changed = True - self.msg.append('zone configured') + self.msg.append("zone configured") def install(self): if not self.module.check_mode: - cmd = [self.zoneadm_cmd, '-z', self.name, 'install', self.install_options] + cmd = [self.zoneadm_cmd, "-z", self.name, "install", self.install_options] (rc, out, err) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Failed to install zone. {out + err}') + self.module.fail_json(msg=f"Failed to install zone. {out + err}") if int(self.os_minor) == 10: self.configure_sysid() self.configure_password() self.configure_ssh_keys() self.changed = True - self.msg.append('zone installed') + self.msg.append("zone installed") def uninstall(self): if self.is_installed(): if not self.module.check_mode: - cmd = [self.zoneadm_cmd, '-z', self.name, 'uninstall', '-F'] + cmd = [self.zoneadm_cmd, "-z", self.name, "uninstall", "-F"] (rc, out, err) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Failed to uninstall zone. {out + err}') + self.module.fail_json(msg=f"Failed to uninstall zone. 
{out + err}") self.changed = True - self.msg.append('zone uninstalled') + self.msg.append("zone uninstalled") def configure_sysid(self): - if os.path.isfile(f'{self.path}/root/etc/.UNCONFIGURED'): - os.unlink(f'{self.path}/root/etc/.UNCONFIGURED') + if os.path.isfile(f"{self.path}/root/etc/.UNCONFIGURED"): + os.unlink(f"{self.path}/root/etc/.UNCONFIGURED") - open(f'{self.path}/root/noautoshutdown', 'w').close() + open(f"{self.path}/root/noautoshutdown", "w").close() - with open(f'{self.path}/root/etc/nodename', 'w') as node: + with open(f"{self.path}/root/etc/nodename", "w") as node: node.write(self.name) - with open(f'{self.path}/root/etc/.sysIDtool.state', 'w') as id: - id.write('1 # System previously configured?\n') - id.write('1 # Bootparams succeeded?\n') - id.write('1 # System is on a network?\n') - id.write('1 # Extended network information gathered?\n') - id.write('0 # Autobinder succeeded?\n') - id.write('1 # Network has subnets?\n') - id.write('1 # root password prompted for?\n') - id.write('1 # locale and term prompted for?\n') - id.write('1 # security policy in place\n') - id.write('1 # NFSv4 domain configured\n') - id.write('0 # Auto Registration Configured\n') - id.write('vt100') + with open(f"{self.path}/root/etc/.sysIDtool.state", "w") as id: + id.write("1 # System previously configured?\n") + id.write("1 # Bootparams succeeded?\n") + id.write("1 # System is on a network?\n") + id.write("1 # Extended network information gathered?\n") + id.write("0 # Autobinder succeeded?\n") + id.write("1 # Network has subnets?\n") + id.write("1 # root password prompted for?\n") + id.write("1 # locale and term prompted for?\n") + id.write("1 # security policy in place\n") + id.write("1 # NFSv4 domain configured\n") + id.write("0 # Auto Registration Configured\n") + id.write("vt100") def configure_ssh_keys(self): - rsa_key_file = f'{self.path}/root/etc/ssh/ssh_host_rsa_key' - dsa_key_file = f'{self.path}/root/etc/ssh/ssh_host_dsa_key' + rsa_key_file = f"{self.path}/root/etc/ssh/ssh_host_rsa_key" + dsa_key_file = f"{self.path}/root/etc/ssh/ssh_host_dsa_key" if not os.path.isfile(rsa_key_file): - cmd = [self.ssh_keygen_cmd, '-f', rsa_key_file, '-t', 'rsa', '-N', ''] + cmd = [self.ssh_keygen_cmd, "-f", rsa_key_file, "-t", "rsa", "-N", ""] (rc, out, err) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Failed to create rsa key. {out + err}') + self.module.fail_json(msg=f"Failed to create rsa key. {out + err}") if not os.path.isfile(dsa_key_file): - cmd = [self.ssh_keygen_cmd, '-f', dsa_key_file, '-t', 'dsa', '-N', ''] + cmd = [self.ssh_keygen_cmd, "-f", dsa_key_file, "-t", "dsa", "-N", ""] (rc, out, err) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Failed to create dsa key. {out + err}') + self.module.fail_json(msg=f"Failed to create dsa key. 
{out + err}") def configure_password(self): - shadow = f'{self.path}/root/etc/shadow' + shadow = f"{self.path}/root/etc/shadow" if self.root_password: - with open(shadow, 'r') as f: + with open(shadow, "r") as f: lines = f.readlines() for i in range(0, len(lines)): - fields = lines[i].split(':') - if fields[0] == 'root': + fields = lines[i].split(":") + if fields[0] == "root": fields[1] = self.root_password - lines[i] = ':'.join(fields) + lines[i] = ":".join(fields) - with open(shadow, 'w') as f: + with open(shadow, "w") as f: for line in lines: f.write(line) def boot(self): if not self.module.check_mode: - cmd = [self.zoneadm_cmd, '-z', self.name, 'boot'] + cmd = [self.zoneadm_cmd, "-z", self.name, "boot"] (rc, out, err) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Failed to boot zone. {out + err}') + self.module.fail_json(msg=f"Failed to boot zone. {out + err}") """ The boot command can return before the zone has fully booted. This is especially @@ -310,14 +312,14 @@ def boot(self): elapsed = 0 while True: if elapsed > self.timeout: - self.module.fail_json(msg='timed out waiting for zone to boot') + self.module.fail_json(msg="timed out waiting for zone to boot") rc = os.system(f'ps -z {self.name} -o args|grep "ttymon.*-d /dev/console" > /dev/null 2>/dev/null') if rc == 0: break time.sleep(10) elapsed += 10 self.changed = True - self.msg.append('zone booted') + self.msg.append("zone booted") def destroy(self): if self.is_running(): @@ -325,42 +327,42 @@ def destroy(self): if self.is_installed(): self.uninstall() if not self.module.check_mode: - cmd = [self.zonecfg_cmd, '-z', self.name, 'delete', '-F'] + cmd = [self.zonecfg_cmd, "-z", self.name, "delete", "-F"] (rc, out, err) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Failed to delete zone. {out + err}') + self.module.fail_json(msg=f"Failed to delete zone. {out + err}") self.changed = True - self.msg.append('zone deleted') + self.msg.append("zone deleted") def stop(self): if not self.module.check_mode: - cmd = [self.zoneadm_cmd, '-z', self.name, 'halt'] + cmd = [self.zoneadm_cmd, "-z", self.name, "halt"] (rc, out, err) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Failed to stop zone. {out + err}') + self.module.fail_json(msg=f"Failed to stop zone. {out + err}") self.changed = True - self.msg.append('zone stopped') + self.msg.append("zone stopped") def detach(self): if not self.module.check_mode: - cmd = [self.zoneadm_cmd, '-z', self.name, 'detach'] + cmd = [self.zoneadm_cmd, "-z", self.name, "detach"] (rc, out, err) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Failed to detach zone. {out + err}') + self.module.fail_json(msg=f"Failed to detach zone. {out + err}") self.changed = True - self.msg.append('zone detached') + self.msg.append("zone detached") def attach(self): if not self.module.check_mode: - cmd = [self.zoneadm_cmd, '-z', self.name, 'attach', self.attach_options] + cmd = [self.zoneadm_cmd, "-z", self.name, "attach", self.attach_options] (rc, out, err) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Failed to attach zone. {out + err}') + self.module.fail_json(msg=f"Failed to attach zone. 
{out + err}") self.changed = True - self.msg.append('zone attached') + self.msg.append("zone attached") def exists(self): - cmd = [self.zoneadm_cmd, '-z', self.name, 'list'] + cmd = [self.zoneadm_cmd, "-z", self.name, "list"] (rc, out, err) = self.module.run_command(cmd) if rc == 0: return True @@ -368,25 +370,25 @@ def exists(self): return False def is_running(self): - return self.status() == 'running' + return self.status() == "running" def is_installed(self): - return self.status() == 'installed' + return self.status() == "installed" def is_configured(self): - return self.status() == 'configured' + return self.status() == "configured" def status(self): - cmd = [self.zoneadm_cmd, '-z', self.name, 'list', '-p'] + cmd = [self.zoneadm_cmd, "-z", self.name, "list", "-p"] (rc, out, err) = self.module.run_command(cmd) if rc == 0: - return out.split(':')[2] + return out.split(":")[2] else: - return 'undefined' + return "undefined" def state_present(self): if self.exists(): - self.msg.append('zone already exists') + self.msg.append("zone already exists") else: self.configure() self.install() @@ -394,7 +396,7 @@ def state_present(self): def state_running(self): self.state_present() if self.is_running(): - self.msg.append('zone already running') + self.msg.append("zone already running") else: self.boot() @@ -402,7 +404,7 @@ def state_stopped(self): if self.exists(): self.stop() else: - self.module.fail_json(msg='zone does not exist') + self.module.fail_json(msg="zone does not exist") def state_absent(self): if self.exists(): @@ -410,73 +412,86 @@ def state_absent(self): self.stop() self.destroy() else: - self.msg.append('zone does not exist') + self.msg.append("zone does not exist") def state_configured(self): if self.exists(): - self.msg.append('zone already exists') + self.msg.append("zone already exists") else: self.configure() def state_detached(self): if not self.exists(): - self.module.fail_json(msg='zone does not exist') + self.module.fail_json(msg="zone does not exist") if self.is_configured(): - self.msg.append('zone already detached') + self.msg.append("zone already detached") else: self.stop() self.detach() def state_attached(self): if not self.exists(): - self.msg.append('zone does not exist') + self.msg.append("zone does not exist") if self.is_configured(): self.attach() else: - self.msg.append('zone already attached') + self.msg.append("zone already attached") def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', - choices=['absent', 'attached', 'configured', 'detached', 'installed', 'present', 'running', 'started', 'stopped']), - path=dict(type='str'), - sparse=dict(type='bool', default=False), - root_password=dict(type='str', no_log=True), - timeout=dict(type='int', default=600), - config=dict(type='str', default=''), - create_options=dict(type='str', default=''), - install_options=dict(type='str', default=''), - attach_options=dict(type='str', default=''), + name=dict(type="str", required=True), + state=dict( + type="str", + default="present", + choices=[ + "absent", + "attached", + "configured", + "detached", + "installed", + "present", + "running", + "started", + "stopped", + ], + ), + path=dict(type="str"), + sparse=dict(type="bool", default=False), + root_password=dict(type="str", no_log=True), + timeout=dict(type="int", default=600), + config=dict(type="str", default=""), + create_options=dict(type="str", default=""), + install_options=dict(type="str", default=""), + 
attach_options=dict(type="str", default=""), ), supports_check_mode=True, ) zone = Zone(module) - state = module.params['state'] + state = module.params["state"] - if state == 'running' or state == 'started': + if state == "running" or state == "started": zone.state_running() - elif state == 'present' or state == 'installed': + elif state == "present" or state == "installed": zone.state_present() - elif state == 'stopped': + elif state == "stopped": zone.state_stopped() - elif state == 'absent': + elif state == "absent": zone.state_absent() - elif state == 'configured': + elif state == "configured": zone.state_configured() - elif state == 'detached': + elif state == "detached": zone.state_detached() - elif state == 'attached': + elif state == "attached": zone.state_attached() else: - module.fail_json(msg=f'Invalid state: {state}') + module.fail_json(msg=f"Invalid state: {state}") - module.exit_json(changed=zone.changed, msg=', '.join(zone.msg)) + module.exit_json(changed=zone.changed, msg=", ".join(zone.msg)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sorcery.py b/plugins/modules/sorcery.py index d4d34340e3e..706669f59b0 100644 --- a/plugins/modules/sorcery.py +++ b/plugins/modules/sorcery.py @@ -194,13 +194,7 @@ # auto-filled at module init -SORCERY = { - 'sorcery': None, - 'scribe': None, - 'cast': None, - 'dispel': None, - 'gaze': None -} +SORCERY = {"sorcery": None, "scribe": None, "cast": None, "dispel": None, "gaze": None} SORCERY_LOG_DIR = "/var/log/sorcery" SORCERY_STATE_DIR = "/var/state/sorcery" @@ -209,7 +203,7 @@ def get_sorcery_ver(module): - """ Get Sorcery version. """ + """Get Sorcery version.""" cmd_sorcery = f"{SORCERY['sorcery']} --version" @@ -222,16 +216,15 @@ def get_sorcery_ver(module): def codex_fresh(codex, module): - """ Check if grimoire collection is fresh enough. """ + """Check if grimoire collection is fresh enough.""" - if not module.params['cache_valid_time']: + if not module.params["cache_valid_time"]: return False - timedelta = datetime.timedelta(seconds=module.params['cache_valid_time']) + timedelta = datetime.timedelta(seconds=module.params["cache_valid_time"]) for grimoire in codex: - lastupdate_path = os.path.join(SORCERY_STATE_DIR, - f"{grimoire}.lastupdate") + lastupdate_path = os.path.join(SORCERY_STATE_DIR, f"{grimoire}.lastupdate") try: mtime = os.stat(lastupdate_path).st_mtime @@ -248,7 +241,7 @@ def codex_fresh(codex, module): def codex_list(module, skip_new=False): - """ List valid grimoire collection. """ + """List valid grimoire collection.""" params = module.params @@ -268,11 +261,11 @@ def codex_list(module, skip_new=False): match = rex.match(line) if match: - codex[match.group('grim')] = match.group('ver') + codex[match.group("grim")] = match.group("ver") # return only specified grimoires unless requested to skip new - if params['repository'] and not skip_new: - codex = {x: codex.get(x, NA) for x in params['name']} + if params["repository"] and not skip_new: + codex = {x: codex.get(x, NA) for x in params["name"]} if not codex: module.fail_json(msg="no grimoires to operate on; add at least one") @@ -281,7 +274,7 @@ def codex_list(module, skip_new=False): def update_sorcery(module): - """ Update sorcery scripts. + """Update sorcery scripts. This runs 'sorcery update' ('sorcery -u'). Check mode always returns a positive change value. @@ -309,7 +302,7 @@ def update_sorcery(module): def update_codex(module): - """ Update grimoire collections. + """Update grimoire collections. 
     This runs 'scribe update'. Check mode always returns a positive change
     value when 'cache_valid_time' is used.
 
@@ -331,11 +324,11 @@ def update_codex(module):
     else:
         if not fresh:
             # SILENT is required as a workaround for query() in libgpg
-            module.run_command_environ_update.update(dict(SILENT='1'))
+            module.run_command_environ_update.update(dict(SILENT="1"))
 
             cmd_scribe = f"{SORCERY['scribe']} update"
 
-            if params['repository']:
+            if params["repository"]:
                 cmd_scribe += f" {' '.join(codex.keys())}"
 
             rc, stdout, stderr = module.run_command(cmd_scribe)
@@ -350,7 +343,7 @@
 
 
 def match_depends(module):
-    """ Check for matching dependencies.
+    """Check for matching dependencies.
 
     This inspects spell's dependencies with the desired states and returns
     'False' if a recast is needed to match them. It also adds required lines
@@ -359,13 +352,13 @@ def match_depends(module):
     """
 
     params = module.params
-    spells = params['name']
+    spells = params["name"]
 
     depends = {}
 
    depends_ok = True
 
-    if len(spells) > 1 or not params['depends']:
+    if len(spells) > 1 or not params["depends"]:
        return depends_ok
 
    spell = spells[0]
@@ -383,22 +376,22 @@
 
     rex = re.compile(r"^(?P<status>\+?|\-){1}(?P<depend>[a-z0-9]+[a-z0-9_\-\+\.]*(\([A-Z0-9_\-\+\.]+\))*)$")
 
-    for d in params['depends'].split(','):
+    for d in params["depends"].split(","):
         match = rex.match(d)
 
         if not match:
             module.fail_json(msg=f"wrong depends line for spell '{spell}'")
 
         # normalize status
-        if not match.group('status') or match.group('status') == '+':
-            status = 'on'
+        if not match.group("status") or match.group("status") == "+":
+            status = "on"
         else:
-            status = 'off'
+            status = "off"
 
-        depends[match.group('depend')] = status
+        depends[match.group("depend")] = status
 
     # drop providers spec
-    depends_list = [s.split('(')[0] for s in depends]
+    depends_list = [s.split("(")[0] for s in depends]
 
     cmd_gaze = f"{SORCERY['gaze']} -q version {' '.join(depends_list)}"
 
@@ -418,10 +411,10 @@
     for d in depends:
         # when local status is 'off' and dependency is provider,
         # use only provider value
-        d_offset = d.find('(')
+        d_offset = d.find("(")
 
         if d_offset == -1:
-            d_p = ''
+            d_p = ""
         else:
             d_p = re.escape(d[d_offset:])
 
@@ -434,7 +427,7 @@
             if match:
                 # if we also matched the local status, mark dependency
                 # as empty and put it back into depends file
-                if match.group('lstatus') == depends[d]:
+                if match.group("lstatus") == depends[d]:
                     depends[d] = None
 
                     sys.stdout.write(line)
@@ -457,7 +450,7 @@
 
     if depends_new:
         try:
-            with open(sorcery_depends, 'a') as fl:
+            with open(sorcery_depends, "a") as fl:
                 for k in depends_new:
                     fl.write(f"{spell}:{k}:{depends[k]}:optional::\n")
         except IOError:
@@ -475,17 +468,17 @@
 
 
 def manage_grimoires(module):
-    """ Add or remove grimoires.
-
-    """
+    """Add or remove grimoires."""
 
     params = module.params
-    grimoires = params['name']
-    url = params['repository']
+    grimoires = params["name"]
+    url = params["repository"]
 
     codex = codex_list(module, True)
 
-    if url == '*':
-        if params['state'] in ('present', 'latest', 'absent'):
-            if params['state'] == 'absent':
+    if url == "*":
+        if params["state"] in ("present", "latest", "absent"):
+            if params["state"] == "absent":
                 action = "remove"
                 todo = set(grimoires) & set(codex)
             else:
@@ -509,7 +502,7 @@
         else:
             module.fail_json(msg="unsupported operation on '*' repository value")
     else:
-        if params['state'] in ('present', 'latest'):
+        if params["state"] in ("present", "latest"):
             if len(grimoires) > 1:
                 module.fail_json(msg="using multiple items with repository is invalid")
 
@@ -534,7 +527,7 @@
 
 
 def manage_spells(module):
-    """ Cast or dispel spells.
+    """Cast or dispel spells.
 
     This manages the whole system ('*'), list or a single spell. Command 'cast'
     is used to install or rebuild spells, while 'dispel' takes care of theirs
@@ -543,12 +536,12 @@ def manage_spells(module):
     """
 
     params = module.params
-    spells = params['name']
+    spells = params["name"]
 
     sorcery_queue = os.path.join(SORCERY_LOG_DIR, "queue/install")
 
-    if spells == '*':
-        if params['state'] == 'latest':
+    if spells == "*":
+        if params["state"] == "latest":
             # back up original queue
             try:
                 os.rename(sorcery_queue, f"{sorcery_queue}.backup")
@@ -556,7 +549,7 @@
                 module.fail_json(msg="failed to backup the update queue")
 
             # see update_codex()
-            module.run_command_environ_update.update(dict(SILENT='1'))
+            module.run_command_environ_update.update(dict(SILENT="1"))
 
             cmd_sorcery = f"{SORCERY['sorcery']} queue"
 
@@ -589,7 +582,7 @@
                 return (True, "successfully updated the system")
             else:
                 return (False, "the system is already up to date")
-        elif params['state'] == 'rebuild':
+        elif params["state"] == "rebuild":
             if module.check_mode:
                 return (True, "would have rebuilt the system")
 
@@ -604,7 +597,7 @@
         else:
            module.fail_json(msg="unsupported operation on '*' name value")
     else:
-        if params['state'] in ('present', 'latest', 'rebuild', 'absent'):
+        if params["state"] in ("present", "latest", "rebuild", "absent"):
             # extract versions from the 'gaze' command
             cmd_gaze = f"{SORCERY['gaze']} -q version {' '.join(spells)}"
 
@@ -625,9 +618,9 @@
 
                 cast = False
 
-                if params['state'] == 'present':
+                if params["state"] == "present":
                     # spell is not installed..
-                    if match.group('inst_ver') == '-':
+                    if match.group("inst_ver") == "-":
                         # ..so set up depends reqs for it
                         match_depends(module)
 
@@ -637,9 +630,9 @@
                         # ..but does not conform depends reqs
                         if not match_depends(module):
                             cast = True
-                elif params['state'] == 'latest':
+                elif params["state"] == "latest":
                     # grimoire and installed versions do not match..
- if match.group('grim_ver') != match.group('inst_ver'): + if match.group("grim_ver") != match.group("inst_ver"): # ..so check for depends reqs first and set them up match_depends(module) @@ -649,15 +642,15 @@ def manage_spells(module): # ..but the spell does not conform depends reqs if not match_depends(module): cast = True - elif params['state'] == 'rebuild': + elif params["state"] == "rebuild": cast = True # 'absent' else: - if match.group('inst_ver') != '-': - dispel_queue.append(match.group('spell')) + if match.group("inst_ver") != "-": + dispel_queue.append(match.group("spell")) if cast: - cast_queue.append(match.group('spell')) + cast_queue.append(match.group("spell")) if cast_queue: if module.check_mode: @@ -671,7 +664,7 @@ def manage_spells(module): module.fail_json(msg=f"failed to cast spell(s): {stdout}") return (True, "successfully cast spell(s)") - elif params['state'] != 'absent': + elif params["state"] != "absent": return (False, "spell(s) are already cast") if dispel_queue: @@ -693,17 +686,16 @@ def manage_spells(module): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(aliases=['spell', 'grimoire'], type='list', elements='str'), - repository=dict(type='str'), - state=dict(default='present', choices=['present', 'latest', - 'absent', 'cast', 'dispelled', 'rebuild']), + name=dict(aliases=["spell", "grimoire"], type="list", elements="str"), + repository=dict(type="str"), + state=dict(default="present", choices=["present", "latest", "absent", "cast", "dispelled", "rebuild"]), depends=dict(), - update=dict(default=False, type='bool'), - update_cache=dict(default=False, aliases=['update_codex'], type='bool'), - cache_valid_time=dict(default=0, type='int') + update=dict(default=False, type="bool"), + update_cache=dict(default=False, aliases=["update_codex"], type="bool"), + cache_valid_time=dict(default=0, type="int"), ), - required_one_of=[['name', 'update', 'update_cache']], - supports_check_mode=True + required_one_of=[["name", "update", "update_cache"]], + supports_check_mode=True, ) if os.geteuid() != 0: @@ -713,34 +705,29 @@ def main(): SORCERY[c] = module.get_bin_path(c, True) # prepare environment: run sorcery commands without asking questions - module.run_command_environ_update = dict(PROMPT_DELAY='0', VOYEUR='0') + module.run_command_environ_update = dict(PROMPT_DELAY="0", VOYEUR="0") params = module.params # normalize 'state' parameter - if params['state'] in ('present', 'cast'): - params['state'] = 'present' - elif params['state'] in ('absent', 'dispelled'): - params['state'] = 'absent' + if params["state"] in ("present", "cast"): + params["state"] = "present" + elif params["state"] in ("absent", "dispelled"): + params["state"] = "absent" - changed = { - 'sorcery': (False, NA), - 'grimoires': (False, NA), - 'codex': (False, NA), - 'spells': (False, NA) - } + changed = {"sorcery": (False, NA), "grimoires": (False, NA), "codex": (False, NA), "spells": (False, NA)} - if params['update']: - changed['sorcery'] = update_sorcery(module) + if params["update"]: + changed["sorcery"] = update_sorcery(module) - if params['name'] and params['repository']: - changed['grimoires'] = manage_grimoires(module) + if params["name"] and params["repository"]: + changed["grimoires"] = manage_grimoires(module) - if params['update_cache']: - changed['codex'] = update_codex(module) + if params["update_cache"]: + changed["codex"] = update_codex(module) - if params['name'] and not params['repository']: - changed['spells'] = manage_spells(module) + if params["name"] and not 
params["repository"]: + changed["spells"] = manage_spells(module) if any(x[0] for x in changed.values()): state_msg = "state changed" @@ -752,5 +739,5 @@ def main(): module.exit_json(changed=state_changed, msg=f"{state_msg}: {'; '.join((x[1] for x in changed.values()))}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/spectrum_device.py b/plugins/modules/spectrum_device.py index 35ad7f448a0..6e8861ca34d 100644 --- a/plugins/modules/spectrum_device.py +++ b/plugins/modules/spectrum_device.py @@ -133,35 +133,32 @@ def request(resource, xml=None, method=None): - headers = { - "Content-Type": "application/xml", - "Accept": "application/xml" - } + headers = {"Content-Type": "application/xml", "Accept": "application/xml"} url = f"{module.params['oneclick_url']}/spectrum/restful/{resource}" response, info = fetch_url(module, url, data=xml, method=method, headers=headers, timeout=45) - if info['status'] == 401: + if info["status"] == 401: module.fail_json(msg="failed to authenticate to Oneclick server") - if info['status'] not in (200, 201, 204): - module.fail_json(msg=info['msg']) + if info["status"] not in (200, 201, 204): + module.fail_json(msg=info["msg"]) return response.read() def post(resource, xml=None): - return request(resource, xml=xml, method='POST') + return request(resource, xml=xml, method="POST") def delete(resource): - return request(resource, xml=None, method='DELETE') + return request(resource, xml=None, method="DELETE") def get_ip(): try: - device_ip = gethostbyname(module.params.get('device')) + device_ip = gethostbyname(module.params.get("device")) except gaierror: module.fail_json(msg=f"failed to resolve device ip address for '{module.params.get('device')}'") @@ -170,7 +167,7 @@ def get_ip(): def get_device(device_ip): """Query OneClick for the device using the IP Address""" - resource = '/models' + resource = "/models" landscape_min = f"0x{int(module.params.get('landscape'), 16):x}" landscape_max = f"0x{int(module.params.get('landscape'), 16) + 0x100000:x}" @@ -216,29 +213,26 @@ def get_device(device_ip): root = ET.fromstring(result) - if root.get('total-models') == '0': + if root.get("total-models") == "0": return None - namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response') + namespace = dict(ca="http://www.ca.com/spectrum/restful/schema/response") # get the first device - model = root.find('ca:model-responses', namespace).find('ca:model', namespace) + model = root.find("ca:model-responses", namespace).find("ca:model", namespace) - if model.get('error'): + if model.get("error"): module.fail_json(msg=f"error checking device: {model.get('error')}") # get the attributes - model_handle = model.get('mh') + model_handle = model.get("mh") model_address = model.find('./*[@id="0x12d7f"]').text # derive the landscape handler from the model handler of the device model_landscape = f"0x{int(model_handle, 16) // 0x100000 * 0x100000:x}" - device = dict( - model_handle=model_handle, - address=model_address, - landscape=model_landscape) + device = dict(model_handle=model_handle, address=model_address, landscape=model_landscape) return device @@ -251,28 +245,25 @@ def add_device(): module.exit_json(changed=False, device=device) if module.check_mode: - device = dict( - model_handle=None, - address=device_ip, - landscape=f"0x{int(module.params.get('landscape'), 16):x}") + device = dict(model_handle=None, address=device_ip, landscape=f"0x{int(module.params.get('landscape'), 16):x}") module.exit_json(changed=True, device=device) 
resource = f"model?ipaddress={device_ip}&commstring={module.params.get('community')}" resource += f"&landscapeid={module.params.get('landscape')}" - if module.params.get('agentport', None): + if module.params.get("agentport", None): resource += f"&agentport={module.params.get('agentport', 161)}" result = post(resource) root = ET.fromstring(result) - if root.get('error') != 'Success': - module.fail_json(msg=root.get('error-message')) + if root.get("error") != "Success": + module.fail_json(msg=root.get("error-message")) - namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response') - model = root.find('ca:model', namespace) + namespace = dict(ca="http://www.ca.com/spectrum/restful/schema/response") + model = root.find("ca:model", namespace) - model_handle = model.get('mh') + model_handle = model.get("mh") model_landscape = f"0x{int(model_handle, 16) // 0x100000 * 0x100000:x}" device = dict( @@ -299,11 +290,11 @@ def remove_device(): root = ET.fromstring(result) - namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response') - error = root.find('ca:error', namespace).text + namespace = dict(ca="http://www.ca.com/spectrum/restful/schema/response") + error = root.find("ca:error", namespace).text - if error != 'Success': - error_message = root.find('ca:error-message', namespace).text + if error != "Success": + error_message = root.find("ca:error-message", namespace).text module.fail_json(msg=f"{error} {error_message}") module.exit_json(changed=True) @@ -313,26 +304,26 @@ def main(): global module module = AnsibleModule( argument_spec=dict( - device=dict(required=True, aliases=['host', 'name']), + device=dict(required=True, aliases=["host", "name"]), landscape=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - community=dict(required=True, no_log=True), # @TODO remove the 'required', given the required_if ? - agentport=dict(type='int', default=161), - url=dict(required=True, aliases=['oneclick_url']), - url_username=dict(required=True, aliases=['oneclick_user']), - url_password=dict(required=True, no_log=True, aliases=['oneclick_password']), - use_proxy=dict(type='bool', default=True), - validate_certs=dict(type='bool', default=True), + state=dict(choices=["present", "absent"], default="present"), + community=dict(required=True, no_log=True), # @TODO remove the 'required', given the required_if ? 
+            agentport=dict(type="int", default=161),
+            url=dict(required=True, aliases=["oneclick_url"]),
+            url_username=dict(required=True, aliases=["oneclick_user"]),
+            url_password=dict(required=True, no_log=True, aliases=["oneclick_password"]),
+            use_proxy=dict(type="bool", default=True),
+            validate_certs=dict(type="bool", default=True),
         ),
-        required_if=[('state', 'present', ['community'])],
-        supports_check_mode=True
+        required_if=[("state", "present", ["community"])],
+        supports_check_mode=True,
     )
 
-    if module.params.get('state') == 'present':
+    if module.params.get("state") == "present":
         add_device()
     else:
         remove_device()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/plugins/modules/spectrum_model_attrs.py b/plugins/modules/spectrum_model_attrs.py
index 1b935d8a95b..e6234f7da54 100644
--- a/plugins/modules/spectrum_model_attrs.py
+++ b/plugins/modules/spectrum_model_attrs.py
@@ -153,45 +153,66 @@ class spectrum_model_attrs:
 
     def __init__(self, module):
         self.module = module
-        self.url = module.params['url']
+        self.url = module.params["url"]
 
         # If the user did not define a full path to the restul space in url:
         # params, add what we believe it to be.
-        if not re.search('\\/.+', self.url.split('://')[1]):
+        if not re.search("\\/.+", self.url.split("://")[1]):
             self.url = f"{self.url.rstrip('/')}/spectrum/restful"
 
         # Align these with what is defined in OneClick's UI under:
         # Locator -> Devices -> By Model Name -> <model name> ->
         # Attributes tab.
-        self.attr_map = dict(App_Manufacturer=hex(0x230683),
-                             CollectionsModelNameString=hex(0x12adb),
-                             Condition=hex(0x1000a),
-                             Criticality=hex(0x1290c),
-                             DeviceType=hex(0x23000e),
-                             isManaged=hex(0x1295d),
-                             Model_Class=hex(0x11ee8),
-                             Model_Handle=hex(0x129fa),
-                             Model_Name=hex(0x1006e),
-                             Modeltype_Handle=hex(0x10001),
-                             Modeltype_Name=hex(0x10000),
-                             Network_Address=hex(0x12d7f),
-                             Notes=hex(0x11564),
-                             ServiceDesk_Asset_ID=hex(0x12db9),
-                             TopologyModelNameString=hex(0x129e7),
-                             sysDescr=hex(0x10052),
-                             sysName=hex(0x10b5b),
-                             Vendor_Name=hex(0x11570),
-                             Description=hex(0x230017))
+        self.attr_map = dict(
+            App_Manufacturer=hex(0x230683),
+            CollectionsModelNameString=hex(0x12ADB),
+            Condition=hex(0x1000A),
+            Criticality=hex(0x1290C),
+            DeviceType=hex(0x23000E),
+            isManaged=hex(0x1295D),
+            Model_Class=hex(0x11EE8),
+            Model_Handle=hex(0x129FA),
+            Model_Name=hex(0x1006E),
+            Modeltype_Handle=hex(0x10001),
+            Modeltype_Name=hex(0x10000),
+            Network_Address=hex(0x12D7F),
+            Notes=hex(0x11564),
+            ServiceDesk_Asset_ID=hex(0x12DB9),
+            TopologyModelNameString=hex(0x129E7),
+            sysDescr=hex(0x10052),
+            sysName=hex(0x10B5B),
+            Vendor_Name=hex(0x11570),
+            Description=hex(0x230017),
+        )
 
         self.search_qualifiers = [
-            "and", "or", "not", "greater-than", "greater-than-or-equals",
-            "less-than", "less-than-or-equals", "equals", "equals-ignore-case",
-            "does-not-equal", "does-not-equal-ignore-case", "has-prefix",
-            "does-not-have-prefix", "has-prefix-ignore-case",
-            "does-not-have-prefix-ignore-case", "has-substring",
-            "does-not-have-substring", "has-substring-ignore-case",
-            "does-not-have-substring-ignore-case", "has-suffix",
-            "does-not-have-suffix", "has-suffix-ignore-case",
-            "does-not-have-suffix-ignore-case", "has-pcre",
-            "has-pcre-ignore-case", "has-wildcard", "has-wildcard-ignore-case",
-            "is-derived-from", "not-is-derived-from"]
+            "and",
+            "or",
+            "not",
+            "greater-than",
+            "greater-than-or-equals",
+            "less-than",
+            "less-than-or-equals",
+            "equals",
+            "equals-ignore-case",
+            "does-not-equal",
+            "does-not-equal-ignore-case",
+            "has-prefix",
+            "does-not-have-prefix",
+            "has-prefix-ignore-case",
+            "does-not-have-prefix-ignore-case",
+            "has-substring",
+            "does-not-have-substring",
+            "has-substring-ignore-case",
+            "does-not-have-substring-ignore-case",
+            "has-suffix",
+            "does-not-have-suffix",
+            "has-suffix-ignore-case",
+            "does-not-have-suffix-ignore-case",
+            "has-pcre",
+            "has-pcre-ignore-case",
+            "has-wildcard",
+            "has-wildcard-ignore-case",
+            "is-derived-from",
+            "not-is-derived-from",
+        ]
 
         self.resp_namespace = dict(ca="http://www.ca.com/spectrum/restful/schema/response")
 
@@ -266,23 +287,26 @@ def update_model(self, model_handle, attrs):
                 # None values should be converted to empty strings
                 val = ""
             val = self.urlencode(str(val))
-            if not update_url.endswith('?'):
+            if not update_url.endswith("?"):
                 update_url += "&"
             update_url += f"attr={self.attr_id(name) or name}&val={val}"
 
         # POST to /model to update the attributes, or fail.
-        resp, info = fetch_url(self.module, update_url, method="PUT",
-                               headers={"Content-Type": "application/json",
-                                        "Accept": "application/json"},
-                               use_proxy=self.module.params['use_proxy'])
+        resp, info = fetch_url(
+            self.module,
+            update_url,
+            method="PUT",
+            headers={"Content-Type": "application/json", "Accept": "application/json"},
+            use_proxy=self.module.params["use_proxy"],
+        )
         status_code = info["status"]
         if status_code >= 400:
-            body = info['body']
+            body = info["body"]
         else:
             body = "" if resp is None else resp.read()
         if status_code != 200:
-            self.result['msg'] = f"HTTP PUT error {status_code}: {update_url}: {body}"
+            self.result["msg"] = f"HTTP PUT error {status_code}: {update_url}: {body}"
             self.module.fail_json(**self.result)
 
         # Load and parse the JSON response and either fail or set results.
@@ -293,17 +317,17 @@ def update_model(self, model_handle, attrs):
         Example failure response:
             {'model-update-response-list': {'model-responses': {'model': {'@error': 'PartialFailure', '@mh': '0x1010e76', 'attribute': {'@error-message': 'brn0vlappua001: You do not have permission to set attribute Network_Address for this model.', '@error': 'Error', '@id': '0x12d7f'}}}}}
         """  # noqa
-        model_resp = json_resp['model-update-response-list']['model-responses']['model']
-        if model_resp['@error'] != "Success":
+        model_resp = json_resp["model-update-response-list"]["model-responses"]["model"]
+        if model_resp["@error"] != "Success":
             # I'm not 100% confident on the expected failure structure so just
             # dump all of ['attribute'].
-            self.result['msg'] = str(model_resp['attribute'])
+            self.result["msg"] = str(model_resp["attribute"])
             self.module.fail_json(**self.result)
 
         # Should be OK if we get to here, set results.
-        self.result['msg'] = self.success_msg
-        self.result['changed_attrs'].update(attrs)
-        self.result['changed'] = True
+        self.result["msg"] = self.success_msg
+        self.result["changed_attrs"].update(attrs)
+        self.result["changed"] = True
 
     def find_model(self, search_criteria, ret_attrs=None):
         """
@@ -319,7 +343,7 @@ def find_model(self, search_criteria, ret_attrs=None):
 
         # If no return attributes were asked for, return Model_Handle.
         if ret_attrs is None:
-            ret_attrs = ['Model_Handle']
+            ret_attrs = ["Model_Handle"]
 
         # Set the XML tags. If no hex ID
         # is found for the name, assume it is already in hex. {name: hex ID}
@@ -347,44 +371,52 @@ def find_model(self, search_criteria, ret_attrs=None):
         # POST to /models and fail on errors.
        url = self.build_url("/models")
-        resp, info = fetch_url(self.module, url, data=xml, method="POST",
-                               use_proxy=self.module.params['use_proxy'],
-                               headers={"Content-Type": "application/xml",
-                                        "Accept": "application/xml"})
+        resp, info = fetch_url(
+            self.module,
+            url,
+            data=xml,
+            method="POST",
+            use_proxy=self.module.params["use_proxy"],
+            headers={"Content-Type": "application/xml", "Accept": "application/xml"},
+        )
        status_code = info["status"]
        if status_code >= 400:
-            body = info['body']
+            body = info["body"]
        else:
            body = "" if resp is None else resp.read()
        if status_code != 200:
-            self.result['msg'] = f"HTTP POST error {status_code}: {url}: {body}"
+            self.result["msg"] = f"HTTP POST error {status_code}: {url}: {body}"
            self.module.fail_json(**self.result)
        # Parse through the XML response and fail on any detected errors.
        root = ET.fromstring(body)
-        total_models = int(root.attrib['total-models'])
-        error = root.attrib['error']
-        model_responses = root.find('ca:model-responses', self.resp_namespace)
+        total_models = int(root.attrib["total-models"])
+        error = root.attrib["error"]
+        model_responses = root.find("ca:model-responses", self.resp_namespace)
        if total_models < 1:
-            self.result['msg'] = f"No models found matching search criteria `{search_criteria}'"
+            self.result["msg"] = f"No models found matching search criteria `{search_criteria}'"
            self.module.fail_json(**self.result)
        elif total_models > 1:
-            self.result['msg'] = f"More than one model found ({total_models}): `{ET.tostring(model_responses, encoding='unicode')}'"
+            self.result["msg"] = (
+                f"More than one model found ({total_models}): `{ET.tostring(model_responses, encoding='unicode')}'"
+            )
            self.module.fail_json(**self.result)
        if error != "EndOfResults":
-            self.result['msg'] = f"Unexpected search response `{error}': {ET.tostring(model_responses, encoding='unicode')}"
+            self.result["msg"] = (
+                f"Unexpected search response `{error}': {ET.tostring(model_responses, encoding='unicode')}"
+            )
            self.module.fail_json(**self.result)
-        model = model_responses.find('ca:model', self.resp_namespace)
-        attrs = model.findall('ca:attribute', self.resp_namespace)
+        model = model_responses.find("ca:model", self.resp_namespace)
+        attrs = model.findall("ca:attribute", self.resp_namespace)
        if not attrs:
-            self.result['msg'] = "No attributes returned."
+            self.result["msg"] = "No attributes returned."
            self.module.fail_json(**self.result)
        # XML response should be successful. Iterate and set each returned
        # attribute ID/name and value for return.
        ret = dict()
        for attr in attrs:
-            attr_id = attr.get('id')
+            attr_id = attr.get("id")
            attr_name = self.attr_name(attr_id)
            # Note: all values except empty strings (None) are strings only!
            attr_val = attr.text
@@ -410,7 +442,7 @@ def find_model_by_name_type(self, mname, mtype, ret_attrs=None):
        # If no return attributes were asked for, return Model_Handle.
        if ret_attrs is None:
-            ret_attrs = ['Model_Handle']
+            ret_attrs = ["Model_Handle"]
        """This is basically as follows:
@@ -428,58 +460,51 @@ def find_model_by_name_type(self, mname, mtype, ret_attrs=None):
        """
        # Parent filter tag
-        filtered_models = ET.Element('filtered-models')
+        filtered_models = ET.Element("filtered-models")
        # Logically and
-        _and = ET.SubElement(filtered_models, 'and')
+        _and = ET.SubElement(filtered_models, "and")
        # Model Name
-        MN_equals = ET.SubElement(_and, 'equals')
-        Model_Name = ET.SubElement(MN_equals, 'attribute',
-                                   {'id': self.attr_map['Model_Name']})
-        MN_value = ET.SubElement(Model_Name, 'value')
+        MN_equals = ET.SubElement(_and, "equals")
+        Model_Name = ET.SubElement(MN_equals, "attribute", {"id": self.attr_map["Model_Name"]})
+        MN_value = ET.SubElement(Model_Name, "value")
        MN_value.text = mname
        # Model Type Name
-        MTN_equals = ET.SubElement(_and, 'equals')
-        Modeltype_Name = ET.SubElement(MTN_equals, 'attribute',
-                                       {'id': self.attr_map['Modeltype_Name']})
-        MTN_value = ET.SubElement(Modeltype_Name, 'value')
+        MTN_equals = ET.SubElement(_and, "equals")
+        Modeltype_Name = ET.SubElement(MTN_equals, "attribute", {"id": self.attr_map["Modeltype_Name"]})
+        MTN_value = ET.SubElement(Modeltype_Name, "value")
        MTN_value.text = mtype
-        return self.find_model(ET.tostring(filtered_models,
-                                           encoding='unicode'),
-                               ret_attrs)
+        return self.find_model(ET.tostring(filtered_models, encoding="unicode"), ret_attrs)
    def ensure_model_attrs(self):
-
        # Get a list of all requested attribute names/IDs plus Model_Handle and
        # use them to query the values currently set. Store finding in a
        # dictionary.
        req_attrs = []
-        for attr in self.module.params['attributes']:
-            req_attrs.append(attr['name'])
-        if 'Model_Handle' not in req_attrs:
-            req_attrs.append('Model_Handle')
+        for attr in self.module.params["attributes"]:
+            req_attrs.append(attr["name"])
+        if "Model_Handle" not in req_attrs:
+            req_attrs.append("Model_Handle")
        # Survey attributes currently set and store in a dict.
-        cur_attrs = self.find_model_by_name_type(self.module.params['name'],
-                                                 self.module.params['type'],
-                                                 req_attrs)
+        cur_attrs = self.find_model_by_name_type(self.module.params["name"], self.module.params["type"], req_attrs)
        # Iterate through the requested attributes names/IDs values pair and
        # compare with those currently set. If different, attempt to change.
        Model_Handle = cur_attrs.pop("Model_Handle")
-        for attr in self.module.params['attributes']:
-            req_name = attr['name']
-            req_val = attr['value']
+        for attr in self.module.params["attributes"]:
+            req_name = attr["name"]
+            req_val = attr["value"]
            if req_val == "":
                # The API will return None on empty string
                req_val = None
            if cur_attrs[req_name] != req_val:
                if self.module.check_mode:
-                    self.result['changed_attrs'][req_name] = req_val
-                    self.result['msg'] = self.success_msg
-                    self.result['changed'] = True
+                    self.result["changed_attrs"][req_name] = req_val
+                    self.result["msg"] = self.success_msg
+                    self.result["changed"] = True
                    continue
                resp = self.update_model(Model_Handle, {req_name: req_val})
@@ -488,21 +513,19 @@
 def run_module():
    argument_spec = dict(
-        url=dict(type='str', required=True),
-        url_username=dict(type='str', required=True, aliases=['username']),
-        url_password=dict(type='str', required=True, aliases=['password'],
-                          no_log=True),
-        validate_certs=dict(type='bool', default=True),
-        use_proxy=dict(type='bool', default=True),
-        name=dict(type='str', required=True),
-        type=dict(type='str', required=True),
-        attributes=dict(type='list',
-                        required=True,
-                        elements='dict',
-                        options=dict(
-                            name=dict(type='str', required=True),
-                            value=dict(type='str', required=True)
-                        )),
+        url=dict(type="str", required=True),
+        url_username=dict(type="str", required=True, aliases=["username"]),
+        url_password=dict(type="str", required=True, aliases=["password"], no_log=True),
+        validate_certs=dict(type="bool", default=True),
+        use_proxy=dict(type="bool", default=True),
+        name=dict(type="str", required=True),
+        type=dict(type="str", required=True),
+        attributes=dict(
+            type="list",
+            required=True,
+            elements="dict",
+            options=dict(name=dict(type="str", required=True), value=dict(type="str", required=True)),
+        ),
    )
    module = AnsibleModule(
        supports_check_mode=True,
diff --git a/plugins/modules/spotinst_aws_elastigroup.py b/plugins/modules/spotinst_aws_elastigroup.py
index 36363165671..3b4e5b0918a 100644
--- a/plugins/modules/spotinst_aws_elastigroup.py
+++ b/plugins/modules/spotinst_aws_elastigroup.py
@@ -726,163 +726,150 @@
 except ImportError:
    pass
-eni_fields = ('description',
-              'device_index',
-              'secondary_private_ip_address_count',
-              'associate_public_ip_address',
-              'delete_on_termination',
-              'groups',
-              'network_interface_id',
-              'private_ip_address',
-              'subnet_id',
-              'associate_ipv6_address')
-private_ip_fields = ('private_ip_address',
-                     'primary')
-capacity_fields = (dict(ansible_field_name='min_size',
-                        spotinst_field_name='minimum'),
-                   dict(ansible_field_name='max_size',
-                        spotinst_field_name='maximum'),
-                   'target',
-                   'unit')
-lspec_fields = ('user_data',
-                'key_pair',
-                'tenancy',
-                'shutdown_script',
-                'monitoring',
-                'ebs_optimized',
-                'image_id',
-                'health_check_type',
-                'health_check_grace_period',
-                'health_check_unhealthy_duration_before_replacement',
-                'security_group_ids')
-iam_fields = (dict(ansible_field_name='iam_role_name',
-                   spotinst_field_name='name'),
-              dict(ansible_field_name='iam_role_arn',
-                   spotinst_field_name='arn'))
-scheduled_task_fields = ('adjustment',
-                         'adjustment_percentage',
-                         'batch_size_percentage',
-                         'cron_expression',
-                         'frequency',
-                         'grace_period',
-                         'task_type',
-                         'is_enabled',
-                         'scale_target_capacity',
-                         'scale_min_capacity',
-                         'scale_max_capacity')
-scaling_policy_fields = ('policy_name',
-                         'namespace',
-                         'metric_name',
-                         'dimensions',
-                         'statistic',
-                         'evaluation_periods',
-                         'period',
-                         'threshold',
-                         'cooldown',
-                         'unit',
-                         'operator')
-tracking_policy_fields = ('policy_name',
-                          'namespace',
-                          'source',
-                          'metric_name',
-                          'statistic',
-                          'unit',
-                          'cooldown',
-                          'target',
-                          'threshold')
-action_fields = (dict(ansible_field_name='action_type',
-                      spotinst_field_name='type'),
-                 'adjustment',
-                 'min_target_capacity',
-                 'max_target_capacity',
-                 'target',
-                 'minimum',
-                 'maximum')
-signal_fields = ('name',
-                 'timeout')
-multai_lb_fields = ('balancer_id',
-                    'project_id',
-                    'target_set_id',
-                    'az_awareness',
-                    'auto_weight')
-persistence_fields = ('should_persist_root_device',
-                      'should_persist_block_devices',
-                      'should_persist_private_ip')
-strategy_fields = ('risk',
-                   'utilize_reserved_instances',
-                   'fallback_to_od',
-                   'on_demand_count',
-                   'availability_vs_cost',
-                   'draining_timeout',
-                   'spin_up_time',
-                   'lifetime_period')
-ebs_fields = ('delete_on_termination',
-              'encrypted',
-              'iops',
-              'snapshot_id',
-              'volume_type',
-              'volume_size')
-bdm_fields = ('device_name',
-              'virtual_name',
-              'no_device')
-kubernetes_fields = ('api_server',
-                     'token')
-right_scale_fields = ('account_id',
-                      'refresh_token')
-rancher_fields = ('access_key',
-                  'secret_key',
-                  'master_host',
-                  'version')
-chef_fields = ('chef_server',
-               'organization',
-               'user',
-               'pem_key',
-               'chef_version')
-az_fields = ('name',
-             'subnet_id',
-             'placement_group_name')
-opsworks_fields = ('layer_id',)
-scaling_strategy_fields = ('terminate_at_end_of_billing_hour',)
-mesosphere_fields = ('api_server',)
-ecs_fields = ('cluster_name',)
-multai_fields = ('multai_token',)
+eni_fields = (
+    "description",
+    "device_index",
+    "secondary_private_ip_address_count",
+    "associate_public_ip_address",
+    "delete_on_termination",
+    "groups",
+    "network_interface_id",
+    "private_ip_address",
+    "subnet_id",
+    "associate_ipv6_address",
+)
+private_ip_fields = ("private_ip_address", "primary")
+capacity_fields = (
+    dict(ansible_field_name="min_size", spotinst_field_name="minimum"),
+    dict(ansible_field_name="max_size", spotinst_field_name="maximum"),
+    "target",
+    "unit",
+)
+lspec_fields = (
+    "user_data",
+    "key_pair",
+    "tenancy",
+    "shutdown_script",
+    "monitoring",
+    "ebs_optimized",
+    "image_id",
+    "health_check_type",
+    "health_check_grace_period",
+    "health_check_unhealthy_duration_before_replacement",
+    "security_group_ids",
+)
+iam_fields = (
+    dict(ansible_field_name="iam_role_name", spotinst_field_name="name"),
+    dict(ansible_field_name="iam_role_arn", spotinst_field_name="arn"),
+)
+scheduled_task_fields = (
+    "adjustment",
+    "adjustment_percentage",
+    "batch_size_percentage",
+    "cron_expression",
+    "frequency",
+    "grace_period",
+    "task_type",
+    "is_enabled",
+    "scale_target_capacity",
+    "scale_min_capacity",
+    "scale_max_capacity",
+)
+scaling_policy_fields = (
+    "policy_name",
+    "namespace",
+    "metric_name",
+    "dimensions",
+    "statistic",
+    "evaluation_periods",
+    "period",
+    "threshold",
+    "cooldown",
+    "unit",
+    "operator",
+)
+tracking_policy_fields = (
+    "policy_name",
+    "namespace",
+    "source",
+    "metric_name",
+    "statistic",
+    "unit",
+    "cooldown",
+    "target",
+    "threshold",
+)
+action_fields = (
+    dict(ansible_field_name="action_type", spotinst_field_name="type"),
+    "adjustment",
+    "min_target_capacity",
+    "max_target_capacity",
+    "target",
+    "minimum",
+    "maximum",
+)
+signal_fields = ("name", "timeout")
+multai_lb_fields = ("balancer_id", "project_id", "target_set_id", "az_awareness", "auto_weight")
+persistence_fields = ("should_persist_root_device", "should_persist_block_devices", "should_persist_private_ip")
+strategy_fields = (
+    "risk",
+    "utilize_reserved_instances",
+    "fallback_to_od",
+    "on_demand_count",
+    "availability_vs_cost",
+    "draining_timeout",
+    "spin_up_time",
+    "lifetime_period",
+)
+ebs_fields = ("delete_on_termination", "encrypted", "iops", "snapshot_id", "volume_type", "volume_size")
+bdm_fields = ("device_name", "virtual_name", "no_device")
+kubernetes_fields = ("api_server", "token")
+right_scale_fields = ("account_id", "refresh_token")
+rancher_fields = ("access_key", "secret_key", "master_host", "version")
+chef_fields = ("chef_server", "organization", "user", "pem_key", "chef_version")
+az_fields = ("name", "subnet_id", "placement_group_name")
+opsworks_fields = ("layer_id",)
+scaling_strategy_fields = ("terminate_at_end_of_billing_hour",)
+mesosphere_fields = ("api_server",)
+ecs_fields = ("cluster_name",)
+multai_fields = ("multai_token",)
 def handle_elastigroup(client, module):
    has_changed = False
    group_id = None
-    message = 'None'
+    message = "None"
-    name = module.params.get('name')
-    state = module.params.get('state')
-    uniqueness_by = module.params.get('uniqueness_by')
-    external_group_id = module.params.get('id')
+    name = module.params.get("name")
+    state = module.params.get("state")
+    uniqueness_by = module.params.get("uniqueness_by")
+    external_group_id = module.params.get("id")
-    if uniqueness_by == 'id':
+    if uniqueness_by == "id":
        if external_group_id is None:
            should_create = True
        else:
@@ -893,40 +880,40 @@ def handle_elastigroup(client, module):
        should_create, group_id = find_group_with_same_name(groups, name)
    if should_create is True:
-        if state == 'present':
+        if state == "present":
            eg = expand_elastigroup(module, is_update=False)
            module.debug(f" [INFO] {message}\n")
            group = client.create_elastigroup(group=eg)
-            group_id = group['id']
-            message = 'Created group Successfully.'
+            group_id = group["id"]
+            message = "Created group Successfully."
            has_changed = True
-        elif state == 'absent':
-            message = 'Cannot delete non-existent group.'
+        elif state == "absent":
+            message = "Cannot delete non-existent group."
            has_changed = False
    else:
        eg = expand_elastigroup(module, is_update=True)
-        if state == 'present':
+        if state == "present":
            group = client.update_elastigroup(group_update=eg, group_id=group_id)
-            message = 'Updated group successfully.'
+            message = "Updated group successfully."
            try:
-                roll_config = module.params.get('roll_config')
+                roll_config = module.params.get("roll_config")
                if roll_config:
                    eg_roll = spotinst.aws_elastigroup.Roll(
-                        batch_size_percentage=roll_config.get('batch_size_percentage'),
-                        grace_period=roll_config.get('grace_period'),
-                        health_check_type=roll_config.get('health_check_type')
+                        batch_size_percentage=roll_config.get("batch_size_percentage"),
+                        grace_period=roll_config.get("grace_period"),
+                        health_check_type=roll_config.get("health_check_type"),
                    )
                    roll_response = client.roll_group(group_roll=eg_roll, group_id=group_id)
-                    message = 'Updated and started rolling the group successfully.'
+                    message = "Updated and started rolling the group successfully."
            except SpotinstClientException as exc:
                message = f"Updated group successfully, but failed to perform roll. Error:{exc}"
            has_changed = True
-        elif state == 'absent':
+        elif state == "absent":
            try:
                client.delete_elastigroup(group_id=group_id)
            except SpotinstClientException as exc:
@@ -935,28 +922,27 @@
                else:
                    module.fail_json(msg=f"Error while attempting to delete group : {exc.message}")
-            message = 'Deleted group successfully.'
+            message = "Deleted group successfully."
            has_changed = True
    return group_id, message, has_changed
 def retrieve_group_instances(client, module, group_id):
-    wait_timeout = module.params.get('wait_timeout')
-    wait_for_instances = module.params.get('wait_for_instances')
+    wait_timeout = module.params.get("wait_timeout")
+    wait_for_instances = module.params.get("wait_for_instances")
-    health_check_type = module.params.get('health_check_type')
+    health_check_type = module.params.get("health_check_type")
    if wait_timeout is None:
        wait_timeout = 300
    wait_timeout = time.time() + wait_timeout
-    target = module.params.get('target')
-    state = module.params.get('state')
+    target = module.params.get("target")
+    state = module.params.get("state")
    instances = list()
-    if state == 'present' and group_id is not None and wait_for_instances is True:
-
+    if state == "present" and group_id is not None and wait_for_instances is True:
        is_amount_fulfilled = False
        while is_amount_fulfilled is False and wait_timeout > time.time():
            instances = list()
@@ -966,7 +952,7 @@ def retrieve_group_instances(client, module, group_id):
                healthy_instances = client.get_instance_healthiness(group_id=group_id)
                for healthy_instance in healthy_instances:
-                    if healthy_instance.get('healthStatus') == 'HEALTHY':
+                    if healthy_instance.get("healthStatus") == "HEALTHY":
                        amount_of_fulfilled_instances += 1
                        instances.append(healthy_instance)
@@ -974,7 +960,7 @@ def retrieve_group_instances(client, module, group_id):
                active_instances = client.get_elastigroup_active_instances(group_id=group_id)
                for active_instance in active_instances:
-                    if active_instance.get('private_ip') is not None:
+                    if active_instance.get("private_ip") is not None:
                        amount_of_fulfilled_instances += 1
                        instances.append(active_instance)
@@ -988,18 +974,18 @@ def retrieve_group_instances(client, module, group_id):
 def find_group_with_same_name(groups, name):
    for group in groups:
-        if group['name'] == name:
-            return False, group.get('id')
+        if group["name"] == name:
+            return False, group.get("id")
    return True, None
 def expand_elastigroup(module, is_update):
-    do_not_update = module.params['do_not_update']
-    name = module.params.get('name')
+    do_not_update = module.params["do_not_update"]
+    name = module.params.get("name")
    eg = spotinst.aws_elastigroup.Elastigroup()
-    description = module.params.get('description')
+    description = module.params.get("description")
    if name is not None:
        eg.name = name
@@ -1025,12 +1011,12 @@ def expand_elastigroup(module, is_update):
 def expand_compute(eg, module, is_update, do_not_update):
-    elastic_ips = module.params['elastic_ips']
-    on_demand_instance_type = module.params.get('on_demand_instance_type')
-    spot_instance_types = module.params['spot_instance_types']
-    ebs_volume_pool = module.params['ebs_volume_pool']
-    availability_zones_list = module.params['availability_zones']
-    product = module.params.get('product')
+    elastic_ips = module.params["elastic_ips"]
+    on_demand_instance_type = module.params.get("on_demand_instance_type")
+    spot_instance_types = module.params["spot_instance_types"]
+    ebs_volume_pool = module.params["ebs_volume_pool"]
+    availability_zones_list = module.params["availability_zones"]
+    product = module.params.get("product")
    eg_compute = spotinst.aws_elastigroup.Compute()
@@ -1055,7 +1041,7 @@ def expand_compute(eg, module, is_update, do_not_update):
        expand_ebs_volume_pool(eg_compute, ebs_volume_pool)
-    eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone')
+    eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, "AvailabilityZone")
    expand_launch_spec(eg_compute, module, is_update, do_not_update)
@@ -1069,10 +1055,10 @@ def expand_ebs_volume_pool(eg_compute, ebs_volumes_list):
    for volume in ebs_volumes_list:
        eg_volume = spotinst.aws_elastigroup.EbsVolume()
-        if volume.get('device_name') is not None:
-            eg_volume.device_name = volume.get('device_name')
-        if volume.get('volume_ids') is not None:
-            eg_volume.volume_ids = volume.get('volume_ids')
+        if volume.get("device_name") is not None:
+            eg_volume.device_name = volume.get("device_name")
+        if volume.get("volume_ids") is not None:
+            eg_volume.volume_ids = volume.get("volume_ids")
        if eg_volume.device_name is not None:
            eg_volumes.append(eg_volume)
@@ -1082,20 +1068,20 @@ def expand_launch_spec(eg_compute, module, is_update, do_not_update):
-    eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification')
+    eg_launch_spec = expand_fields(lspec_fields, module.params, "LaunchSpecification")
-    if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None:
-        eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole')
+    if module.params["iam_role_arn"] is not None or module.params["iam_role_name"] is not None:
+        eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, "IamRole")
-    tags = module.params['tags']
-    load_balancers = module.params['load_balancers']
-    target_group_arns = module.params['target_group_arns']
-    block_device_mappings = module.params['block_device_mappings']
-    network_interfaces = module.params['network_interfaces']
+    tags = module.params["tags"]
+    load_balancers = module.params["load_balancers"]
+    target_group_arns = module.params["target_group_arns"]
+    block_device_mappings = module.params["block_device_mappings"]
+    network_interfaces = module.params["network_interfaces"]
    if is_update is True:
-        if 'image_id' in do_not_update:
-            delattr(eg_launch_spec, 'image_id')
+        if "image_id" in do_not_update:
+            delattr(eg_launch_spec, "image_id")
    expand_tags(eg_launch_spec, tags)
@@ -1109,44 +1095,44 @@ def expand_integrations(eg, module):
-    rancher = module.params.get('rancher')
-    mesosphere = module.params.get('mesosphere')
-    ecs = module.params.get('ecs')
-    kubernetes = module.params.get('kubernetes')
-    right_scale = module.params.get('right_scale')
-    opsworks = module.params.get('opsworks')
-    chef = module.params.get('chef')
+    rancher = module.params.get("rancher")
+    mesosphere = module.params.get("mesosphere")
+    ecs = module.params.get("ecs")
+    kubernetes = module.params.get("kubernetes")
+    right_scale = module.params.get("right_scale")
+    opsworks = module.params.get("opsworks")
+    chef = module.params.get("chef")
    integration_exists = False
    eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations()
    if mesosphere is not None:
-        eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere')
+        eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, "Mesosphere")
        integration_exists = True
    if ecs is not None:
-        eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration')
+        eg_integrations.ecs = expand_fields(ecs_fields, ecs, "EcsConfiguration")
        integration_exists = True
    if kubernetes is not None:
-        eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration')
+        eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, "KubernetesConfiguration")
        integration_exists = True
    if right_scale is not None:
-        eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration')
+        eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, "RightScaleConfiguration")
        integration_exists = True
    if opsworks is not None:
-        eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration')
+        eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, "OpsWorksConfiguration")
        integration_exists = True
    if rancher is not None:
-        eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher')
+        eg_integrations.rancher = expand_fields(rancher_fields, rancher, "Rancher")
        integration_exists = True
    if chef is not None:
-        eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration')
+        eg_integrations.chef = expand_fields(chef_fields, chef, "ChefConfiguration")
        integration_exists = True
    if integration_exists:
@@ -1154,34 +1140,33 @@ def expand_capacity(eg, module, is_update, do_not_update):
-    eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity')
+    eg_capacity = expand_fields(capacity_fields, module.params, "Capacity")
    if is_update is True:
-        delattr(eg_capacity, 'unit')
+        delattr(eg_capacity, "unit")
-        if 'target' in do_not_update:
-            delattr(eg_capacity, 'target')
+        if "target" in do_not_update:
+            delattr(eg_capacity, "target")
    eg.capacity = eg_capacity
 def expand_strategy(eg, module):
-    persistence = module.params.get('persistence')
-    signals = module.params.get('signals')
+    persistence = module.params.get("persistence")
+    signals = module.params.get("signals")
-    eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy')
+    eg_strategy = expand_fields(strategy_fields, module.params, "Strategy")
-    terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour')
+    terminate_at_end_of_billing_hour = module.params.get("terminate_at_end_of_billing_hour")
    if terminate_at_end_of_billing_hour is not None:
-        eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields,
-                                                        module.params, 'ScalingStrategy')
+        eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields, module.params, "ScalingStrategy")
    if persistence is not None:
-        eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence')
+        eg_strategy.persistence = expand_fields(persistence_fields, persistence, "Persistence")
    if signals is not None:
-        eg_signals = expand_list(signals, signal_fields, 'Signal')
+        eg_signals = expand_list(signals, signal_fields, "Signal")
        if len(eg_signals) > 0:
            eg_strategy.signals = eg_signals
@@ -1190,12 +1175,12 @@ def expand_multai(eg, module):
-    multai_load_balancers = module.params.get('multai_load_balancers')
+    multai_load_balancers = module.params.get("multai_load_balancers")
-    eg_multai = expand_fields(multai_fields, module.params, 'Multai')
+    eg_multai = expand_fields(multai_fields, module.params, "Multai")
    if multai_load_balancers is not None:
-        eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer')
+        eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, "MultaiLoadBalancer")
        if len(eg_multai_load_balancers) > 0:
            eg_multai.balancers = eg_multai_load_balancers
@@ -1203,12 +1188,12 @@ def expand_scheduled_tasks(eg, module):
-    scheduled_tasks = module.params.get('scheduled_tasks')
+    scheduled_tasks = module.params.get("scheduled_tasks")
    if scheduled_tasks is not None:
        eg_scheduling = spotinst.aws_elastigroup.Scheduling()
-        eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask')
+        eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, "ScheduledTask")
        if len(eg_tasks) > 0:
            eg_scheduling.tasks = eg_tasks
@@ -1225,7 +1210,7 @@ def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns):
            eg_elb = spotinst.aws_elastigroup.LoadBalancer()
            if elb_name is not None:
                eg_elb.name = elb_name
-                eg_elb.type = 'CLASSIC'
+                eg_elb.type = "CLASSIC"
                eg_total_lbs.append(eg_elb)
    if target_group_arns is not None:
@@ -1233,7 +1218,7 @@ def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns):
            eg_elb = spotinst.aws_elastigroup.LoadBalancer()
            if target_arn is not None:
                eg_elb.arn = target_arn
-                eg_elb.type = 'TARGET_GROUP'
+                eg_elb.type = "TARGET_GROUP"
                eg_total_lbs.append(eg_elb)
    if len(eg_total_lbs) > 0:
@@ -1261,10 +1246,10 @@ def expand_block_device_mappings(eg_launchspec, bdms):
    eg_bdms = []
    for bdm in bdms:
-        eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping')
+        eg_bdm = expand_fields(bdm_fields, bdm, "BlockDeviceMapping")
-        if bdm.get('ebs') is not None:
-            eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS')
+        if bdm.get("ebs") is not None:
+            eg_bdm.ebs = expand_fields(ebs_fields, bdm.get("ebs"), "EBS")
        eg_bdms.append(eg_bdm)
@@ -1277,9 +1262,9 @@ def expand_network_interfaces(eg_launchspec, enis):
    eg_enis = []
    for eni in enis:
-        eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface')
+        eg_eni = expand_fields(eni_fields, eni, "NetworkInterface")
-        eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress')
+        eg_pias = expand_list(eni.get("private_ip_addresses"), private_ip_fields, "PrivateIpAddress")
        if eg_pias is not None:
            eg_eni.private_ip_addresses = eg_pias
@@ -1291,9 +1276,9 @@ def expand_scaling(eg, module):
-    up_scaling_policies = module.params['up_scaling_policies']
-    down_scaling_policies = module.params['down_scaling_policies']
-    target_tracking_policies = module.params['target_tracking_policies']
+    up_scaling_policies = module.params["up_scaling_policies"]
+    down_scaling_policies = module.params["down_scaling_policies"]
+    target_tracking_policies = module.params["target_tracking_policies"]
    eg_scaling = spotinst.aws_elastigroup.Scaling()
@@ -1334,8 +1319,8 @@ def expand_fields(fields, item, class_name):
    if item is not None:
        for field in fields:
            if isinstance(field, dict):
-                ansible_field_name = field['ansible_field_name']
-                spotinst_field_name = field['spotinst_field_name']
+                ansible_field_name = field["ansible_field_name"]
+                spotinst_field_name = field["spotinst_field_name"]
            else:
                ansible_field_name = field
                spotinst_field_name = field
@@ -1349,8 +1334,8 @@ def expand_scaling_policies(scaling_policies):
    eg_scaling_policies = []
    for policy in scaling_policies:
-        eg_policy = expand_fields(scaling_policy_fields, policy, 'ScalingPolicy')
-        eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction')
+        eg_policy = expand_fields(scaling_policy_fields, policy, "ScalingPolicy")
+        eg_policy.action = expand_fields(action_fields, policy, "ScalingPolicyAction")
        eg_scaling_policies.append(eg_policy)
    return eg_scaling_policies
@@ -1360,7 +1345,7 @@ def expand_target_tracking_policies(tracking_policies):
    eg_tracking_policies = []
    for policy in tracking_policies:
-        eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy')
+        eg_policy = expand_fields(tracking_policy_fields, policy, "TargetTrackingPolicy")
        eg_tracking_policies.append(eg_policy)
    return eg_tracking_policies
@@ -1368,70 +1353,70 @@
 def main():
    fields = dict(
-        account_id=dict(type='str'),
-        availability_vs_cost=dict(type='str', required=True),
-        availability_zones=dict(type='list', elements='dict', required=True),
-        block_device_mappings=dict(type='list', elements='dict'),
-        chef=dict(type='dict'),
-        credentials_path=dict(type='path', default="~/.spotinst/credentials"),
-        do_not_update=dict(default=[], type='list', elements='str'),
-        down_scaling_policies=dict(type='list', elements='dict'),
-        draining_timeout=dict(type='int'),
-        ebs_optimized=dict(type='bool'),
-        ebs_volume_pool=dict(type='list', elements='dict'),
-        ecs=dict(type='dict'),
-        elastic_beanstalk=dict(type='dict'),
-        elastic_ips=dict(type='list', elements='str'),
-        fallback_to_od=dict(type='bool'),
-        id=dict(type='str'),
-        health_check_grace_period=dict(type='int'),
-        health_check_type=dict(type='str'),
-        health_check_unhealthy_duration_before_replacement=dict(type='int'),
-        iam_role_arn=dict(type='str'),
-        iam_role_name=dict(type='str'),
-        image_id=dict(type='str', required=True),
-        key_pair=dict(type='str', no_log=False),
-        kubernetes=dict(type='dict'),
-        lifetime_period=dict(type='int'),
-        load_balancers=dict(type='list', elements='str'),
-        max_size=dict(type='int', required=True),
-        mesosphere=dict(type='dict'),
-        min_size=dict(type='int', required=True),
-        monitoring=dict(type='str'),
-        multai_load_balancers=dict(type='list', elements='dict'),
-        multai_token=dict(type='str', no_log=True),
-        name=dict(type='str', required=True),
-        network_interfaces=dict(type='list', elements='dict'),
-        on_demand_count=dict(type='int'),
-        on_demand_instance_type=dict(type='str'),
-        opsworks=dict(type='dict'),
-        persistence=dict(type='dict'),
-        product=dict(type='str', required=True),
-        rancher=dict(type='dict'),
-        right_scale=dict(type='dict'),
-        risk=dict(type='int'),
-        roll_config=dict(type='dict'),
-        scheduled_tasks=dict(type='list', elements='dict'),
-        security_group_ids=dict(type='list', elements='str', required=True),
-        shutdown_script=dict(type='str'),
-        signals=dict(type='list', elements='dict'),
-        spin_up_time=dict(type='int'),
-        spot_instance_types=dict(type='list', elements='str', required=True),
-        state=dict(default='present', choices=['present', 'absent']),
-        tags=dict(type='list', elements='dict'),
-        target=dict(type='int', required=True),
-        target_group_arns=dict(type='list', elements='str'),
-        tenancy=dict(type='str'),
-        terminate_at_end_of_billing_hour=dict(type='bool'),
-        token=dict(type='str', no_log=True),
-        unit=dict(type='str'),
-        user_data=dict(type='str'),
-        utilize_reserved_instances=dict(type='bool'),
-        uniqueness_by=dict(default='name', choices=['name', 'id']),
-        up_scaling_policies=dict(type='list', elements='dict'),
-        target_tracking_policies=dict(type='list', elements='dict'),
-        wait_for_instances=dict(type='bool', default=False),
-        wait_timeout=dict(type='int')
+        account_id=dict(type="str"),
+        availability_vs_cost=dict(type="str", required=True),
+        availability_zones=dict(type="list", elements="dict", required=True),
+        block_device_mappings=dict(type="list", elements="dict"),
+        chef=dict(type="dict"),
+        credentials_path=dict(type="path", default="~/.spotinst/credentials"),
+        do_not_update=dict(default=[], type="list", elements="str"),
+        down_scaling_policies=dict(type="list", elements="dict"),
+        draining_timeout=dict(type="int"),
+        ebs_optimized=dict(type="bool"),
+        ebs_volume_pool=dict(type="list", elements="dict"),
+        ecs=dict(type="dict"),
+        elastic_beanstalk=dict(type="dict"),
+        elastic_ips=dict(type="list", elements="str"),
+        fallback_to_od=dict(type="bool"),
+        id=dict(type="str"),
+        health_check_grace_period=dict(type="int"),
+        health_check_type=dict(type="str"),
+        health_check_unhealthy_duration_before_replacement=dict(type="int"),
+        iam_role_arn=dict(type="str"),
+        iam_role_name=dict(type="str"),
+        image_id=dict(type="str", required=True),
+        key_pair=dict(type="str", no_log=False),
+        kubernetes=dict(type="dict"),
+        lifetime_period=dict(type="int"),
+        load_balancers=dict(type="list", elements="str"),
+        max_size=dict(type="int", required=True),
+        mesosphere=dict(type="dict"),
+        min_size=dict(type="int", required=True),
+        monitoring=dict(type="str"),
+        multai_load_balancers=dict(type="list", elements="dict"),
+        multai_token=dict(type="str", no_log=True),
+        name=dict(type="str", required=True),
+        network_interfaces=dict(type="list", elements="dict"),
+        on_demand_count=dict(type="int"),
+        on_demand_instance_type=dict(type="str"),
+        opsworks=dict(type="dict"),
+        persistence=dict(type="dict"),
+        product=dict(type="str", required=True),
+        rancher=dict(type="dict"),
+        right_scale=dict(type="dict"),
+        risk=dict(type="int"),
+        roll_config=dict(type="dict"),
+        scheduled_tasks=dict(type="list", elements="dict"),
+        security_group_ids=dict(type="list", elements="str", required=True),
+        shutdown_script=dict(type="str"),
+        signals=dict(type="list", elements="dict"),
+        spin_up_time=dict(type="int"),
+        spot_instance_types=dict(type="list", elements="str", required=True),
+        state=dict(default="present", choices=["present", "absent"]),
+        tags=dict(type="list", elements="dict"),
+        target=dict(type="int", required=True),
+        target_group_arns=dict(type="list", elements="str"),
+        tenancy=dict(type="str"),
+        terminate_at_end_of_billing_hour=dict(type="bool"),
+        token=dict(type="str", no_log=True),
+        unit=dict(type="str"),
+        user_data=dict(type="str"),
+        utilize_reserved_instances=dict(type="bool"),
+        uniqueness_by=dict(default="name", choices=["name", "id"]),
+        up_scaling_policies=dict(type="list", elements="dict"),
+        target_tracking_policies=dict(type="list", elements="dict"),
+        wait_for_instances=dict(type="bool", default=False),
+        wait_timeout=dict(type="int"),
    )
    module = AnsibleModule(argument_spec=fields)
@@ -1442,28 +1427,28 @@ def main():
    # Retrieve creds file variables
    creds_file_loaded_vars = dict()
-    credentials_path = module.params.get('credentials_path')
+    credentials_path = module.params.get("credentials_path")
    try:
        with open(credentials_path, "r") as creds:
            for line in creds:
-                eq_index = line.find('=')
+                eq_index = line.find("=")
                var_name = line[:eq_index].strip()
-                string_value = line[eq_index + 1:].strip()
+                string_value = line[eq_index + 1 :].strip()
                creds_file_loaded_vars[var_name] = string_value
    except IOError:
        pass
    # End of creds file retrieval
-    token = module.params.get('token')
+    token = module.params.get("token")
    if not token:
-        token = os.environ.get('SPOTINST_TOKEN')
+        token = os.environ.get("SPOTINST_TOKEN")
    if not token:
        token = creds_file_loaded_vars.get("token")
-    account = module.params.get('account_id')
+    account = module.params.get("account_id")
    if not account:
-        account = os.environ.get('SPOTINST_ACCOUNT_ID') or os.environ.get('ACCOUNT')
+        account = os.environ.get("SPOTINST_ACCOUNT_ID") or os.environ.get("ACCOUNT")
    if not account:
        account = creds_file_loaded_vars.get("account")
@@ -1479,5 +1464,5 @@
    module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances)
-if __name__ == '__main__':
+if __name__ == "__main__":
    main()
diff --git a/plugins/modules/ss_3par_cpg.py b/plugins/modules/ss_3par_cpg.py
index 4af50d8353f..f36cdbe87d0 100644
--- a/plugins/modules/ss_3par_cpg.py
+++ b/plugins/modules/ss_3par_cpg.py
@@ -124,9 +124,11 @@
 from ansible.module_utils.basic import AnsibleModule
 from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par
+
 try:
    from hpe3par_sdk import client
    from hpe3parclient import exceptions
+
    HAS_3PARCLIENT = True
 except ImportError:
    HAS_3PARCLIENT = False
@@ -134,63 +136,60 @@
 def validate_set_size(raid_type, set_size):
    if raid_type:
-        set_size_array = client.HPE3ParClient.RAID_MAP[raid_type]['set_sizes']
+        set_size_array = client.HPE3ParClient.RAID_MAP[raid_type]["set_sizes"]
        if set_size in set_size_array:
            return True
    return False
 def cpg_ldlayout_map(ldlayout_dict):
-    if ldlayout_dict['RAIDType'] is not None and ldlayout_dict['RAIDType']:
-        ldlayout_dict['RAIDType'] = client.HPE3ParClient.RAID_MAP[
-            ldlayout_dict['RAIDType']]['raid_value']
-    if ldlayout_dict['HA'] is not None and ldlayout_dict['HA']:
-        ldlayout_dict['HA'] = getattr(
-            client.HPE3ParClient, ldlayout_dict['HA'])
+    if ldlayout_dict["RAIDType"] is not None and ldlayout_dict["RAIDType"]:
+        ldlayout_dict["RAIDType"] = client.HPE3ParClient.RAID_MAP[ldlayout_dict["RAIDType"]]["raid_value"]
+    if ldlayout_dict["HA"] is not None and ldlayout_dict["HA"]:
+        ldlayout_dict["HA"] = getattr(client.HPE3ParClient, ldlayout_dict["HA"])
    return ldlayout_dict
 def create_cpg(
-        client_obj,
-        cpg_name,
-        domain,
-        growth_increment,
-        growth_limit,
-        growth_warning,
-        raid_type,
-        set_size,
-        high_availability,
-        disk_type):
+    client_obj,
+    cpg_name,
+    domain,
+    growth_increment,
+    growth_limit,
+    growth_warning,
+    raid_type,
+    set_size,
+    high_availability,
+    disk_type,
+):
    try:
        if not validate_set_size(raid_type, set_size):
            return (False, False, f"Set size {set_size} not part of RAID set {raid_type}")
        if not client_obj.cpgExists(cpg_name):
-
            disk_patterns = []
            if disk_type:
                disk_type = getattr(client.HPE3ParClient, disk_type)
-                disk_patterns = [{'diskType': disk_type}]
+                disk_patterns = [{"diskType": disk_type}]
            ld_layout = {
-                'RAIDType': raid_type,
-                'setSize': set_size,
-                'HA': high_availability,
-                'diskPatterns': disk_patterns}
+                "RAIDType": raid_type,
+                "setSize": set_size,
+                "HA": high_availability,
+                "diskPatterns": disk_patterns,
+            }
            ld_layout = cpg_ldlayout_map(ld_layout)
            if growth_increment is not None:
-                growth_increment = hpe3par.convert_to_binary_multiple(
-                    growth_increment)
+                growth_increment = hpe3par.convert_to_binary_multiple(growth_increment)
            if growth_limit is not None:
-                growth_limit = hpe3par.convert_to_binary_multiple(
-                    growth_limit)
+                growth_limit = hpe3par.convert_to_binary_multiple(growth_limit)
            if growth_warning is not None:
-                growth_warning = hpe3par.convert_to_binary_multiple(
-                    growth_warning)
+                growth_warning = hpe3par.convert_to_binary_multiple(growth_warning)
            optional = {
-                'domain': domain,
-                'growthIncrementMiB': growth_increment,
-                'growthLimitMiB': growth_limit,
-                'usedLDWarningAlertMiB': growth_warning,
-                'LDLayout': ld_layout}
+                "domain": domain,
+                "growthIncrementMiB": growth_increment,
+                "growthLimitMiB": growth_limit,
+                "usedLDWarningAlertMiB": growth_warning,
+                "LDLayout": ld_layout,
+            }
            client_obj.createCPG(cpg_name, optional)
        else:
            return (True, False, "CPG already present")
@@ -199,9 +198,7 @@ def create_cpg(
    return (True, True, f"Created CPG {cpg_name} successfully.")
-def delete_cpg(
-        client_obj,
-        cpg_name):
+def delete_cpg(client_obj, cpg_name):
    try:
        if client_obj.cpgExists(cpg_name):
            client_obj.deleteCPG(cpg_name)
@@ -213,10 +210,9 @@ def delete_cpg(
 def main():
-    module = AnsibleModule(argument_spec=hpe3par.cpg_argument_spec(),
-                           required_together=[['raid_type', 'set_size']])
+    module = AnsibleModule(argument_spec=hpe3par.cpg_argument_spec(), required_together=[["raid_type", "set_size"]])
    if not HAS_3PARCLIENT:
-        module.fail_json(msg='the python hpe3par_sdk library is required (https://pypi.org/project/hpe3par_sdk)')
+        module.fail_json(msg="the python hpe3par_sdk library is required (https://pypi.org/project/hpe3par_sdk)")
    if len(module.params["cpg_name"]) < 1 or len(module.params["cpg_name"]) > 31:
        module.fail_json(msg="CPG name must be at least 1 character and not more than 31 characters")
@@ -235,7 +231,7 @@ def main():
    disk_type = module.params["disk_type"]
    secure = module.params["secure"]
-    wsapi_url = f'https://{storage_system_ip}:8080/api/v1'
+    wsapi_url = f"https://{storage_system_ip}:8080/api/v1"
    try:
        client_obj = client.HPE3ParClient(wsapi_url, secure)
    except exceptions.SSLCertFailed:
@@ -266,7 +262,7 @@ def main():
            raid_type,
            set_size,
            high_availability,
-            disk_type
+            disk_type,
        )
    except Exception as e:
        module.fail_json(msg=f"CPG create failed | {e}")
@@ -276,10 +272,7 @@ def main():
    elif module.params["state"] == "absent":
        try:
            client_obj.login(storage_system_username, storage_system_password)
-            return_status, changed, msg = delete_cpg(
-                client_obj,
-                cpg_name
-            )
+            return_status, changed, msg = delete_cpg(client_obj, cpg_name)
        except Exception as e:
            module.fail_json(msg=f"CPG create failed | {e}")
        finally:
@@ -291,5 +284,5 @@ def main():
        module.fail_json(msg=msg)
-if __name__ == '__main__':
+if __name__ == "__main__":
    main()
diff --git a/plugins/modules/ssh_config.py b/plugins/modules/ssh_config.py
index 9570737835b..1514297d271 100644
--- a/plugins/modules/ssh_config.py
+++ b/plugins/modules/ssh_config.py
@@ -224,23 +224,27 @@
 from copy import deepcopy
 from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils._stormssh import ConfigParser, HAS_PARAMIKO, PARAMIKO_IMPORT_ERROR
+from ansible_collections.community.general.plugins.module_utils._stormssh import (
+    ConfigParser,
+    HAS_PARAMIKO,
+    PARAMIKO_IMPORT_ERROR,
+)
 from ansible_collections.community.general.plugins.module_utils.ssh import determine_config_file
 def convert_bool(value):
    if value is True:
-        return 'yes'
+        return "yes"
    if value is False:
-        return 'no'
+        return "no"
    return None
 def fix_bool_str(value):
-    if value == 'True':
-        return 'yes'
-    if value == 'False':
-        return 'no'
+    if value == "True":
+        return "yes"
+    if value == "False":
+        return "no"
    return value
@@ -248,13 +252,13 @@ class SSHConfig:
    def __init__(self, module):
        self.module = module
        if not HAS_PARAMIKO:
-            module.fail_json(msg=missing_required_lib('PARAMIKO'), exception=PARAMIKO_IMPORT_ERROR)
module.fail_json(msg=missing_required_lib('PARAMIKO'), exception=PARAMIKO_IMPORT_ERROR) + module.fail_json(msg=missing_required_lib("PARAMIKO"), exception=PARAMIKO_IMPORT_ERROR) self.params = module.params - self.user = self.params.get('user') - self.group = self.params.get('group') or self.user - self.host = self.params.get('host') - self.config_file = self.params.get('ssh_config_file') - self.identity_file = self.params['identity_file'] + self.user = self.params.get("user") + self.group = self.params.get("group") or self.user + self.host = self.params.get("host") + self.config_file = self.params.get("ssh_config_file") + self.identity_file = self.params["identity_file"] self.check_ssh_config_path() try: self.config = ConfigParser(self.config_file) @@ -275,32 +279,34 @@ def check_ssh_config_path(self): def ensure_state(self): hosts_result = self.config.search_host(self.host) - state = self.params['state'] + state = self.params["state"] args = dict( - hostname=self.params.get('hostname'), - port=self.params.get('port'), - identity_file=self.params.get('identity_file'), - identities_only=convert_bool(self.params.get('identities_only')), - user=self.params.get('remote_user'), - strict_host_key_checking=self.params.get('strict_host_key_checking'), - user_known_hosts_file=self.params.get('user_known_hosts_file'), - proxycommand=self.params.get('proxycommand'), - proxyjump=self.params.get('proxyjump'), - host_key_algorithms=self.params.get('host_key_algorithms'), - forward_agent=convert_bool(self.params.get('forward_agent')), - add_keys_to_agent=convert_bool(self.params.get('add_keys_to_agent')), - controlmaster=self.params.get('controlmaster'), - controlpath=self.params.get('controlpath'), - controlpersist=fix_bool_str(self.params.get('controlpersist')), - dynamicforward=self.params.get('dynamicforward'), + hostname=self.params.get("hostname"), + port=self.params.get("port"), + identity_file=self.params.get("identity_file"), + identities_only=convert_bool(self.params.get("identities_only")), + user=self.params.get("remote_user"), + strict_host_key_checking=self.params.get("strict_host_key_checking"), + user_known_hosts_file=self.params.get("user_known_hosts_file"), + proxycommand=self.params.get("proxycommand"), + proxyjump=self.params.get("proxyjump"), + host_key_algorithms=self.params.get("host_key_algorithms"), + forward_agent=convert_bool(self.params.get("forward_agent")), + add_keys_to_agent=convert_bool(self.params.get("add_keys_to_agent")), + controlmaster=self.params.get("controlmaster"), + controlpath=self.params.get("controlpath"), + controlpersist=fix_bool_str(self.params.get("controlpersist")), + dynamicforward=self.params.get("dynamicforward"), ) - if self.params.get('other_options'): - for key, value in self.params.get('other_options').items(): + if self.params.get("other_options"): + for key, value in self.params.get("other_options").items(): if key.lower() != key: self.module.fail_json(msg=f"The other_options key {key} must be lower case") if key not in args: if not isinstance(value, str): - self.module.fail_json(msg=f"The other_options value provided for key {key} must be a string, got {type(value)}") + self.module.fail_json( + msg=f"The other_options value provided for key {key} must be a string, got {type(value)}" + ) args[key] = value else: self.module.fail_json(msg=f"Multiple values provided for key {key}") @@ -311,30 +317,32 @@ def ensure_state(self): hosts_removed = [] hosts_added = [] - hosts_result = [host for host in hosts_result if host['host'] == self.host] + 
hosts_result = [host for host in hosts_result if host["host"] == self.host] if hosts_result: for host in hosts_result: - if state == 'absent': + if state == "absent": # Delete host from the configuration config_changed = True - hosts_removed.append(host['host']) - self.config.delete_host(host['host']) + hosts_removed.append(host["host"]) + self.config.delete_host(host["host"]) else: # Update host in the configuration - changed, options = self.change_host(host['options'], **args) + changed, options = self.change_host(host["options"], **args) if changed: config_changed = True - self.config.update_host(host['host'], options) - hosts_changed.append(host['host']) - hosts_change_diff.append({ - host['host']: { - 'old': host['options'], - 'new': options, + self.config.update_host(host["host"], options) + hosts_changed.append(host["host"]) + hosts_change_diff.append( + { + host["host"]: { + "old": host["options"], + "new": options, + } } - }) - elif state == 'present': + ) + elif state == "present": changed, options = self.change_host(dict(), **args) if changed: @@ -346,30 +354,31 @@ def ensure_state(self): try: self.config.write_to_ssh_config() except PermissionError as perm_exec: - self.module.fail_json( - msg=f"Failed to write to {self.config_file} due to permission issue: {perm_exec}") + self.module.fail_json(msg=f"Failed to write to {self.config_file} due to permission issue: {perm_exec}") # Make sure we set the permission - perm_mode = '0600' - if self.config_file == '/etc/ssh/ssh_config': - perm_mode = '0644' + perm_mode = "0600" + if self.config_file == "/etc/ssh/ssh_config": + perm_mode = "0644" self.module.set_mode_if_different(self.config_file, perm_mode, False) # Make sure the file is owned by the right user and group self.module.set_owner_if_different(self.config_file, self.user, False) self.module.set_group_if_different(self.config_file, self.group, False) - self.module.exit_json(changed=config_changed, - hosts_changed=hosts_changed, - hosts_removed=hosts_removed, - hosts_change_diff=hosts_change_diff, - hosts_added=hosts_added) + self.module.exit_json( + changed=config_changed, + hosts_changed=hosts_changed, + hosts_removed=hosts_removed, + hosts_change_diff=hosts_change_diff, + hosts_added=hosts_added, + ) @staticmethod def change_host(options, **kwargs): options = deepcopy(options) changed = False for k, v in kwargs.items(): - if '_' in k: - k = k.replace('_', '') + if "_" in k: + k = k.replace("_", "") if not v: if options.get(k): @@ -385,33 +394,33 @@ def change_host(options, **kwargs): def main(): module = AnsibleModule( argument_spec=dict( - group=dict(type='str'), - host=dict(type='str', required=True), - hostname=dict(type='str'), - host_key_algorithms=dict(type='str', no_log=False), - identity_file=dict(type='path'), - identities_only=dict(type='bool'), - other_options=dict(type='dict'), - port=dict(type='str'), - proxycommand=dict(type='str'), - proxyjump=dict(type='str'), - forward_agent=dict(type='bool'), - add_keys_to_agent=dict(type='bool'), - remote_user=dict(type='str'), - ssh_config_file=dict(type='path'), - state=dict(type='str', default='present', choices=['present', 'absent']), - strict_host_key_checking=dict(type='str', choices=['yes', 'no', 'ask', 'accept-new']), - controlmaster=dict(type='str', choices=['yes', 'no', 'ask', 'auto', 'autoask']), - controlpath=dict(type='str'), - controlpersist=dict(type='str'), - dynamicforward=dict(type='str'), - user=dict(type='str'), - user_known_hosts_file=dict(type='str'), + group=dict(type="str"), + host=dict(type="str", 
required=True), + hostname=dict(type="str"), + host_key_algorithms=dict(type="str", no_log=False), + identity_file=dict(type="path"), + identities_only=dict(type="bool"), + other_options=dict(type="dict"), + port=dict(type="str"), + proxycommand=dict(type="str"), + proxyjump=dict(type="str"), + forward_agent=dict(type="bool"), + add_keys_to_agent=dict(type="bool"), + remote_user=dict(type="str"), + ssh_config_file=dict(type="path"), + state=dict(type="str", default="present", choices=["present", "absent"]), + strict_host_key_checking=dict(type="str", choices=["yes", "no", "ask", "accept-new"]), + controlmaster=dict(type="str", choices=["yes", "no", "ask", "auto", "autoask"]), + controlpath=dict(type="str"), + controlpersist=dict(type="str"), + dynamicforward=dict(type="str"), + user=dict(type="str"), + user_known_hosts_file=dict(type="str"), ), supports_check_mode=True, mutually_exclusive=[ - ['user', 'ssh_config_file'], - ['proxycommand', 'proxyjump'], + ["user", "ssh_config_file"], + ["proxycommand", "proxyjump"], ], ) @@ -419,5 +428,5 @@ def main(): ssh_config_obj.ensure_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/stacki_host.py b/plugins/modules/stacki_host.py index 66df8a3e41d..89b2300d354 100644 --- a/plugins/modules/stacki_host.py +++ b/plugins/modules/stacki_host.py @@ -126,25 +126,23 @@ class StackiHost: - def __init__(self, module): self.module = module - self.hostname = module.params['name'] - self.rack = module.params['rack'] - self.rank = module.params['rank'] - self.appliance = module.params['appliance'] - self.prim_intf = module.params['prim_intf'] - self.prim_intf_ip = module.params['prim_intf_ip'] - self.network = module.params['network'] - self.prim_intf_mac = module.params['prim_intf_mac'] - self.endpoint = module.params['stacki_endpoint'] - - auth_creds = {'USERNAME': module.params['stacki_user'], - 'PASSWORD': module.params['stacki_password']} + self.hostname = module.params["name"] + self.rack = module.params["rack"] + self.rank = module.params["rank"] + self.appliance = module.params["appliance"] + self.prim_intf = module.params["prim_intf"] + self.prim_intf_ip = module.params["prim_intf_ip"] + self.network = module.params["network"] + self.prim_intf_mac = module.params["prim_intf_mac"] + self.endpoint = module.params["stacki_endpoint"] + + auth_creds = {"USERNAME": module.params["stacki_user"], "PASSWORD": module.params["stacki_password"]} # Get Initial CSRF cred_a = self.do_request(self.endpoint, method="GET") - cookie_a = cred_a.headers.get('Set-Cookie').split(';') + cookie_a = cred_a.headers.get("Set-Cookie").split(";") init_csrftoken = None for c in cookie_a: if "csrftoken" in c: @@ -153,16 +151,20 @@ def __init__(self, module): break # Make Header Dictionary with initial CSRF - header = {'csrftoken': init_csrftoken, 'X-CSRFToken': init_csrftoken, - 'Content-type': 'application/x-www-form-urlencoded', 'Cookie': cred_a.headers.get('Set-Cookie')} + header = { + "csrftoken": init_csrftoken, + "X-CSRFToken": init_csrftoken, + "Content-type": "application/x-www-form-urlencoded", + "Cookie": cred_a.headers.get("Set-Cookie"), + } # Endpoint to get final authentication header login_endpoint = f"{self.endpoint}/login" # Get Final CSRF and Session ID - login_req = self.do_request(login_endpoint, headers=header, payload=urlencode(auth_creds), method='POST') + login_req = self.do_request(login_endpoint, headers=header, payload=urlencode(auth_creds), method="POST") - cookie_f = 
login_req.headers.get('Set-Cookie').split(';') + cookie_f = login_req.headers.get("Set-Cookie").split(";") csrftoken = None for f in cookie_f: if "csrftoken" in f: @@ -171,97 +173,103 @@ def __init__(self, module): sessionid = c.split("sessionid=", 1)[-1] sessionid = sessionid.rstrip("\r\n") - self.header = {'csrftoken': csrftoken, - 'X-CSRFToken': csrftoken, - 'sessionid': sessionid, - 'Content-type': 'application/json', - 'Cookie': login_req.headers.get('Set-Cookie')} + self.header = { + "csrftoken": csrftoken, + "X-CSRFToken": csrftoken, + "sessionid": sessionid, + "Content-type": "application/json", + "Cookie": login_req.headers.get("Set-Cookie"), + } def do_request(self, url, payload=None, headers=None, method=None): res, info = fetch_url(self.module, url, data=payload, headers=headers, method=method) - if info['status'] != 200: - self.module.fail_json(changed=False, msg=info['msg']) + if info["status"] != 200: + self.module.fail_json(changed=False, msg=info["msg"]) return res def stack_check_host(self): - res = self.do_request(self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST") + res = self.do_request( + self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST" + ) return self.hostname in res.read() def stack_sync(self): self.do_request(self.endpoint, payload=json.dumps({"cmd": "sync config"}), headers=self.header, method="POST") - self.do_request(self.endpoint, payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST") + self.do_request( + self.endpoint, payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST" + ) def stack_force_install(self, result): - data = {'cmd': f"set host boot {self.hostname} action=install"} + data = {"cmd": f"set host boot {self.hostname} action=install"} self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") changed = True self.stack_sync() - result['changed'] = changed - result['stdout'] = "api call successful".rstrip("\r\n") + result["changed"] = changed + result["stdout"] = "api call successful".rstrip("\r\n") def stack_add(self, result): data = dict() changed = False - data['cmd'] = f"add host {self.hostname} rack={self.rack} rank={self.rank} appliance={self.appliance}" + data["cmd"] = f"add host {self.hostname} rack={self.rack} rank={self.rank} appliance={self.appliance}" self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") self.stack_sync() - result['changed'] = changed - result['stdout'] = "api call successful".rstrip("\r\n") + result["changed"] = changed + result["stdout"] = "api call successful".rstrip("\r\n") def stack_remove(self, result): data = dict() - data['cmd'] = f"remove host {self.hostname}" + data["cmd"] = f"remove host {self.hostname}" self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") self.stack_sync() - result['changed'] = True - result['stdout'] = "api call successful".rstrip("\r\n") + result["changed"] = True + result["stdout"] = "api call successful".rstrip("\r\n") def main(): module = AnsibleModule( argument_spec=dict( - state=dict(type='str', default='present', choices=['absent', 'present']), - name=dict(type='str', required=True), - rack=dict(type='int', default=0), - rank=dict(type='int', default=0), - appliance=dict(type='str', default='backend'), - prim_intf=dict(type='str'), - prim_intf_ip=dict(type='str'), - network=dict(type='str', default='private'), - 
prim_intf_mac=dict(type='str'), - stacki_user=dict(type='str', required=True, fallback=(env_fallback, ['stacki_user'])), - stacki_password=dict(type='str', required=True, fallback=(env_fallback, ['stacki_password']), no_log=True), - stacki_endpoint=dict(type='str', required=True, fallback=(env_fallback, ['stacki_endpoint'])), - force_install=dict(type='bool', default=False), + state=dict(type="str", default="present", choices=["absent", "present"]), + name=dict(type="str", required=True), + rack=dict(type="int", default=0), + rank=dict(type="int", default=0), + appliance=dict(type="str", default="backend"), + prim_intf=dict(type="str"), + prim_intf_ip=dict(type="str"), + network=dict(type="str", default="private"), + prim_intf_mac=dict(type="str"), + stacki_user=dict(type="str", required=True, fallback=(env_fallback, ["stacki_user"])), + stacki_password=dict(type="str", required=True, fallback=(env_fallback, ["stacki_password"]), no_log=True), + stacki_endpoint=dict(type="str", required=True, fallback=(env_fallback, ["stacki_endpoint"])), + force_install=dict(type="bool", default=False), ), supports_check_mode=False, ) - result = {'changed': False} + result = {"changed": False} missing_params = list() stacki = StackiHost(module) host_exists = stacki.stack_check_host() # If state is present, but host exists, need force_install flag to put host back into install state - if module.params['state'] == 'present' and host_exists and module.params['force_install']: + if module.params["state"] == "present" and host_exists and module.params["force_install"]: stacki.stack_force_install(result) # If state is present, but host exists, and force_install is false, do nothing - elif module.params['state'] == 'present' and host_exists and not module.params['force_install']: - result['stdout'] = f"{module.params['name']} already exists. Set 'force_install' to true to bootstrap" + elif module.params["state"] == "present" and host_exists and not module.params["force_install"]: + result["stdout"] = f"{module.params['name']} already exists. Set 'force_install' to true to bootstrap" # Otherwise, state is present, but host doesn't exist, require more params to add host - elif module.params['state'] == 'present' and not host_exists: - for param in ['appliance', 'rack', 'rank', 'prim_intf', 'prim_intf_ip', 'network', 'prim_intf_mac']: + elif module.params["state"] == "present" and not host_exists: + for param in ["appliance", "rack", "rank", "prim_intf", "prim_intf_ip", "network", "prim_intf_mac"]: if not module.params[param]: missing_params.append(param) if len(missing_params) > 0: @@ -269,11 +277,11 @@ def main(): stacki.stack_add(result) # If state is absent, and host exists, let's remove it.
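# (For illustration: per StackiHost.stack_remove above, a removal reduces to a
# single API call followed by a config sync; the host name below is hypothetical.)
#   data = {"cmd": "remove host backend-0-0"}
#   self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
#   self.stack_sync()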
- elif module.params['state'] == 'absent' and host_exists: + elif module.params["state"] == "absent" and host_exists: stacki.stack_remove(result) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/statsd.py b/plugins/modules/statsd.py index 73f2fb4083e..824eafc4a35 100644 --- a/plugins/modules/statsd.py +++ b/plugins/modules/statsd.py @@ -100,10 +100,11 @@ """ -from ansible.module_utils.basic import (AnsibleModule, missing_required_lib) +from ansible.module_utils.basic import AnsibleModule, missing_required_lib try: from statsd import StatsClient, TCPStatsClient + HAS_STATSD = True except ImportError: HAS_STATSD = False @@ -118,59 +119,58 @@ def tcp_statsd_client(**client_params): def main(): - module = AnsibleModule( argument_spec=dict( - state=dict(type='str', default='present', choices=['present']), - host=dict(type='str', default='localhost'), - port=dict(type='int', default=8125), - protocol=dict(type='str', default='udp', choices=['udp', 'tcp']), - timeout=dict(type='float', default=1.0), - metric=dict(type='str', required=True), - metric_type=dict(type='str', required=True, choices=['counter', 'gauge']), - metric_prefix=dict(type='str', default=''), - value=dict(type='int', required=True), - delta=dict(type='bool', default=False), + state=dict(type="str", default="present", choices=["present"]), + host=dict(type="str", default="localhost"), + port=dict(type="int", default=8125), + protocol=dict(type="str", default="udp", choices=["udp", "tcp"]), + timeout=dict(type="float", default=1.0), + metric=dict(type="str", required=True), + metric_type=dict(type="str", required=True, choices=["counter", "gauge"]), + metric_prefix=dict(type="str", default=""), + value=dict(type="int", required=True), + delta=dict(type="bool", default=False), ), - supports_check_mode=False + supports_check_mode=False, ) if not HAS_STATSD: - module.fail_json(msg=missing_required_lib('statsd')) - - host = module.params.get('host') - port = module.params.get('port') - protocol = module.params.get('protocol') - timeout = module.params.get('timeout') - metric = module.params.get('metric') - metric_type = module.params.get('metric_type') - metric_prefix = module.params.get('metric_prefix') - value = module.params.get('value') - delta = module.params.get('delta') - - if protocol == 'udp': + module.fail_json(msg=missing_required_lib("statsd")) + + host = module.params.get("host") + port = module.params.get("port") + protocol = module.params.get("protocol") + timeout = module.params.get("timeout") + metric = module.params.get("metric") + metric_type = module.params.get("metric_type") + metric_prefix = module.params.get("metric_prefix") + value = module.params.get("value") + delta = module.params.get("delta") + + if protocol == "udp": client = udp_statsd_client(host=host, port=port, prefix=metric_prefix, maxudpsize=512, ipv6=False) - elif protocol == 'tcp': + elif protocol == "tcp": client = tcp_statsd_client(host=host, port=port, timeout=timeout, prefix=metric_prefix, ipv6=False) - metric_name = f'{metric_prefix}/{metric}' if metric_prefix else metric - metric_display_value = f'{value} (delta={delta})' if metric_type == 'gauge' else value + metric_name = f"{metric_prefix}/{metric}" if metric_prefix else metric + metric_display_value = f"{value} (delta={delta})" if metric_type == "gauge" else value try: - if metric_type == 'counter': + if metric_type == "counter": client.incr(metric, value) - elif metric_type == 'gauge': + elif metric_type == "gauge": 
client.gauge(metric, value, delta=delta) except Exception as exc: - module.fail_json(msg=f'Failed sending to StatsD {exc}') + module.fail_json(msg=f"Failed sending to StatsD {exc}") finally: - if protocol == 'tcp': + if protocol == "tcp": client.close() module.exit_json(msg=f"Sent {metric_type} {metric_name} -> {metric_display_value!s} to StatsD", changed=True) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/statusio_maintenance.py b/plugins/modules/statusio_maintenance.py index 26f28516680..d0cebc148b1 100644 --- a/plugins/modules/statusio_maintenance.py +++ b/plugins/modules/statusio_maintenance.py @@ -187,20 +187,13 @@ def get_api_auth_headers(api_id, api_key, url, statuspage): - - headers = { - "x-api-id": api_id, - "x-api-key": api_key, - "Content-Type": "application/json" - } + headers = {"x-api-id": api_id, "x-api-key": api_key, "Content-Type": "application/json"} try: - response = open_url( - f"{url}/v2/component/list/{statuspage}", headers=headers) + response = open_url(f"{url}/v2/component/list/{statuspage}", headers=headers) data = json.loads(response.read()) - if data['status']['message'] == 'Authentication failed': - return 1, None, None, "Authentication failed: " \ - "Check api_id/api_key and statuspage id." + if data["status"]["message"] == "Authentication failed": + return 1, None, None, "Authentication failed: Check api_id/api_key and statuspage id." else: auth_headers = headers auth_content = data @@ -213,13 +206,10 @@ def get_component_ids(auth_content, components): host_ids = [] lower_components = [x.lower() for x in components] for result in auth_content["result"]: - if result['name'].lower() in lower_components: - data = { - "component_id": result["_id"], - "container_id": result["containers"][0]["_id"] - } + if result["name"].lower() in lower_components: + data = {"component_id": result["_id"], "container_id": result["containers"][0]["_id"]} host_ids.append(data) - lower_components.remove(result['name'].lower()) + lower_components.remove(result["name"].lower()) if len(lower_components): # items not found in the api return 1, None, lower_components @@ -231,10 +221,7 @@ def get_container_ids(auth_content, containers): lower_containers = [x.lower() for x in containers] for result in auth_content["result"]: if result["containers"][0]["name"].lower() in lower_containers: - data = { - "component_id": result["_id"], - "container_id": result["containers"][0]["_id"] - } + data = {"component_id": result["_id"], "container_id": result["containers"][0]["_id"]} host_ids.append(data) lower_containers.remove(result["containers"][0]["name"].lower()) @@ -248,19 +235,18 @@ def get_date_time(start_date, start_time, minutes): returned_date = [] if start_date and start_time: try: - datetime.datetime.strptime(start_date, '%m/%d/%Y') + datetime.datetime.strptime(start_date, "%m/%d/%Y") returned_date.append(start_date) except (NameError, ValueError): return 1, None, "Not a valid start_date format." try: - datetime.datetime.strptime(start_time, '%H:%M') + datetime.datetime.strptime(start_time, "%H:%M") returned_date.append(start_time) except (NameError, ValueError): return 1, None, "Not a valid start_time format." 
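# The end date/time below is derived by parsing the two validated fields back
# into a single datetime with the combined format string, then adding the
# duration. A worked example (values hypothetical):
#   datetime.datetime.strptime("23:30" + "12/31/2025", "%H:%M%m/%d/%Y")
#   -> datetime.datetime(2025, 12, 31, 23, 30)
# Adding datetime.timedelta(minutes=45) gives 2026-01-01 00:15, which is then
# re-emitted via strftime("%m/%d/%Y") and strftime("%H:%M") as "01/01/2026" and "00:15".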
try: # Work out end date/time based on minutes - date_time_start = datetime.datetime.strptime( - start_time + start_date, '%H:%M%m/%d/%Y') + date_time_start = datetime.datetime.strptime(start_time + start_date, "%H:%M%m/%d/%Y") delta = date_time_start + datetime.timedelta(minutes=minutes) returned_date.append(delta.strftime("%m/%d/%Y")) returned_date.append(delta.strftime("%H:%M")) @@ -278,37 +264,47 @@ def get_date_time(start_date, start_time, minutes): return 0, returned_date, None -def create_maintenance(auth_headers, url, statuspage, host_ids, - all_infrastructure_affected, automation, title, desc, - returned_date, maintenance_notify_now, - maintenance_notify_72_hr, maintenance_notify_24_hr, - maintenance_notify_1_hr): +def create_maintenance( + auth_headers, + url, + statuspage, + host_ids, + all_infrastructure_affected, + automation, + title, + desc, + returned_date, + maintenance_notify_now, + maintenance_notify_72_hr, + maintenance_notify_24_hr, + maintenance_notify_1_hr, +): component_id = [] container_id = [] for val in host_ids: - component_id.append(val['component_id']) - container_id.append(val['container_id']) + component_id.append(val["component_id"]) + container_id.append(val["container_id"]) infrastructure_id = [f"{i}-{j}" for i, j in zip(component_id, container_id)] try: - values = json.dumps({ - "statuspage_id": statuspage, - "all_infrastructure_affected": str(int(all_infrastructure_affected)), - "infrastructure_affected": infrastructure_id, - "automation": str(int(automation)), - "maintenance_name": title, - "maintenance_details": desc, - "date_planned_start": returned_date[0], - "time_planned_start": returned_date[1], - "date_planned_end": returned_date[2], - "time_planned_end": returned_date[3], - "maintenance_notify_now": str(int(maintenance_notify_now)), - "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)), - "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)), - "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr)) - }) - response = open_url( - f"{url}/v2/maintenance/schedule", data=values, - headers=auth_headers) + values = json.dumps( + { + "statuspage_id": statuspage, + "all_infrastructure_affected": str(int(all_infrastructure_affected)), + "infrastructure_affected": infrastructure_id, + "automation": str(int(automation)), + "maintenance_name": title, + "maintenance_details": desc, + "date_planned_start": returned_date[0], + "time_planned_start": returned_date[1], + "date_planned_end": returned_date[2], + "time_planned_end": returned_date[3], + "maintenance_notify_now": str(int(maintenance_notify_now)), + "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)), + "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)), + "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr)), + } + ) + response = open_url(f"{url}/v2/maintenance/schedule", data=values, headers=auth_headers) data = json.loads(response.read()) if data["status"]["error"] == "yes": @@ -320,14 +316,13 @@ def create_maintenance(auth_headers, url, statuspage, host_ids, def delete_maintenance(auth_headers, url, statuspage, maintenance_id): try: - values = json.dumps({ - "statuspage_id": statuspage, - "maintenance_id": maintenance_id, - }) - response = open_url( - url=f"{url}/v2/maintenance/delete", - data=values, - headers=auth_headers) + values = json.dumps( + { + "statuspage_id": statuspage, + "maintenance_id": maintenance_id, + } + ) + response = open_url(url=f"{url}/v2/maintenance/delete", data=values, headers=auth_headers) data = 
json.loads(response.read()) if data["status"]["error"] == "yes": return 1, None, "Invalid maintenance_id" @@ -342,51 +337,49 @@ def main(): api_id=dict(required=True), api_key=dict(required=True, no_log=True), statuspage=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - url=dict(default='https://api.status.io'), - components=dict(type='list', elements='str', aliases=['component']), - containers=dict(type='list', elements='str', aliases=['container']), - all_infrastructure_affected=dict(type='bool', default=False), - automation=dict(type='bool', default=False), - title=dict(default='A new maintenance window'), - desc=dict(default='Created by Ansible'), - minutes=dict(type='int', default=10), - maintenance_notify_now=dict(type='bool', default=False), - maintenance_notify_72_hr=dict(type='bool', default=False), - maintenance_notify_24_hr=dict(type='bool', default=False), - maintenance_notify_1_hr=dict(type='bool', default=False), + state=dict(default="present", choices=["present", "absent"]), + url=dict(default="https://api.status.io"), + components=dict(type="list", elements="str", aliases=["component"]), + containers=dict(type="list", elements="str", aliases=["container"]), + all_infrastructure_affected=dict(type="bool", default=False), + automation=dict(type="bool", default=False), + title=dict(default="A new maintenance window"), + desc=dict(default="Created by Ansible"), + minutes=dict(type="int", default=10), + maintenance_notify_now=dict(type="bool", default=False), + maintenance_notify_72_hr=dict(type="bool", default=False), + maintenance_notify_24_hr=dict(type="bool", default=False), + maintenance_notify_1_hr=dict(type="bool", default=False), maintenance_id=dict(), start_date=dict(), - start_time=dict() + start_time=dict(), ), supports_check_mode=True, ) - api_id = module.params['api_id'] - api_key = module.params['api_key'] - statuspage = module.params['statuspage'] - state = module.params['state'] - url = module.params['url'] - components = module.params['components'] - containers = module.params['containers'] - all_infrastructure_affected = module.params['all_infrastructure_affected'] - automation = module.params['automation'] - title = module.params['title'] - desc = module.params['desc'] - minutes = module.params['minutes'] - maintenance_notify_now = module.params['maintenance_notify_now'] - maintenance_notify_72_hr = module.params['maintenance_notify_72_hr'] - maintenance_notify_24_hr = module.params['maintenance_notify_24_hr'] - maintenance_notify_1_hr = module.params['maintenance_notify_1_hr'] - maintenance_id = module.params['maintenance_id'] - start_date = module.params['start_date'] - start_time = module.params['start_time'] + api_id = module.params["api_id"] + api_key = module.params["api_key"] + statuspage = module.params["statuspage"] + state = module.params["state"] + url = module.params["url"] + components = module.params["components"] + containers = module.params["containers"] + all_infrastructure_affected = module.params["all_infrastructure_affected"] + automation = module.params["automation"] + title = module.params["title"] + desc = module.params["desc"] + minutes = module.params["minutes"] + maintenance_notify_now = module.params["maintenance_notify_now"] + maintenance_notify_72_hr = module.params["maintenance_notify_72_hr"] + maintenance_notify_24_hr = module.params["maintenance_notify_24_hr"] + maintenance_notify_1_hr = module.params["maintenance_notify_1_hr"] + maintenance_id = module.params["maintenance_id"] + start_date = 
module.params["start_date"] + start_time = module.params["start_time"] if state == "present": - if api_id and api_key: - (rc, auth_headers, auth_content, error) = \ - get_api_auth_headers(api_id, api_key, url, statuspage) + (rc, auth_headers, auth_content, error) = get_api_auth_headers(api_id, api_key, url, statuspage) if rc != 0: module.fail_json(msg=f"Failed to get auth keys: {error}") else: @@ -394,27 +387,22 @@ def main(): auth_content = {} if minutes or start_time and start_date: - (rc, returned_date, error) = get_date_time( - start_date, start_time, minutes) + (rc, returned_date, error) = get_date_time(start_date, start_time, minutes) if rc != 0: module.fail_json(msg=f"Failed to set date/time: {error}") if not components and not containers: - return module.fail_json(msg="A Component or Container must be " - "defined") + return module.fail_json(msg="A Component or Container must be defined") elif components and containers: - return module.fail_json(msg="Components and containers cannot " - "be used together") + return module.fail_json(msg="Components and containers cannot be used together") else: if components: - (rc, host_ids, error) = get_component_ids(auth_content, - components) + (rc, host_ids, error) = get_component_ids(auth_content, components) if rc != 0: module.fail_json(msg=f"Failed to find component {error}") if containers: - (rc, host_ids, error) = get_container_ids(auth_content, - containers) + (rc, host_ids, error) = get_container_ids(auth_content, containers) if rc != 0: module.fail_json(msg=f"Failed to find container {error}") @@ -422,22 +410,28 @@ def main(): module.exit_json(changed=True) else: (rc, dummy, error) = create_maintenance( - auth_headers, url, statuspage, host_ids, - all_infrastructure_affected, automation, - title, desc, returned_date, maintenance_notify_now, - maintenance_notify_72_hr, maintenance_notify_24_hr, - maintenance_notify_1_hr) + auth_headers, + url, + statuspage, + host_ids, + all_infrastructure_affected, + automation, + title, + desc, + returned_date, + maintenance_notify_now, + maintenance_notify_72_hr, + maintenance_notify_24_hr, + maintenance_notify_1_hr, + ) if rc == 0: - module.exit_json(changed=True, result="Successfully created " - "maintenance") + module.exit_json(changed=True, result="Successfully created maintenance") else: module.fail_json(msg=f"Failed to create maintenance: {error}") if state == "absent": - if api_id and api_key: - (rc, auth_headers, auth_content, error) = \ - get_api_auth_headers(api_id, api_key, url, statuspage) + (rc, auth_headers, auth_content, error) = get_api_auth_headers(api_id, api_key, url, statuspage) if rc != 0: module.fail_json(msg=f"Failed to get auth keys: {error}") else: @@ -446,17 +440,12 @@ def main(): if module.check_mode: module.exit_json(changed=True) else: - (rc, dummy, error) = delete_maintenance( - auth_headers, url, statuspage, maintenance_id) + (rc, dummy, error) = delete_maintenance(auth_headers, url, statuspage, maintenance_id) if rc == 0: - module.exit_json( - changed=True, - result="Successfully deleted maintenance" - ) + module.exit_json(changed=True, result="Successfully deleted maintenance") else: - module.fail_json( - msg=f"Failed to delete maintenance: {error}") + module.fail_json(msg=f"Failed to delete maintenance: {error}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sudoers.py b/plugins/modules/sudoers.py index dde17c74e52..48a87cf3b6d 100644 --- a/plugins/modules/sudoers.py +++ b/plugins/modules/sudoers.py @@ -163,32 +163,31 @@ 
class Sudoers: - FILE_MODE = 0o440 def __init__(self, module): self.module = module self.check_mode = module.check_mode - self.name = module.params['name'] - self.user = module.params['user'] - self.group = module.params['group'] - self.state = module.params['state'] - self.noexec = module.params['noexec'] - self.nopassword = module.params['nopassword'] - self.setenv = module.params['setenv'] - self.host = module.params['host'] - self.runas = module.params['runas'] - self.sudoers_path = module.params['sudoers_path'] + self.name = module.params["name"] + self.user = module.params["user"] + self.group = module.params["group"] + self.state = module.params["state"] + self.noexec = module.params["noexec"] + self.nopassword = module.params["nopassword"] + self.setenv = module.params["setenv"] + self.host = module.params["host"] + self.runas = module.params["runas"] + self.sudoers_path = module.params["sudoers_path"] self.file = os.path.join(self.sudoers_path, self.name) - self.commands = module.params['commands'] - self.validation = module.params['validation'] + self.commands = module.params["commands"] + self.validation = module.params["validation"] def write(self): if self.check_mode: return - with open(self.file, 'w') as f: + with open(self.file, "w") as f: f.write(self.content()) os.chmod(self.file, self.FILE_MODE) @@ -203,7 +202,7 @@ def exists(self): return os.path.exists(self.file) def matches(self): - with open(self.file, 'r') as f: + with open(self.file, "r") as f: content_matches = f.read() == self.content() current_mode = os.stat(self.file).st_mode & 0o777 @@ -215,31 +214,33 @@ def content(self): if self.user: owner = self.user elif self.group: - owner = f'%{self.group}' + owner = f"%{self.group}" - commands_str = ', '.join(self.commands) - noexec_str = 'NOEXEC:' if self.noexec else '' - nopasswd_str = 'NOPASSWD:' if self.nopassword else '' - setenv_str = 'SETENV:' if self.setenv else '' - runas_str = f'({self.runas})' if self.runas is not None else '' + commands_str = ", ".join(self.commands) + noexec_str = "NOEXEC:" if self.noexec else "" + nopasswd_str = "NOPASSWD:" if self.nopassword else "" + setenv_str = "SETENV:" if self.setenv else "" + runas_str = f"({self.runas})" if self.runas is not None else "" return f"{owner} {self.host}={runas_str}{noexec_str}{nopasswd_str}{setenv_str} {commands_str}\n" def validate(self): - if self.validation == 'absent': + if self.validation == "absent": return - visudo_path = self.module.get_bin_path('visudo', required=self.validation == 'required') + visudo_path = self.module.get_bin_path("visudo", required=self.validation == "required") if visudo_path is None: return - check_command = [visudo_path, '-c', '-f', '-'] + check_command = [visudo_path, "-c", "-f", "-"] rc, stdout, stderr = self.module.run_command(check_command, data=self.content()) if rc != 0: - self.module.fail_json(msg=f'Failed to validate sudoers rule:\n{stdout or stderr}', stdout=stdout, stderr=stderr) + self.module.fail_json( + msg=f"Failed to validate sudoers rule:\n{stdout or stderr}", stdout=stdout, stderr=stderr + ) def run(self): - if self.state == 'absent': + if self.state == "absent": if self.exists(): self.delete() return True @@ -257,54 +258,51 @@ def run(self): def main(): argument_spec = { - 'commands': { - 'type': 'list', - 'elements': 'str', - }, - 'group': {}, - 'name': { - 'required': True, + "commands": { + "type": "list", + "elements": "str", }, - 'noexec': { - 'type': 'bool', - 'default': False, + "group": {}, + "name": { + "required": True, }, - 'nopassword': { - 
'type': 'bool', - 'default': True, + "noexec": { + "type": "bool", + "default": False, }, - 'setenv': { - 'type': 'bool', - 'default': False, + "nopassword": { + "type": "bool", + "default": True, }, - 'host': { - 'type': 'str', - 'default': 'ALL', + "setenv": { + "type": "bool", + "default": False, }, - 'runas': { - 'type': 'str', - 'default': None, + "host": { + "type": "str", + "default": "ALL", }, - 'sudoers_path': { - 'type': 'str', - 'default': '/etc/sudoers.d', + "runas": { + "type": "str", + "default": None, }, - 'state': { - 'default': 'present', - 'choices': ['present', 'absent'], + "sudoers_path": { + "type": "str", + "default": "/etc/sudoers.d", }, - 'user': {}, - 'validation': { - 'default': 'detect', - 'choices': ['absent', 'detect', 'required'] + "state": { + "default": "present", + "choices": ["present", "absent"], }, + "user": {}, + "validation": {"default": "detect", "choices": ["absent", "detect", "required"]}, } module = AnsibleModule( argument_spec=argument_spec, - mutually_exclusive=[['user', 'group']], + mutually_exclusive=[["user", "group"]], supports_check_mode=True, - required_if=[('state', 'present', ['commands'])], + required_if=[("state", "present", ["commands"])], ) sudoers = Sudoers(module) @@ -316,5 +314,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/supervisorctl.py b/plugins/modules/supervisorctl.py index 928e3131a14..0ce17171ca0 100644 --- a/plugins/modules/supervisorctl.py +++ b/plugins/modules/supervisorctl.py @@ -119,57 +119,60 @@ def main(): arg_spec = dict( - name=dict(type='str', required=True), - config=dict(type='path'), - server_url=dict(type='str'), - username=dict(type='str'), - password=dict(type='str', no_log=True), - supervisorctl_path=dict(type='path'), - state=dict(type='str', required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent', 'signalled']), - stop_before_removing=dict(type='bool', default=False), - signal=dict(type='str'), + name=dict(type="str", required=True), + config=dict(type="path"), + server_url=dict(type="str"), + username=dict(type="str"), + password=dict(type="str", no_log=True), + supervisorctl_path=dict(type="path"), + state=dict( + type="str", required=True, choices=["present", "started", "restarted", "stopped", "absent", "signalled"] + ), + stop_before_removing=dict(type="bool", default=False), + signal=dict(type="str"), ) module = AnsibleModule( argument_spec=arg_spec, supports_check_mode=True, - required_if=[('state', 'signalled', ['signal'])], + required_if=[("state", "signalled", ["signal"])], ) - name = module.params['name'] + name = module.params["name"] is_group = False - if name.endswith(':'): + if name.endswith(":"): is_group = True - name = name.rstrip(':') - state = module.params['state'] - stop_before_removing = module.params.get('stop_before_removing') - config = module.params.get('config') - server_url = module.params.get('server_url') - username = module.params.get('username') - password = module.params.get('password') - supervisorctl_path = module.params.get('supervisorctl_path') - signal = module.params.get('signal') + name = name.rstrip(":") + state = module.params["state"] + stop_before_removing = module.params.get("stop_before_removing") + config = module.params.get("config") + server_url = module.params.get("server_url") + username = module.params.get("username") + password = module.params.get("password") + supervisorctl_path = module.params.get("supervisorctl_path") + signal = 
module.params.get("signal") # we check error message for a pattern, so we need to make sure that's in C locale - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C") if supervisorctl_path: if os.path.exists(supervisorctl_path) and is_executable(supervisorctl_path): supervisorctl_args = [supervisorctl_path] else: module.fail_json( - msg=f"Provided path to supervisorctl does not exist or isn't executable: {supervisorctl_path}") + msg=f"Provided path to supervisorctl does not exist or isn't executable: {supervisorctl_path}" + ) else: - supervisorctl_args = [module.get_bin_path('supervisorctl', True)] + supervisorctl_args = [module.get_bin_path("supervisorctl", True)] if config: - supervisorctl_args.extend(['-c', config]) + supervisorctl_args.extend(["-c", config]) if server_url: - supervisorctl_args.extend(['-s', server_url]) + supervisorctl_args.extend(["-s", server_url]) if username: - supervisorctl_args.extend(['-u', username]) + supervisorctl_args.extend(["-u", username]) if password: - supervisorctl_args.extend(['-p', password]) + supervisorctl_args.extend(["-p", password]) def run_supervisorctl(cmd, name=None, **kwargs): args = list(supervisorctl_args) # copy the master args @@ -180,21 +183,21 @@ def run_supervisorctl(cmd, name=None, **kwargs): def get_matched_processes(): matched = [] - rc, out, err = run_supervisorctl('status') + rc, out, err = run_supervisorctl("status") for line in out.splitlines(): # One status line may look like one of these two: # process not in group: # echo_date_lonely RUNNING pid 7680, uptime 13:22:18 # process in group: # echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18 - fields = [field for field in line.split(' ') if field != ''] + fields = [field for field in line.split(" ") if field != ""] process_name = fields[0] status = fields[1] if is_group: # If there is ':', this process must be in a group. 
- if ':' in process_name: - group = process_name.split(':')[0] + if ":" in process_name: + group = process_name.split(":")[0] if group != name: continue else: @@ -222,47 +225,49 @@ def take_action_on_processes(processes, status_filter, action, expected_result, module.exit_json(changed=True) for process_name in to_take_action_on: rc, out, err = run_supervisorctl(action, process_name, check_rc=True) - if f'{process_name}: {expected_result}' not in out: + if f"{process_name}: {expected_result}" not in out: module.fail_json(msg=out) if exit_module: module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on) - if state == 'restarted': - rc, out, err = run_supervisorctl('update', check_rc=True) + if state == "restarted": + rc, out, err = run_supervisorctl("update", check_rc=True) processes = get_matched_processes() if len(processes) == 0: module.fail_json(name=name, msg="ERROR (no such process)") - take_action_on_processes(processes, lambda s: True, 'restart', 'started') + take_action_on_processes(processes, lambda s: True, "restart", "started") processes = get_matched_processes() - if state == 'absent': + if state == "absent": if len(processes) == 0: module.exit_json(changed=False, name=name, state=state) if stop_before_removing: - take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped', exit_module=False) + take_action_on_processes( + processes, lambda s: s in ("RUNNING", "STARTING"), "stop", "stopped", exit_module=False + ) if module.check_mode: module.exit_json(changed=True) - run_supervisorctl('reread', check_rc=True) - rc, out, err = run_supervisorctl('remove', name) - if f'{name}: removed process group' in out: + run_supervisorctl("reread", check_rc=True) + rc, out, err = run_supervisorctl("remove", name) + if f"{name}: removed process group" in out: module.exit_json(changed=True, name=name, state=state) else: module.fail_json(msg=out, name=name, state=state) - if state == 'present': + if state == "present": if len(processes) > 0: module.exit_json(changed=False, name=name, state=state) if module.check_mode: module.exit_json(changed=True) - run_supervisorctl('reread', check_rc=True) - dummy, out, dummy = run_supervisorctl('add', name) - if f'{name}: added process group' in out: + run_supervisorctl("reread", check_rc=True) + dummy, out, dummy = run_supervisorctl("add", name) + if f"{name}: added process group" in out: module.exit_json(changed=True, name=name, state=state) else: module.fail_json(msg=out, name=name, state=state) @@ -271,15 +276,15 @@ def take_action_on_processes(processes, status_filter, action, expected_result, if len(processes) == 0: module.fail_json(name=name, msg="ERROR (no such process)") - if state == 'started': - take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started') + if state == "started": + take_action_on_processes(processes, lambda s: s not in ("RUNNING", "STARTING"), "start", "started") - if state == 'stopped': - take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped') + if state == "stopped": + take_action_on_processes(processes, lambda s: s in ("RUNNING", "STARTING"), "stop", "stopped") - if state == 'signalled': - take_action_on_processes(processes, lambda s: s in ('RUNNING',), f"signal {signal}", 'signalled') + if state == "signalled": + take_action_on_processes(processes, lambda s: s in ("RUNNING",), f"signal {signal}", "signalled") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git 
a/plugins/modules/svc.py b/plugins/modules/svc.py index e24746b0621..98aaa9e3188 100644 --- a/plugins/modules/svc.py +++ b/plugins/modules/svc.py @@ -98,12 +98,12 @@ def _load_dist_subclass(cls, *args, **kwargs): - ''' + """ Used for derivative implementations - ''' + """ subclass = None - distro = kwargs['module'].params['distro'] + distro = kwargs["module"].params["distro"] # get the most specific superclass for this platform if distro is not None: @@ -126,14 +126,14 @@ class Svc: # return _load_dist_subclass(cls, args, kwargs) def __init__(self, module): - self.extra_paths = ['/command', '/usr/local/bin'] - self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state'] + self.extra_paths = ["/command", "/usr/local/bin"] + self.report_vars = ["state", "enabled", "downed", "svc_full", "src_full", "pid", "duration", "full_state"] self.module = module - self.name = module.params['name'] - self.service_dir = module.params['service_dir'] - self.service_src = module.params['service_src'] + self.name = module.params["name"] + self.service_dir = module.params["service_dir"] + self.service_src = module.params["service_src"] self.enabled = None self.downed = None self.full_state = None @@ -141,25 +141,25 @@ def __init__(self, module): self.pid = None self.duration = None - self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths) - self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths) + self.svc_cmd = module.get_bin_path("svc", opt_dirs=self.extra_paths) + self.svstat_cmd = module.get_bin_path("svstat", opt_dirs=self.extra_paths) self.svc_full = f"{self.service_dir}/{self.name}" self.src_full = f"{self.service_src}/{self.name}" self.enabled = os.path.lexists(self.svc_full) if self.enabled: - self.downed = os.path.lexists(f'{self.svc_full}/down') + self.downed = os.path.lexists(f"{self.svc_full}/down") self.get_status() else: - self.downed = os.path.lexists(f'{self.src_full}/down') - self.state = 'stopped' + self.downed = os.path.lexists(f"{self.src_full}/down") + self.state = "stopped" def enable(self): if os.path.exists(self.src_full): try: os.symlink(self.src_full, self.svc_full) except OSError as e: - self.module.fail_json(path=self.src_full, msg=f'Error while linking: {to_native(e)}') + self.module.fail_json(path=self.src_full, msg=f"Error while linking: {to_native(e)}") else: self.module.fail_json(msg=f"Could not find source for service to enable ({self.src_full}).") @@ -167,12 +167,12 @@ def disable(self): try: os.unlink(self.svc_full) except OSError as e: - self.module.fail_json(path=self.svc_full, msg=f'Error while unlinking: {e}') - self.execute_command([self.svc_cmd, '-dx', self.src_full]) + self.module.fail_json(path=self.svc_full, msg=f"Error while unlinking: {e}") + self.execute_command([self.svc_cmd, "-dx", self.src_full]) - src_log = f'{self.src_full}/log' + src_log = f"{self.src_full}/log" if os.path.exists(src_log): - self.execute_command([self.svc_cmd, '-dx', src_log]) + self.execute_command([self.svc_cmd, "-dx", src_log]) def get_status(self): rc, out, err = self.execute_command([self.svstat_cmd, self.svc_full]) @@ -182,47 +182,47 @@ def get_status(self): else: self.full_state = out - m = re.search(r'\(pid (\d+)\)', out) + m = re.search(r"\(pid (\d+)\)", out) if m: self.pid = m.group(1) - m = re.search(r'(\d+) seconds', out) + m = re.search(r"(\d+) seconds", out) if m: self.duration = m.group(1) - if re.search(' up ', out): - self.state = 'start' - elif re.search(' down ', out): - self.state = 'stopp' + 
if re.search(" up ", out): + self.state = "start" + elif re.search(" down ", out): + self.state = "stopp" else: - self.state = 'unknown' + self.state = "unknown" return - if re.search(' want ', out): - self.state += 'ing' + if re.search(" want ", out): + self.state += "ing" else: - self.state += 'ed' + self.state += "ed" def start(self): - return self.execute_command([self.svc_cmd, '-u', self.svc_full]) + return self.execute_command([self.svc_cmd, "-u", self.svc_full]) def stopp(self): return self.stop() def stop(self): - return self.execute_command([self.svc_cmd, '-d', self.svc_full]) + return self.execute_command([self.svc_cmd, "-d", self.svc_full]) def once(self): - return self.execute_command([self.svc_cmd, '-o', self.svc_full]) + return self.execute_command([self.svc_cmd, "-o", self.svc_full]) def reload(self): - return self.execute_command([self.svc_cmd, '-1', self.svc_full]) + return self.execute_command([self.svc_cmd, "-1", self.svc_full]) def restart(self): - return self.execute_command([self.svc_cmd, '-t', self.svc_full]) + return self.execute_command([self.svc_cmd, "-t", self.svc_full]) def kill(self): - return self.execute_command([self.svc_cmd, '-k', self.svc_full]) + return self.execute_command([self.svc_cmd, "-k", self.svc_full]) def execute_command(self, cmd): try: @@ -242,24 +242,25 @@ def report(self): # =========================================== # Main control flow + def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']), - enabled=dict(type='bool'), - downed=dict(type='bool'), - service_dir=dict(type='str', default='/service'), - service_src=dict(type='str', default='/etc/service'), + name=dict(type="str", required=True), + state=dict(type="str", choices=["killed", "once", "reloaded", "restarted", "started", "stopped"]), + enabled=dict(type="bool"), + downed=dict(type="bool"), + service_dir=dict(type="str", default="/service"), + service_src=dict(type="str", default="/etc/service"), ), supports_check_mode=True, ) - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C") - state = module.params['state'] - enabled = module.params['enabled'] - downed = module.params['downed'] + state = module.params["state"] + enabled = module.params["enabled"] + downed = module.params["downed"] svc = Svc(module) changed = False @@ -296,5 +297,5 @@ def main(): module.exit_json(changed=changed, svc=svc.report()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/svr4pkg.py b/plugins/modules/svr4pkg.py index 204dee6b80e..82a91cab596 100644 --- a/plugins/modules/svr4pkg.py +++ b/plugins/modules/svr4pkg.py @@ -114,9 +114,9 @@ def package_installed(module, name, category): - cmd = [module.get_bin_path('pkginfo', True), '-q'] + cmd = [module.get_bin_path("pkginfo", True), "-q"] if category: - cmd.append('-c') + cmd.append("-c") cmd.append(name) rc, out, err = module.run_command(cmd) if rc == 0: @@ -126,8 +126,8 @@ def package_installed(module, name, category): def create_admin_file(): - (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True) - fullauto = b''' + (desc, filename) = tempfile.mkstemp(prefix="ansible_svr4pkg", text=True) + fullauto = b""" mail= instance=unique partial=nocheck @@ -144,7 +144,7 @@ def create_admin_file(): keystore=/var/sadm/security proxy= 
basedir=default -''' +""" os.write(desc, fullauto) os.close(desc) return filename @@ -158,16 +158,16 @@ def run_command(module, cmd): def package_install(module, name, src, proxy, response_file, zone, category): adminfile = create_admin_file() - cmd = ['pkgadd', '-n'] - if zone == 'current': - cmd += ['-G'] - cmd += ['-a', adminfile, '-d', src] + cmd = ["pkgadd", "-n"] + if zone == "current": + cmd += ["-G"] + cmd += ["-a", adminfile, "-d", src] if proxy is not None: - cmd += ['-x', proxy] + cmd += ["-x", proxy] if response_file is not None: - cmd += ['-r', response_file] + cmd += ["-r", response_file] if category: - cmd += ['-Y'] + cmd += ["-Y"] cmd.append(name) (rc, out, err) = run_command(module, cmd) os.unlink(adminfile) @@ -177,9 +177,9 @@ def package_install(module, name, src, proxy, response_file, zone, category): def package_uninstall(module, name, src, category): adminfile = create_admin_file() if category: - cmd = ['pkgrm', '-na', adminfile, '-Y', name] + cmd = ["pkgrm", "-na", adminfile, "-Y", name] else: - cmd = ['pkgrm', '-na', adminfile, name] + cmd = ["pkgrm", "-na", adminfile, name] (rc, out, err) = run_command(module, cmd) os.unlink(adminfile) return (rc, out, err) @@ -189,33 +189,32 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), - state=dict(required=True, choices=['present', 'absent']), + state=dict(required=True, choices=["present", "absent"]), src=dict(), proxy=dict(), response_file=dict(), - zone=dict(default='all', choices=['current', 'all']), - category=dict(default=False, type='bool') + zone=dict(default="all", choices=["current", "all"]), + category=dict(default=False, type="bool"), ), - supports_check_mode=True + supports_check_mode=True, ) - state = module.params['state'] - name = module.params['name'] - src = module.params['src'] - proxy = module.params['proxy'] - response_file = module.params['response_file'] - zone = module.params['zone'] - category = module.params['category'] + state = module.params["state"] + name = module.params["name"] + src = module.params["src"] + proxy = module.params["proxy"] + response_file = module.params["response_file"] + zone = module.params["zone"] + category = module.params["category"] rc = None - out = '' - err = '' + out = "" + err = "" result = {} - result['name'] = name - result['state'] = state + result["name"] = name + result["state"] = state - if state == 'present': + if state == "present": if src is None: - module.fail_json(name=name, - msg="src is required when state=present") + module.fail_json(name=name, msg="src is required when state=present") if not package_installed(module, name, category): if module.check_mode: module.exit_json(changed=True) @@ -225,7 +224,7 @@ def main(): if len(out) > 75: out = f"{out[:75]}..." - elif state == 'absent': + elif state == "absent": if package_installed(module, name, category): if module.check_mode: module.exit_json(changed=True) @@ -243,26 +242,26 @@ def main(): # 20 Reboot after installation of this package. 
# 99 (observed) pkgadd: ERROR: could not process datastream from if rc in (0, 2, 3, 10, 20): - result['changed'] = True + result["changed"] = True # neither install nor uninstall took place, or the command failed else: - result['changed'] = False + result["changed"] = False # rc will be None when the package was already installed and no action took place # Only return failed=False when the returncode is known to be good as there may be more # undocumented failure return codes if rc not in (None, 0, 2, 10, 20): - result['failed'] = True + result["failed"] = True else: - result['failed'] = False + result["failed"] = False if out: - result['stdout'] = out + result["stdout"] = out if err: - result['stderr'] = err + result["stderr"] = err module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/swdepot.py b/plugins/modules/swdepot.py index f20310b315b..ad764336307 100644 --- a/plugins/modules/swdepot.py +++ b/plugins/modules/swdepot.py @@ -68,14 +68,15 @@ def compare_package(version1, version2): - """ Compare version packages. - Return values: - -1 first minor - 0 equal - 1 first greater """ + """Compare version packages. + Return values: + -1 first minor + 0 equal + 1 first greater""" def normalize(v): - return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")] + return [int(x) for x in re.sub(r"(\.0+)*$", "", v).split(".")] + normalized_version1 = normalize(version1) normalized_version2 = normalize(version2) if normalized_version1 == normalized_version2: @@ -88,15 +89,15 @@ def normalize(v): def query_package(module, name, depot=None): - """ Returns whether a package is installed or not and version. """ + """Returns whether a package is installed or not and version.""" - cmd_list = ['/usr/sbin/swlist', '-a', 'revision', '-l', 'product'] + cmd_list = ["/usr/sbin/swlist", "-a", "revision", "-l", "product"] if depot: - cmd_list.extend(['-s', depot]) + cmd_list.extend(["-s", depot]) cmd_list.append(name) rc, stdout, stderr = module.run_command(cmd_list) if rc == 0: - stdout = ''.join(line for line in stdout.splitlines(True) if name in line) + stdout = "".join(line for line in stdout.splitlines(True) if name in line) version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1] else: version = None @@ -105,9 +106,9 @@ def query_package(module, name, depot=None): def remove_package(module, name): - """ Uninstall package if installed.
""" + """Uninstall package if installed.""" - cmd_remove = '/usr/sbin/swremove' + cmd_remove = "/usr/sbin/swremove" rc, stdout, stderr = module.run_command([cmd_remove, name]) if rc == 0: @@ -117,9 +118,9 @@ def remove_package(module, name): def install_package(module, depot, name): - """ Install package if not already installed """ + """Install package if not already installed""" - cmd_install = ['/usr/sbin/swinstall', '-x', 'mount_all_filesystems=false'] + cmd_install = ["/usr/sbin/swinstall", "-x", "mount_all_filesystems=false"] rc, stdout, stderr = module.run_command(cmd_install + ["-s", depot, name]) if rc == 0: return rc, stdout @@ -130,20 +131,20 @@ def install_package(module, depot, name): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(aliases=['pkg'], required=True), - state=dict(choices=['present', 'absent', 'latest'], required=True), - depot=dict() + name=dict(aliases=["pkg"], required=True), + state=dict(choices=["present", "absent", "latest"], required=True), + depot=dict(), ), - supports_check_mode=True + supports_check_mode=True, ) - name = module.params['name'] - state = module.params['state'] - depot = module.params['depot'] + name = module.params["name"] + state = module.params["state"] + depot = module.params["depot"] changed = False msg = "No changed" rc = 0 - if (state == 'present' or state == 'latest') and depot is None: + if (state == "present" or state == "latest") and depot is None: output = "depot parameter is mandatory in present or latest task" module.fail_json(name=name, msg=output, rc=rc) @@ -156,7 +157,7 @@ def main(): else: installed = False - if (state == 'present' or state == 'latest') and installed is False: + if (state == "present" or state == "latest") and installed is False: if module.check_mode: module.exit_json(changed=True) rc, output = install_package(module, depot, name) @@ -168,7 +169,7 @@ def main(): else: module.fail_json(name=name, msg=output, rc=rc) - elif state == 'latest' and installed is True: + elif state == "latest" and installed is True: # Check depot version rc, version_depot = query_package(module, name, depot) @@ -190,7 +191,7 @@ def main(): output = f"Software package not in repository {depot}" module.fail_json(name=name, msg=output, rc=rc) - elif state == 'absent' and installed is True: + elif state == "absent" and installed is True: if module.check_mode: module.exit_json(changed=True) rc, output = remove_package(module, name) @@ -206,5 +207,5 @@ def main(): module.exit_json(changed=changed, name=name, state=state, msg=msg) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/swupd.py b/plugins/modules/swupd.py index 7322bfbaaf6..69155950978 100644 --- a/plugins/modules/swupd.py +++ b/plugins/modules/swupd.py @@ -250,7 +250,9 @@ def verify_os(self): cmd = self._get_cmd(["verify", "--fix"]) self._run_cmd(cmd) - if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout): + if self.rc == 0 and ( + self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout + ): self.changed = True self.msg = "Fix successful" return @@ -275,7 +277,7 @@ def main(): ), required_one_of=[["name", "update", "verify"]], mutually_exclusive=[["name", "update", "verify"]], - supports_check_mode=True + supports_check_mode=True, ) swupd = Swupd(module) @@ -302,5 +304,5 @@ def main(): module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr) -if 
__name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/syslogger.py b/plugins/modules/syslogger.py index dc327c31adf..61a09f51ce2 100644 --- a/plugins/modules/syslogger.py +++ b/plugins/modules/syslogger.py @@ -124,37 +124,37 @@ def get_facility(facility): return { - 'kern': syslog.LOG_KERN, - 'user': syslog.LOG_USER, - 'mail': syslog.LOG_MAIL, - 'daemon': syslog.LOG_DAEMON, - 'auth': syslog.LOG_AUTH, - 'lpr': syslog.LOG_LPR, - 'news': syslog.LOG_NEWS, - 'uucp': syslog.LOG_UUCP, - 'cron': syslog.LOG_CRON, - 'syslog': syslog.LOG_SYSLOG, - 'local0': syslog.LOG_LOCAL0, - 'local1': syslog.LOG_LOCAL1, - 'local2': syslog.LOG_LOCAL2, - 'local3': syslog.LOG_LOCAL3, - 'local4': syslog.LOG_LOCAL4, - 'local5': syslog.LOG_LOCAL5, - 'local6': syslog.LOG_LOCAL6, - 'local7': syslog.LOG_LOCAL7 + "kern": syslog.LOG_KERN, + "user": syslog.LOG_USER, + "mail": syslog.LOG_MAIL, + "daemon": syslog.LOG_DAEMON, + "auth": syslog.LOG_AUTH, + "lpr": syslog.LOG_LPR, + "news": syslog.LOG_NEWS, + "uucp": syslog.LOG_UUCP, + "cron": syslog.LOG_CRON, + "syslog": syslog.LOG_SYSLOG, + "local0": syslog.LOG_LOCAL0, + "local1": syslog.LOG_LOCAL1, + "local2": syslog.LOG_LOCAL2, + "local3": syslog.LOG_LOCAL3, + "local4": syslog.LOG_LOCAL4, + "local5": syslog.LOG_LOCAL5, + "local6": syslog.LOG_LOCAL6, + "local7": syslog.LOG_LOCAL7, }.get(facility, syslog.LOG_DAEMON) def get_priority(priority): return { - 'emerg': syslog.LOG_EMERG, - 'alert': syslog.LOG_ALERT, - 'crit': syslog.LOG_CRIT, - 'err': syslog.LOG_ERR, - 'warning': syslog.LOG_WARNING, - 'notice': syslog.LOG_NOTICE, - 'info': syslog.LOG_INFO, - 'debug': syslog.LOG_DEBUG + "emerg": syslog.LOG_EMERG, + "alert": syslog.LOG_ALERT, + "crit": syslog.LOG_CRIT, + "err": syslog.LOG_ERR, + "warning": syslog.LOG_WARNING, + "notice": syslog.LOG_NOTICE, + "info": syslog.LOG_INFO, + "debug": syslog.LOG_DEBUG, }.get(priority, syslog.LOG_INFO) @@ -162,19 +162,36 @@ def main(): # define the available arguments/parameters that a user can pass to # the module module_args = dict( - ident=dict(type='str', default='ansible_syslogger'), - msg=dict(type='str', required=True), - priority=dict(type='str', - choices=["emerg", "alert", "crit", "err", "warning", - "notice", "info", "debug"], - default='info'), - facility=dict(type='str', - choices=["kern", "user", "mail", "daemon", "auth", - "lpr", "news", "uucp", "cron", "syslog", - "local0", "local1", "local2", "local3", - "local4", "local5", "local6", "local7"], - default='daemon'), - log_pid=dict(type='bool', default=False) + ident=dict(type="str", default="ansible_syslogger"), + msg=dict(type="str", required=True), + priority=dict( + type="str", choices=["emerg", "alert", "crit", "err", "warning", "notice", "info", "debug"], default="info" + ), + facility=dict( + type="str", + choices=[ + "kern", + "user", + "mail", + "daemon", + "auth", + "lpr", + "news", + "uucp", + "cron", + "syslog", + "local0", + "local1", + "local2", + "local3", + "local4", + "local5", + "local6", + "local7", + ], + default="daemon", + ), + log_pid=dict(type="bool", default=False), ) module = AnsibleModule( @@ -183,28 +200,29 @@ def main(): result = dict( changed=False, - ident=module.params['ident'], - priority=module.params['priority'], - facility=module.params['facility'], - log_pid=module.params['log_pid'], - msg=module.params['msg'] + ident=module.params["ident"], + priority=module.params["priority"], + facility=module.params["facility"], + log_pid=module.params["log_pid"], + msg=module.params["msg"], ) # do the logging try: - 
syslog.openlog(module.params['ident'], - syslog.LOG_PID if module.params['log_pid'] else 0, - get_facility(module.params['facility'])) - syslog.syslog(get_priority(module.params['priority']), - module.params['msg']) + syslog.openlog( + module.params["ident"], + syslog.LOG_PID if module.params["log_pid"] else 0, + get_facility(module.params["facility"]), + ) + syslog.syslog(get_priority(module.params["priority"]), module.params["msg"]) syslog.closelog() - result['changed'] = True + result["changed"] = True except Exception as exc: - module.fail_json(error=f'Failed to write to syslog {exc}', exception=traceback.format_exc(), **result) + module.fail_json(error=f"Failed to write to syslog {exc}", exception=traceback.format_exc(), **result) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/syspatch.py b/plugins/modules/syspatch.py index 7231262867f..3095c7750f2 100644 --- a/plugins/modules/syspatch.py +++ b/plugins/modules/syspatch.py @@ -69,9 +69,7 @@ def run_module(): # define available arguments/parameters a user can pass to the module - module_args = dict( - revert=dict(type='str', choices=['all', 'one']) - ) + module_args = dict(revert=dict(type="str", choices=["all", "one"])) module = AnsibleModule( argument_spec=module_args, @@ -84,22 +82,22 @@ def run_module(): def syspatch_run(module): - cmd = module.get_bin_path('syspatch', True) + cmd = module.get_bin_path("syspatch", True) changed = False reboot_needed = False # Set safe defaults for run_flag and check_flag - run_flag = ['-c'] - check_flag = ['-c'] - if module.params['revert']: - check_flag = ['-l'] + run_flag = ["-c"] + check_flag = ["-c"] + if module.params["revert"]: + check_flag = ["-l"] - if module.params['revert'] == 'all': - run_flag = ['-R'] + if module.params["revert"] == "all": + run_flag = ["-R"] else: - run_flag = ['-r'] + run_flag = ["-r"] else: - check_flag = ['-c'] + check_flag = ["-c"] run_flag = [] # Run check command @@ -122,17 +120,17 @@ def syspatch_run(module): # Workaround syspatch ln bug: # http://openbsd-archive.7691.n7.nabble.com/Warning-applying-latest-syspatch-td354250.html - if rc != 0 and err != 'ln: /usr/X11R6/bin/X: No such file or directory\n': + if rc != 0 and err != "ln: /usr/X11R6/bin/X: No such file or directory\n": module.fail_json(msg=f"Command {cmd} failed rc={rc}, out={out}, err={err}") - elif out.lower().find('create unique kernel') >= 0: + elif out.lower().find("create unique kernel") >= 0: # Kernel update applied reboot_needed = True - elif out.lower().find('syspatch updated itself') >= 0: - module.warn('Syspatch was updated. Please run syspatch again.') + elif out.lower().find("syspatch updated itself") >= 0: + module.warn("Syspatch was updated. 
Please run syspatch again.") # If no stdout, then warn user if len(out) == 0: - module.warn('syspatch had suggested changes, but stdout was empty.') + module.warn("syspatch had suggested changes, but stdout was empty.") changed = True else: @@ -151,5 +149,5 @@ def main(): run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sysrc.py b/plugins/modules/sysrc.py index ca875f59cde..a8cbf0bfbf4 100644 --- a/plugins/modules/sysrc.py +++ b/plugins/modules/sysrc.py @@ -103,24 +103,24 @@ class Sysrc(StateModuleHelper): module = dict( argument_spec=dict( - name=dict(type='str', required=True), - value=dict(type='str'), - state=dict(type='str', default='present', choices=['absent', 'present', 'value_present', 'value_absent']), - path=dict(type='str', default='/etc/rc.conf'), - delim=dict(type='str', default=' '), - jail=dict(type='str') + name=dict(type="str", required=True), + value=dict(type="str"), + state=dict(type="str", default="present", choices=["absent", "present", "value_present", "value_absent"]), + path=dict(type="str", default="/etc/rc.conf"), + delim=dict(type="str", default=" "), + jail=dict(type="str"), ), - supports_check_mode=True + supports_check_mode=True, ) - output_params = ('value',) + output_params = ("value",) use_old_vardict = False def __init_module__(self): # OID style names are not supported - if not re.match(r'^\w+$', self.vars.name, re.ASCII): + if not re.match(r"^\w+$", self.vars.name, re.ASCII): self.module.fail_json(msg="Name may only contain alpha-numeric and underscore characters") - self.sysrc = self.module.get_bin_path('sysrc', True) + self.sysrc = self.module.get_bin_path("sysrc", True) def _contains(self): value = self._get() @@ -135,28 +135,28 @@ def _get(self): if not os.path.exists(self.vars.path): return None - (rc, out, err) = self._sysrc('-v', '-n', self.vars.name) + (rc, out, err) = self._sysrc("-v", "-n", self.vars.name) if "unknown variable" in err or "unknown variable" in out: # Prior to FreeBSD 11.1 sysrc would write "unknown variable" to stdout and not stderr # https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=229806 return None if out.startswith(self.vars.path): - return out.split(':', 1)[1].strip() + return out.split(":", 1)[1].strip() return None def _modify(self, op, changed): (rc, out, err) = self._sysrc(f"{self.vars.name}{op}={self.vars.delim}{self.vars.value}") if out.startswith(f"{self.vars.name}:"): - return changed(out.split(' -> ')[1].strip().split(self.vars.delim)) + return changed(out.split(" -> ")[1].strip().split(self.vars.delim)) return False def _sysrc(self, *args): - cmd = [self.sysrc, '-f', self.vars.path] + cmd = [self.sysrc, "-f", self.vars.path] if self.vars.jail: - cmd += ['-j', self.vars.jail] + cmd += ["-j", self.vars.jail] cmd.extend(args) (rc, out, err) = self.module.run_command(cmd) @@ -170,7 +170,7 @@ def state_absent(self): return if not self.check_mode: - self._sysrc('-x', self.vars.name) + self._sysrc("-x", self.vars.name) self.changed = True @@ -180,7 +180,7 @@ def state_present(self): return if self.vars.value is None: - self.vars.set('value', value) + self.vars.set("value", value) return if not self.check_mode: @@ -193,7 +193,7 @@ def state_value_absent(self): if not contains: return - self.changed = self.check_mode or self._modify('-', lambda values: self.vars.value not in values) + self.changed = self.check_mode or self._modify("-", lambda values: self.vars.value not in values) def state_value_present(self): (contains, value) = self._contains() @@ -201,15 
+201,15 @@ def state_value_present(self): return if self.vars.value is None: - self.vars.set('value', value) + self.vars.set("value", value) return - self.changed = self.check_mode or self._modify('+', lambda values: self.vars.value in values) + self.changed = self.check_mode or self._modify("+", lambda values: self.vars.value in values) def main(): Sysrc.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/systemd_info.py b/plugins/modules/systemd_info.py index 34a7a141d6e..eafe303133d 100644 --- a/plugins/modules/systemd_info.py +++ b/plugins/modules/systemd_info.py @@ -258,16 +258,16 @@ def get_unit_properties(runner, prop_list, unit): def determine_category(unit): - if unit.endswith('.service'): - return 'service' - elif unit.endswith('.target'): - return 'target' - elif unit.endswith('.socket'): - return 'socket' - elif unit.endswith('.mount'): - return 'mount' - elif unit.endswith('.timer'): - return 'timer' + if unit.endswith(".service"): + return "service" + elif unit.endswith(".target"): + return "target" + elif unit.endswith(".socket"): + return "socket" + elif unit.endswith(".mount"): + return "mount" + elif unit.endswith(".timer"): + return "timer" else: return None @@ -285,11 +285,11 @@ def unit_exists(unit, units_info): def get_category_base_props(category): base_props = { - 'service': ['FragmentPath', 'UnitFileState', 'UnitFilePreset', 'MainPID', 'ExecMainPID'], - 'target': ['FragmentPath', 'UnitFileState', 'UnitFilePreset'], - 'socket': ['FragmentPath', 'UnitFileState', 'UnitFilePreset'], - 'mount': ['Where', 'What', 'Options', 'Type'], - 'timer': ['FragmentPath', 'UnitFileState', 'UnitFilePreset'], + "service": ["FragmentPath", "UnitFileState", "UnitFilePreset", "MainPID", "ExecMainPID"], + "target": ["FragmentPath", "UnitFileState", "UnitFilePreset"], + "socket": ["FragmentPath", "UnitFileState", "UnitFilePreset"], + "mount": ["Where", "What", "Options", "Type"], + "timer": ["FragmentPath", "UnitFileState", "UnitFilePreset"], } return base_props.get(category, []) @@ -303,7 +303,7 @@ def validate_unit_and_properties(runner, unit, extra_properties, units_info, pro if not category: module.fail_json(msg=f"Could not determine the category for unit '{unit}'.") - state_props = ['LoadState', 'ActiveState', 'SubState'] + state_props = ["LoadState", "ActiveState", "SubState"] props = get_category_base_props(category) full_props = set(props + state_props + extra_properties) @@ -365,17 +365,17 @@ def process_unit(runner, unit, extra_properties, units_info, property_cache, sta def main(): global module module_args = dict( - unitname=dict(type='list', elements='str', default=[]), - extra_properties=dict(type='list', elements='str', default=[]) + unitname=dict(type="list", elements="str", default=[]), + extra_properties=dict(type="list", elements="str", default=[]), ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) - systemctl_bin = module.get_bin_path('systemctl', required=True) + systemctl_bin = module.get_bin_path("systemctl", required=True) base_runner = systemd_runner(module, systemctl_bin) get_version(base_runner) - state_props = ['LoadState', 'ActiveState', 'SubState'] + state_props = ["LoadState", "ActiveState", "SubState"] results = {} unit_types = ["service", "target", "socket", "mount", "timer"] @@ -398,10 +398,10 @@ def main(): } property_cache = {} - extra_properties = module.params['extra_properties'] + extra_properties = module.params["extra_properties"] - if module.params['unitname']: - 
selected_units = module.params['unitname'] + if module.params["unitname"]: + selected_units = module.params["unitname"] all_units = list(units_info) resolved_units, non_matching = process_wildcards(selected_units, all_units, module) units_to_process = sorted(resolved_units) @@ -413,5 +413,5 @@ def main(): module.exit_json(changed=False, units=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/sysupgrade.py b/plugins/modules/sysupgrade.py index 1709847cf97..44fe93f09c2 100644 --- a/plugins/modules/sysupgrade.py +++ b/plugins/modules/sysupgrade.py @@ -81,37 +81,37 @@ def sysupgrade_run(module): - sysupgrade_bin = module.get_bin_path('/usr/sbin/sysupgrade', required=True) + sysupgrade_bin = module.get_bin_path("/usr/sbin/sysupgrade", required=True) cmd = [sysupgrade_bin] changed = False # Setup command flags - if module.params['snapshot']: - run_flag = ['-s'] - if module.params['force']: + if module.params["snapshot"]: + run_flag = ["-s"] + if module.params["force"]: # Force only applies to snapshots - run_flag.append('-f') + run_flag.append("-f") else: # release flag - run_flag = ['-r'] + run_flag = ["-r"] - if module.params['keep_files']: - run_flag.append('-k') + if module.params["keep_files"]: + run_flag.append("-k") - if module.params['fetch_only']: - run_flag.append('-n') + if module.params["fetch_only"]: + run_flag.append("-n") # installurl must be the last argument - if module.params['installurl']: - run_flag.append(module.params['installurl']) + if module.params["installurl"]: + run_flag.append(module.params["installurl"]) rc, out, err = module.run_command(cmd + run_flag) if rc != 0: module.fail_json(msg=f"Command {cmd} failed rc={rc}, out={out}, err={err}") - elif out.lower().find('already on latest snapshot') >= 0: + elif out.lower().find("already on latest snapshot") >= 0: changed = False - elif out.lower().find('upgrade on next reboot') >= 0: + elif out.lower().find("upgrade on next reboot") >= 0: changed = True return dict( @@ -125,11 +125,11 @@ def sysupgrade_run(module): def main(): module = AnsibleModule( argument_spec=dict( - snapshot=dict(type='bool', default=False), - fetch_only=dict(type='bool', default=True), - force=dict(type='bool', default=False), - keep_files=dict(type='bool', default=False), - installurl=dict(type='str'), + snapshot=dict(type="bool", default=False), + fetch_only=dict(type="bool", default=True), + force=dict(type="bool", default=False), + keep_files=dict(type="bool", default=False), + installurl=dict(type="str"), ), supports_check_mode=False, ) @@ -137,5 +137,5 @@ def main(): module.exit_json(**return_dict) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/taiga_issue.py b/plugins/modules/taiga_issue.py index 249a52ff477..129f8da88f5 100644 --- a/plugins/modules/taiga_issue.py +++ b/plugins/modules/taiga_issue.py @@ -128,16 +128,28 @@ try: from taiga import TaigaAPI from taiga.exceptions import TaigaException + TAIGA_MODULE_IMPORTED = True except ImportError: TAIGA_IMP_ERR = traceback.format_exc() TAIGA_MODULE_IMPORTED = False -def manage_issue(taiga_host, project_name, issue_subject, issue_priority, - issue_status, issue_type, issue_severity, issue_description, - issue_attachment, issue_attachment_description, - issue_tags, state, check_mode=False): +def manage_issue( + taiga_host, + project_name, + issue_subject, + issue_priority, + issue_status, + issue_type, + issue_severity, + issue_description, + issue_attachment, + issue_attachment_description, + 
issue_tags, + state, + check_mode=False, +): """ Method that creates/deletes issues depending whether they exist and the state desired @@ -154,13 +166,13 @@ def manage_issue(taiga_host, project_name, issue_subject, issue_priority, changed = False try: - token = getenv('TAIGA_TOKEN') + token = getenv("TAIGA_TOKEN") if token: api = TaigaAPI(host=taiga_host, token=token) else: api = TaigaAPI(host=taiga_host) - username = getenv('TAIGA_USERNAME') - password = getenv('TAIGA_PASSWORD') + username = getenv("TAIGA_USERNAME") + password = getenv("TAIGA_PASSWORD") if not any([username, password]): return False, changed, "Missing credentials", {} api.auth(username=username, password=password) @@ -214,8 +226,15 @@ def manage_issue(taiga_host, project_name, issue_subject, issue_priority, changed = True if not check_mode: # Create the issue - new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags, - description=issue_description) + new_issue = project.add_issue( + issue_subject, + priority_id, + status_id, + type_id, + severity_id, + tags=issue_tags, + description=issue_description, + ) if issue_attachment: new_issue.attach(issue_attachment, description=issue_attachment_description) issue["attachment"] = issue_attachment @@ -252,41 +271,41 @@ def manage_issue(taiga_host, project_name, issue_subject, issue_priority, def main(): module = AnsibleModule( argument_spec=dict( - taiga_host=dict(type='str', default="https://api.taiga.io"), - project=dict(type='str', required=True), - subject=dict(type='str', required=True), - issue_type=dict(type='str', required=True), - priority=dict(type='str', default="Normal"), - status=dict(type='str', default="New"), - severity=dict(type='str', default="Normal"), - description=dict(type='str', default=""), - attachment=dict(type='path'), - attachment_description=dict(type='str', default=""), - tags=dict(default=[], type='list', elements='str'), - state=dict(type='str', choices=['present', 'absent'], default='present'), + taiga_host=dict(type="str", default="https://api.taiga.io"), + project=dict(type="str", required=True), + subject=dict(type="str", required=True), + issue_type=dict(type="str", required=True), + priority=dict(type="str", default="Normal"), + status=dict(type="str", default="New"), + severity=dict(type="str", default="Normal"), + description=dict(type="str", default=""), + attachment=dict(type="path"), + attachment_description=dict(type="str", default=""), + tags=dict(default=[], type="list", elements="str"), + state=dict(type="str", choices=["present", "absent"], default="present"), ), - supports_check_mode=True + supports_check_mode=True, ) if not TAIGA_MODULE_IMPORTED: module.fail_json(msg=missing_required_lib("python-taiga"), exception=TAIGA_IMP_ERR) - taiga_host = module.params['taiga_host'] - project_name = module.params['project'] - issue_subject = module.params['subject'] - issue_priority = module.params['priority'] - issue_status = module.params['status'] - issue_type = module.params['issue_type'] - issue_severity = module.params['severity'] - issue_description = module.params['description'] - issue_attachment = module.params['attachment'] - issue_attachment_description = module.params['attachment_description'] + taiga_host = module.params["taiga_host"] + project_name = module.params["project"] + issue_subject = module.params["subject"] + issue_priority = module.params["priority"] + issue_status = module.params["status"] + issue_type = module.params["issue_type"] + issue_severity = 
module.params["severity"] + issue_description = module.params["description"] + issue_attachment = module.params["attachment"] + issue_attachment_description = module.params["attachment_description"] if issue_attachment: if not isfile(issue_attachment): msg = f"{issue_attachment} is not a file" module.fail_json(msg=msg) - issue_tags = module.params['tags'] - state = module.params['state'] + issue_tags = module.params["tags"] + state = module.params["state"] return_status, changed, msg, issue_attr_dict = manage_issue( taiga_host, @@ -301,7 +320,7 @@ def main(): issue_attachment_description, issue_tags, state, - check_mode=module.check_mode + check_mode=module.check_mode, ) if return_status: if issue_attr_dict: @@ -312,5 +331,5 @@ def main(): module.fail_json(msg=msg) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/telegram.py b/plugins/modules/telegram.py index 0d8229d1dc1..41c4f606b65 100644 --- a/plugins/modules/telegram.py +++ b/plugins/modules/telegram.py @@ -87,6 +87,7 @@ from urllib.parse import quote from ansible.module_utils.basic import AnsibleModule + # noinspection PyUnresolvedReferences from ansible.module_utils.urls import fetch_url @@ -94,43 +95,44 @@ def main(): module = AnsibleModule( argument_spec=dict( - token=dict(type='str', required=True, no_log=True), - api_args=dict(type='dict'), + token=dict(type="str", required=True, no_log=True), + api_args=dict(type="dict"), api_method=dict(type="str", default="SendMessage"), ), - supports_check_mode=True + supports_check_mode=True, ) - token = quote(module.params.get('token')) - api_args = module.params.get('api_args') or {} - api_method = module.params.get('api_method') + token = quote(module.params.get("token")) + api_args = module.params.get("api_args") or {} + api_method = module.params.get("api_method") # filling backward compatibility args - api_args['chat_id'] = api_args.get('chat_id') - api_args['parse_mode'] = api_args.get('parse_mode') - api_args['text'] = api_args.get('text') + api_args["chat_id"] = api_args.get("chat_id") + api_args["parse_mode"] = api_args.get("parse_mode") + api_args["text"] = api_args.get("text") - if api_args['parse_mode'] == 'plain': - del api_args['parse_mode'] + if api_args["parse_mode"] == "plain": + del api_args["parse_mode"] - url = f'https://api.telegram.org/bot{token}/{api_method}' + url = f"https://api.telegram.org/bot{token}/{api_method}" if module.check_mode: module.exit_json(changed=False) - response, info = fetch_url(module, url, method="POST", data=json.dumps(api_args), - headers={'Content-Type': 'application/json'}) - if info['status'] == 200: + response, info = fetch_url( + module, url, method="POST", data=json.dumps(api_args), headers={"Content-Type": "application/json"} + ) + if info["status"] == 200: module.exit_json(changed=True) - elif info['status'] == -1: + elif info["status"] == -1: # SSL errors, connection problems, etc. 
module.fail_json(msg="Failed to send message", info=info, response=response) else: - body = json.loads(info['body']) + body = json.loads(info["body"]) module.fail_json( msg=f"Failed to send message, return status = {info['status']}\nurl = {url}\napi_args = {api_args}", - telegram_error=body['description'], + telegram_error=body["description"], ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/terraform.py b/plugins/modules/terraform.py index c9799f7b506..67296da4659 100644 --- a/plugins/modules/terraform.py +++ b/plugins/modules/terraform.py @@ -291,22 +291,26 @@ def get_version(bin_path): - extract_version = module.run_command([bin_path, 'version', '-json']) - terraform_version = (json.loads(extract_version[1]))['terraform_version'] + extract_version = module.run_command([bin_path, "version", "-json"]) + terraform_version = (json.loads(extract_version[1]))["terraform_version"] return terraform_version def preflight_validation(bin_path, project_path, version, variables_args=None, plan_file=None, no_color=True): - if project_path is None or '/' not in project_path: + if project_path is None or "/" not in project_path: module.fail_json(msg="Path for Terraform project can not be None or ''.") if not os.path.exists(bin_path): - module.fail_json(msg=f"Path for Terraform binary '{bin_path}' doesn't exist on this host - check the path and try again please.") + module.fail_json( + msg=f"Path for Terraform binary '{bin_path}' doesn't exist on this host - check the path and try again please." + ) if not os.path.isdir(project_path): - module.fail_json(msg=f"Path for Terraform project '{project_path}' doesn't exist on this host - check the path and try again please.") - cmd = [bin_path, 'validate'] + module.fail_json( + msg=f"Path for Terraform project '{project_path}' doesn't exist on this host - check the path and try again please." + ) + cmd = [bin_path, "validate"] if no_color: - cmd.append('-no-color') - if LooseVersion(version) < LooseVersion('0.15.0'): + cmd.append("-no-color") + if LooseVersion(version) < LooseVersion("0.15.0"): module.run_command(cmd + variables_args, check_rc=True, cwd=project_path) else: module.run_command(cmd, check_rc=True, cwd=project_path) @@ -316,81 +320,92 @@ def _state_args(state_file): if not state_file: return [] if not os.path.exists(state_file): - module.warn(f'Could not find state_file "{state_file}", the process will not destroy any resources, please check your state file path.') - return ['-state', state_file] - - -def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace, no_color=True): - command = [bin_path, 'init', '-input=false'] + module.warn( + f'Could not find state_file "{state_file}", the process will not destroy any resources, please check your state file path.' 
+ ) + return ["-state", state_file] + + +def init_plugins( + bin_path, + project_path, + backend_config, + backend_config_files, + init_reconfigure, + provider_upgrade, + plugin_paths, + workspace, + no_color=True, +): + command = [bin_path, "init", "-input=false"] if no_color: - command.append('-no-color') + command.append("-no-color") if backend_config: for key, val in backend_config.items(): - command.extend([ - '-backend-config', - f'{key}={val}' - ]) + command.extend(["-backend-config", f"{key}={val}"]) if backend_config_files: for f in backend_config_files: - command.extend(['-backend-config', f]) + command.extend(["-backend-config", f]) if init_reconfigure: - command.extend(['-reconfigure']) + command.extend(["-reconfigure"]) if provider_upgrade: - command.extend(['-upgrade']) + command.extend(["-upgrade"]) if plugin_paths: for plugin_path in plugin_paths: - command.extend(['-plugin-dir', plugin_path]) - rc, out, err = module.run_command(command, check_rc=True, cwd=project_path, environ_update={"TF_WORKSPACE": workspace}) + command.extend(["-plugin-dir", plugin_path]) + rc, out, err = module.run_command( + command, check_rc=True, cwd=project_path, environ_update={"TF_WORKSPACE": workspace} + ) def get_workspace_context(bin_path, project_path, no_color=True): workspace_ctx = {"current": "default", "all": []} - command = [bin_path, 'workspace', 'list'] + command = [bin_path, "workspace", "list"] if no_color: - command.append('-no-color') + command.append("-no-color") rc, out, err = module.run_command(command, cwd=project_path) if rc != 0: module.warn(f"Failed to list Terraform workspaces:\n{err}") - for item in out.split('\n'): + for item in out.split("\n"): stripped_item = item.strip() if not stripped_item: continue - elif stripped_item.startswith('* '): - workspace_ctx["current"] = stripped_item.replace('* ', '') - workspace_ctx["all"].append(stripped_item.replace('* ', '')) + elif stripped_item.startswith("* "): + workspace_ctx["current"] = stripped_item.replace("* ", "") + workspace_ctx["all"].append(stripped_item.replace("* ", "")) else: workspace_ctx["all"].append(stripped_item) return workspace_ctx def _workspace_cmd(bin_path, project_path, action, workspace, no_color=True): - command = [bin_path, 'workspace', action, workspace] + command = [bin_path, "workspace", action, workspace] if no_color: - command.append('-no-color') + command.append("-no-color") rc, out, err = module.run_command(command, check_rc=True, cwd=project_path) return rc, out, err def create_workspace(bin_path, project_path, workspace, no_color=True): - _workspace_cmd(bin_path, project_path, 'new', workspace, no_color) + _workspace_cmd(bin_path, project_path, "new", workspace, no_color) def select_workspace(bin_path, project_path, workspace, no_color=True): - _workspace_cmd(bin_path, project_path, 'select', workspace, no_color) + _workspace_cmd(bin_path, project_path, "select", workspace, no_color) def remove_workspace(bin_path, project_path, workspace, no_color=True): - _workspace_cmd(bin_path, project_path, 'delete', workspace, no_color) + _workspace_cmd(bin_path, project_path, "delete", workspace, no_color) def build_plan(command, project_path, variables_args, state_file, targets, state, args, plan_path=None, no_color=True): if plan_path is None: - f, plan_path = tempfile.mkstemp(suffix='.tfplan') + f, plan_path = tempfile.mkstemp(suffix=".tfplan") local_command = command[:] - plan_command = [command[0], 'plan'] + plan_command = [command[0], "plan"] if state == "planned": for c in local_command[1:]: @@ 
-406,13 +421,13 @@ def build_plan(command, project_path, variables_args, state_file, targets, state for a in args: plan_command.append(a) - plan_options = ['-input=false', '-detailed-exitcode', '-out', plan_path] + plan_options = ["-input=false", "-detailed-exitcode", "-out", plan_path] if no_color: - plan_options.insert(0, '-no-color') + plan_options.insert(0, "-no-color") plan_command.extend(plan_options) for t in targets: - plan_command.extend(['-target', t]) + plan_command.extend(["-target", t]) plan_command.extend(_state_args(state_file)) @@ -420,30 +435,34 @@ def build_plan(command, project_path, variables_args, state_file, targets, state if rc == 0: # no changes - return plan_path, False, out, err, plan_command if state == 'planned' else command + return plan_path, False, out, err, plan_command if state == "planned" else command elif rc == 1: # failure to plan module.fail_json( - msg=(f"Terraform plan could not be created\nSTDOUT: {out}\nSTDERR: {err}\n" - f"COMMAND: {' '.join(plan_command)} {' '.join([shlex_quote(arg) for arg in variables_args])}") + msg=( + f"Terraform plan could not be created\nSTDOUT: {out}\nSTDERR: {err}\n" + f"COMMAND: {' '.join(plan_command)} {' '.join([shlex_quote(arg) for arg in variables_args])}" + ) ) elif rc == 2: # changes, but successful - return plan_path, True, out, err, plan_command if state == 'planned' else command + return plan_path, True, out, err, plan_command if state == "planned" else command - module.fail_json(msg=f"Terraform plan failed with unexpected exit code {rc}.\nSTDOUT: {out}\nSTDERR: {err}\n" - f"COMMAND: {' '.join(plan_command)} {' '.join([shlex_quote(arg) for arg in variables_args])}") + module.fail_json( + msg=f"Terraform plan failed with unexpected exit code {rc}.\nSTDOUT: {out}\nSTDERR: {err}\n" + f"COMMAND: {' '.join(plan_command)} {' '.join([shlex_quote(arg) for arg in variables_args])}" + ) def get_diff(diff_output): def get_tf_resource_address(e): - return e['resource'] + return e["resource"] diff_json_output = json.loads(diff_output) # Ignore diff if resource_changes does not exists in tfplan - if 'resource_changes' in diff_json_output: - tf_reosource_changes = diff_json_output['resource_changes'] + if "resource_changes" in diff_json_output: + tf_reosource_changes = diff_json_output["resource_changes"] else: module.warn("Cannot find resource_changes in terraform plan, diff/check ignored") return False, {} @@ -452,20 +471,20 @@ def get_tf_resource_address(e): diff_before = [] changed = False for item in tf_reosource_changes: - item_change = item['change'] - tf_before_state = {'resource': item['address'], 'change': item['change']['before']} - tf_after_state = {'resource': item['address'], 'change': item['change']['after']} + item_change = item["change"] + tf_before_state = {"resource": item["address"], "change": item["change"]["before"]} + tf_after_state = {"resource": item["address"], "change": item["change"]["after"]} - if item_change['actions'] == ['update'] or item_change['actions'] == ['delete', 'create']: + if item_change["actions"] == ["update"] or item_change["actions"] == ["delete", "create"]: diff_before.append(tf_before_state) diff_after.append(tf_after_state) changed = True - if item_change['actions'] == ['delete']: + if item_change["actions"] == ["delete"]: diff_before.append(tf_before_state) changed = True - if item_change['actions'] == ['create']: + if item_change["actions"] == ["create"]: diff_after.append(tf_after_state) changed = True @@ -473,8 +492,8 @@ def get_tf_resource_address(e): 
diff_after.sort(key=get_tf_resource_address) return changed, dict( - before=({'data': diff_before}), - after=({'data': diff_after}), + before=({"data": diff_before}), + after=({"data": diff_after}), ) @@ -482,79 +501,89 @@ def main(): global module module = AnsibleModule( argument_spec=dict( - project_path=dict(required=True, type='path'), - binary_path=dict(type='path'), - plugin_paths=dict(type='list', elements='path'), - workspace=dict(type='str', default='default'), - purge_workspace=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent', 'planned']), - variables=dict(type='dict'), - complex_vars=dict(type='bool', default=False), - variables_files=dict(aliases=['variables_file'], type='list', elements='path'), - plan_file=dict(type='path'), - state_file=dict(type='path'), - targets=dict(type='list', elements='str', default=[]), - lock=dict(type='bool', default=True), - lock_timeout=dict(type='int'), - force_init=dict(type='bool', default=False), - backend_config=dict(type='dict'), - backend_config_files=dict(type='list', elements='path'), - init_reconfigure=dict(type='bool', default=False), - overwrite_init=dict(type='bool', default=True), - check_destroy=dict(type='bool', default=False), - parallelism=dict(type='int'), - provider_upgrade=dict(type='bool', default=False), - no_color=dict(type='bool', default=True), + project_path=dict(required=True, type="path"), + binary_path=dict(type="path"), + plugin_paths=dict(type="list", elements="path"), + workspace=dict(type="str", default="default"), + purge_workspace=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent", "planned"]), + variables=dict(type="dict"), + complex_vars=dict(type="bool", default=False), + variables_files=dict(aliases=["variables_file"], type="list", elements="path"), + plan_file=dict(type="path"), + state_file=dict(type="path"), + targets=dict(type="list", elements="str", default=[]), + lock=dict(type="bool", default=True), + lock_timeout=dict(type="int"), + force_init=dict(type="bool", default=False), + backend_config=dict(type="dict"), + backend_config_files=dict(type="list", elements="path"), + init_reconfigure=dict(type="bool", default=False), + overwrite_init=dict(type="bool", default=True), + check_destroy=dict(type="bool", default=False), + parallelism=dict(type="int"), + provider_upgrade=dict(type="bool", default=False), + no_color=dict(type="bool", default=True), ), - required_if=[('state', 'planned', ['plan_file'])], + required_if=[("state", "planned", ["plan_file"])], supports_check_mode=True, ) - project_path = module.params.get('project_path') - bin_path = module.params.get('binary_path') - plugin_paths = module.params.get('plugin_paths') - workspace = module.params.get('workspace') - purge_workspace = module.params.get('purge_workspace') - state = module.params.get('state') - variables = module.params.get('variables') or {} - complex_vars = module.params.get('complex_vars') - variables_files = module.params.get('variables_files') - plan_file = module.params.get('plan_file') - state_file = module.params.get('state_file') - force_init = module.params.get('force_init') - backend_config = module.params.get('backend_config') - backend_config_files = module.params.get('backend_config_files') - init_reconfigure = module.params.get('init_reconfigure') - overwrite_init = module.params.get('overwrite_init') - check_destroy = module.params.get('check_destroy') - provider_upgrade = module.params.get('provider_upgrade') - no_color = 
module.params.get('no_color') + project_path = module.params.get("project_path") + bin_path = module.params.get("binary_path") + plugin_paths = module.params.get("plugin_paths") + workspace = module.params.get("workspace") + purge_workspace = module.params.get("purge_workspace") + state = module.params.get("state") + variables = module.params.get("variables") or {} + complex_vars = module.params.get("complex_vars") + variables_files = module.params.get("variables_files") + plan_file = module.params.get("plan_file") + state_file = module.params.get("state_file") + force_init = module.params.get("force_init") + backend_config = module.params.get("backend_config") + backend_config_files = module.params.get("backend_config_files") + init_reconfigure = module.params.get("init_reconfigure") + overwrite_init = module.params.get("overwrite_init") + check_destroy = module.params.get("check_destroy") + provider_upgrade = module.params.get("provider_upgrade") + no_color = module.params.get("no_color") if bin_path is not None: command = [bin_path] else: - command = [module.get_bin_path('terraform', required=True)] + command = [module.get_bin_path("terraform", required=True)] checked_version = get_version(command[0]) - if LooseVersion(checked_version) < LooseVersion('0.15.0'): + if LooseVersion(checked_version) < LooseVersion("0.15.0"): if no_color: - DESTROY_ARGS = ('destroy', '-no-color', '-force') - APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true') + DESTROY_ARGS = ("destroy", "-no-color", "-force") + APPLY_ARGS = ("apply", "-no-color", "-input=false", "-auto-approve=true") else: - DESTROY_ARGS = ('destroy', '-force') - APPLY_ARGS = ('apply', '-input=false', '-auto-approve=true') + DESTROY_ARGS = ("destroy", "-force") + APPLY_ARGS = ("apply", "-input=false", "-auto-approve=true") else: if no_color: - DESTROY_ARGS = ('destroy', '-no-color', '-auto-approve') - APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve') + DESTROY_ARGS = ("destroy", "-no-color", "-auto-approve") + APPLY_ARGS = ("apply", "-no-color", "-input=false", "-auto-approve") else: - DESTROY_ARGS = ('destroy', '-auto-approve') - APPLY_ARGS = ('apply', '-input=false', '-auto-approve') + DESTROY_ARGS = ("destroy", "-auto-approve") + APPLY_ARGS = ("apply", "-input=false", "-auto-approve") if force_init: if overwrite_init or not os.path.isfile(os.path.join(project_path, ".terraform", "terraform.tfstate")): - init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace, no_color) + init_plugins( + command[0], + project_path, + backend_config, + backend_config_files, + init_reconfigure, + provider_upgrade, + plugin_paths, + workspace, + no_color, + ) workspace_ctx = get_workspace_context(command[0], project_path, no_color) if workspace_ctx["current"] != workspace: @@ -563,24 +592,24 @@ def main(): else: select_workspace(command[0], project_path, workspace, no_color) - if state == 'present': + if state == "present": command.extend(APPLY_ARGS) - elif state == 'absent': + elif state == "absent": command.extend(DESTROY_ARGS) - if state == 'present' and module.params.get('parallelism') is not None: + if state == "present" and module.params.get("parallelism") is not None: command.append(f"-parallelism={module.params.get('parallelism')}") def format_args(vars): if vars is None: - return 'null' + return "null" elif isinstance(vars, str): - return '"{string}"'.format(string=vars.replace('\\', '\\\\').replace('"', '\\"')).replace('\n', 
'\\n') + return '"{string}"'.format(string=vars.replace("\\", "\\\\").replace('"', '\\"')).replace("\n", "\\n") elif isinstance(vars, bool): if vars: - return 'true' + return "true" else: - return 'false' + return "false" return str(vars) def process_complex_args(vars): @@ -588,14 +617,16 @@ def process_complex_args(vars): if isinstance(vars, dict): for k, v in vars.items(): if isinstance(v, dict): - ret_out.append(f'{k}={{{process_complex_args(v)}}}') + ret_out.append(f"{k}={{{process_complex_args(v)}}}") elif isinstance(v, list): ret_out.append(f"{k}={process_complex_args(v)}") elif isinstance(v, (int, float, str, bool)) or v is None: - ret_out.append(f'{k}={format_args(v)}') + ret_out.append(f"{k}={format_args(v)}") else: # only to handle anything unforeseen - module.fail_json(msg="Supported types are, dictionaries, lists, strings, integers, boolean and float.") + module.fail_json( + msg="Supported types are, dictionaries, lists, strings, integers, boolean and float." + ) if isinstance(vars, list): l_out = [] for item in vars: @@ -607,7 +638,9 @@ def process_complex_args(vars): l_out.append(format_args(item)) else: # only to handle anything unforeseen - module.fail_json(msg="Supported types are, dictionaries, lists, strings, integers, boolean and float.") + module.fail_json( + msg="Supported types are, dictionaries, lists, strings, integers, boolean and float." + ) ret_out.append(f"[{','.join(l_out)}]") return ",".join(ret_out) @@ -616,136 +649,154 @@ def process_complex_args(vars): if complex_vars: for k, v in variables.items(): if isinstance(v, dict): - variables_args.extend([ - '-var', - f'{k}={{{process_complex_args(v)}}}' - ]) + variables_args.extend(["-var", f"{k}={{{process_complex_args(v)}}}"]) elif isinstance(v, list): - variables_args.extend([ - '-var', - f'{k}={process_complex_args(v)}' - ]) + variables_args.extend(["-var", f"{k}={process_complex_args(v)}"]) # on the top-level we need to pass just the python string with necessary # terraform string escape sequences elif isinstance(v, str): - variables_args.extend([ - '-var', - f"{k}={v}" - ]) + variables_args.extend(["-var", f"{k}={v}"]) else: - variables_args.extend([ - '-var', - f'{k}={format_args(v)}' - ]) + variables_args.extend(["-var", f"{k}={format_args(v)}"]) else: for k, v in variables.items(): - variables_args.extend([ - '-var', - f'{k}={v}' - ]) + variables_args.extend(["-var", f"{k}={v}"]) if variables_files: for f in variables_files: - variables_args.extend(['-var-file', f]) + variables_args.extend(["-var-file", f]) preflight_validation(command[0], project_path, checked_version, variables_args, plan_file, no_color) - if module.params.get('lock') is not None: - if module.params.get('lock'): - command.append('-lock=true') + if module.params.get("lock") is not None: + if module.params.get("lock"): + command.append("-lock=true") else: - command.append('-lock=false') - if module.params.get('lock_timeout') is not None: + command.append("-lock=false") + if module.params.get("lock_timeout") is not None: command.append(f"-lock-timeout={module.params.get('lock_timeout')}s") - for t in (module.params.get('targets') or []): - command.extend(['-target', t]) + for t in module.params.get("targets") or []: + command.extend(["-target", t]) # we aren't sure if this plan will result in changes, so assume yes needs_application, changed = True, False - out, err = '', '' + out, err = "", "" - if state == 'absent': + if state == "absent": command.extend(variables_args) - elif state == 'present' and plan_file: + elif state == "present" 
and plan_file: if any([os.path.isfile(f"{project_path}/{plan_file}"), os.path.isfile(plan_file)]): command.append(plan_file) else: module.fail_json(msg=f'Could not find plan_file "{plan_file}", check the path and try again.') else: - plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file, - module.params.get('targets'), state, APPLY_ARGS, plan_file, no_color) - if state == 'present' and check_destroy and '- destroy' in out: - module.fail_json(msg="Aborting command because it would destroy some resources. " - "Consider switching the 'check_destroy' to false to suppress this error") + plan_file, needs_application, out, err, command = build_plan( + command, + project_path, + variables_args, + state_file, + module.params.get("targets"), + state, + APPLY_ARGS, + plan_file, + no_color, + ) + if state == "present" and check_destroy and "- destroy" in out: + module.fail_json( + msg="Aborting command because it would destroy some resources. " + "Consider switching the 'check_destroy' to false to suppress this error" + ) command.append(plan_file) result_diff = dict() if module._diff or module.check_mode: - if state == 'absent': - plan_absent_args = ['-destroy'] - plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file, - module.params.get('targets'), state, plan_absent_args, plan_file, no_color) - diff_command = [command[0], 'show', '-json', plan_file] + if state == "absent": + plan_absent_args = ["-destroy"] + plan_file, needs_application, out, err, command = build_plan( + command, + project_path, + variables_args, + state_file, + module.params.get("targets"), + state, + plan_absent_args, + plan_file, + no_color, + ) + diff_command = [command[0], "show", "-json", plan_file] rc, diff_output, err = module.run_command(diff_command, check_rc=False, cwd=project_path) changed, result_diff = get_diff(diff_output) if rc != 0: if workspace_ctx["current"] != workspace: select_workspace(command[0], project_path, workspace_ctx["current"], no_color) - module.fail_json(msg=err.rstrip(), rc=rc, stdout=out, - stdout_lines=out.splitlines(), stderr=err, - stderr_lines=err.splitlines(), - cmd=' '.join(command)) - - if needs_application and not module.check_mode and state != 'planned': + module.fail_json( + msg=err.rstrip(), + rc=rc, + stdout=out, + stdout_lines=out.splitlines(), + stderr=err, + stderr_lines=err.splitlines(), + cmd=" ".join(command), + ) + + if needs_application and not module.check_mode and state != "planned": rc, out, err = module.run_command(command, check_rc=False, cwd=project_path) if rc != 0: if workspace_ctx["current"] != workspace: select_workspace(command[0], project_path, workspace_ctx["current"], no_color) - module.fail_json(msg=err.rstrip(), rc=rc, stdout=out, - stdout_lines=out.splitlines(), stderr=err, - stderr_lines=err.splitlines(), - cmd=' '.join(command)) + module.fail_json( + msg=err.rstrip(), + rc=rc, + stdout=out, + stdout_lines=out.splitlines(), + stderr=err, + stderr_lines=err.splitlines(), + cmd=" ".join(command), + ) # checks out to decide if changes were made during execution - if ' 0 added, 0 changed' not in out and not state == "absent" or ' 0 destroyed' not in out: + if " 0 added, 0 changed" not in out and not state == "absent" or " 0 destroyed" not in out: changed = True if no_color: - outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file) + outputs_command = [command[0], "output", "-no-color", "-json"] + _state_args(state_file) 
else: - outputs_command = [command[0], 'output', '-json'] + _state_args(state_file) + outputs_command = [command[0], "output", "-json"] + _state_args(state_file) rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path) outputs = {} if rc == 1: - module.warn(f"Could not get Terraform outputs. This usually means none have been defined.\nstdout: {outputs_text}\nstderr: {outputs_err}") + module.warn( + f"Could not get Terraform outputs. This usually means none have been defined.\nstdout: {outputs_text}\nstderr: {outputs_err}" + ) elif rc != 0: module.fail_json( msg=f"Failure when getting Terraform outputs. Exited {rc}.\nstdout: {outputs_text}\nstderr: {outputs_err}", - command=' '.join(outputs_command)) + command=" ".join(outputs_command), + ) else: outputs = json.loads(outputs_text) # Restore the Terraform workspace found when running the module if workspace_ctx["current"] != workspace: select_workspace(command[0], project_path, workspace_ctx["current"], no_color) - if state == 'absent' and workspace != 'default' and purge_workspace is True: + if state == "absent" and workspace != "default" and purge_workspace is True: remove_workspace(command[0], project_path, workspace, no_color) result = { - 'state': state, - 'workspace': workspace, - 'outputs': outputs, - 'stdout': out, - 'stderr': err, - 'command': ' '.join(command), - 'changed': changed, - 'diff': result_diff, + "state": state, + "workspace": workspace, + "outputs": outputs, + "stdout": out, + "stderr": err, + "command": " ".join(command), + "changed": changed, + "diff": result_diff, } module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/timezone.py b/plugins/modules/timezone.py index 29cd4b7b276..2537290f3bc 100644 --- a/plugins/modules/timezone.py +++ b/plugins/modules/timezone.py @@ -90,38 +90,38 @@ def __new__(cls, module): Args: module: The AnsibleModule. """ - if platform.system() == 'Linux': - timedatectl = module.get_bin_path('timedatectl') + if platform.system() == "Linux": + timedatectl = module.get_bin_path("timedatectl") if timedatectl is not None: rc, stdout, stderr = module.run_command(timedatectl) if rc == 0: return super(Timezone, SystemdTimezone).__new__(SystemdTimezone) else: - module.debug(f'timedatectl command was found but not usable: {stderr}. using other method.') + module.debug(f"timedatectl command was found but not usable: {stderr}. using other method.") return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone) else: return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone) - elif re.match('^joyent_.*Z', platform.version()): + elif re.match("^joyent_.*Z", platform.version()): # platform.system() returns SunOS, which is too broad. So look at the # platform version instead. However we have to ensure that we're not # running in the global zone where changing the timezone has no effect. 
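The branching in Timezone.__new__ selects the concrete subclass before __init__ runs. A minimal, self-contained sketch of that dispatch pattern (toy class names, assumed purely for illustration):

import platform

class Strategy:
    def __new__(cls, *args, **kwargs):
        # Pick the concrete implementation up front, the way Timezone.__new__
        # routes to SystemdTimezone, SmartOSTimezone, and friends.
        impl = LinuxStrategy if platform.system() == "Linux" else GenericStrategy
        return super().__new__(impl)

class LinuxStrategy(Strategy):
    pass

class GenericStrategy(Strategy):
    pass

print(type(Strategy()).__name__)  # LinuxStrategy on Linux, GenericStrategy elsewhere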
- zonename_cmd = module.get_bin_path('zonename') + zonename_cmd = module.get_bin_path("zonename") if zonename_cmd is not None: (rc, stdout, dummy) = module.run_command(zonename_cmd) - if rc == 0 and stdout.strip() == 'global': - module.fail_json(msg='Adjusting timezone is not supported in Global Zone') + if rc == 0 and stdout.strip() == "global": + module.fail_json(msg="Adjusting timezone is not supported in Global Zone") return super(Timezone, SmartOSTimezone).__new__(SmartOSTimezone) - elif platform.system() == 'Darwin': + elif platform.system() == "Darwin": return super(Timezone, DarwinTimezone).__new__(DarwinTimezone) - elif re.match('^(Free|Net|Open)BSD', platform.platform()): + elif re.match("^(Free|Net|Open)BSD", platform.platform()): return super(Timezone, BSDTimezone).__new__(BSDTimezone) - elif platform.system() == 'AIX': + elif platform.system() == "AIX": AIXoslevel = int(platform.version() + platform.release()) if AIXoslevel >= 61: return super(Timezone, AIXTimezone).__new__(AIXTimezone) else: - module.fail_json(msg=f'AIX os level must be >= 61 for timezone module (Target: {AIXoslevel}).') + module.fail_json(msg=f"AIX os level must be >= 61 for timezone module (Target: {AIXoslevel}).") else: # Not supported yet return super(Timezone, Timezone).__new__(Timezone) @@ -152,11 +152,11 @@ def abort(self, msg): Args: msg: The error message. """ - error_msg = ['Error message:', msg] + error_msg = ["Error message:", msg] if len(self.msg) > 0: - error_msg.append('Other message(s):') + error_msg.append("Other message(s):") error_msg.extend(self.msg) - self.module.fail_json(msg='\n'.join(error_msg)) + self.module.fail_json(msg="\n".join(error_msg)) def execute(self, *commands, **kwargs): """Execute the shell command. @@ -172,11 +172,11 @@ def execute(self, *commands, **kwargs): stdout: Standard output of the command. """ (rc, stdout, stderr) = self.module.run_command(list(commands), check_rc=True) - if kwargs.get('log', False): + if kwargs.get("log", False): self.msg.append(f"executed `{' '.join(commands)}`") return stdout - def diff(self, phase1='before', phase2='after'): + def diff(self, phase1="before", phase2="after"): """Calculate the difference between given 2 phases. Args: @@ -202,7 +202,7 @@ def check(self, phase): Returns: NO RETURN VALUE """ - if phase == 'planned': + if phase == "planned": return for key, value in self.value.items(): value[phase] = self.get(key, phase) @@ -210,8 +210,8 @@ def check(self, phase): def change(self): """Make the changes effect based on `self.value`.""" for key, value in self.value.items(): - if value['before'] != value['planned']: - self.set(key, value['planned']) + if value["before"] != value["planned"]: + self.set(key, value["planned"]) # =========================================== # Platform specific methods (must be replaced by subclass). @@ -228,7 +228,7 @@ def get(self, key, phase): Return: value: The value for the key at the given phase. """ - self.abort('get(key, phase) is not implemented on target platform') + self.abort("get(key, phase) is not implemented on target platform") def set(self, key, value): """Set the value for the key (of course, for the phase 'after'). 
@@ -239,11 +239,11 @@ def set(self, key, value):
             key: Key to set the value
             value: Value to set
         """
-        self.abort('set(key, value) is not implemented on target platform')
+        self.abort("set(key, value) is not implemented on target platform")

     def _verify_timezone(self):
-        tz = self.value['name']['planned']
-        tzfile = f'/usr/share/zoneinfo/{tz}'
+        tz = self.value["name"]["planned"]
+        tzfile = f"/usr/share/zoneinfo/{tz}"
         if not os.path.isfile(tzfile):
             self.abort(f'given timezone "{tz}" is not available')
         return tzfile
@@ -256,46 +256,43 @@ class SystemdTimezone(Timezone):
     """

     regexps = dict(
-        hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE),
-        name=re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE)
+        hwclock=re.compile(r"^\s*RTC in local TZ\s*:\s*([^\s]+)", re.MULTILINE),
+        name=re.compile(r"^\s*Time ?zone\s*:\s*([^\s]+)", re.MULTILINE),
     )

-    subcmds = dict(
-        hwclock='set-local-rtc',
-        name='set-timezone'
-    )
+    subcmds = dict(hwclock="set-local-rtc", name="set-timezone")

     def __init__(self, module):
         super().__init__(module)
-        self.timedatectl = module.get_bin_path('timedatectl', required=True)
+        self.timedatectl = module.get_bin_path("timedatectl", required=True)
         self.status = dict()
         # Validate given timezone
-        if 'name' in self.value:
+        if "name" in self.value:
             self._verify_timezone()

     def _get_status(self, phase):
         if phase not in self.status:
-            self.status[phase] = self.execute(self.timedatectl, 'status')
+            self.status[phase] = self.execute(self.timedatectl, "status")
         return self.status[phase]

     def get(self, key, phase):
         status = self._get_status(phase)
         value = self.regexps[key].search(status).group(1)
-        if key == 'hwclock':
+        if key == "hwclock":
             # For key='hwclock'; convert yes/no -> local/UTC
             if self.module.boolean(value):
-                value = 'local'
+                value = "local"
             else:
-                value = 'UTC'
+                value = "UTC"
         return value

     def set(self, key, value):
         # For key='hwclock'; convert UTC/local -> yes/no
-        if key == 'hwclock':
-            if value == 'local':
-                value = 'yes'
+        if key == "hwclock":
+            if value == "local":
+                value = "yes"
             else:
-                value = 'no'
+                value = "no"
         self.execute(self.timedatectl, self.subcmds[key], value, log=True)
@@ -312,98 +309,100 @@ class NosystemdTimezone(Timezone):
     conf_files = dict(
         name=None,  # To be set in __init__
         hwclock=None,  # To be set in __init__
-        adjtime='/etc/adjtime'
+        adjtime="/etc/adjtime",
     )

     # It is fine if all three config files don't exist
-    allow_no_file = dict(
-        name=True,
-        hwclock=True,
-        adjtime=True
-    )
+    allow_no_file = dict(name=True, hwclock=True, adjtime=True)

     regexps = dict(
         name=None,  # To be set in __init__
-        hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE),
-        adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE)
+        hwclock=re.compile(r"^UTC\s*=\s*([^\s]+)", re.MULTILINE),
+        adjtime=re.compile(r"^(UTC|LOCAL)$", re.MULTILINE),
     )

     dist_regexps = dict(
         SuSE=re.compile(r'^TIMEZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE),
-        redhat=re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE)
+        redhat=re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE),
     )

-    dist_tzline_format = dict(
-        SuSE='TIMEZONE="%s"\n',
-        redhat='ZONE="%s"\n'
-    )
+    dist_tzline_format = dict(SuSE='TIMEZONE="%s"\n', redhat='ZONE="%s"\n')

     def __init__(self, module):
         super().__init__(module)
         # Validate given timezone
-        planned_tz = ''
-        if 'name' in self.value:
+        planned_tz = ""
+        if "name" in self.value:
             tzfile = self._verify_timezone()
-            planned_tz = self.value['name']['planned']
         # `--remove-destination` is needed if
/etc/localtime is a symlink so # that it overwrites it instead of following it. - self.update_timezone = [[self.module.get_bin_path('cp', required=True), '--remove-destination', tzfile, '/etc/localtime']] - self.update_hwclock = self.module.get_bin_path('hwclock', required=True) + self.update_timezone = [ + [self.module.get_bin_path("cp", required=True), "--remove-destination", tzfile, "/etc/localtime"] + ] + self.update_hwclock = self.module.get_bin_path("hwclock", required=True) distribution = get_distribution() - self.conf_files['name'] = '/etc/timezone' - self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE) - self.tzline_format = '%s\n' + self.conf_files["name"] = "/etc/timezone" + self.regexps["name"] = re.compile(r"^([^\s]+)", re.MULTILINE) + self.tzline_format = "%s\n" # Distribution-specific configurations - if self.module.get_bin_path('dpkg-reconfigure') is not None: + if self.module.get_bin_path("dpkg-reconfigure") is not None: # Debian/Ubuntu - if 'name' in self.value: - self.update_timezone = [[self.module.get_bin_path('ln', required=True), '-sf', tzfile, '/etc/localtime'], - [self.module.get_bin_path('dpkg-reconfigure', required=True), '--frontend', 'noninteractive', 'tzdata']] - self.conf_files['hwclock'] = '/etc/default/rcS' - elif distribution == 'Alpine' or distribution == 'Gentoo': - self.conf_files['hwclock'] = '/etc/conf.d/hwclock' - if distribution == 'Alpine': - self.update_timezone = [[self.module.get_bin_path('setup-timezone', required=True), '-z', planned_tz]] + if "name" in self.value: + self.update_timezone = [ + [self.module.get_bin_path("ln", required=True), "-sf", tzfile, "/etc/localtime"], + [ + self.module.get_bin_path("dpkg-reconfigure", required=True), + "--frontend", + "noninteractive", + "tzdata", + ], + ] + self.conf_files["hwclock"] = "/etc/default/rcS" + elif distribution == "Alpine" or distribution == "Gentoo": + self.conf_files["hwclock"] = "/etc/conf.d/hwclock" + if distribution == "Alpine": + self.update_timezone = [[self.module.get_bin_path("setup-timezone", required=True), "-z", planned_tz]] else: # RHEL/CentOS/SUSE - if self.module.get_bin_path('tzdata-update') is not None: + if self.module.get_bin_path("tzdata-update") is not None: # tzdata-update cannot update the timezone if /etc/localtime is # a symlink so we have to use cp to update the time zone which # was set above. - if not os.path.islink('/etc/localtime'): - self.update_timezone = [[self.module.get_bin_path('tzdata-update', required=True)]] + if not os.path.islink("/etc/localtime"): + self.update_timezone = [[self.module.get_bin_path("tzdata-update", required=True)]] # else: # self.update_timezone = 'cp --remove-destination ...' <- configured above - self.conf_files['name'] = '/etc/sysconfig/clock' - self.conf_files['hwclock'] = '/etc/sysconfig/clock' + self.conf_files["name"] = "/etc/sysconfig/clock" + self.conf_files["hwclock"] = "/etc/sysconfig/clock" try: - with open(self.conf_files['name'], 'r') as f: + with open(self.conf_files["name"], "r") as f: sysconfig_clock = f.read() except IOError as err: - if self._allow_ioerror(err, 'name'): + if self._allow_ioerror(err, "name"): # If the config file doesn't exist detect the distribution and set regexps. 
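For illustration, the two /etc/sysconfig/clock dialects that dist_regexps distinguishes can be exercised on sample strings; read_zone below is a hypothetical helper, not module code:

import re

SUSE_RE = re.compile(r'^TIMEZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE)
REDHAT_RE = re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE)

def read_zone(sysconfig_clock):
    # SUSE writes TIMEZONE=..., RHEL/CentOS write ZONE=...; like the module,
    # probe the content for a TIMEZONE= line to decide which regexp applies.
    regexp = SUSE_RE if re.search(r"^TIMEZONE\s*=", sysconfig_clock, re.MULTILINE) else REDHAT_RE
    match = regexp.search(sysconfig_clock)
    return match.group(1) if match else "n/a"

print(read_zone('TIMEZONE="Europe/Berlin"\n'))  # Europe/Berlin
print(read_zone('ZONE="America/New_York"\n'))   # America/New_York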
-                if distribution == 'SuSE':
+                if distribution == "SuSE":
                     # For SUSE
-                    self.regexps['name'] = self.dist_regexps['SuSE']
-                    self.tzline_format = self.dist_tzline_format['SuSE']
+                    self.regexps["name"] = self.dist_regexps["SuSE"]
+                    self.tzline_format = self.dist_tzline_format["SuSE"]
                 else:
                     # For RHEL/CentOS
-                    self.regexps['name'] = self.dist_regexps['redhat']
-                    self.tzline_format = self.dist_tzline_format['redhat']
+                    self.regexps["name"] = self.dist_regexps["redhat"]
+                    self.tzline_format = self.dist_tzline_format["redhat"]
             else:
                 self.abort(f'could not read configuration file "{self.conf_files["name"]}"')
         else:
             # The key for timezone might be `ZONE` or `TIMEZONE`
             # (the former is used in RHEL/CentOS and the latter is used in SUSE linux).
             # So check the content of /etc/sysconfig/clock and decide which key to use.
-            if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE):
+            if re.search(r"^TIMEZONE\s*=", sysconfig_clock, re.MULTILINE):
                 # For SUSE
-                self.regexps['name'] = self.dist_regexps['SuSE']
-                self.tzline_format = self.dist_tzline_format['SuSE']
+                self.regexps["name"] = self.dist_regexps["SuSE"]
+                self.tzline_format = self.dist_tzline_format["SuSE"]
             else:
                 # For RHEL/CentOS
-                self.regexps['name'] = self.dist_regexps['redhat']
-                self.tzline_format = self.dist_tzline_format['redhat']
+                self.regexps["name"] = self.dist_regexps["redhat"]
+                self.tzline_format = self.dist_tzline_format["redhat"]

     def _allow_ioerror(self, err, key):
         # In some cases, even if the target file does not exist,
@@ -428,7 +427,7 @@ def _edit_file(self, filename, regexp, value, key):
         """
         # Read the file
         try:
-            with open(filename, 'r') as file:
+            with open(filename, "r") as file:
                 lines = file.readlines()
         except IOError as err:
             if self._allow_ioerror(err, key):
@@ -451,82 +450,84 @@ def _edit_file(self, filename, regexp, value, key):
             lines.insert(insert_line, value)
         # Write the changes
         try:
-            with open(filename, 'w') as file:
+            with open(filename, "w") as file:
                 file.writelines(lines)
         except IOError:
             self.abort(f'tried to configure {key} using a file "{filename}", but could not write to it')
-        self.msg.append(f'Added 1 line and deleted {len(matched_indices)} line(s) on {filename}')
+        self.msg.append(f"Added 1 line and deleted {len(matched_indices)} line(s) on {filename}")

     def _get_value_from_config(self, key, phase):
         filename = self.conf_files[key]
         try:
-            with open(filename, mode='r') as file:
+            with open(filename, mode="r") as file:
                 status = file.read()
         except IOError as err:
             if self._allow_ioerror(err, key):
-                if key == 'hwclock':
-                    return 'n/a'
-                elif key == 'adjtime':
-                    return 'UTC'
-                elif key == 'name':
-                    return 'n/a'
+                if key == "hwclock":
+                    return "n/a"
+                elif key == "adjtime":
+                    return "UTC"
+                elif key == "name":
+                    return "n/a"
             else:
                 self.abort(f'tried to configure {key} using a file "{filename}", but could not read it')
         else:
             try:
                 value = self.regexps[key].search(status).group(1)
             except AttributeError:
-                if key == 'hwclock':
+                if key == "hwclock":
                     # If we cannot find UTC in the config that's fine.
-                    return 'n/a'
-                elif key == 'adjtime':
+                    return "n/a"
+                elif key == "adjtime":
                     # If we cannot find UTC/LOCAL in /etc/adjtime that means UTC
                     # will be used by default.
-                    return 'UTC'
-                elif key == 'name':
-                    if phase == 'before':
+                    return "UTC"
+                elif key == "name":
+                    if phase == "before":
                         # In 'before' phase UTC/LOCAL doesn't need to be set in
                         # the timezone config file, so we ignore this error.
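Judging by the surrounding hunks, _edit_file performs a replace-or-append update: drop every line the regexp matches, then insert the new line at the first match's position, or append when nothing matched. A simplified list-based sketch of that pattern (edit_lines is a stand-in that assumes the file is already read into a list):

import re

def edit_lines(lines, regexp, value):
    # Indices of the lines the regexp matches (the lines to be replaced).
    matched = [i for i, line in enumerate(lines) if regexp.search(line)]
    if matched:
        insert_at = matched[0]  # keep the position of the first match
        lines = [line for i, line in enumerate(lines) if i not in matched]
    else:
        insert_at = len(lines)  # nothing matched: append at the end
    lines.insert(insert_at, value)
    return lines

tz_re = re.compile(r"^([^\s]+)", re.MULTILINE)
print(edit_lines(["Etc/UTC\n"], tz_re, "Europe/Paris\n"))  # ['Europe/Paris\n']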
-                        return 'n/a'
+                        return "n/a"
                     else:
-                        self.abort(f'tried to configure {key} using a file "{filename}", but could not find a valid value in it')
+                        self.abort(
+                            f'tried to configure {key} using a file "{filename}", but could not find a valid value in it'
+                        )
         else:
-            if key == 'hwclock':
+            if key == "hwclock":
                 # convert yes/no -> UTC/local
                 if self.module.boolean(value):
-                    value = 'UTC'
+                    value = "UTC"
                 else:
-                    value = 'local'
-            elif key == 'adjtime':
+                    value = "local"
+            elif key == "adjtime":
                 # convert LOCAL -> local
-                if value != 'UTC':
+                if value != "UTC":
                     value = value.lower()
         return value

     def get(self, key, phase):
-        planned = self.value[key]['planned']
-        if key == 'hwclock':
+        planned = self.value[key]["planned"]
+        if key == "hwclock":
             value = self._get_value_from_config(key, phase)
             if value == planned:
                 # If the value in the config file is the same as the 'planned'
                 # value, we need to check /etc/adjtime.
-                value = self._get_value_from_config('adjtime', phase)
-        elif key == 'name':
+                value = self._get_value_from_config("adjtime", phase)
+        elif key == "name":
             value = self._get_value_from_config(key, phase)
             if value == planned:
                 # If the planned value is the same as the one in the config file
                 # we need to check if /etc/localtime is also set to the 'planned' zone.
-                if os.path.islink('/etc/localtime'):
+                if os.path.islink("/etc/localtime"):
                     # If /etc/localtime is a symlink and is not set to the TZ we 'planned'
                     # to set, we need to return the TZ which the symlink points to.
-                    if os.path.exists('/etc/localtime'):
+                    if os.path.exists("/etc/localtime"):
                         # We use readlink() because on some distros zone files are symlinks
                         # to other zone files, so it is hard to get which TZ is actually set
                         # if we follow the symlink.
-                        path = os.readlink('/etc/localtime')
+                        path = os.readlink("/etc/localtime")
                         # most Linux distros have it in /usr/share/zoneinfo
                         # Alpine Linux links under /etc/zoneinfo
-                        linktz = re.search(r'(?:/(?:usr/share|etc)/zoneinfo/)(.*)', path, re.MULTILINE)
+                        linktz = re.search(r"(?:/(?:usr/share|etc)/zoneinfo/)(.*)", path, re.MULTILINE)
                         if linktz:
                             valuelink = linktz.group(1)
                             if valuelink != planned:
@@ -534,48 +535,46 @@ def get(self, key, phase):
                         else:
                             # Set current TZ to 'n/a' if the symlink points to a path
                             # which isn't a zone file.
-                            value = 'n/a'
+                            value = "n/a"
                     else:
                         # Set current TZ to 'n/a' if the symlink to the zone file is broken.
-                        value = 'n/a'
+                        value = "n/a"
                 else:
                     # If /etc/localtime is not a symlink, the best we can do is compare it with
                     # the 'planned' zone info file and return 'n/a' if they are different.
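                     # Illustration: a regular-file /etc/localtime that is
                     # byte-for-byte identical to /usr/share/zoneinfo/Europe/Paris
                     # (a hypothetical planned zone) still counts as that zone,
                     # because filecmp.cmp() reports the two files as equal; any
                     # mismatch or read error degrades the answer to 'n/a'.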
try: - if not filecmp.cmp('/etc/localtime', f"/usr/share/zoneinfo/{planned}"): - return 'n/a' + if not filecmp.cmp("/etc/localtime", f"/usr/share/zoneinfo/{planned}"): + return "n/a" except Exception: - return 'n/a' + return "n/a" else: self.abort(f'unknown parameter "{key}"') return value def set_timezone(self, value): - self._edit_file(filename=self.conf_files['name'], - regexp=self.regexps['name'], - value=self.tzline_format % value, - key='name') + self._edit_file( + filename=self.conf_files["name"], regexp=self.regexps["name"], value=self.tzline_format % value, key="name" + ) for cmd in self.update_timezone: self.execute(*cmd) def set_hwclock(self, value): - if value == 'local': - option = '--localtime' - utc = 'no' + if value == "local": + option = "--localtime" + utc = "no" else: - option = '--utc' - utc = 'yes' - if self.conf_files['hwclock'] is not None: - self._edit_file(filename=self.conf_files['hwclock'], - regexp=self.regexps['hwclock'], - value=f'UTC={utc}\n', - key='hwclock') - self.execute(self.update_hwclock, '--systohc', option, log=True) + option = "--utc" + utc = "yes" + if self.conf_files["hwclock"] is not None: + self._edit_file( + filename=self.conf_files["hwclock"], regexp=self.regexps["hwclock"], value=f"UTC={utc}\n", key="hwclock" + ) + self.execute(self.update_hwclock, "--systohc", option, log=True) def set(self, key, value): - if key == 'name': + if key == "name": self.set_timezone(value) - elif key == 'hwclock': + elif key == "hwclock": self.set_hwclock(value) else: self.abort(f'unknown parameter "{key}"') @@ -593,32 +592,32 @@ class SmartOSTimezone(Timezone): def __init__(self, module): super().__init__(module) - self.settimezone = self.module.get_bin_path('sm-set-timezone', required=False) + self.settimezone = self.module.get_bin_path("sm-set-timezone", required=False) if not self.settimezone: - module.fail_json(msg='sm-set-timezone not found. Make sure the smtools package is installed.') + module.fail_json(msg="sm-set-timezone not found. Make sure the smtools package is installed.") def get(self, key, phase): """Lookup the current timezone name in `/etc/default/init`. If anything else is requested, or if the TZ field is not set we fail. """ - if key == 'name': + if key == "name": try: - with open('/etc/default/init', 'r') as f: + with open("/etc/default/init", "r") as f: for line in f: - m = re.match('^TZ=(.*)$', line.strip()) + m = re.match("^TZ=(.*)$", line.strip()) if m: return m.groups()[0] except Exception: - self.module.fail_json(msg='Failed to read /etc/default/init') + self.module.fail_json(msg="Failed to read /etc/default/init") else: - self.module.fail_json(msg=f'{key} is not a supported option on target platform') + self.module.fail_json(msg=f"{key} is not a supported option on target platform") def set(self, key, value): """Set the requested timezone through sm-set-timezone, an invalid timezone name will be rejected and we have no further input validation to perform. """ - if key == 'name': - cmd = ['sm-set-timezone', value] + if key == "name": + cmd = ["sm-set-timezone", value] (rc, stdout, stderr) = self.module.run_command(cmd) @@ -627,11 +626,11 @@ def set(self, key, value): # sm-set-timezone knows no state and will always set the timezone. # XXX: https://github.com/joyent/smtools/pull/2 - m = re.match(rf'^\* Changed (to)? timezone (to)? ({value}).*', stdout.splitlines()[1]) + m = re.match(rf"^\* Changed (to)? timezone (to)? 
({value}).*", stdout.splitlines()[1]) if not (m and m.groups()[-1] == value): - self.module.fail_json(msg='Failed to set timezone') + self.module.fail_json(msg="Failed to set timezone") else: - self.module.fail_json(msg=f'{key} is not a supported option on target platform') + self.module.fail_json(msg=f"{key} is not a supported option on target platform") class DarwinTimezone(Timezone): @@ -640,47 +639,45 @@ class DarwinTimezone(Timezone): the timezone. """ - regexps = dict( - name=re.compile(r'^\s*Time ?Zone\s*:\s*([^\s]+)', re.MULTILINE) - ) + regexps = dict(name=re.compile(r"^\s*Time ?Zone\s*:\s*([^\s]+)", re.MULTILINE)) def __init__(self, module): super().__init__(module) - self.systemsetup = module.get_bin_path('systemsetup', required=True) + self.systemsetup = module.get_bin_path("systemsetup", required=True) self.status = dict() # Validate given timezone - if 'name' in self.value: + if "name" in self.value: self._verify_timezone() def _get_current_timezone(self, phase): """Lookup the current timezone via `systemsetup -gettimezone`.""" if phase not in self.status: - self.status[phase] = self.execute(self.systemsetup, '-gettimezone') + self.status[phase] = self.execute(self.systemsetup, "-gettimezone") return self.status[phase] def _verify_timezone(self): - tz = self.value['name']['planned'] + tz = self.value["name"]["planned"] # Lookup the list of supported timezones via `systemsetup -listtimezones`. # Note: Skip the first line that contains the label 'Time Zones:' - out = self.execute(self.systemsetup, '-listtimezones').splitlines()[1:] + out = self.execute(self.systemsetup, "-listtimezones").splitlines()[1:] tz_list = list(map(lambda x: x.strip(), out)) if tz not in tz_list: self.abort(f'given timezone "{tz}" is not available') return tz def get(self, key, phase): - if key == 'name': + if key == "name": status = self._get_current_timezone(phase) value = self.regexps[key].search(status).group(1) return value else: - self.module.fail_json(msg=f'{key} is not a supported option on target platform') + self.module.fail_json(msg=f"{key} is not a supported option on target platform") def set(self, key, value): - if key == 'name': - self.execute(self.systemsetup, '-settimezone', value, log=True) + if key == "name": + self.execute(self.systemsetup, "-settimezone", value, log=True) else: - self.module.fail_json(msg=f'{key} is not a supported option on target platform') + self.module.fail_json(msg=f"{key} is not a supported option on target platform") class BSDTimezone(Timezone): @@ -693,14 +690,14 @@ def __init__(self, module): super().__init__(module) def __get_timezone(self): - zoneinfo_dir = '/usr/share/zoneinfo/' - localtime_file = '/etc/localtime' + zoneinfo_dir = "/usr/share/zoneinfo/" + localtime_file = "/etc/localtime" # Strategy 1: # If /etc/localtime does not exist, assume the timezone is UTC. if not os.path.exists(localtime_file): - self.module.warn('Could not read /etc/localtime. Assuming UTC.') - return 'UTC' + self.module.warn("Could not read /etc/localtime. Assuming UTC.") + return "UTC" # Strategy 2: # Follow symlink of /etc/localtime @@ -712,7 +709,7 @@ def __get_timezone(self): # OSError means "end of symlink chain" or broken link. 
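                 # Illustration: with /etc/localtime -> /usr/share/zoneinfo/Asia/Tokyo,
                 # a single readlink() already lands inside zoneinfo_dir, the loop
                 # condition turns false, and the else: clause below returns
                 # "Asia/Tokyo"; this break is only hit for a broken link or a
                 # target that is not a symlink at all.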
break else: - return zoneinfo_file.replace(zoneinfo_dir, '') + return zoneinfo_file.replace(zoneinfo_dir, "") # Strategy 3: # (If /etc/localtime is not symlinked) @@ -721,30 +718,30 @@ def __get_timezone(self): for fname in sorted(fnames): zoneinfo_file = os.path.join(dname, fname) if not os.path.islink(zoneinfo_file) and filecmp.cmp(zoneinfo_file, localtime_file): - return zoneinfo_file.replace(zoneinfo_dir, '') + return zoneinfo_file.replace(zoneinfo_dir, "") # Strategy 4: # As a fall-back, return 'UTC' as default assumption. - self.module.warn('Could not identify timezone name from /etc/localtime. Assuming UTC.') - return 'UTC' + self.module.warn("Could not identify timezone name from /etc/localtime. Assuming UTC.") + return "UTC" def get(self, key, phase): """Lookup the current timezone by resolving `/etc/localtime`.""" - if key == 'name': + if key == "name": return self.__get_timezone() else: - self.module.fail_json(msg=f'{key} is not a supported option on target platform') + self.module.fail_json(msg=f"{key} is not a supported option on target platform") def set(self, key, value): - if key == 'name': + if key == "name": # First determine if the requested timezone is valid by looking in # the zoneinfo directory. zonefile = f"/usr/share/zoneinfo/{value}" try: if not os.path.isfile(zonefile): - self.module.fail_json(msg=f'{value} is not a recognized timezone') + self.module.fail_json(msg=f"{value} is not a recognized timezone") except Exception: - self.module.fail_json(msg=f'Failed to stat {zonefile}') + self.module.fail_json(msg=f"Failed to stat {zonefile}") # Now (somewhat) atomically update the symlink by creating a new # symlink and move it into place. Otherwise we have to remove the @@ -756,12 +753,12 @@ def set(self, key, value): try: os.symlink(zonefile, new_localtime) - os.rename(new_localtime, '/etc/localtime') + os.rename(new_localtime, "/etc/localtime") except Exception: os.remove(new_localtime) - self.module.fail_json(msg='Could not update /etc/localtime') + self.module.fail_json(msg="Could not update /etc/localtime") else: - self.module.fail_json(msg=f'{key} is not a supported option on target platform') + self.module.fail_json(msg=f"{key} is not a supported option on target platform") class AIXTimezone(Timezone): @@ -781,17 +778,17 @@ class AIXTimezone(Timezone): def __init__(self, module): super().__init__(module) - self.settimezone = self.module.get_bin_path('chtz', required=True) + self.settimezone = self.module.get_bin_path("chtz", required=True) def __get_timezone(self): - """ Return the current value of TZ= in /etc/environment """ + """Return the current value of TZ= in /etc/environment""" try: - with open('/etc/environment', 'r') as f: + with open("/etc/environment", "r") as f: etcenvironment = f.read() except Exception: - self.module.fail_json(msg='Issue reading contents of /etc/environment') + self.module.fail_json(msg="Issue reading contents of /etc/environment") - match = re.search(r'^TZ=(.*)$', etcenvironment, re.MULTILINE) + match = re.search(r"^TZ=(.*)$", etcenvironment, re.MULTILINE) if match: return match.group(1) else: @@ -801,16 +798,16 @@ def get(self, key, phase): """Lookup the current timezone name in `/etc/environment`. If anything else is requested, or if the TZ field is not set we fail. 
""" - if key == 'name': + if key == "name": return self.__get_timezone() else: - self.module.fail_json(msg=f'{key} is not a supported option on target platform') + self.module.fail_json(msg=f"{key} is not a supported option on target platform") def set(self, key, value): """Set the requested timezone through chtz, an invalid timezone name will be rejected and we have no further input validation to perform. """ - if key == 'name': + if key == "name": # chtz seems to always return 0 on AIX 7.2, even for invalid timezone values. # It will only return non-zero if the chtz command itself fails, it does not check for # valid timezones. We need to perform a basic check to confirm that the timezone @@ -829,12 +826,12 @@ def set(self, key, value): zonefile = f"/usr/share/lib/zoneinfo/{value}" try: if not os.path.isfile(zonefile): - self.module.fail_json(msg=f'{value} is not a recognized timezone.') + self.module.fail_json(msg=f"{value} is not a recognized timezone.") except Exception: - self.module.fail_json(msg=f'Failed to check {zonefile}.') + self.module.fail_json(msg=f"Failed to check {zonefile}.") # Now set the TZ using chtz - cmd = ['chtz', value] + cmd = ["chtz", value] (rc, stdout, stderr) = self.module.run_command(cmd) if rc != 0: @@ -844,50 +841,48 @@ def set(self, key, value): # change. TZ = self.__get_timezone() if TZ != value: - msg = f'TZ value does not match post-change (Actual: {TZ}, Expected: {value}).' + msg = f"TZ value does not match post-change (Actual: {TZ}, Expected: {value})." self.module.fail_json(msg=msg) else: - self.module.fail_json(msg=f'{key} is not a supported option on target platform') + self.module.fail_json(msg=f"{key} is not a supported option on target platform") def main(): # Construct 'module' and 'tz' module = AnsibleModule( argument_spec=dict( - hwclock=dict(type='str', choices=['local', 'UTC'], aliases=['rtc']), - name=dict(type='str'), + hwclock=dict(type="str", choices=["local", "UTC"], aliases=["rtc"]), + name=dict(type="str"), ), - required_one_of=[ - ['hwclock', 'name'] - ], + required_one_of=[["hwclock", "name"]], supports_check_mode=True, ) tz = Timezone(module) # Check the current state - tz.check(phase='before') + tz.check(phase="before") if module.check_mode: - diff = tz.diff('before', 'planned') + diff = tz.diff("before", "planned") # In check mode, 'planned' state is treated as 'after' state - diff['after'] = diff.pop('planned') + diff["after"] = diff.pop("planned") else: # Make change tz.change() # Check the current state - tz.check(phase='after') + tz.check(phase="after") # Examine if the current state matches planned state - (after, planned) = tz.diff('after', 'planned').values() + (after, planned) = tz.diff("after", "planned").values() if after != planned: - tz.abort(f'still not desired state, though changes have made - planned: {planned}, after: {after}') - diff = tz.diff('before', 'after') + tz.abort(f"still not desired state, though changes have made - planned: {planned}, after: {after}") + diff = tz.diff("before", "after") - changed = (diff['before'] != diff['after']) + changed = diff["before"] != diff["after"] if len(tz.msg) > 0: - module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg)) + module.exit_json(changed=changed, diff=diff, msg="\n".join(tz.msg)) else: module.exit_json(changed=changed, diff=diff) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/twilio.py b/plugins/modules/twilio.py index f5732b334b1..816626712f2 100644 --- a/plugins/modules/twilio.py +++ 
b/plugins/modules/twilio.py @@ -112,24 +112,24 @@ from ansible.module_utils.urls import fetch_url -def post_twilio_api(module, account_sid, auth_token, msg, from_number, - to_number, media_url=None): +def post_twilio_api(module, account_sid, auth_token, msg, from_number, to_number, media_url=None): URI = f"https://api.twilio.com/2010-04-01/Accounts/{account_sid}/Messages.json" AGENT = "Ansible" - data = {'From': from_number, 'To': to_number, 'Body': msg} + data = {"From": from_number, "To": to_number, "Body": msg} if media_url: - data['MediaUrl'] = media_url + data["MediaUrl"] = media_url encoded_data = urlencode(data) - headers = {'User-Agent': AGENT, - 'Content-type': 'application/x-www-form-urlencoded', - 'Accept': 'application/json', - } + headers = { + "User-Agent": AGENT, + "Content-type": "application/x-www-form-urlencoded", + "Accept": "application/json", + } # Hack module params to have the Basic auth params that fetch_url expects - module.params['url_username'] = account_sid.replace('\n', '') - module.params['url_password'] = auth_token.replace('\n', '') + module.params["url_username"] = account_sid.replace("\n", "") + module.params["url_password"] = auth_token.replace("\n", "") return fetch_url(module, URI, data=encoded_data, headers=headers) @@ -138,39 +138,38 @@ def post_twilio_api(module, account_sid, auth_token, msg, from_number, # Main # -def main(): +def main(): module = AnsibleModule( argument_spec=dict( account_sid=dict(required=True), auth_token=dict(required=True, no_log=True), msg=dict(required=True), from_number=dict(required=True), - to_numbers=dict(required=True, aliases=['to_number'], type='list', elements='str'), + to_numbers=dict(required=True, aliases=["to_number"], type="list", elements="str"), media_url=dict(), ), - supports_check_mode=True + supports_check_mode=True, ) - account_sid = module.params['account_sid'] - auth_token = module.params['auth_token'] - msg = module.params['msg'] - from_number = module.params['from_number'] - to_numbers = module.params['to_numbers'] - media_url = module.params['media_url'] + account_sid = module.params["account_sid"] + auth_token = module.params["auth_token"] + msg = module.params["msg"] + from_number = module.params["from_number"] + to_numbers = module.params["to_numbers"] + media_url = module.params["media_url"] for number in to_numbers: - r, info = post_twilio_api(module, account_sid, auth_token, msg, - from_number, number, media_url) - if info['status'] not in [200, 201]: + r, info = post_twilio_api(module, account_sid, auth_token, msg, from_number, number, media_url) + if info["status"] not in [200, 201]: body_message = "unknown error" - if 'body' in info: - body = module.from_json(info['body']) - body_message = body['message'] + if "body" in info: + body = module.from_json(info["body"]) + body_message = body["message"] module.fail_json(msg=f"unable to send message to {number}: {body_message}") module.exit_json(msg=msg, changed=False) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/typetalk.py b/plugins/modules/typetalk.py index a5ed1abeb85..621e0e15127 100644 --- a/plugins/modules/typetalk.py +++ b/plugins/modules/typetalk.py @@ -68,26 +68,29 @@ def do_request(module, url, params, headers=None): data = urlencode(params) if headers is None: headers = dict() - headers = dict(headers, **{ - 'User-Agent': 'Ansible/typetalk module', - }) + headers = dict( + headers, + **{ + "User-Agent": "Ansible/typetalk module", + }, + ) r, info = fetch_url(module, url, data=data, 
headers=headers)
-    if info['status'] != 200:
-        exc = ConnectionError(info['msg'])
-        exc.code = info['status']
+    if info["status"] != 200:
+        exc = ConnectionError(info["msg"])
+        exc.code = info["status"]
        raise exc
    return r


def get_access_token(module, client_id, client_secret):
    params = {
-        'client_id': client_id,
-        'client_secret': client_secret,
-        'grant_type': 'client_credentials',
-        'scope': 'topic.post'
+        "client_id": client_id,
+        "client_secret": client_secret,
+        "grant_type": "client_credentials",
+        "scope": "topic.post",
    }
-    res = do_request(module, 'https://typetalk.com/oauth2/access_token', params)
-    return json.load(res)['access_token']
+    res = do_request(module, "https://typetalk.com/oauth2/access_token", params)
+    return json.load(res)["access_token"]


def send_message(module, client_id, client_secret, topic, msg):
@@ -96,26 +99,25 @@ def send_message(module, client_id, client_secret, topic, msg):
    """
    try:
        access_token = get_access_token(module, client_id, client_secret)
-        url = f'https://typetalk.com/api/v1/topics/{topic}'
+        url = f"https://typetalk.com/api/v1/topics/{topic}"
        headers = {
-            'Authorization': f'Bearer {access_token}',
+            "Authorization": f"Bearer {access_token}",
        }
-        do_request(module, url, {'message': msg}, headers)
-        return True, {'access_token': access_token}
+        do_request(module, url, {"message": msg}, headers)
+        return True, {"access_token": access_token}
    except ConnectionError as e:
        return False, e


def main():
-
    module = AnsibleModule(
        argument_spec=dict(
            client_id=dict(required=True),
            client_secret=dict(required=True, no_log=True),
-            topic=dict(required=True, type='int'),
+            topic=dict(required=True, type="int"),
            msg=dict(required=True),
        ),
-        supports_check_mode=False
+        supports_check_mode=False,
    )

    if not json:
@@ -128,10 +130,10 @@ def main():

    res, error = send_message(module, client_id, client_secret, topic, msg)
    if not res:
-        module.fail_json(msg=f'fail to send message with response code {error.code}')
+        module.fail_json(msg=f"failed to send message with response code {error.code}")
    module.exit_json(changed=True, topic=topic, msg=msg)


-if __name__ == '__main__':
+if __name__ == "__main__":
    main()
diff --git a/plugins/modules/udm_dns_record.py b/plugins/modules/udm_dns_record.py
index a3c71b4d7f6..9d691acb25e 100644
--- a/plugins/modules/udm_dns_record.py
+++ b/plugins/modules/udm_dns_record.py
@@ -118,78 +118,73 @@ def main():
    module = AnsibleModule(
        argument_spec=dict(
-            type=dict(required=True, type='str'),
-            zone=dict(required=True, type='str'),
-            name=dict(required=True, type='str'),
-            data=dict(default={}, type='dict'),
-            state=dict(default='present', choices=['present', 'absent'], type='str')
+            type=dict(required=True, type="str"),
+            zone=dict(required=True, type="str"),
+            name=dict(required=True, type="str"),
+            data=dict(default={}, type="dict"),
+            state=dict(default="present", choices=["present", "absent"], type="str"),
        ),
        supports_check_mode=True,
-        required_if=([
-            ('state', 'present', ['data'])
-        ])
+        required_if=([("state", "present", ["data"])]),
    )

    deps.validate(module, "univention")

-    type = module.params['type']
-    zone = module.params['zone']
-    name = module.params['name']
-    data = module.params['data']
-    state = module.params['state']
+    type = module.params["type"]
+    zone = module.params["zone"]
+    name = module.params["name"]
+    data = module.params["data"]
+    state = module.params["state"]
    changed = False
    diff = None

    workname = name
-    if type == 'ptr_record':
+    if type == "ptr_record":
        deps.validate(module, "ipaddress")

        try:
-            if 'arpa' not in 
zone: + if "arpa" not in zone: raise Exception("Zone must be reversed zone for ptr_record. (e.g. 1.1.192.in-addr.arpa)") ipaddr_rev = ipaddress.ip_address(name).reverse_pointer subnet_offset = ipaddr_rev.find(zone) if subnet_offset == -1: raise Exception(f"reversed IP address {ipaddr_rev} is not part of zone.") - workname = ipaddr_rev[0:subnet_offset - 1] + workname = ipaddr_rev[0 : subnet_offset - 1] except Exception as e: - module.fail_json( - msg=f'handling PTR record for {name} in zone {zone} failed: {e}' - ) - - obj = list(ldap_search( - f'(&(objectClass=dNSZone)(zoneName={zone})(relativeDomainName={workname}))', - attr=['dNSZone'] - )) + module.fail_json(msg=f"handling PTR record for {name} in zone {zone} failed: {e}") + + obj = list( + ldap_search(f"(&(objectClass=dNSZone)(zoneName={zone})(relativeDomainName={workname}))", attr=["dNSZone"]) + ) exists = bool(len(obj)) - container = f'zoneName={zone},cn=dns,{base_dn()}' - dn = f'relativeDomainName={workname},{container}' + container = f"zoneName={zone},cn=dns,{base_dn()}" + dn = f"relativeDomainName={workname},{container}" - if state == 'present': + if state == "present": try: if not exists: so = forward_zone.lookup( config(), uldap(), - f'(zone={zone})', - scope='domain', + f"(zone={zone})", + scope="domain", ) or reverse_zone.lookup( config(), uldap(), - f'(zoneName={zone})', - scope='domain', + f"(zoneName={zone})", + scope="domain", ) if not so == 0: raise Exception(f"Did not find zone '{zone}' in Univention") - obj = umc_module_for_add(f'dns/{type}', container, superordinate=so[0]) + obj = umc_module_for_add(f"dns/{type}", container, superordinate=so[0]) else: - obj = umc_module_for_edit(f'dns/{type}', dn) + obj = umc_module_for_edit(f"dns/{type}", dn) - if type == 'ptr_record': - obj['ip'] = name - obj['address'] = workname + if type == "ptr_record": + obj["ip"] = name + obj["address"] = workname else: - obj['name'] = name + obj["name"] = name obj.update(data) diff = obj.diff() @@ -200,28 +195,19 @@ def main(): else: obj.modify() except Exception as e: - module.fail_json( - msg=f'Creating/editing dns entry {name} in {container} failed: {e}' - ) + module.fail_json(msg=f"Creating/editing dns entry {name} in {container} failed: {e}") - if state == 'absent' and exists: + if state == "absent" and exists: try: - obj = umc_module_for_edit(f'dns/{type}', dn) + obj = umc_module_for_edit(f"dns/{type}", dn) if not module.check_mode: obj.remove() changed = True except Exception as e: - module.fail_json( - msg=f'Removing dns entry {name} in {container} failed: {e}' - ) - - module.exit_json( - changed=changed, - name=name, - diff=diff, - container=container - ) + module.fail_json(msg=f"Removing dns entry {name} in {container} failed: {e}") + + module.exit_json(changed=changed, name=name, diff=diff, container=container) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/udm_dns_zone.py b/plugins/modules/udm_dns_zone.py index 5a2af1a06bc..fe9fddd2fde 100644 --- a/plugins/modules/udm_dns_zone.py +++ b/plugins/modules/udm_dns_zone.py @@ -114,95 +114,74 @@ def convert_time(time): """Convert a time in seconds into the biggest unit""" units = [ - (24 * 60 * 60, 'days'), - (60 * 60, 'hours'), - (60, 'minutes'), - (1, 'seconds'), + (24 * 60 * 60, "days"), + (60 * 60, "hours"), + (60, "minutes"), + (1, "seconds"), ] if time == 0: - return ('0', 'seconds') + return ("0", "seconds") for unit in units: if time >= unit[0]: - return (f'{time // unit[0]}', unit[1]) + return (f"{time // unit[0]}", unit[1]) def main(): 
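     # A quick sanity check on convert_time() above (expected tuples worked out
     # by hand): convert_time(604800) == ("7", "days"), convert_time(3600) ==
     # ("1", "hours"), convert_time(90) == ("1", "minutes") and
     # convert_time(0) == ("0", "seconds") - the (value, unit) shape the UDM
     # zone object is given below.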
module = AnsibleModule( argument_spec=dict( - type=dict(required=True, - type='str'), - zone=dict(required=True, - aliases=['name'], - type='str'), - nameserver=dict(default=[], - type='list', - elements='str'), - interfaces=dict(default=[], - type='list', - elements='str'), - refresh=dict(default=3600, - type='int'), - retry=dict(default=1800, - type='int'), - expire=dict(default=604800, - type='int'), - ttl=dict(default=600, - type='int'), - contact=dict(default='', - type='str'), - mx=dict(default=[], - type='list', - elements='str'), - state=dict(default='present', - choices=['present', 'absent'], - type='str') + type=dict(required=True, type="str"), + zone=dict(required=True, aliases=["name"], type="str"), + nameserver=dict(default=[], type="list", elements="str"), + interfaces=dict(default=[], type="list", elements="str"), + refresh=dict(default=3600, type="int"), + retry=dict(default=1800, type="int"), + expire=dict(default=604800, type="int"), + ttl=dict(default=600, type="int"), + contact=dict(default="", type="str"), + mx=dict(default=[], type="list", elements="str"), + state=dict(default="present", choices=["present", "absent"], type="str"), ), supports_check_mode=True, - required_if=([ - ('state', 'present', ['nameserver', 'interfaces']) - ]) + required_if=([("state", "present", ["nameserver", "interfaces"])]), ) - type = module.params['type'] - zone = module.params['zone'] - nameserver = module.params['nameserver'] - interfaces = module.params['interfaces'] - refresh = module.params['refresh'] - retry = module.params['retry'] - expire = module.params['expire'] - ttl = module.params['ttl'] - contact = module.params['contact'] - mx = module.params['mx'] - state = module.params['state'] + type = module.params["type"] + zone = module.params["zone"] + nameserver = module.params["nameserver"] + interfaces = module.params["interfaces"] + refresh = module.params["refresh"] + retry = module.params["retry"] + expire = module.params["expire"] + ttl = module.params["ttl"] + contact = module.params["contact"] + mx = module.params["mx"] + state = module.params["state"] changed = False diff = None - obj = list(ldap_search( - f'(&(objectClass=dNSZone)(zoneName={zone}))', - attr=['dNSZone'] - )) + obj = list(ldap_search(f"(&(objectClass=dNSZone)(zoneName={zone}))", attr=["dNSZone"])) exists = bool(len(obj)) - container = f'cn=dns,{base_dn()}' - dn = f'zoneName={zone},{container}' - if contact == '': - contact = f'root@{zone}.' + container = f"cn=dns,{base_dn()}" + dn = f"zoneName={zone},{container}" + if contact == "": + contact = f"root@{zone}." 
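     # With no explicit contact, the fallback is the zone's root mailbox written
     # as an absolute DNS name with a trailing dot, e.g. zone "example.com"
     # yields contact "root@example.com.".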
- if state == 'present': + if state == "present": try: if not exists: - obj = umc_module_for_add(f'dns/{type}', container) + obj = umc_module_for_add(f"dns/{type}", container) else: - obj = umc_module_for_edit(f'dns/{type}', dn) - obj['zone'] = zone - obj['nameserver'] = nameserver - obj['a'] = interfaces - obj['refresh'] = convert_time(refresh) - obj['retry'] = convert_time(retry) - obj['expire'] = convert_time(expire) - obj['ttl'] = convert_time(ttl) - obj['contact'] = contact - obj['mx'] = mx + obj = umc_module_for_edit(f"dns/{type}", dn) + obj["zone"] = zone + obj["nameserver"] = nameserver + obj["a"] = interfaces + obj["refresh"] = convert_time(refresh) + obj["retry"] = convert_time(retry) + obj["expire"] = convert_time(expire) + obj["ttl"] = convert_time(ttl) + obj["contact"] = contact + obj["mx"] = mx diff = obj.diff() if exists: for k in obj.keys(): @@ -216,27 +195,19 @@ def main(): elif changed: obj.modify() except Exception as e: - module.fail_json( - msg=f'Creating/editing dns zone {zone} failed: {e}' - ) + module.fail_json(msg=f"Creating/editing dns zone {zone} failed: {e}") - if state == 'absent' and exists: + if state == "absent" and exists: try: - obj = umc_module_for_edit(f'dns/{type}', dn) + obj = umc_module_for_edit(f"dns/{type}", dn) if not module.check_mode: obj.remove() changed = True except Exception as e: - module.fail_json( - msg=f'Removing dns zone {zone} failed: {e}' - ) - - module.exit_json( - changed=changed, - diff=diff, - zone=zone - ) + module.fail_json(msg=f"Removing dns zone {zone} failed: {e}") + + module.exit_json(changed=changed, diff=diff, zone=zone) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/udm_group.py b/plugins/modules/udm_group.py index 4c764e915a7..696857f04e2 100644 --- a/plugins/modules/udm_group.py +++ b/plugins/modules/udm_group.py @@ -97,54 +97,45 @@ def main(): module = AnsibleModule( argument_spec=dict( - name=dict(required=True, - type='str'), - description=dict(type='str'), - position=dict(default='', - type='str'), - ou=dict(default='', - type='str'), - subpath=dict(default='cn=groups', - type='str'), - state=dict(default='present', - choices=['present', 'absent'], - type='str') + name=dict(required=True, type="str"), + description=dict(type="str"), + position=dict(default="", type="str"), + ou=dict(default="", type="str"), + subpath=dict(default="cn=groups", type="str"), + state=dict(default="present", choices=["present", "absent"], type="str"), ), - supports_check_mode=True + supports_check_mode=True, ) - name = module.params['name'] - description = module.params['description'] - position = module.params['position'] - ou = module.params['ou'] - subpath = module.params['subpath'] - state = module.params['state'] + name = module.params["name"] + description = module.params["description"] + position = module.params["position"] + ou = module.params["ou"] + subpath = module.params["subpath"] + state = module.params["state"] changed = False diff = None - groups = list(ldap_search( - f'(&(objectClass=posixGroup)(cn={name}))', - attr=['cn'] - )) - if position != '': + groups = list(ldap_search(f"(&(objectClass=posixGroup)(cn={name}))", attr=["cn"])) + if position != "": container = position else: - if ou != '': - ou = f'ou={ou},' - if subpath != '': - subpath = f'{subpath},' - container = f'{subpath}{ou}{base_dn()}' - group_dn = f'cn={name},{container}' + if ou != "": + ou = f"ou={ou}," + if subpath != "": + subpath = f"{subpath}," + container = f"{subpath}{ou}{base_dn()}" + group_dn = 
f"cn={name},{container}" exists = bool(len(groups)) - if state == 'present': + if state == "present": try: if not exists: - grp = umc_module_for_add('groups/group', container) + grp = umc_module_for_add("groups/group", container) else: - grp = umc_module_for_edit('groups/group', group_dn) - grp['name'] = name - grp['description'] = description + grp = umc_module_for_edit("groups/group", group_dn) + grp["name"] = name + grp["description"] = description diff = grp.diff() changed = grp.diff() != [] if not module.check_mode: @@ -153,28 +144,19 @@ def main(): else: grp.modify() except Exception: - module.fail_json( - msg=f"Creating/editing group {name} in {container} failed" - ) + module.fail_json(msg=f"Creating/editing group {name} in {container} failed") - if state == 'absent' and exists: + if state == "absent" and exists: try: - grp = umc_module_for_edit('groups/group', group_dn) + grp = umc_module_for_edit("groups/group", group_dn) if not module.check_mode: grp.remove() changed = True except Exception: - module.fail_json( - msg=f"Removing group {name} failed" - ) - - module.exit_json( - changed=changed, - name=name, - diff=diff, - container=container - ) + module.fail_json(msg=f"Removing group {name} failed") + + module.exit_json(changed=changed, name=name, diff=diff, container=container) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/udm_share.py b/plugins/modules/udm_share.py index 9af5bf37b77..109ccb8b111 100644 --- a/plugins/modules/udm_share.py +++ b/plugins/modules/udm_share.py @@ -352,178 +352,89 @@ def main(): module = AnsibleModule( argument_spec=dict( - name=dict(required=True, - type='str'), - ou=dict(required=True, - type='str'), - owner=dict(type='str', - default='0'), - group=dict(type='str', - default='0'), - path=dict(type='path'), - directorymode=dict(type='str', - default='00755'), - host=dict(type='str'), - root_squash=dict(type='bool', - default=True), - subtree_checking=dict(type='bool', - default=True), - sync=dict(type='str', - default='sync'), - writeable=dict(type='bool', - default=True), - sambaBlockSize=dict(type='str', - aliases=['samba_block_size']), - sambaBlockingLocks=dict(type='bool', - aliases=['samba_blocking_locks'], - default=True), - sambaBrowseable=dict(type='bool', - aliases=['samba_browsable'], - default=True), - sambaCreateMode=dict(type='str', - aliases=['samba_create_mode'], - default='0744'), - sambaCscPolicy=dict(type='str', - aliases=['samba_csc_policy'], - default='manual'), - sambaCustomSettings=dict(type='list', - elements='dict', - aliases=['samba_custom_settings'], - default=[]), - sambaDirectoryMode=dict(type='str', - aliases=['samba_directory_mode'], - default='0755'), - sambaDirectorySecurityMode=dict(type='str', - aliases=['samba_directory_security_mode'], - default='0777'), - sambaDosFilemode=dict(type='bool', - aliases=['samba_dos_filemode'], - default=False), - sambaFakeOplocks=dict(type='bool', - aliases=['samba_fake_oplocks'], - default=False), - sambaForceCreateMode=dict(type='bool', - aliases=['samba_force_create_mode'], - default=False), - sambaForceDirectoryMode=dict(type='bool', - aliases=['samba_force_directory_mode'], - default=False), - sambaForceDirectorySecurityMode=dict(type='bool', - aliases=['samba_force_directory_security_mode'], - default=False), - sambaForceGroup=dict(type='str', - aliases=['samba_force_group']), - sambaForceSecurityMode=dict(type='bool', - aliases=['samba_force_security_mode'], - default=False), - sambaForceUser=dict(type='str', - 
aliases=['samba_force_user']), - sambaHideFiles=dict(type='str', - aliases=['samba_hide_files']), - sambaHideUnreadable=dict(type='bool', - aliases=['samba_hide_unreadable'], - default=False), - sambaHostsAllow=dict(type='list', - elements='str', - aliases=['samba_hosts_allow'], - default=[]), - sambaHostsDeny=dict(type='list', - elements='str', - aliases=['samba_hosts_deny'], - default=[]), - sambaInheritAcls=dict(type='bool', - aliases=['samba_inherit_acls'], - default=True), - sambaInheritOwner=dict(type='bool', - aliases=['samba_inherit_owner'], - default=False), - sambaInheritPermissions=dict(type='bool', - aliases=['samba_inherit_permissions'], - default=False), - sambaInvalidUsers=dict(type='str', - aliases=['samba_invalid_users']), - sambaLevel2Oplocks=dict(type='bool', - aliases=['samba_level_2_oplocks'], - default=True), - sambaLocking=dict(type='bool', - aliases=['samba_locking'], - default=True), - sambaMSDFSRoot=dict(type='bool', - aliases=['samba_msdfs_root'], - default=False), - sambaName=dict(type='str', - aliases=['samba_name']), - sambaNtAclSupport=dict(type='bool', - aliases=['samba_nt_acl_support'], - default=True), - sambaOplocks=dict(type='bool', - aliases=['samba_oplocks'], - default=True), - sambaPostexec=dict(type='str', - aliases=['samba_postexec']), - sambaPreexec=dict(type='str', - aliases=['samba_preexec']), - sambaPublic=dict(type='bool', - aliases=['samba_public'], - default=False), - sambaSecurityMode=dict(type='str', - aliases=['samba_security_mode'], - default='0777'), - sambaStrictLocking=dict(type='str', - aliases=['samba_strict_locking'], - default='Auto'), - sambaVFSObjects=dict(type='str', - aliases=['samba_vfs_objects']), - sambaValidUsers=dict(type='str', - aliases=['samba_valid_users']), - sambaWriteList=dict(type='str', - aliases=['samba_write_list']), - sambaWriteable=dict(type='bool', - aliases=['samba_writeable'], - default=True), - nfs_hosts=dict(type='list', - elements='str', - default=[]), - nfsCustomSettings=dict(type='list', - elements='str', - aliases=['nfs_custom_settings'], - default=[]), - state=dict(default='present', - choices=['present', 'absent'], - type='str') + name=dict(required=True, type="str"), + ou=dict(required=True, type="str"), + owner=dict(type="str", default="0"), + group=dict(type="str", default="0"), + path=dict(type="path"), + directorymode=dict(type="str", default="00755"), + host=dict(type="str"), + root_squash=dict(type="bool", default=True), + subtree_checking=dict(type="bool", default=True), + sync=dict(type="str", default="sync"), + writeable=dict(type="bool", default=True), + sambaBlockSize=dict(type="str", aliases=["samba_block_size"]), + sambaBlockingLocks=dict(type="bool", aliases=["samba_blocking_locks"], default=True), + sambaBrowseable=dict(type="bool", aliases=["samba_browsable"], default=True), + sambaCreateMode=dict(type="str", aliases=["samba_create_mode"], default="0744"), + sambaCscPolicy=dict(type="str", aliases=["samba_csc_policy"], default="manual"), + sambaCustomSettings=dict(type="list", elements="dict", aliases=["samba_custom_settings"], default=[]), + sambaDirectoryMode=dict(type="str", aliases=["samba_directory_mode"], default="0755"), + sambaDirectorySecurityMode=dict(type="str", aliases=["samba_directory_security_mode"], default="0777"), + sambaDosFilemode=dict(type="bool", aliases=["samba_dos_filemode"], default=False), + sambaFakeOplocks=dict(type="bool", aliases=["samba_fake_oplocks"], default=False), + sambaForceCreateMode=dict(type="bool", aliases=["samba_force_create_mode"], 
default=False), + sambaForceDirectoryMode=dict(type="bool", aliases=["samba_force_directory_mode"], default=False), + sambaForceDirectorySecurityMode=dict( + type="bool", aliases=["samba_force_directory_security_mode"], default=False + ), + sambaForceGroup=dict(type="str", aliases=["samba_force_group"]), + sambaForceSecurityMode=dict(type="bool", aliases=["samba_force_security_mode"], default=False), + sambaForceUser=dict(type="str", aliases=["samba_force_user"]), + sambaHideFiles=dict(type="str", aliases=["samba_hide_files"]), + sambaHideUnreadable=dict(type="bool", aliases=["samba_hide_unreadable"], default=False), + sambaHostsAllow=dict(type="list", elements="str", aliases=["samba_hosts_allow"], default=[]), + sambaHostsDeny=dict(type="list", elements="str", aliases=["samba_hosts_deny"], default=[]), + sambaInheritAcls=dict(type="bool", aliases=["samba_inherit_acls"], default=True), + sambaInheritOwner=dict(type="bool", aliases=["samba_inherit_owner"], default=False), + sambaInheritPermissions=dict(type="bool", aliases=["samba_inherit_permissions"], default=False), + sambaInvalidUsers=dict(type="str", aliases=["samba_invalid_users"]), + sambaLevel2Oplocks=dict(type="bool", aliases=["samba_level_2_oplocks"], default=True), + sambaLocking=dict(type="bool", aliases=["samba_locking"], default=True), + sambaMSDFSRoot=dict(type="bool", aliases=["samba_msdfs_root"], default=False), + sambaName=dict(type="str", aliases=["samba_name"]), + sambaNtAclSupport=dict(type="bool", aliases=["samba_nt_acl_support"], default=True), + sambaOplocks=dict(type="bool", aliases=["samba_oplocks"], default=True), + sambaPostexec=dict(type="str", aliases=["samba_postexec"]), + sambaPreexec=dict(type="str", aliases=["samba_preexec"]), + sambaPublic=dict(type="bool", aliases=["samba_public"], default=False), + sambaSecurityMode=dict(type="str", aliases=["samba_security_mode"], default="0777"), + sambaStrictLocking=dict(type="str", aliases=["samba_strict_locking"], default="Auto"), + sambaVFSObjects=dict(type="str", aliases=["samba_vfs_objects"]), + sambaValidUsers=dict(type="str", aliases=["samba_valid_users"]), + sambaWriteList=dict(type="str", aliases=["samba_write_list"]), + sambaWriteable=dict(type="bool", aliases=["samba_writeable"], default=True), + nfs_hosts=dict(type="list", elements="str", default=[]), + nfsCustomSettings=dict(type="list", elements="str", aliases=["nfs_custom_settings"], default=[]), + state=dict(default="present", choices=["present", "absent"], type="str"), ), supports_check_mode=True, - required_if=([ - ('state', 'present', ['path', 'host', 'sambaName']) - ]) + required_if=([("state", "present", ["path", "host", "sambaName"])]), ) - name = module.params['name'] - state = module.params['state'] + name = module.params["name"] + state = module.params["state"] changed = False diff = None - obj = list(ldap_search( - f'(&(objectClass=univentionShare)(cn={name}))', - attr=['cn'] - )) + obj = list(ldap_search(f"(&(objectClass=univentionShare)(cn={name}))", attr=["cn"])) exists = bool(len(obj)) container = f"cn=shares,ou={module.params['ou']},{base_dn()}" - dn = f'cn={name},{container}' + dn = f"cn={name},{container}" - if state == 'present': + if state == "present": try: if not exists: - obj = umc_module_for_add('shares/share', container) + obj = umc_module_for_add("shares/share", container) else: - obj = umc_module_for_edit('shares/share', dn) + obj = umc_module_for_edit("shares/share", dn) - module.params['printablename'] = f"{name} ({module.params['host']})" + module.params["printablename"] = 
f"{name} ({module.params['host']})" for k in obj.keys(): if module.params[k] is True: - module.params[k] = '1' + module.params[k] = "1" elif module.params[k] is False: - module.params[k] = '0' + module.params[k] = "0" obj[k] = module.params[k] diff = obj.diff() @@ -539,28 +450,19 @@ def main(): elif changed: obj.modify() except Exception as err: - module.fail_json( - msg=f'Creating/editing share {name} in {container} failed: {err}' - ) + module.fail_json(msg=f"Creating/editing share {name} in {container} failed: {err}") - if state == 'absent' and exists: + if state == "absent" and exists: try: - obj = umc_module_for_edit('shares/share', dn) + obj = umc_module_for_edit("shares/share", dn) if not module.check_mode: obj.remove() changed = True except Exception as err: - module.fail_json( - msg=f'Removing share {name} in {container} failed: {err}' - ) + module.fail_json(msg=f"Removing share {name} in {container} failed: {err}") - module.exit_json( - changed=changed, - name=name, - diff=diff, - container=container - ) + module.exit_json(changed=changed, name=name, diff=diff, container=container) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/udm_user.py b/plugins/modules/udm_user.py index 1bcb8d4ef5c..3a72c6703ea 100644 --- a/plugins/modules/udm_user.py +++ b/plugins/modules/udm_user.py @@ -341,6 +341,7 @@ LEGACYCRYPT_IMPORT_ERROR: str | None try: import legacycrypt + if not HAS_CRYPT: crypt = legacycrypt except ImportError: @@ -355,179 +356,120 @@ def main(): expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d") module = AnsibleModule( argument_spec=dict( - birthday=dict(type='str'), - city=dict(type='str'), - country=dict(type='str'), - department_number=dict(type='str', - aliases=['departmentNumber']), - description=dict(type='str'), - display_name=dict(type='str', - aliases=['displayName']), - email=dict(default=[''], - type='list', - elements='str'), - employee_number=dict(type='str', - aliases=['employeeNumber']), - employee_type=dict(type='str', - aliases=['employeeType']), - firstname=dict(type='str'), - gecos=dict(type='str'), - groups=dict(default=[], - type='list', - elements='str'), - home_share=dict(type='str', - aliases=['homeShare']), - home_share_path=dict(type='str', - aliases=['homeSharePath']), - home_telephone_number=dict(default=[], - type='list', - elements='str', - aliases=['homeTelephoneNumber']), - homedrive=dict(type='str'), - lastname=dict(type='str'), - mail_alternative_address=dict(default=[], - type='list', - elements='str', - aliases=['mailAlternativeAddress']), - mail_home_server=dict(type='str', - aliases=['mailHomeServer']), - mail_primary_address=dict(type='str', - aliases=['mailPrimaryAddress']), - mobile_telephone_number=dict(default=[], - type='list', - elements='str', - aliases=['mobileTelephoneNumber']), - organisation=dict(type='str', - aliases=['organization']), - overridePWHistory=dict(default=False, - type='bool', - aliases=['override_pw_history']), - overridePWLength=dict(default=False, - type='bool', - aliases=['override_pw_length']), - pager_telephonenumber=dict(default=[], - type='list', - elements='str', - aliases=['pagerTelephonenumber']), - password=dict(type='str', - no_log=True), - phone=dict(default=[], - type='list', - elements='str'), - postcode=dict(type='str'), - primary_group=dict(type='str', - aliases=['primaryGroup']), - profilepath=dict(type='str'), - pwd_change_next_login=dict(type='str', - choices=['0', '1'], - aliases=['pwdChangeNextLogin']), - 
room_number=dict(type='str', - aliases=['roomNumber']), - samba_privileges=dict(default=[], - type='list', - elements='str', - aliases=['sambaPrivileges']), - samba_user_workstations=dict(default=[], - type='list', - elements='str', - aliases=['sambaUserWorkstations']), - sambahome=dict(type='str'), - scriptpath=dict(type='str'), - secretary=dict(default=[], - type='list', - elements='str'), - serviceprovider=dict(default=[''], - type='list', - elements='str'), - shell=dict(default='/bin/bash', - type='str'), - street=dict(type='str'), - title=dict(type='str'), - unixhome=dict(type='str'), - userexpiry=dict(type='str'), - username=dict(required=True, - aliases=['name'], - type='str'), - position=dict(default='', - type='str'), - update_password=dict(default='always', - choices=['always', 'on_create'], - type='str'), - ou=dict(default='', - type='str'), - subpath=dict(default='cn=users', - type='str'), - state=dict(default='present', - choices=['present', 'absent'], - type='str') + birthday=dict(type="str"), + city=dict(type="str"), + country=dict(type="str"), + department_number=dict(type="str", aliases=["departmentNumber"]), + description=dict(type="str"), + display_name=dict(type="str", aliases=["displayName"]), + email=dict(default=[""], type="list", elements="str"), + employee_number=dict(type="str", aliases=["employeeNumber"]), + employee_type=dict(type="str", aliases=["employeeType"]), + firstname=dict(type="str"), + gecos=dict(type="str"), + groups=dict(default=[], type="list", elements="str"), + home_share=dict(type="str", aliases=["homeShare"]), + home_share_path=dict(type="str", aliases=["homeSharePath"]), + home_telephone_number=dict(default=[], type="list", elements="str", aliases=["homeTelephoneNumber"]), + homedrive=dict(type="str"), + lastname=dict(type="str"), + mail_alternative_address=dict(default=[], type="list", elements="str", aliases=["mailAlternativeAddress"]), + mail_home_server=dict(type="str", aliases=["mailHomeServer"]), + mail_primary_address=dict(type="str", aliases=["mailPrimaryAddress"]), + mobile_telephone_number=dict(default=[], type="list", elements="str", aliases=["mobileTelephoneNumber"]), + organisation=dict(type="str", aliases=["organization"]), + overridePWHistory=dict(default=False, type="bool", aliases=["override_pw_history"]), + overridePWLength=dict(default=False, type="bool", aliases=["override_pw_length"]), + pager_telephonenumber=dict(default=[], type="list", elements="str", aliases=["pagerTelephonenumber"]), + password=dict(type="str", no_log=True), + phone=dict(default=[], type="list", elements="str"), + postcode=dict(type="str"), + primary_group=dict(type="str", aliases=["primaryGroup"]), + profilepath=dict(type="str"), + pwd_change_next_login=dict(type="str", choices=["0", "1"], aliases=["pwdChangeNextLogin"]), + room_number=dict(type="str", aliases=["roomNumber"]), + samba_privileges=dict(default=[], type="list", elements="str", aliases=["sambaPrivileges"]), + samba_user_workstations=dict(default=[], type="list", elements="str", aliases=["sambaUserWorkstations"]), + sambahome=dict(type="str"), + scriptpath=dict(type="str"), + secretary=dict(default=[], type="list", elements="str"), + serviceprovider=dict(default=[""], type="list", elements="str"), + shell=dict(default="/bin/bash", type="str"), + street=dict(type="str"), + title=dict(type="str"), + unixhome=dict(type="str"), + userexpiry=dict(type="str"), + username=dict(required=True, aliases=["name"], type="str"), + position=dict(default="", type="str"), + 
update_password=dict(default="always", choices=["always", "on_create"], type="str"), + ou=dict(default="", type="str"), + subpath=dict(default="cn=users", type="str"), + state=dict(default="present", choices=["present", "absent"], type="str"), ), supports_check_mode=True, - required_if=([ - ('state', 'present', ['firstname', 'lastname', 'password']) - ]) + required_if=([("state", "present", ["firstname", "lastname", "password"])]), ) if not HAS_CRYPT and not HAS_LEGACYCRYPT: module.fail_json( - msg=missing_required_lib('crypt (part of standard library up to Python 3.12) or legacycrypt (PyPI)'), + msg=missing_required_lib("crypt (part of standard library up to Python 3.12) or legacycrypt (PyPI)"), exception=LEGACYCRYPT_IMPORT_ERROR, ) - username = module.params['username'] - position = module.params['position'] - ou = module.params['ou'] - subpath = module.params['subpath'] - state = module.params['state'] + username = module.params["username"] + position = module.params["position"] + ou = module.params["ou"] + subpath = module.params["subpath"] + state = module.params["state"] changed = False diff = None - users = list(ldap_search( - f'(&(objectClass=posixAccount)(uid={username}))', - attr=['uid'] - )) - if position != '': + users = list(ldap_search(f"(&(objectClass=posixAccount)(uid={username}))", attr=["uid"])) + if position != "": container = position else: - if ou != '': - ou = f'ou={ou},' - if subpath != '': - subpath = f'{subpath},' - container = f'{subpath}{ou}{base_dn()}' - user_dn = f'uid={username},{container}' + if ou != "": + ou = f"ou={ou}," + if subpath != "": + subpath = f"{subpath}," + container = f"{subpath}{ou}{base_dn()}" + user_dn = f"uid={username},{container}" exists = bool(len(users)) - if state == 'present': + if state == "present": try: if not exists: - obj = umc_module_for_add('users/user', container) + obj = umc_module_for_add("users/user", container) else: - obj = umc_module_for_edit('users/user', user_dn) + obj = umc_module_for_edit("users/user", user_dn) - if module.params['displayName'] is None: - module.params['displayName'] = f"{module.params['firstname']} {module.params['lastname']}" - if module.params['unixhome'] is None: - module.params['unixhome'] = f"/home/{module.params['username']}" + if module.params["displayName"] is None: + module.params["displayName"] = f"{module.params['firstname']} {module.params['lastname']}" + if module.params["unixhome"] is None: + module.params["unixhome"] = f"/home/{module.params['username']}" for k in obj.keys(): - if (k != 'password' and - k != 'groups' and - k != 'overridePWHistory' and - k in module.params and - module.params[k] is not None): + if ( + k != "password" + and k != "groups" + and k != "overridePWHistory" + and k in module.params + and module.params[k] is not None + ): obj[k] = module.params[k] # handle some special values - obj['e-mail'] = module.params['email'] - if 'userexpiry' in obj and obj.get('userexpiry') is None: - obj['userexpiry'] = expiry - password = module.params['password'] - if obj['password'] is None: - obj['password'] = password - if module.params['update_password'] == 'always': - old_password = obj['password'].split('}', 2)[1] + obj["e-mail"] = module.params["email"] + if "userexpiry" in obj and obj.get("userexpiry") is None: + obj["userexpiry"] = expiry + password = module.params["password"] + if obj["password"] is None: + obj["password"] = password + if module.params["update_password"] == "always": + old_password = obj["password"].split("}", 2)[1] if crypt.crypt(password, old_password) 
!= old_password: - obj['overridePWHistory'] = module.params['overridePWHistory'] - obj['overridePWLength'] = module.params['overridePWLength'] - obj['password'] = password + obj["overridePWHistory"] = module.params["overridePWHistory"] + obj["overridePWLength"] = module.params["overridePWLength"] + obj["password"] = password diff = obj.diff() if exists: @@ -542,44 +484,33 @@ def main(): elif changed: obj.modify() except Exception: - module.fail_json( - msg=f"Creating/editing user {username} in {container} failed" - ) + module.fail_json(msg=f"Creating/editing user {username} in {container} failed") try: - groups = module.params['groups'] + groups = module.params["groups"] if groups: filter = f"(&(objectClass=posixGroup)(|(cn={')(cn='.join(groups)})))" - group_dns = list(ldap_search(filter, attr=['dn'])) + group_dns = list(ldap_search(filter, attr=["dn"])) for dn in group_dns: - grp = umc_module_for_edit('groups/group', dn[0]) - if user_dn not in grp['users']: - grp['users'].append(user_dn) + grp = umc_module_for_edit("groups/group", dn[0]) + if user_dn not in grp["users"]: + grp["users"].append(user_dn) if not module.check_mode: grp.modify() changed = True except Exception: - module.fail_json( - msg=f"Adding groups to user {username} failed" - ) + module.fail_json(msg=f"Adding groups to user {username} failed") - if state == 'absent' and exists: + if state == "absent" and exists: try: - obj = umc_module_for_edit('users/user', user_dn) + obj = umc_module_for_edit("users/user", user_dn) if not module.check_mode: obj.remove() changed = True except Exception: - module.fail_json( - msg=f"Removing user {username} failed" - ) + module.fail_json(msg=f"Removing user {username} failed") - module.exit_json( - changed=changed, - username=username, - diff=diff, - container=container - ) + module.exit_json(changed=changed, username=username, diff=diff, container=container) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/ufw.py b/plugins/modules/ufw.py index e38176d7135..96c965ad59a 100644 --- a/plugins/modules/ufw.py +++ b/plugins/modules/ufw.py @@ -319,41 +319,47 @@ def compile_ipv6_regexp(): def main(): - command_keys = ['state', 'default', 'rule', 'logging'] + command_keys = ["state", "default", "rule", "logging"] module = AnsibleModule( argument_spec=dict( - state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']), - default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']), - logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']), - direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']), - delete=dict(type='bool', default=False), - route=dict(type='bool', default=False), - insert=dict(type='int'), - insert_relative_to=dict(choices=['zero', 'first-ipv4', 'last-ipv4', 'first-ipv6', 'last-ipv6'], default='zero'), - rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']), - interface=dict(type='str', aliases=['if']), - interface_in=dict(type='str', aliases=['if_in']), - interface_out=dict(type='str', aliases=['if_out']), - log=dict(type='bool', default=False), - from_ip=dict(type='str', default='any', aliases=['from', 'src']), - from_port=dict(type='str'), - to_ip=dict(type='str', default='any', aliases=['dest', 'to']), - to_port=dict(type='str', aliases=['port']), - proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp', 'gre', 'igmp', 'vrrp']), - name=dict(type='str', aliases=['app']), - 
comment=dict(type='str'), + state=dict(type="str", choices=["enabled", "disabled", "reloaded", "reset"]), + default=dict(type="str", aliases=["policy"], choices=["allow", "deny", "reject"]), + logging=dict(type="str", choices=["full", "high", "low", "medium", "off", "on"]), + direction=dict(type="str", choices=["in", "incoming", "out", "outgoing", "routed"]), + delete=dict(type="bool", default=False), + route=dict(type="bool", default=False), + insert=dict(type="int"), + insert_relative_to=dict( + choices=["zero", "first-ipv4", "last-ipv4", "first-ipv6", "last-ipv6"], default="zero" + ), + rule=dict(type="str", choices=["allow", "deny", "limit", "reject"]), + interface=dict(type="str", aliases=["if"]), + interface_in=dict(type="str", aliases=["if_in"]), + interface_out=dict(type="str", aliases=["if_out"]), + log=dict(type="bool", default=False), + from_ip=dict(type="str", default="any", aliases=["from", "src"]), + from_port=dict(type="str"), + to_ip=dict(type="str", default="any", aliases=["dest", "to"]), + to_port=dict(type="str", aliases=["port"]), + proto=dict( + type="str", + aliases=["protocol"], + choices=["ah", "any", "esp", "ipv6", "tcp", "udp", "gre", "igmp", "vrrp"], + ), + name=dict(type="str", aliases=["app"]), + comment=dict(type="str"), ), supports_check_mode=True, mutually_exclusive=[ - ['name', 'proto', 'logging'], + ["name", "proto", "logging"], # Mutual exclusivity with `interface` implied by `required_by`. - ['direction', 'interface_in'], - ['direction', 'interface_out'], + ["direction", "interface_in"], + ["direction", "interface_out"], ], required_one_of=([command_keys]), required_by=dict( - interface=('direction', ), + interface=("direction",), ), ) @@ -363,16 +369,16 @@ def main(): ipv6_regexp = compile_ipv6_regexp() def filter_line_that_not_start_with(pattern, content): - return ''.join([line for line in content.splitlines(True) if line.startswith(pattern)]) + return "".join([line for line in content.splitlines(True) if line.startswith(pattern)]) def filter_line_that_contains(pattern, content): return [line for line in content.splitlines(True) if pattern in line] def filter_line_that_not_contains(pattern, content): - return ''.join([line for line in content.splitlines(True) if not line.contains(pattern)]) + return "".join([line for line in content.splitlines(True) if not line.contains(pattern)]) def filter_line_that_match_func(match_func, content): - return ''.join([line for line in content.splitlines(True) if match_func(line) is not None]) + return "".join([line for line in content.splitlines(True) if match_func(line) is not None]) def filter_line_that_contains_ipv4(content): return filter_line_that_match_func(ipv4_regexp.search, content) @@ -387,7 +393,7 @@ def is_starting_by_ipv6(ip): return ipv6_regexp.match(ip) is not None def execute(cmd, ignore_error=False): - cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd))) + cmd = " ".join(map(itemgetter(-1), filter(itemgetter(0), cmd))) cmds.append(cmd) (rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"}) @@ -398,12 +404,14 @@ def execute(cmd, ignore_error=False): return out def get_current_rules(): - user_rules_files = ["/lib/ufw/user.rules", - "/lib/ufw/user6.rules", - "/etc/ufw/user.rules", - "/etc/ufw/user6.rules", - "/var/lib/ufw/user.rules", - "/var/lib/ufw/user6.rules"] + user_rules_files = [ + "/lib/ufw/user.rules", + "/lib/ufw/user6.rules", + "/etc/ufw/user.rules", + "/etc/ufw/user6.rules", + "/var/lib/ufw/user.rules", + "/var/lib/ufw/user6.rules", + ] cmd = [[grep_bin], ["-h"], 
["'^### tuple'"]] @@ -416,11 +424,11 @@ def ufw_version(): """ out = execute([[ufw_bin], ["--version"]]) - lines = [x for x in out.split('\n') if x.strip() != ''] + lines = [x for x in out.split("\n") if x.strip() != ""] if len(lines) == 0: module.fail_json(msg="Failed to get ufw version.", rc=0, out=out) - matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0]) + matches = re.search(r"^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$", lines[0]) if matches is None: module.fail_json(msg="Failed to get ufw version.", rc=0, out=out) @@ -438,37 +446,35 @@ def ufw_version(): commands = {key: params[key] for key in command_keys if params[key]} # Ensure ufw is available - ufw_bin = module.get_bin_path('ufw', True) - grep_bin = module.get_bin_path('grep', True) + ufw_bin = module.get_bin_path("ufw", True) + grep_bin = module.get_bin_path("grep", True) # Save the pre state and rules in order to recognize changes - pre_state = execute([[ufw_bin], ['status verbose']]) + pre_state = execute([[ufw_bin], ["status verbose"]]) pre_rules = get_current_rules() changed = False # Execute filter - for (command, value) in commands.items(): + for command, value in commands.items(): + cmd = [[ufw_bin], [module.check_mode, "--dry-run"]] - cmd = [[ufw_bin], [module.check_mode, '--dry-run']] + if command == "state": + states = {"enabled": "enable", "disabled": "disable", "reloaded": "reload", "reset": "reset"} - if command == 'state': - states = {'enabled': 'enable', 'disabled': 'disable', - 'reloaded': 'reload', 'reset': 'reset'} - - if value in ['reloaded', 'reset']: + if value in ["reloaded", "reset"]: changed = True if module.check_mode: # "active" would also match "inactive", hence the space ufw_enabled = pre_state.find(" active") != -1 - if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled): + if (value == "disabled" and ufw_enabled) or (value == "enabled" and not ufw_enabled): changed = True else: - execute(cmd + [['-f'], [states[value]]]) + execute(cmd + [["-f"], [states[value]]]) - elif command == 'logging': - extract = re.search(r'Logging: (on|off)(?: \(([a-z]+)\))?', pre_state) + elif command == "logging": + extract = re.search(r"Logging: (on|off)(?: \(([a-z]+)\))?", pre_state) if extract: current_level = extract.group(2) current_on_off_value = extract.group(1) @@ -485,96 +491,102 @@ def ufw_version(): if not module.check_mode: execute(cmd + [[command], [value]]) - elif command == 'default': - if params['direction'] not in ['outgoing', 'incoming', 'routed', None]: - module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed", or direction must not be specified.') + elif command == "default": + if params["direction"] not in ["outgoing", "incoming", "routed", None]: + module.fail_json( + msg='For default, direction must be one of "outgoing", "incoming" and "routed", or direction must not be specified.' 
+ ) if module.check_mode: - regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)' + regexp = r"Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)" extract = re.search(regexp, pre_state) if extract is not None: current_default_values = {} current_default_values["incoming"] = extract.group(1) current_default_values["outgoing"] = extract.group(2) current_default_values["routed"] = extract.group(3) - v = current_default_values[params['direction'] or 'incoming'] - if v not in (value, 'disabled'): + v = current_default_values[params["direction"] or "incoming"] + if v not in (value, "disabled"): changed = True else: changed = True else: - execute(cmd + [[command], [value], [params['direction']]]) - - elif command == 'rule': - if params['direction'] not in ['in', 'out', None]: - module.fail_json(msg='For rules, direction must be one of "in" and "out", or direction must not be specified.') - if not params['route'] and params['interface_in'] and params['interface_out']: - module.fail_json(msg='Only route rules can combine ' - 'interface_in and interface_out') + execute(cmd + [[command], [value], [params["direction"]]]) + + elif command == "rule": + if params["direction"] not in ["in", "out", None]: + module.fail_json( + msg='For rules, direction must be one of "in" and "out", or direction must not be specified.' + ) + if not params["route"] and params["interface_in"] and params["interface_out"]: + module.fail_json(msg="Only route rules can combine interface_in and interface_out") # Rules are constructed according to the long format # # ufw [--dry-run] [route] [delete | insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \ # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \ # [proto protocol] [app application] [comment COMMENT] - cmd.append([module.boolean(params['route']), 'route']) - cmd.append([module.boolean(params['delete']), 'delete']) - if params['insert'] is not None and not params['delete']: - relative_to_cmd = params['insert_relative_to'] - if relative_to_cmd == 'zero': - insert_to = params['insert'] + cmd.append([module.boolean(params["route"]), "route"]) + cmd.append([module.boolean(params["delete"]), "delete"]) + if params["insert"] is not None and not params["delete"]: + relative_to_cmd = params["insert_relative_to"] + if relative_to_cmd == "zero": + insert_to = params["insert"] else: - (dummy, numbered_state, dummy) = module.run_command([ufw_bin, 'status', 'numbered']) - numbered_line_re = re.compile(R'^\[ *([0-9]+)\] ') - lines = [(numbered_line_re.match(line), '(v6)' in line) for line in numbered_state.splitlines()] + (dummy, numbered_state, dummy) = module.run_command([ufw_bin, "status", "numbered"]) + numbered_line_re = re.compile(R"^\[ *([0-9]+)\] ") + lines = [(numbered_line_re.match(line), "(v6)" in line) for line in numbered_state.splitlines()] lines = [(int(matcher.group(1)), ipv6) for (matcher, ipv6) in lines if matcher] last_number = max([no for (no, ipv6) in lines]) if lines else 0 has_ipv4 = any(not ipv6 for (no, ipv6) in lines) has_ipv6 = any(ipv6 for (no, ipv6) in lines) - if relative_to_cmd == 'first-ipv4': + if relative_to_cmd == "first-ipv4": relative_to = 1 - elif relative_to_cmd == 'last-ipv4': + elif relative_to_cmd == "last-ipv4": relative_to = max([no for (no, ipv6) in lines if not ipv6]) if has_ipv4 else 1 - elif relative_to_cmd == 'first-ipv6': + elif relative_to_cmd == "first-ipv6": relative_to = 
max([no for (no, ipv6) in lines if not ipv6]) + 1 if has_ipv4 else 1 - elif relative_to_cmd == 'last-ipv6': + elif relative_to_cmd == "last-ipv6": relative_to = last_number if has_ipv6 else last_number + 1 - insert_to = params['insert'] + relative_to + insert_to = params["insert"] + relative_to if insert_to > last_number: # ufw does not like it when the insert number is larger than the # maximal rule number for IPv4/IPv6. insert_to = None cmd.append([insert_to is not None, f"insert {insert_to}"]) cmd.append([value]) - cmd.append([params['direction'], params['direction']]) - cmd.append([params['interface'], f"on {params['interface']}"]) - cmd.append([params['interface_in'], f"in on {params['interface_in']}"]) - cmd.append([params['interface_out'], f"out on {params['interface_out']}"]) - cmd.append([module.boolean(params['log']), 'log']) - - for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"), - ('to_ip', "to %s"), ('to_port', "port %s"), - ('proto', "proto %s"), ('name', "app '%s'")]: + cmd.append([params["direction"], params["direction"]]) + cmd.append([params["interface"], f"on {params['interface']}"]) + cmd.append([params["interface_in"], f"in on {params['interface_in']}"]) + cmd.append([params["interface_out"], f"out on {params['interface_out']}"]) + cmd.append([module.boolean(params["log"]), "log"]) + + for key, template in [ + ("from_ip", "from %s"), + ("from_port", "port %s"), + ("to_ip", "to %s"), + ("to_port", "port %s"), + ("proto", "proto %s"), + ("name", "app '%s'"), + ]: value = params[key] cmd.append([value, template % (value)]) ufw_major, ufw_minor, dummy = ufw_version() # comment is supported only in ufw version after 0.35 if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0: - cmd.append([params['comment'], f"comment '{params['comment']}'"]) + cmd.append([params["comment"], f"comment '{params['comment']}'"]) rules_dry = execute(cmd) if module.check_mode: - nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry)) if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))): - rules_dry = filter_line_that_not_start_with("### tuple", rules_dry) # ufw dry-run doesn't send all rules so have to compare ipv4 or ipv6 rules - if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']): + if is_starting_by_ipv4(params["from_ip"]) or is_starting_by_ipv4(params["to_ip"]): if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry): changed = True - elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']): + elif is_starting_by_ipv6(params["from_ip"]) or is_starting_by_ipv6(params["to_ip"]): if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry): changed = True elif pre_rules != rules_dry: @@ -584,12 +596,12 @@ def ufw_version(): if module.check_mode: return module.exit_json(changed=changed, commands=cmds) else: - post_state = execute([[ufw_bin], ['status'], ['verbose']]) + post_state = execute([[ufw_bin], ["status"], ["verbose"]]) if not changed: post_rules = get_current_rules() changed = (pre_state != post_state) or (pre_rules != post_rules) return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/uptimerobot.py b/plugins/modules/uptimerobot.py index 133c9ff44fb..548f20bc86c 100644 --- a/plugins/modules/uptimerobot.py +++ b/plugins/modules/uptimerobot.py @@ -66,21 +66,17 @@ API_BASE = 
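Aside on the ufw.py hunks above: each candidate fragment of the ufw command line is stored as a [condition, text] pair (or a bare [text]), and execute() keeps only the pairs whose first element is truthy before joining the last element of each, so optional pieces like "delete" or "in on <interface>" drop out cleanly. A runnable sketch of that join, with invented rule values:

from operator import itemgetter

def build_cmd(parts):
    # keep entries whose first element is truthy, then take each
    # entry's last element; a bare ["allow"] survives because its
    # text doubles as the condition
    return " ".join(map(itemgetter(-1), filter(itemgetter(0), parts)))

delete = False
interface = "eth0"
parts = [
    ["/usr/sbin/ufw"],
    [delete, "delete"],                 # dropped: condition is False
    ["allow"],
    [interface, f"in on {interface}"],  # kept: interface is set
]
print(build_cmd(parts))  # /usr/sbin/ufw allow in on eth0

Unrelated to the reformat itself: filter_line_that_not_contains in this hunk calls line.contains(pattern), which is not a str method in Python; "pattern not in line" would be the working test. The formatting-only change faithfully preserves that pre-existing wart.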
"https://api.uptimerobot.com/" -API_ACTIONS = dict( - status='getMonitors?', - editMonitor='editMonitor?' -) +API_ACTIONS = dict(status="getMonitors?", editMonitor="editMonitor?") -API_FORMAT = 'json' +API_FORMAT = "json" API_NOJSONCALLBACK = 1 CHANGED_STATE = False SUPPORTS_CHECK_MODE = False def checkID(module, params): - data = urlencode(params) - full_uri = API_BASE + API_ACTIONS['status'] + data + full_uri = API_BASE + API_ACTIONS["status"] + data req, info = fetch_url(module, full_uri) result = to_text(req.read()) jsonresult = json.loads(result) @@ -89,66 +85,57 @@ def checkID(module, params): def startMonitor(module, params): - - params['monitorStatus'] = 1 + params["monitorStatus"] = 1 data = urlencode(params) - full_uri = API_BASE + API_ACTIONS['editMonitor'] + data + full_uri = API_BASE + API_ACTIONS["editMonitor"] + data req, info = fetch_url(module, full_uri) result = to_text(req.read()) jsonresult = json.loads(result) req.close() - return jsonresult['stat'] + return jsonresult["stat"] def pauseMonitor(module, params): - - params['monitorStatus'] = 0 + params["monitorStatus"] = 0 data = urlencode(params) - full_uri = API_BASE + API_ACTIONS['editMonitor'] + data + full_uri = API_BASE + API_ACTIONS["editMonitor"] + data req, info = fetch_url(module, full_uri) result = to_text(req.read()) jsonresult = json.loads(result) req.close() - return jsonresult['stat'] + return jsonresult["stat"] def main(): - module = AnsibleModule( argument_spec=dict( - state=dict(required=True, choices=['started', 'paused']), + state=dict(required=True, choices=["started", "paused"]), apikey=dict(required=True, no_log=True), - monitorid=dict(required=True) + monitorid=dict(required=True), ), - supports_check_mode=SUPPORTS_CHECK_MODE + supports_check_mode=SUPPORTS_CHECK_MODE, ) params = dict( - apiKey=module.params['apikey'], - monitors=module.params['monitorid'], - monitorID=module.params['monitorid'], + apiKey=module.params["apikey"], + monitors=module.params["monitorid"], + monitorID=module.params["monitorid"], format=API_FORMAT, - noJsonCallback=API_NOJSONCALLBACK + noJsonCallback=API_NOJSONCALLBACK, ) check_result = checkID(module, params) - if check_result['stat'] != "ok": - module.fail_json( - msg="failed", - result=check_result['message'] - ) + if check_result["stat"] != "ok": + module.fail_json(msg="failed", result=check_result["message"]) - if module.params['state'] == 'started': + if module.params["state"] == "started": monitor_result = startMonitor(module, params) else: monitor_result = pauseMonitor(module, params) - module.exit_json( - msg="success", - result=monitor_result - ) + module.exit_json(msg="success", result=monitor_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/urpmi.py b/plugins/modules/urpmi.py index 0698cc91e15..915eaa190b6 100644 --- a/plugins/modules/urpmi.py +++ b/plugins/modules/urpmi.py @@ -109,7 +109,6 @@ def query_package_provides(module, name, root): def update_package_db(module): - urpmiupdate_path = module.get_bin_path("urpmi.update", True) cmd = [urpmiupdate_path, "-a", "-q"] rc, stdout, stderr = module.run_command(cmd, check_rc=False) @@ -118,7 +117,6 @@ def update_package_db(module): def remove_packages(module, packages, root): - remove_c = 0 # Using a for loop in case of error, we can report the package that failed for package in packages: @@ -136,14 +134,12 @@ def remove_packages(module, packages, root): remove_c += 1 if remove_c > 0: - module.exit_json(changed=True, msg=f"removed {remove_c} package(s)") 
module.exit_json(changed=False, msg="package(s) already absent") def install_packages(module, pkgspec, root, force=True, no_recommends=True): - packages = "" for package in pkgspec: if not query_package_provides(module, package, root): @@ -151,12 +147,12 @@ def install_packages(module, pkgspec, root, force=True, no_recommends=True): if len(packages) != 0: if no_recommends: - no_recommends_yes = ['--no-recommends'] + no_recommends_yes = ["--no-recommends"] else: no_recommends_yes = [] if force: - force_yes = ['--force'] + force_yes = ["--force"] else: force_yes = [] @@ -188,27 +184,26 @@ def root_option(root): def main(): module = AnsibleModule( argument_spec=dict( - state=dict(type='str', default='present', - choices=['absent', 'installed', 'present', 'removed']), - update_cache=dict(type='bool', default=False), - force=dict(type='bool', default=True), - no_recommends=dict(type='bool', default=True), - name=dict(type='list', elements='str', required=True, aliases=['package', 'pkg']), - root=dict(type='str', aliases=['installroot']), + state=dict(type="str", default="present", choices=["absent", "installed", "present", "removed"]), + update_cache=dict(type="bool", default=False), + force=dict(type="bool", default=True), + no_recommends=dict(type="bool", default=True), + name=dict(type="list", elements="str", required=True, aliases=["package", "pkg"]), + root=dict(type="str", aliases=["installroot"]), ), ) p = module.params - if p['update_cache']: + if p["update_cache"]: update_package_db(module) - if p['state'] in ['installed', 'present']: - install_packages(module, p['name'], p['root'], p['force'], p['no_recommends']) + if p["state"] in ["installed", "present"]: + install_packages(module, p["name"], p["root"], p["force"], p["no_recommends"]) - elif p['state'] in ['removed', 'absent']: - remove_packages(module, p['name'], p['root']) + elif p["state"] in ["removed", "absent"]: + remove_packages(module, p["name"], p["root"]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/usb_facts.py b/plugins/modules/usb_facts.py index d1eb475b9ce..5d1f21c137a 100644 --- a/plugins/modules/usb_facts.py +++ b/plugins/modules/usb_facts.py @@ -74,37 +74,29 @@ def parse_lsusb(module, lsusb_path): rc, stdout, stderr = module.run_command(lsusb_path, check_rc=True) - regex = re.compile(r'^Bus (\d{3}) Device (\d{3}): ID ([0-9a-f]{4}:[0-9a-f]{4}) (.*)$') + regex = re.compile(r"^Bus (\d{3}) Device (\d{3}): ID ([0-9a-f]{4}:[0-9a-f]{4}) (.*)$") usb_devices = [] for line in stdout.splitlines(): match = re.match(regex, line) if not match: module.fail_json(msg=f"failed to parse unknown lsusb output {line}", stdout=stdout, stderr=stderr) - current_device = { - 'bus': match.group(1), - 'device': match.group(2), - 'id': match.group(3), - 'name': match.group(4) - } + current_device = {"bus": match.group(1), "device": match.group(2), "id": match.group(3), "name": match.group(4)} usb_devices.append(current_device) - return_value = { - "usb_devices": usb_devices - } - module.exit_json(msg=f"parsed {len(usb_devices)} USB devices", stdout=stdout, stderr=stderr, ansible_facts=return_value) + return_value = {"usb_devices": usb_devices} + module.exit_json( + msg=f"parsed {len(usb_devices)} USB devices", stdout=stdout, stderr=stderr, ansible_facts=return_value + ) def main(): - module = AnsibleModule( - {}, - supports_check_mode=True - ) + module = AnsibleModule({}, supports_check_mode=True) # Set LANG env since we parse stdout - module.run_command_environ_update = dict(LANGUAGE='C', 
LC_ALL='C') + module.run_command_environ_update = dict(LANGUAGE="C", LC_ALL="C") - lsusb_path = module.get_bin_path('lsusb', required=True) + lsusb_path = module.get_bin_path("lsusb", required=True) parse_lsusb(module, lsusb_path) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/utm_aaa_group.py b/plugins/modules/utm_aaa_group.py index 8cb492ab93d..ba427315342 100644 --- a/plugins/modules/utm_aaa_group.py +++ b/plugins/modules/utm_aaa_group.py @@ -202,26 +202,39 @@ def main(): endpoint = "aaa/group" - key_to_check_for_changes = ["comment", "adirectory_groups", "adirectory_groups_sids", "backend_match", "dynamic", - "edirectory_groups", "ipsec_dn", "ldap_attribute", "ldap_attribute_value", "members", - "network", "radius_groups", "tacacs_groups"] + key_to_check_for_changes = [ + "comment", + "adirectory_groups", + "adirectory_groups_sids", + "backend_match", + "dynamic", + "edirectory_groups", + "ipsec_dn", + "ldap_attribute", + "ldap_attribute_value", + "members", + "network", + "radius_groups", + "tacacs_groups", + ] module = UTMModule( argument_spec=dict( - name=dict(type='str', required=True), - adirectory_groups=dict(type='list', elements='str', default=[]), - adirectory_groups_sids=dict(type='dict', default={}), - backend_match=dict(type='str', default="none", - choices=["none", "adirectory", "edirectory", "radius", "tacacs", "ldap"]), - comment=dict(type='str', default=""), - dynamic=dict(type='str', default="none", choices=["none", "ipsec_dn", "directory_groups"]), - edirectory_groups=dict(type='list', elements='str', default=[]), - ipsec_dn=dict(type='str', default=""), - ldap_attribute=dict(type='str', default=""), - ldap_attribute_value=dict(type='str', default=""), - members=dict(type='list', elements='str', default=[]), - network=dict(type='str', default=""), - radius_groups=dict(type='list', elements='str', default=[]), - tacacs_groups=dict(type='list', elements='str', default=[]), + name=dict(type="str", required=True), + adirectory_groups=dict(type="list", elements="str", default=[]), + adirectory_groups_sids=dict(type="dict", default={}), + backend_match=dict( + type="str", default="none", choices=["none", "adirectory", "edirectory", "radius", "tacacs", "ldap"] + ), + comment=dict(type="str", default=""), + dynamic=dict(type="str", default="none", choices=["none", "ipsec_dn", "directory_groups"]), + edirectory_groups=dict(type="list", elements="str", default=[]), + ipsec_dn=dict(type="str", default=""), + ldap_attribute=dict(type="str", default=""), + ldap_attribute_value=dict(type="str", default=""), + members=dict(type="list", elements="str", default=[]), + network=dict(type="str", default=""), + radius_groups=dict(type="list", elements="str", default=[]), + tacacs_groups=dict(type="list", elements="str", default=[]), ) ) try: @@ -230,5 +243,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/utm_aaa_group_info.py b/plugins/modules/utm_aaa_group_info.py index f0e45994ea5..d3b11970cda 100644 --- a/plugins/modules/utm_aaa_group_info.py +++ b/plugins/modules/utm_aaa_group_info.py @@ -109,9 +109,7 @@ def main(): endpoint = "aaa/group" key_to_check_for_changes = [] module = UTMModule( - argument_spec=dict( - name=dict(type='str', required=True) - ), + argument_spec=dict(name=dict(type="str", required=True)), supports_check_mode=True, ) try: @@ -120,5 +118,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if 
__name__ == "__main__": main() diff --git a/plugins/modules/utm_ca_host_key_cert.py b/plugins/modules/utm_ca_host_key_cert.py index c98389f2cd7..6123954efbf 100644 --- a/plugins/modules/utm_ca_host_key_cert.py +++ b/plugins/modules/utm_ca_host_key_cert.py @@ -142,13 +142,13 @@ def main(): key_to_check_for_changes = ["ca", "certificate", "comment", "encrypted", "key", "meta"] module = UTMModule( argument_spec=dict( - name=dict(type='str', required=True), - ca=dict(type='str', required=True), - meta=dict(type='str', required=True), - certificate=dict(type='str', required=True), - comment=dict(type='str'), - encrypted=dict(type='bool', default=False), - key=dict(type='str', no_log=True), + name=dict(type="str", required=True), + ca=dict(type="str", required=True), + meta=dict(type="str", required=True), + certificate=dict(type="str", required=True), + comment=dict(type="str"), + encrypted=dict(type="bool", default=False), + key=dict(type="str", no_log=True), ) ) try: @@ -158,5 +158,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/utm_ca_host_key_cert_info.py b/plugins/modules/utm_ca_host_key_cert_info.py index 174d8cf3b1f..a1f343936e0 100644 --- a/plugins/modules/utm_ca_host_key_cert_info.py +++ b/plugins/modules/utm_ca_host_key_cert_info.py @@ -89,9 +89,7 @@ def main(): endpoint = "ca/host_key_cert" key_to_check_for_changes = [] module = UTMModule( - argument_spec=dict( - name=dict(type='str', required=True) - ), + argument_spec=dict(name=dict(type="str", required=True)), supports_check_mode=True, ) try: @@ -101,5 +99,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/utm_dns_host.py b/plugins/modules/utm_dns_host.py index 168f92d44f8..d0b284a00e3 100644 --- a/plugins/modules/utm_dns_host.py +++ b/plugins/modules/utm_dns_host.py @@ -141,15 +141,15 @@ def main(): key_to_check_for_changes = ["comment", "hostname", "interface"] module = UTMModule( argument_spec=dict( - name=dict(type='str', required=True), - address=dict(type='str', default='0.0.0.0'), - address6=dict(type='str', default='::'), - comment=dict(type='str', default=""), - hostname=dict(type='str'), - interface=dict(type='str', default=""), - resolved=dict(type='bool', default=False), - resolved6=dict(type='bool', default=False), - timeout=dict(type='int', default=0), + name=dict(type="str", required=True), + address=dict(type="str", default="0.0.0.0"), + address6=dict(type="str", default="::"), + comment=dict(type="str", default=""), + hostname=dict(type="str"), + interface=dict(type="str", default=""), + resolved=dict(type="bool", default=False), + resolved6=dict(type="bool", default=False), + timeout=dict(type="int", default=0), ) ) try: @@ -158,5 +158,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/utm_network_interface_address.py b/plugins/modules/utm_network_interface_address.py index 99786c03bb5..f367fe6e666 100644 --- a/plugins/modules/utm_network_interface_address.py +++ b/plugins/modules/utm_network_interface_address.py @@ -119,12 +119,12 @@ def main(): key_to_check_for_changes = ["comment", "address"] module = UTMModule( argument_spec=dict( - name=dict(type='str', required=True), - address=dict(type='str', required=True), - comment=dict(type='str', default=""), - address6=dict(type='str'), - resolved=dict(type='bool'), - 
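Aside on the usb_facts.py hunk further above: every lsusb output line must match one anchored regex with four capture groups (bus, device, vendor:product ID, description), and any non-matching line fails the module. A standalone sketch against one sample line (the sample device is invented):

import re

regex = re.compile(r"^Bus (\d{3}) Device (\d{3}): ID ([0-9a-f]{4}:[0-9a-f]{4}) (.*)$")

sample = "Bus 001 Device 002: ID 1d6b:0002 Linux Foundation 2.0 root hub"
match = regex.match(sample)
assert match is not None, "a non-matching line would trigger module.fail_json"
print({
    "bus": match.group(1),     # '001'
    "device": match.group(2),  # '002'
    "id": match.group(3),      # '1d6b:0002'
    "name": match.group(4),    # 'Linux Foundation 2.0 root hub'
})

Pinning LANGUAGE/LC_ALL to C before running lsusb, as the hunk does, keeps that output format stable for parsing.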
resolved6=dict(type='bool'), + name=dict(type="str", required=True), + address=dict(type="str", required=True), + comment=dict(type="str", default=""), + address6=dict(type="str"), + resolved=dict(type="bool"), + resolved6=dict(type="bool"), ) ) try: @@ -133,5 +133,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/utm_network_interface_address_info.py b/plugins/modules/utm_network_interface_address_info.py index ac89139a73c..9ee203db2b4 100644 --- a/plugins/modules/utm_network_interface_address_info.py +++ b/plugins/modules/utm_network_interface_address_info.py @@ -85,9 +85,7 @@ def main(): endpoint = "network/interface_address" key_to_check_for_changes = [] module = UTMModule( - argument_spec=dict( - name=dict(type='str', required=True) - ), + argument_spec=dict(name=dict(type="str", required=True)), supports_check_mode=True, ) try: @@ -96,5 +94,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/utm_proxy_auth_profile.py b/plugins/modules/utm_proxy_auth_profile.py index e9139a3cf7e..b664c8dcad5 100644 --- a/plugins/modules/utm_proxy_auth_profile.py +++ b/plugins/modules/utm_proxy_auth_profile.py @@ -299,44 +299,62 @@ def main(): endpoint = "reverse_proxy/auth_profile" - key_to_check_for_changes = ["aaa", "basic_prompt", "backend_mode", "backend_strip_basic_auth", - "backend_user_prefix", "backend_user_suffix", "comment", "frontend_cookie", - "frontend_cookie_secret", "frontend_form", "frontend_form_template", - "frontend_login", "frontend_logout", "frontend_mode", "frontend_realm", - "frontend_session_allow_persistency", "frontend_session_lifetime", - "frontend_session_lifetime_limited", "frontend_session_lifetime_scope", - "frontend_session_timeout", "frontend_session_timeout_enabled", - "frontend_session_timeout_scope", "logout_delegation_urls", "logout_mode", - "redirect_to_requested_url"] + key_to_check_for_changes = [ + "aaa", + "basic_prompt", + "backend_mode", + "backend_strip_basic_auth", + "backend_user_prefix", + "backend_user_suffix", + "comment", + "frontend_cookie", + "frontend_cookie_secret", + "frontend_form", + "frontend_form_template", + "frontend_login", + "frontend_logout", + "frontend_mode", + "frontend_realm", + "frontend_session_allow_persistency", + "frontend_session_lifetime", + "frontend_session_lifetime_limited", + "frontend_session_lifetime_scope", + "frontend_session_timeout", + "frontend_session_timeout_enabled", + "frontend_session_timeout_scope", + "logout_delegation_urls", + "logout_mode", + "redirect_to_requested_url", + ] module = UTMModule( argument_spec=dict( - name=dict(type='str', required=True), - aaa=dict(type='list', elements='str', required=True), - basic_prompt=dict(type='str', required=True), - backend_mode=dict(type='str', default="None", choices=['Basic', 'None']), - backend_strip_basic_auth=dict(type='bool', default=True), - backend_user_prefix=dict(type='str', default=""), - backend_user_suffix=dict(type='str', default=""), - comment=dict(type='str', default=""), - frontend_cookie=dict(type='str'), - frontend_cookie_secret=dict(type='str', no_log=True), - frontend_form=dict(type='str'), - frontend_form_template=dict(type='str', default=""), - frontend_login=dict(type='str'), - frontend_logout=dict(type='str'), - frontend_mode=dict(type='str', default="Basic", choices=['Basic', 'Form']), - frontend_realm=dict(type='str'), - 
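Aside on the utm_* hunks above and below: they all share one shape, an endpoint path, a key_to_check_for_changes list, and a UTMModule argument spec, with the actual REST logic living in the collection's module_utils (not part of this diff). A hedged illustration of the change detection that list implies, comparing only the listed keys between an existing object and the desired params; the sample data and the comparison itself are invented for illustration, not the collection's code:

key_to_check_for_changes = ["comment", "hostname", "interface"]

existing = {"name": "dns1", "comment": "", "hostname": "a.example.org", "interface": ""}
desired = {"name": "dns1", "comment": "", "hostname": "b.example.org", "interface": ""}

# keys outside the list (like the identifying "name") never count as drift
changed = any(existing.get(k) != desired.get(k) for k in key_to_check_for_changes)
print(changed)  # True: hostname differs, so the object would be updated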
frontend_session_allow_persistency=dict(type='bool', default=False), - frontend_session_lifetime=dict(type='int', required=True), - frontend_session_lifetime_limited=dict(type='bool', default=True), - frontend_session_lifetime_scope=dict(type='str', default="hours", choices=['days', 'hours', 'minutes']), - frontend_session_timeout=dict(type='int', required=True), - frontend_session_timeout_enabled=dict(type='bool', default=True), - frontend_session_timeout_scope=dict(type='str', default="minutes", choices=['days', 'hours', 'minutes']), - logout_delegation_urls=dict(type='list', elements='str', default=[]), - logout_mode=dict(type='str', default="None", choices=['None', 'Delegation']), - redirect_to_requested_url=dict(type='bool', default=False) + name=dict(type="str", required=True), + aaa=dict(type="list", elements="str", required=True), + basic_prompt=dict(type="str", required=True), + backend_mode=dict(type="str", default="None", choices=["Basic", "None"]), + backend_strip_basic_auth=dict(type="bool", default=True), + backend_user_prefix=dict(type="str", default=""), + backend_user_suffix=dict(type="str", default=""), + comment=dict(type="str", default=""), + frontend_cookie=dict(type="str"), + frontend_cookie_secret=dict(type="str", no_log=True), + frontend_form=dict(type="str"), + frontend_form_template=dict(type="str", default=""), + frontend_login=dict(type="str"), + frontend_logout=dict(type="str"), + frontend_mode=dict(type="str", default="Basic", choices=["Basic", "Form"]), + frontend_realm=dict(type="str"), + frontend_session_allow_persistency=dict(type="bool", default=False), + frontend_session_lifetime=dict(type="int", required=True), + frontend_session_lifetime_limited=dict(type="bool", default=True), + frontend_session_lifetime_scope=dict(type="str", default="hours", choices=["days", "hours", "minutes"]), + frontend_session_timeout=dict(type="int", required=True), + frontend_session_timeout_enabled=dict(type="bool", default=True), + frontend_session_timeout_scope=dict(type="str", default="minutes", choices=["days", "hours", "minutes"]), + logout_delegation_urls=dict(type="list", elements="str", default=[]), + logout_mode=dict(type="str", default="None", choices=["None", "Delegation"]), + redirect_to_requested_url=dict(type="bool", default=False), ) ) try: @@ -345,5 +363,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/utm_proxy_exception.py b/plugins/modules/utm_proxy_exception.py index 322d07489d9..9d4c80a1337 100644 --- a/plugins/modules/utm_proxy_exception.py +++ b/plugins/modules/utm_proxy_exception.py @@ -212,26 +212,40 @@ def main(): endpoint = "reverse_proxy/exception" - key_to_check_for_changes = ["op", "path", "skip_custom_threats_filters", "skip_threats_filter_categories", "skipav", - "comment", "skipbadclients", "skipcookie", "skipform", "status", "skipform_missingtoken", - "skiphtmlrewrite", "skiptft", "skipurl", "source"] + key_to_check_for_changes = [ + "op", + "path", + "skip_custom_threats_filters", + "skip_threats_filter_categories", + "skipav", + "comment", + "skipbadclients", + "skipcookie", + "skipform", + "status", + "skipform_missingtoken", + "skiphtmlrewrite", + "skiptft", + "skipurl", + "source", + ] module = UTMModule( argument_spec=dict( - name=dict(type='str', required=True), - op=dict(type='str', default='AND', choices=['AND', 'OR']), - path=dict(type='list', elements='str', default=[]), - skip_custom_threats_filters=dict(type='list', elements='str', 
default=[]), - skip_threats_filter_categories=dict(type='list', elements='str', default=[]), - skipav=dict(type='bool', default=False), - skipbadclients=dict(type='bool', default=False), - skipcookie=dict(type='bool', default=False), - skipform=dict(type='bool', default=False), - skipform_missingtoken=dict(type='bool', default=False), - skiphtmlrewrite=dict(type='bool', default=False), - skiptft=dict(type='bool', default=False), - skipurl=dict(type='bool', default=False), - source=dict(type='list', elements='str', default=[]), - status=dict(type='bool', default=True), + name=dict(type="str", required=True), + op=dict(type="str", default="AND", choices=["AND", "OR"]), + path=dict(type="list", elements="str", default=[]), + skip_custom_threats_filters=dict(type="list", elements="str", default=[]), + skip_threats_filter_categories=dict(type="list", elements="str", default=[]), + skipav=dict(type="bool", default=False), + skipbadclients=dict(type="bool", default=False), + skipcookie=dict(type="bool", default=False), + skipform=dict(type="bool", default=False), + skipform_missingtoken=dict(type="bool", default=False), + skiphtmlrewrite=dict(type="bool", default=False), + skiptft=dict(type="bool", default=False), + skipurl=dict(type="bool", default=False), + source=dict(type="list", elements="str", default=[]), + status=dict(type="bool", default=True), ) ) try: @@ -240,5 +254,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/utm_proxy_frontend.py b/plugins/modules/utm_proxy_frontend.py index 2295caa8394..40d3184575a 100644 --- a/plugins/modules/utm_proxy_frontend.py +++ b/plugins/modules/utm_proxy_frontend.py @@ -242,33 +242,49 @@ def main(): endpoint = "reverse_proxy/frontend" - key_to_check_for_changes = ["add_content_type_header", "address", "allowed_networks", "certificate", - "comment", "disable_compression", "domain", "exceptions", "htmlrewrite", - "htmlrewrite_cookies", "implicitredirect", "lbmethod", "locations", - "port", "preservehost", "profile", "status", "type", "xheaders"] + key_to_check_for_changes = [ + "add_content_type_header", + "address", + "allowed_networks", + "certificate", + "comment", + "disable_compression", + "domain", + "exceptions", + "htmlrewrite", + "htmlrewrite_cookies", + "implicitredirect", + "lbmethod", + "locations", + "port", + "preservehost", + "profile", + "status", + "type", + "xheaders", + ] module = UTMModule( argument_spec=dict( - name=dict(type='str', required=True), - add_content_type_header=dict(type='bool', default=False), - address=dict(type='str', default="REF_DefaultInternalAddress"), - allowed_networks=dict(type='list', elements='str', default=["REF_NetworkAny"]), - certificate=dict(type='str', default=""), - comment=dict(type='str', default=""), - disable_compression=dict(type='bool', default=False), - domain=dict(type='list', elements='str'), - exceptions=dict(type='list', elements='str', default=[]), - htmlrewrite=dict(type='bool', default=False), - htmlrewrite_cookies=dict(type='bool', default=False), - implicitredirect=dict(type='bool', default=False), - lbmethod=dict(type='str', default="bybusyness", - choices=['bybusyness', 'bytraffic', 'byrequests', '']), - locations=dict(type='list', elements='str', default=[]), - port=dict(type='int', default=80), - preservehost=dict(type='bool', default=False), - profile=dict(type='str', default=""), - status=dict(type='bool', default=True), - type=dict(type='str', default="http", choices=['http', 'https']), 
- xheaders=dict(type='bool', default=False), + name=dict(type="str", required=True), + add_content_type_header=dict(type="bool", default=False), + address=dict(type="str", default="REF_DefaultInternalAddress"), + allowed_networks=dict(type="list", elements="str", default=["REF_NetworkAny"]), + certificate=dict(type="str", default=""), + comment=dict(type="str", default=""), + disable_compression=dict(type="bool", default=False), + domain=dict(type="list", elements="str"), + exceptions=dict(type="list", elements="str", default=[]), + htmlrewrite=dict(type="bool", default=False), + htmlrewrite_cookies=dict(type="bool", default=False), + implicitredirect=dict(type="bool", default=False), + lbmethod=dict(type="str", default="bybusyness", choices=["bybusyness", "bytraffic", "byrequests", ""]), + locations=dict(type="list", elements="str", default=[]), + port=dict(type="int", default=80), + preservehost=dict(type="bool", default=False), + profile=dict(type="str", default=""), + status=dict(type="bool", default=True), + type=dict(type="str", default="http", choices=["http", "https"]), + xheaders=dict(type="bool", default=False), ) ) try: @@ -277,5 +293,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/utm_proxy_frontend_info.py b/plugins/modules/utm_proxy_frontend_info.py index 6ad6e57c032..b8bd0da982f 100644 --- a/plugins/modules/utm_proxy_frontend_info.py +++ b/plugins/modules/utm_proxy_frontend_info.py @@ -131,7 +131,7 @@ def main(): key_to_check_for_changes = [] module = UTMModule( argument_spec=dict( - name=dict(type='str', required=True), + name=dict(type="str", required=True), ), supports_check_mode=True, ) @@ -141,5 +141,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/utm_proxy_location.py b/plugins/modules/utm_proxy_location.py index 9be88891f24..d6b73fd7fbc 100644 --- a/plugins/modules/utm_proxy_location.py +++ b/plugins/modules/utm_proxy_location.py @@ -190,25 +190,37 @@ def main(): endpoint = "reverse_proxy/location" - key_to_check_for_changes = ["access_control", "allowed_networks", "auth_profile", "backend", "be_path", "comment", - "denied_networks", "hot_standby", "path", "status", "stickysession_id", - "stickysession_status", "websocket_passthrough"] + key_to_check_for_changes = [ + "access_control", + "allowed_networks", + "auth_profile", + "backend", + "be_path", + "comment", + "denied_networks", + "hot_standby", + "path", + "status", + "stickysession_id", + "stickysession_status", + "websocket_passthrough", + ] module = UTMModule( argument_spec=dict( - name=dict(type='str', required=True), - access_control=dict(type='str', default="0", choices=['0', '1']), - allowed_networks=dict(type='list', elements='str', default=['REF_NetworkAny']), - auth_profile=dict(type='str', default=""), - backend=dict(type='list', elements='str', default=[]), - be_path=dict(type='str', default=""), - comment=dict(type='str', default=""), - denied_networks=dict(type='list', elements='str', default=[]), - hot_standby=dict(type='bool', default=False), - path=dict(type='str', default="/"), - status=dict(type='bool', default=True), - stickysession_id=dict(type='str', default='ROUTEID'), - stickysession_status=dict(type='bool', default=False), - websocket_passthrough=dict(type='bool', default=False), + name=dict(type="str", required=True), + access_control=dict(type="str", default="0", choices=["0", "1"]), + 
allowed_networks=dict(type="list", elements="str", default=["REF_NetworkAny"]), + auth_profile=dict(type="str", default=""), + backend=dict(type="list", elements="str", default=[]), + be_path=dict(type="str", default=""), + comment=dict(type="str", default=""), + denied_networks=dict(type="list", elements="str", default=[]), + hot_standby=dict(type="bool", default=False), + path=dict(type="str", default="/"), + status=dict(type="bool", default=True), + stickysession_id=dict(type="str", default="ROUTEID"), + stickysession_status=dict(type="bool", default=False), + websocket_passthrough=dict(type="bool", default=False), ) ) try: @@ -217,5 +229,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/utm_proxy_location_info.py b/plugins/modules/utm_proxy_location_info.py index 5df70d32c45..5ea2267cb14 100644 --- a/plugins/modules/utm_proxy_location_info.py +++ b/plugins/modules/utm_proxy_location_info.py @@ -112,7 +112,7 @@ def main(): key_to_check_for_changes = [] module = UTMModule( argument_spec=dict( - name=dict(type='str', required=True), + name=dict(type="str", required=True), ), supports_check_mode=True, ) @@ -122,5 +122,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vdo.py b/plugins/modules/vdo.py index b651e052482..5c1e4027ceb 100644 --- a/plugins/modules/vdo.py +++ b/plugins/modules/vdo.py @@ -227,6 +227,7 @@ YAML_IMP_ERR = None try: import yaml + HAS_YAML = True except ImportError: YAML_IMP_ERR = traceback.format_exc() @@ -260,7 +261,7 @@ def inventory_vdos(module, vdocmd): if vdostatusyaml is None: return vdolist - vdoyamls = vdostatusyaml['VDOs'] + vdoyamls = vdostatusyaml["VDOs"] if vdoyamls is not None: vdolist = list(vdoyamls.keys()) @@ -270,7 +271,7 @@ def inventory_vdos(module, vdocmd): def list_running_vdos(module, vdocmd): rc, vdolistout, err = module.run_command([vdocmd, "list"]) - runningvdolist = [_f for _f in vdolistout.split('\n') if _f] + runningvdolist = [_f for _f in vdolistout.split("\n") if _f] return runningvdolist @@ -313,31 +314,31 @@ def deactivate_vdo(module, vdoname, vdocmd): def add_vdooptions(params): options = [] - if params.get('logicalsize') is not None: + if params.get("logicalsize") is not None: options.append(f"--vdoLogicalSize={params['logicalsize']}") - if params.get('blockmapcachesize') is not None: + if params.get("blockmapcachesize") is not None: options.append(f"--blockMapCacheSize={params['blockmapcachesize']}") - if params.get('readcache') == 'enabled': + if params.get("readcache") == "enabled": options.append("--readCache=enabled") - if params.get('readcachesize') is not None: + if params.get("readcachesize") is not None: options.append(f"--readCacheSize={params['readcachesize']}") - if params.get('slabsize') is not None: + if params.get("slabsize") is not None: options.append(f"--vdoSlabSize={params['slabsize']}") - if params.get('emulate512'): + if params.get("emulate512"): options.append("--emulate512=enabled") - if params.get('indexmem') is not None: + if params.get("indexmem") is not None: options.append(f"--indexMem={params['indexmem']}") - if params.get('indexmode') == 'sparse': + if params.get("indexmode") == "sparse": options.append("--sparseIndex=enabled") - if params.get('force'): + if params.get("force"): options.append("--force") # Entering an invalid thread config results in a cryptic @@ -346,26 +347,25 @@ def add_vdooptions(params): # output a 
more helpful message, but one would have to log # onto that system to read the error. For now, heed the thread # limit warnings in the DOCUMENTATION section above. - if params.get('ackthreads') is not None: + if params.get("ackthreads") is not None: options.append(f"--vdoAckThreads={params['ackthreads']}") - if params.get('biothreads') is not None: + if params.get("biothreads") is not None: options.append(f"--vdoBioThreads={params['biothreads']}") - if params.get('cputhreads') is not None: + if params.get("cputhreads") is not None: options.append(f"--vdoCpuThreads={params['cputhreads']}") - if params.get('logicalthreads') is not None: + if params.get("logicalthreads") is not None: options.append(f"--vdoLogicalThreads={params['logicalthreads']}") - if params.get('physicalthreads') is not None: + if params.get("physicalthreads") is not None: options.append(f"--vdoPhysicalThreads={params['physicalthreads']}") return options def run_module(): - # Define the available arguments/parameters that a user can pass to # the module. # Defaults for VDO parameters are None, in order to facilitate @@ -373,29 +373,29 @@ def run_module(): # Creation param defaults are determined by the creation section. module_args = dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), - activated=dict(type='bool'), - running=dict(type='bool'), - growphysical=dict(type='bool', default=False), - device=dict(type='str'), - logicalsize=dict(type='str'), - deduplication=dict(type='str', choices=['disabled', 'enabled']), - compression=dict(type='str', choices=['disabled', 'enabled']), - blockmapcachesize=dict(type='str'), - readcache=dict(type='str', choices=['disabled', 'enabled']), - readcachesize=dict(type='str'), - emulate512=dict(type='bool', default=False), - slabsize=dict(type='str'), - writepolicy=dict(type='str', choices=['async', 'auto', 'sync']), - indexmem=dict(type='str'), - indexmode=dict(type='str', choices=['dense', 'sparse']), - ackthreads=dict(type='str'), - biothreads=dict(type='str'), - cputhreads=dict(type='str'), - logicalthreads=dict(type='str'), - physicalthreads=dict(type='str'), - force=dict(type='bool', default=False), + name=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["absent", "present"]), + activated=dict(type="bool"), + running=dict(type="bool"), + growphysical=dict(type="bool", default=False), + device=dict(type="str"), + logicalsize=dict(type="str"), + deduplication=dict(type="str", choices=["disabled", "enabled"]), + compression=dict(type="str", choices=["disabled", "enabled"]), + blockmapcachesize=dict(type="str"), + readcache=dict(type="str", choices=["disabled", "enabled"]), + readcachesize=dict(type="str"), + emulate512=dict(type="bool", default=False), + slabsize=dict(type="str"), + writepolicy=dict(type="str", choices=["async", "auto", "sync"]), + indexmem=dict(type="str"), + indexmode=dict(type="str", choices=["dense", "sparse"]), + ackthreads=dict(type="str"), + biothreads=dict(type="str"), + cputhreads=dict(type="str"), + logicalthreads=dict(type="str"), + physicalthreads=dict(type="str"), + force=dict(type="bool", default=False), ) # Seed the result dictionary in the object. 
There will be an @@ -415,11 +415,11 @@ def run_module(): ) if not HAS_YAML: - module.fail_json(msg=missing_required_lib('PyYAML'), exception=YAML_IMP_ERR) + module.fail_json(msg=missing_required_lib("PyYAML"), exception=YAML_IMP_ERR) vdocmd = module.get_bin_path("vdo", required=True) if not vdocmd: - module.fail_json(msg='VDO is not installed.', **result) + module.fail_json(msg="VDO is not installed.", **result) # Print a pre-run list of VDO volumes in the result object. vdolist = inventory_vdos(module, vdocmd) @@ -428,15 +428,14 @@ def run_module(): # Collect the name of the desired VDO volume, and its state. These will # determine what to do. - desiredvdo = module.params['name'] - state = module.params['state'] + desiredvdo = module.params["name"] + state = module.params["state"] # Create a desired VDO volume that doesn't exist yet. - if (desiredvdo not in vdolist) and (state == 'present'): - device = module.params['device'] + if (desiredvdo not in vdolist) and (state == "present"): + device = module.params["device"] if device is None: - module.fail_json(msg="Creating a VDO volume requires specifying " - "a 'device' in the playbook.") + module.fail_json(msg="Creating a VDO volume requires specifying a 'device' in the playbook.") # Create a dictionary of the options from the AnsibleModule # parameters, compile the vdo command options, and run "vdo create" @@ -446,22 +445,23 @@ def run_module(): # assume default values. vdocmdoptions = add_vdooptions(module.params) rc, out, err = module.run_command( - [vdocmd, "create", f"--name={desiredvdo}", f"--device={device}"] + vdocmdoptions) + [vdocmd, "create", f"--name={desiredvdo}", f"--device={device}"] + vdocmdoptions + ) if rc == 0: - result['changed'] = True + result["changed"] = True else: module.fail_json(msg=f"Creating VDO {desiredvdo} failed.", rc=rc, err=err) - if module.params['compression'] == 'disabled': + if module.params["compression"] == "disabled": rc, out, err = module.run_command([vdocmd, "disableCompression", f"--name={desiredvdo}"]) - if module.params['deduplication'] == 'disabled': + if module.params["deduplication"] == "disabled": rc, out, err = module.run_command([vdocmd, "disableDeduplication", f"--name={desiredvdo}"]) - if module.params['activated'] is False: + if module.params["activated"] is False: deactivate_vdo(module, desiredvdo, vdocmd) - if module.params['running'] is False: + if module.params["running"] is False: stop_vdo(module, desiredvdo, vdocmd) # Print a post-run list of VDO volumes in the result object. @@ -470,47 +470,49 @@ def run_module(): module.exit_json(**result) # Modify the current parameters of a VDO that exists. - if desiredvdo in vdolist and state == 'present': + if desiredvdo in vdolist and state == "present": rc, vdostatusoutput, err = module.run_command([vdocmd, "status"]) vdostatusyaml = yaml.safe_load(vdostatusoutput) # An empty dictionary to contain dictionaries of VDO statistics processedvdos = {} - vdoyamls = vdostatusyaml['VDOs'] + vdoyamls = vdostatusyaml["VDOs"] if vdoyamls is not None: processedvdos = vdoyamls # The 'vdo status' keys that are currently modifiable. 
- statusparamkeys = ['Acknowledgement threads', - 'Bio submission threads', - 'Block map cache size', - 'CPU-work threads', - 'Logical threads', - 'Physical threads', - 'Read cache', - 'Read cache size', - 'Configured write policy', - 'Compression', - 'Deduplication'] + statusparamkeys = [ + "Acknowledgement threads", + "Bio submission threads", + "Block map cache size", + "CPU-work threads", + "Logical threads", + "Physical threads", + "Read cache", + "Read cache size", + "Configured write policy", + "Compression", + "Deduplication", + ] # A key translation table from 'vdo status' output to Ansible # module parameters. This covers all of the 'vdo status' # parameter keys that could be modified with the 'vdo' # command. vdokeytrans = { - 'Logical size': 'logicalsize', - 'Compression': 'compression', - 'Deduplication': 'deduplication', - 'Block map cache size': 'blockmapcachesize', - 'Read cache': 'readcache', - 'Read cache size': 'readcachesize', - 'Configured write policy': 'writepolicy', - 'Acknowledgement threads': 'ackthreads', - 'Bio submission threads': 'biothreads', - 'CPU-work threads': 'cputhreads', - 'Logical threads': 'logicalthreads', - 'Physical threads': 'physicalthreads' + "Logical size": "logicalsize", + "Compression": "compression", + "Deduplication": "deduplication", + "Block map cache size": "blockmapcachesize", + "Read cache": "readcache", + "Read cache size": "readcachesize", + "Configured write policy": "writepolicy", + "Acknowledgement threads": "ackthreads", + "Bio submission threads": "biothreads", + "CPU-work threads": "cputhreads", + "Logical threads": "logicalthreads", + "Physical threads": "physicalthreads", } # Build a dictionary of the current VDO status parameters, with @@ -549,46 +551,50 @@ def run_module(): if vdocmdoptions: rc, out, err = module.run_command([vdocmd, "modify", f"--name={desiredvdo}"] + vdocmdoptions) if rc == 0: - result['changed'] = True + result["changed"] = True else: module.fail_json(msg=f"Modifying VDO {desiredvdo} failed.", rc=rc, err=err) - if 'deduplication' in diffparams.keys(): - dedupemod = diffparams['deduplication'] - dedupeparam = "disableDeduplication" if dedupemod == 'disabled' else "enableDeduplication" + if "deduplication" in diffparams.keys(): + dedupemod = diffparams["deduplication"] + dedupeparam = "disableDeduplication" if dedupemod == "disabled" else "enableDeduplication" rc, out, err = module.run_command([vdocmd, dedupeparam, f"--name={desiredvdo}"]) if rc == 0: - result['changed'] = True + result["changed"] = True else: module.fail_json(msg=f"Changing deduplication on VDO volume {desiredvdo} failed.", rc=rc, err=err) - if 'compression' in diffparams.keys(): - compressmod = diffparams['compression'] - compressparam = "disableCompression" if compressmod == 'disabled' else "enableCompression" + if "compression" in diffparams.keys(): + compressmod = diffparams["compression"] + compressparam = "disableCompression" if compressmod == "disabled" else "enableCompression" rc, out, err = module.run_command([vdocmd, compressparam, f"--name={desiredvdo}"]) if rc == 0: - result['changed'] = True + result["changed"] = True else: module.fail_json(msg=f"Changing compression on VDO volume {desiredvdo} failed.", rc=rc, err=err) - if 'writepolicy' in diffparams.keys(): - writepolmod = diffparams['writepolicy'] - rc, out, err = module.run_command([ - vdocmd, - "changeWritePolicy", - f"--name={desiredvdo}", - f"--writePolicy={writepolmod}", - ]) + if "writepolicy" in diffparams.keys(): + writepolmod = diffparams["writepolicy"] + rc, 
out, err = module.run_command( + [ + vdocmd, + "changeWritePolicy", + f"--name={desiredvdo}", + f"--writePolicy={writepolmod}", + ] + ) if rc == 0: - result['changed'] = True + result["changed"] = True else: module.fail_json(msg=f"Changing write policy on VDO volume {desiredvdo} failed.", rc=rc, err=err) # Process the size parameters, to determine of a growPhysical or # growLogical operation needs to occur. - sizeparamkeys = ['Logical size', ] + sizeparamkeys = [ + "Logical size", + ] currentsizeparams = {} sizetrans = {} @@ -605,13 +611,13 @@ def run_module(): if module.params[key] is not None and str(sizeparams[key]) != module.params[key]: diffsizeparams[key] = module.params[key] - if module.params['growphysical']: - physdevice = module.params['device'] + if module.params["growphysical"]: + physdevice = module.params["device"] rc, devsectors, err = module.run_command([module.get_bin_path("blockdev"), "--getsz", physdevice]) - devblocks = (int(devsectors) / 8) + devblocks = int(devsectors) / 8 dmvdoname = f"/dev/mapper/{desiredvdo}" - currentvdostats = processedvdos[desiredvdo]['VDO statistics'][dmvdoname] - currentphysblocks = currentvdostats['physical blocks'] + currentvdostats = processedvdos[desiredvdo]["VDO statistics"][dmvdoname] + currentphysblocks = currentvdostats["physical blocks"] # Set a growPhysical threshold to grow only when there is # guaranteed to be more than 2 slabs worth of unallocated @@ -621,29 +627,31 @@ def run_module(): growthresh = devblocks + 16777216 if currentphysblocks > growthresh: - result['changed'] = True + result["changed"] = True rc, out, err = module.run_command([vdocmd, "growPhysical", f"--name={desiredvdo}"]) - if 'logicalsize' in diffsizeparams.keys(): - result['changed'] = True - rc, out, err = module.run_command([vdocmd, "growLogical", f"--name={desiredvdo}", f"--vdoLogicalSize={diffsizeparams['logicalsize']}"]) + if "logicalsize" in diffsizeparams.keys(): + result["changed"] = True + rc, out, err = module.run_command( + [vdocmd, "growLogical", f"--name={desiredvdo}", f"--vdoLogicalSize={diffsizeparams['logicalsize']}"] + ) - vdoactivatestatus = processedvdos[desiredvdo]['Activate'] + vdoactivatestatus = processedvdos[desiredvdo]["Activate"] - if module.params['activated'] is False and vdoactivatestatus == 'enabled': + if module.params["activated"] is False and vdoactivatestatus == "enabled": deactivate_vdo(module, desiredvdo, vdocmd) - if not result['changed']: - result['changed'] = True + if not result["changed"]: + result["changed"] = True - if module.params['activated'] and vdoactivatestatus == 'disabled': + if module.params["activated"] and vdoactivatestatus == "disabled": activate_vdo(module, desiredvdo, vdocmd) - if not result['changed']: - result['changed'] = True + if not result["changed"]: + result["changed"] = True - if module.params['running'] is False and desiredvdo in runningvdolist: + if module.params["running"] is False and desiredvdo in runningvdolist: stop_vdo(module, desiredvdo, vdocmd) - if not result['changed']: - result['changed'] = True + if not result["changed"]: + result["changed"] = True # Note that a disabled VDO volume cannot be started by the # 'vdo start' command, by design. To accurately track changed @@ -652,10 +660,14 @@ def run_module(): # the activate_vdo() operation succeeded, as 'vdoactivatestatus' # will have the activated status prior to the activate_vdo() # call. 
- if (vdoactivatestatus == 'enabled' or module.params['activated']) and module.params['running'] and desiredvdo not in runningvdolist: + if ( + (vdoactivatestatus == "enabled" or module.params["activated"]) + and module.params["running"] + and desiredvdo not in runningvdolist + ): start_vdo(module, desiredvdo, vdocmd) - if not result['changed']: - result['changed'] = True + if not result["changed"]: + result["changed"] = True # Print a post-run list of VDO volumes in the result object. vdolist = inventory_vdos(module, vdocmd) @@ -665,10 +677,10 @@ def run_module(): module.exit_json(**result) # Remove a desired VDO that currently exists. - if desiredvdo in vdolist and state == 'absent': + if desiredvdo in vdolist and state == "absent": rc, out, err = module.run_command([vdocmd, "remove", f"--name={desiredvdo}"]) if rc == 0: - result['changed'] = True + result["changed"] = True else: module.fail_json(msg=f"Removing VDO {desiredvdo} failed.", rc=rc, err=err) @@ -691,5 +703,5 @@ def main(): run_module() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vertica_configuration.py b/plugins/modules/vertica_configuration.py index a5ebf85a195..35b1f719a06 100644 --- a/plugins/modules/vertica_configuration.py +++ b/plugins/modules/vertica_configuration.py @@ -89,32 +89,38 @@ class NotSupportedError(Exception): class CannotDropError(Exception): pass + # module specific functions -def get_configuration_facts(cursor, parameter_name=''): +def get_configuration_facts(cursor, parameter_name=""): facts = {} - cursor.execute(""" + cursor.execute( + """ select c.parameter_name, c.current_value, c.default_value from configuration_parameters c where c.node_name = 'ALL' and (? = '' or c.parameter_name ilike ?) - """, parameter_name, parameter_name) + """, + parameter_name, + parameter_name, + ) while True: rows = cursor.fetchmany(100) if not rows: break for row in rows: facts[row.parameter_name.lower()] = { - 'parameter_name': row.parameter_name, - 'current_value': row.current_value, - 'default_value': row.default_value} + "parameter_name": row.parameter_name, + "current_value": row.current_value, + "default_value": row.default_value, + } return facts def check(configuration_facts, parameter_name, current_value): parameter_key = parameter_name.lower() - if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): + if current_value and current_value.lower() != configuration_facts[parameter_key]["current_value"].lower(): return False return True @@ -122,37 +128,39 @@ def check(configuration_facts, parameter_name, current_value): def present(configuration_facts, cursor, parameter_name, current_value): parameter_key = parameter_name.lower() changed = False - if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): + if current_value and current_value.lower() != configuration_facts[parameter_key]["current_value"].lower(): cursor.execute(f"select set_config_parameter('{parameter_name}', '{current_value}')") changed = True if changed: configuration_facts.update(get_configuration_facts(cursor, parameter_name)) return changed + # module logic def main(): - module = AnsibleModule( argument_spec=dict( - parameter=dict(required=True, aliases=['name']), + parameter=dict(required=True, aliases=["name"]), value=dict(), db=dict(), - cluster=dict(default='localhost'), - port=dict(default='5433'), - login_user=dict(default='dbadmin'), + cluster=dict(default="localhost"), + 
port=dict(default="5433"), + login_user=dict(default="dbadmin"), login_password=dict(no_log=True), - ), supports_check_mode=True) + ), + supports_check_mode=True, + ) if not pyodbc_found: - module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) + module.fail_json(msg=missing_required_lib("pyodbc"), exception=PYODBC_IMP_ERR) - parameter_name = module.params['parameter'] - current_value = module.params['value'] - db = '' - if module.params['db']: - db = module.params['db'] + parameter_name = module.params["parameter"] + current_value = module.params["value"] + db = "" + if module.params["db"]: + db = module.params["db"] changed = False @@ -169,8 +177,7 @@ def main(): db_conn = pyodbc.connect(dsn, autocommit=True) cursor = db_conn.cursor() except Exception as e: - module.fail_json(msg=f"Unable to connect to database: {e}.", - exception=traceback.format_exc()) + module.fail_json(msg=f"Unable to connect to database: {e}.", exception=traceback.format_exc()) try: configuration_facts = get_configuration_facts(cursor) @@ -182,17 +189,19 @@ def main(): except pyodbc.Error as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) except NotSupportedError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts}) + module.fail_json(msg=to_native(e), ansible_facts={"vertica_configuration": configuration_facts}) except CannotDropError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts}) + module.fail_json(msg=to_native(e), ansible_facts={"vertica_configuration": configuration_facts}) except SystemExit: # avoid catching this on python 2.4 raise except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts}) + module.exit_json( + changed=changed, parameter=parameter_name, ansible_facts={"vertica_configuration": configuration_facts} + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vertica_info.py b/plugins/modules/vertica_info.py index 9cb7f16facc..cfc0b93e477 100644 --- a/plugins/modules/vertica_info.py +++ b/plugins/modules/vertica_info.py @@ -76,29 +76,36 @@ class NotSupportedError(Exception): pass + # module specific functions -def get_schema_facts(cursor, schema=''): +def get_schema_facts(cursor, schema=""): facts = {} - cursor.execute(""" + cursor.execute( + """ select schema_name, schema_owner, create_time from schemata where not is_system_schema and schema_name not in ('public') and (? = '' or schema_name ilike ?) - """, schema, schema) + """, + schema, + schema, + ) while True: rows = cursor.fetchmany(100) if not rows: break for row in rows: facts[row.schema_name.lower()] = { - 'name': row.schema_name, - 'owner': row.schema_owner, - 'create_time': str(row.create_time), - 'usage_roles': [], - 'create_roles': []} - cursor.execute(""" + "name": row.schema_name, + "owner": row.schema_owner, + "create_time": str(row.create_time), + "usage_roles": [], + "create_roles": [], + } + cursor.execute( + """ select g.object_name as schema_name, r.name as role_name, lower(g.privileges_description) privileges_description from roles r join grants g @@ -106,23 +113,27 @@ def get_schema_facts(cursor, schema=''): and g.privileges_description like '%USAGE%' and g.grantee not in ('public', 'dbadmin') and (? = '' or g.object_name ilike ?) 
- """, schema, schema) + """, + schema, + schema, + ) while True: rows = cursor.fetchmany(100) if not rows: break for row in rows: schema_key = row.schema_name.lower() - if 'create' in row.privileges_description: - facts[schema_key]['create_roles'].append(row.role_name) + if "create" in row.privileges_description: + facts[schema_key]["create_roles"].append(row.role_name) else: - facts[schema_key]['usage_roles'].append(row.role_name) + facts[schema_key]["usage_roles"].append(row.role_name) return facts -def get_user_facts(cursor, user=''): +def get_user_facts(cursor, user=""): facts = {} - cursor.execute(""" + cursor.execute( + """ select u.user_name, u.is_locked, u.lock_time, p.password, p.acctexpired as is_expired, u.profile_name, u.resource_pool, @@ -130,7 +141,10 @@ def get_user_facts(cursor, user=''): from users u join password_auditor p on p.user_id = u.user_id where not u.is_super_user and (? = '' or u.user_name ilike ?) - """, user, user) + """, + user, + user, + ) while True: rows = cursor.fetchmany(100) if not rows: @@ -138,65 +152,73 @@ def get_user_facts(cursor, user=''): for row in rows: user_key = row.user_name.lower() facts[user_key] = { - 'name': row.user_name, - 'locked': str(row.is_locked), - 'password': row.password, - 'expired': str(row.is_expired), - 'profile': row.profile_name, - 'resource_pool': row.resource_pool, - 'roles': [], - 'default_roles': []} + "name": row.user_name, + "locked": str(row.is_locked), + "password": row.password, + "expired": str(row.is_expired), + "profile": row.profile_name, + "resource_pool": row.resource_pool, + "roles": [], + "default_roles": [], + } if row.is_locked: - facts[user_key]['locked_time'] = str(row.lock_time) + facts[user_key]["locked_time"] = str(row.lock_time) if row.all_roles: - facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') + facts[user_key]["roles"] = row.all_roles.replace(" ", "").split(",") if row.default_roles: - facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') + facts[user_key]["default_roles"] = row.default_roles.replace(" ", "").split(",") return facts -def get_role_facts(cursor, role=''): +def get_role_facts(cursor, role=""): facts = {} - cursor.execute(""" + cursor.execute( + """ select r.name, r.assigned_roles from roles r where (? = '' or r.name ilike ?) - """, role, role) + """, + role, + role, + ) while True: rows = cursor.fetchmany(100) if not rows: break for row in rows: role_key = row.name.lower() - facts[role_key] = { - 'name': row.name, - 'assigned_roles': []} + facts[role_key] = {"name": row.name, "assigned_roles": []} if row.assigned_roles: - facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') + facts[role_key]["assigned_roles"] = row.assigned_roles.replace(" ", "").split(",") return facts -def get_configuration_facts(cursor, parameter=''): +def get_configuration_facts(cursor, parameter=""): facts = {} - cursor.execute(""" + cursor.execute( + """ select c.parameter_name, c.current_value, c.default_value from configuration_parameters c where c.node_name = 'ALL' and (? = '' or c.parameter_name ilike ?) 
- """, parameter, parameter) + """, + parameter, + parameter, + ) while True: rows = cursor.fetchmany(100) if not rows: break for row in rows: facts[row.parameter_name.lower()] = { - 'parameter_name': row.parameter_name, - 'current_value': row.current_value, - 'default_value': row.default_value} + "parameter_name": row.parameter_name, + "current_value": row.current_value, + "default_value": row.default_value, + } return facts -def get_node_facts(cursor, schema=''): +def get_node_facts(cursor, schema=""): facts = {} cursor.execute(""" select node_name, node_address, export_address, node_state, node_type, @@ -209,33 +231,36 @@ def get_node_facts(cursor, schema=''): break for row in rows: facts[row.node_address] = { - 'node_name': row.node_name, - 'export_address': row.export_address, - 'node_state': row.node_state, - 'node_type': row.node_type, - 'catalog_path': row.catalog_path} + "node_name": row.node_name, + "export_address": row.export_address, + "node_state": row.node_state, + "node_type": row.node_type, + "catalog_path": row.catalog_path, + } return facts + # module logic def main(): - module = AnsibleModule( argument_spec=dict( - cluster=dict(default='localhost'), - port=dict(default='5433'), + cluster=dict(default="localhost"), + port=dict(default="5433"), db=dict(), - login_user=dict(default='dbadmin'), + login_user=dict(default="dbadmin"), login_password=dict(no_log=True), - ), supports_check_mode=True) + ), + supports_check_mode=True, + ) if not pyodbc_found: - module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) + module.fail_json(msg=missing_required_lib("pyodbc"), exception=PYODBC_IMP_ERR) - db = '' - if module.params['db']: - db = module.params['db'] + db = "" + if module.params["db"]: + db = module.params["db"] try: dsn = ( @@ -259,12 +284,14 @@ def main(): configuration_facts = get_configuration_facts(cursor) node_facts = get_node_facts(cursor) - module.exit_json(changed=False, - vertica_schemas=schema_facts, - vertica_users=user_facts, - vertica_roles=role_facts, - vertica_configuration=configuration_facts, - vertica_nodes=node_facts) + module.exit_json( + changed=False, + vertica_schemas=schema_facts, + vertica_users=user_facts, + vertica_roles=role_facts, + vertica_configuration=configuration_facts, + vertica_nodes=node_facts, + ) except NotSupportedError as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) except SystemExit: @@ -274,5 +301,5 @@ def main(): module.fail_json(msg=to_native(e), exception=traceback.format_exc()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vertica_role.py b/plugins/modules/vertica_role.py index 7938eed02af..413487e202a 100644 --- a/plugins/modules/vertica_role.py +++ b/plugins/modules/vertica_role.py @@ -100,32 +100,34 @@ class NotSupportedError(Exception): class CannotDropError(Exception): pass + # module specific functions -def get_role_facts(cursor, role=''): +def get_role_facts(cursor, role=""): facts = {} - cursor.execute(""" + cursor.execute( + """ select r.name, r.assigned_roles from roles r where (? = '' or r.name ilike ?) 
- """, role, role) + """, + role, + role, + ) while True: rows = cursor.fetchmany(100) if not rows: break for row in rows: role_key = row.name.lower() - facts[role_key] = { - 'name': row.name, - 'assigned_roles': []} + facts[role_key] = {"name": row.name, "assigned_roles": []} if row.assigned_roles: - facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') + facts[role_key]["assigned_roles"] = row.assigned_roles.replace(" ", "").split(",") return facts -def update_roles(role_facts, cursor, role, - existing, required): +def update_roles(role_facts, cursor, role, existing, required): for assigned_role in set(existing) - set(required): cursor.execute(f"revoke {assigned_role} from {role}") for assigned_role in set(required) - set(existing): @@ -136,7 +138,7 @@ def check(role_facts, role, assigned_roles): role_key = role.lower() if role_key not in role_facts: return False - if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']): + if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]["assigned_roles"]): return False return True @@ -150,9 +152,8 @@ def present(role_facts, cursor, role, assigned_roles): return True else: changed = False - if assigned_roles and (sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles'])): - update_roles(role_facts, cursor, role, - role_facts[role_key]['assigned_roles'], assigned_roles) + if assigned_roles and (sorted(assigned_roles) != sorted(role_facts[role_key]["assigned_roles"])): + update_roles(role_facts, cursor, role, role_facts[role_key]["assigned_roles"], assigned_roles) changed = True if changed: role_facts.update(get_role_facts(cursor, role)) @@ -162,43 +163,44 @@ def present(role_facts, cursor, role, assigned_roles): def absent(role_facts, cursor, role, assigned_roles): role_key = role.lower() if role_key in role_facts: - update_roles(role_facts, cursor, role, - role_facts[role_key]['assigned_roles'], []) + update_roles(role_facts, cursor, role, role_facts[role_key]["assigned_roles"], []) cursor.execute(f"drop role {role_facts[role_key]['name']} cascade") del role_facts[role_key] return True else: return False + # module logic def main(): - module = AnsibleModule( argument_spec=dict( - role=dict(required=True, aliases=['name']), - assigned_roles=dict(aliases=['assigned_role']), - state=dict(default='present', choices=['absent', 'present']), + role=dict(required=True, aliases=["name"]), + assigned_roles=dict(aliases=["assigned_role"]), + state=dict(default="present", choices=["absent", "present"]), db=dict(), - cluster=dict(default='localhost'), - port=dict(default='5433'), - login_user=dict(default='dbadmin'), + cluster=dict(default="localhost"), + port=dict(default="5433"), + login_user=dict(default="dbadmin"), login_password=dict(no_log=True), - ), supports_check_mode=True) + ), + supports_check_mode=True, + ) if not pyodbc_found: - module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) + module.fail_json(msg=missing_required_lib("pyodbc"), exception=PYODBC_IMP_ERR) - role = module.params['role'] + role = module.params["role"] assigned_roles = [] - if module.params['assigned_roles']: - assigned_roles = module.params['assigned_roles'].split(',') + if module.params["assigned_roles"]: + assigned_roles = module.params["assigned_roles"].split(",") assigned_roles = [_f for _f in assigned_roles if _f] - state = module.params['state'] - db = '' - if module.params['db']: - db = module.params['db'] + state = module.params["state"] + db 
= "" + if module.params["db"]: + db = module.params["db"] changed = False @@ -221,28 +223,28 @@ def main(): role_facts = get_role_facts(cursor) if module.check_mode: changed = not check(role_facts, role, assigned_roles) - elif state == 'absent': + elif state == "absent": try: changed = absent(role_facts, cursor, role, assigned_roles) except pyodbc.Error as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - elif state == 'present': + elif state == "present": try: changed = present(role_facts, cursor, role, assigned_roles) except pyodbc.Error as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) except NotSupportedError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts}) + module.fail_json(msg=to_native(e), ansible_facts={"vertica_roles": role_facts}) except CannotDropError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts}) + module.fail_json(msg=to_native(e), ansible_facts={"vertica_roles": role_facts}) except SystemExit: # avoid catching this on python 2.4 raise except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts}) + module.exit_json(changed=changed, role=role, ansible_facts={"vertica_roles": role_facts}) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vertica_schema.py b/plugins/modules/vertica_schema.py index a006000b688..aaecf0c9dce 100644 --- a/plugins/modules/vertica_schema.py +++ b/plugins/modules/vertica_schema.py @@ -116,29 +116,36 @@ class NotSupportedError(Exception): class CannotDropError(Exception): pass + # module specific functions -def get_schema_facts(cursor, schema=''): +def get_schema_facts(cursor, schema=""): facts = {} - cursor.execute(""" + cursor.execute( + """ select schema_name, schema_owner, create_time from schemata where not is_system_schema and schema_name not in ('public', 'TxtIndex') and (? = '' or schema_name ilike ?) - """, schema, schema) + """, + schema, + schema, + ) while True: rows = cursor.fetchmany(100) if not rows: break for row in rows: facts[row.schema_name.lower()] = { - 'name': row.schema_name, - 'owner': row.schema_owner, - 'create_time': str(row.create_time), - 'usage_roles': [], - 'create_roles': []} - cursor.execute(""" + "name": row.schema_name, + "owner": row.schema_owner, + "create_time": str(row.create_time), + "usage_roles": [], + "create_roles": [], + } + cursor.execute( + """ select g.object_name as schema_name, r.name as role_name, lower(g.privileges_description) privileges_description from roles r join grants g @@ -146,23 +153,24 @@ def get_schema_facts(cursor, schema=''): and g.privileges_description like '%USAGE%' and g.grantee not in ('public', 'dbadmin') and (? = '' or g.object_name ilike ?) 
- """, schema, schema) + """, + schema, + schema, + ) while True: rows = cursor.fetchmany(100) if not rows: break for row in rows: schema_key = row.schema_name.lower() - if 'create' in row.privileges_description: - facts[schema_key]['create_roles'].append(row.role_name) + if "create" in row.privileges_description: + facts[schema_key]["create_roles"].append(row.role_name) else: - facts[schema_key]['usage_roles'].append(row.role_name) + facts[schema_key]["usage_roles"].append(row.role_name) return facts -def update_roles(schema_facts, cursor, schema, - existing, required, - create_existing, create_required): +def update_roles(schema_facts, cursor, schema, existing, required, create_existing, create_required): for role in set(existing + create_existing) - set(required + create_required): cursor.execute(f"drop role {role} cascade") for role in set(create_existing) - set(create_required): @@ -178,11 +186,11 @@ def check(schema_facts, schema, usage_roles, create_roles, owner): schema_key = schema.lower() if schema_key not in schema_facts: return False - if owner and owner.lower() == schema_facts[schema_key]['owner'].lower(): + if owner and owner.lower() == schema_facts[schema_key]["owner"].lower(): return False - if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']): + if sorted(usage_roles) != sorted(schema_facts[schema_key]["usage_roles"]): return False - if sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']): + if sorted(create_roles) != sorted(schema_facts[schema_key]["create_roles"]): return False return True @@ -193,20 +201,28 @@ def present(schema_facts, cursor, schema, usage_roles, create_roles, owner): query_fragments = [f"create schema {schema}"] if owner: query_fragments.append(f"authorization {owner}") - cursor.execute(' '.join(query_fragments)) + cursor.execute(" ".join(query_fragments)) update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles) schema_facts.update(get_schema_facts(cursor, schema)) return True else: changed = False - if owner and owner.lower() != schema_facts[schema_key]['owner'].lower(): - raise NotSupportedError(f"Changing schema owner is not supported. Current owner: {schema_facts[schema_key]['owner']}.") - if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']) or \ - sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']): - - update_roles(schema_facts, cursor, schema, - schema_facts[schema_key]['usage_roles'], usage_roles, - schema_facts[schema_key]['create_roles'], create_roles) + if owner and owner.lower() != schema_facts[schema_key]["owner"].lower(): + raise NotSupportedError( + f"Changing schema owner is not supported. Current owner: {schema_facts[schema_key]['owner']}." 
+ ) + if sorted(usage_roles) != sorted(schema_facts[schema_key]["usage_roles"]) or sorted(create_roles) != sorted( + schema_facts[schema_key]["create_roles"] + ): + update_roles( + schema_facts, + cursor, + schema, + schema_facts[schema_key]["usage_roles"], + usage_roles, + schema_facts[schema_key]["create_roles"], + create_roles, + ) changed = True if changed: schema_facts.update(get_schema_facts(cursor, schema)) @@ -216,8 +232,15 @@ def present(schema_facts, cursor, schema, usage_roles, create_roles, owner): def absent(schema_facts, cursor, schema, usage_roles, create_roles): schema_key = schema.lower() if schema_key in schema_facts: - update_roles(schema_facts, cursor, schema, - schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], []) + update_roles( + schema_facts, + cursor, + schema, + schema_facts[schema_key]["usage_roles"], + [], + schema_facts[schema_key]["create_roles"], + [], + ) try: cursor.execute(f"drop schema {schema_facts[schema_key]['name']} restrict") except pyodbc.Error: @@ -227,42 +250,44 @@ def absent(schema_facts, cursor, schema, usage_roles, create_roles): else: return False + # module logic def main(): - module = AnsibleModule( argument_spec=dict( - schema=dict(required=True, aliases=['name']), - usage_roles=dict(aliases=['usage_role']), - create_roles=dict(aliases=['create_role']), + schema=dict(required=True, aliases=["name"]), + usage_roles=dict(aliases=["usage_role"]), + create_roles=dict(aliases=["create_role"]), owner=dict(), - state=dict(default='present', choices=['absent', 'present']), + state=dict(default="present", choices=["absent", "present"]), db=dict(), - cluster=dict(default='localhost'), - port=dict(default='5433'), - login_user=dict(default='dbadmin'), + cluster=dict(default="localhost"), + port=dict(default="5433"), + login_user=dict(default="dbadmin"), login_password=dict(no_log=True), - ), supports_check_mode=True) + ), + supports_check_mode=True, + ) if not pyodbc_found: - module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) + module.fail_json(msg=missing_required_lib("pyodbc"), exception=PYODBC_IMP_ERR) - schema = module.params['schema'] + schema = module.params["schema"] usage_roles = [] - if module.params['usage_roles']: - usage_roles = module.params['usage_roles'].split(',') + if module.params["usage_roles"]: + usage_roles = module.params["usage_roles"].split(",") usage_roles = [_f for _f in usage_roles if _f] create_roles = [] - if module.params['create_roles']: - create_roles = module.params['create_roles'].split(',') + if module.params["create_roles"]: + create_roles = module.params["create_roles"].split(",") create_roles = [_f for _f in create_roles if _f] - owner = module.params['owner'] - state = module.params['state'] - db = '' - if module.params['db']: - db = module.params['db'] + owner = module.params["owner"] + state = module.params["state"] + db = "" + if module.params["db"]: + db = module.params["db"] changed = False @@ -285,28 +310,28 @@ def main(): schema_facts = get_schema_facts(cursor) if module.check_mode: changed = not check(schema_facts, schema, usage_roles, create_roles, owner) - elif state == 'absent': + elif state == "absent": try: changed = absent(schema_facts, cursor, schema, usage_roles, create_roles) except pyodbc.Error as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - elif state == 'present': + elif state == "present": try: changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner) except pyodbc.Error as e: 
module.fail_json(msg=to_native(e), exception=traceback.format_exc()) except NotSupportedError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts}) + module.fail_json(msg=to_native(e), ansible_facts={"vertica_schemas": schema_facts}) except CannotDropError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts}) + module.fail_json(msg=to_native(e), ansible_facts={"vertica_schemas": schema_facts}) except SystemExit: # avoid catching this on python 2.4 raise except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts}) + module.exit_json(changed=changed, schema=schema, ansible_facts={"vertica_schemas": schema_facts}) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vertica_user.py b/plugins/modules/vertica_user.py index 5b6bc0cf915..19060e6da6c 100644 --- a/plugins/modules/vertica_user.py +++ b/plugins/modules/vertica_user.py @@ -124,12 +124,14 @@ class NotSupportedError(Exception): class CannotDropError(Exception): pass + # module specific functions -def get_user_facts(cursor, user=''): +def get_user_facts(cursor, user=""): facts = {} - cursor.execute(""" + cursor.execute( + """ select u.user_name, u.is_locked, u.lock_time, p.password, p.acctexpired as is_expired, u.profile_name, u.resource_pool, @@ -137,7 +139,10 @@ def get_user_facts(cursor, user=''): from users u join password_auditor p on p.user_id = u.user_id where not u.is_super_user and (? = '' or u.user_name ilike ?) - """, user, user) + """, + user, + user, + ) while True: rows = cursor.fetchmany(100) if not rows: @@ -145,25 +150,25 @@ def get_user_facts(cursor, user=''): for row in rows: user_key = row.user_name.lower() facts[user_key] = { - 'name': row.user_name, - 'locked': str(row.is_locked), - 'password': row.password, - 'expired': str(row.is_expired), - 'profile': row.profile_name, - 'resource_pool': row.resource_pool, - 'roles': [], - 'default_roles': []} + "name": row.user_name, + "locked": str(row.is_locked), + "password": row.password, + "expired": str(row.is_expired), + "profile": row.profile_name, + "resource_pool": row.resource_pool, + "roles": [], + "default_roles": [], + } if row.is_locked: - facts[user_key]['locked_time'] = str(row.lock_time) + facts[user_key]["locked_time"] = str(row.lock_time) if row.all_roles: - facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') + facts[user_key]["roles"] = row.all_roles.replace(" ", "").split(",") if row.default_roles: - facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') + facts[user_key]["default_roles"] = row.default_roles.replace(" ", "").split(",") return facts -def update_roles(user_facts, cursor, user, - existing_all, existing_default, required): +def update_roles(user_facts, cursor, user, existing_all, existing_default, required): del_roles = list(set(existing_all) - set(required)) if del_roles: cursor.execute(f"revoke {','.join(del_roles)} from {user}") @@ -174,30 +179,34 @@ def update_roles(user_facts, cursor, user, cursor.execute(f"alter user {user} default role {','.join(required)}") -def check(user_facts, user, profile, resource_pool, - locked, password, expired, ldap, roles): +def check(user_facts, user, profile, resource_pool, locked, password, expired, ldap, roles): user_key = user.lower() if user_key not in user_facts: return False - if profile and profile != 
user_facts[user_key]['profile']: + if profile and profile != user_facts[user_key]["profile"]: return False - if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: + if resource_pool and resource_pool != user_facts[user_key]["resource_pool"]: return False - if locked != (user_facts[user_key]['locked'] == 'True'): + if locked != (user_facts[user_key]["locked"] == "True"): return False - if password and password != user_facts[user_key]['password']: + if password and password != user_facts[user_key]["password"]: return False - if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or - ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')): + if ( + expired is not None + and expired != (user_facts[user_key]["expired"] == "True") + or ldap is not None + and ldap != (user_facts[user_key]["expired"] == "True") + ): return False - if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or - sorted(roles) != sorted(user_facts[user_key]['default_roles'])): + if roles and ( + sorted(roles) != sorted(user_facts[user_key]["roles"]) + or sorted(roles) != sorted(user_facts[user_key]["default_roles"]) + ): return False return True -def present(user_facts, cursor, user, profile, resource_pool, - locked, password, expired, ldap, roles): +def present(user_facts, cursor, user, profile, resource_pool, locked, password, expired, ldap, roles): user_key = user.lower() if user_key not in user_facts: query_fragments = [f"create user {user}"] @@ -214,8 +223,8 @@ def present(user_facts, cursor, user, profile, resource_pool, query_fragments.append(f"profile {profile}") if resource_pool: query_fragments.append(f"resource pool {resource_pool}") - cursor.execute(' '.join(query_fragments)) - if resource_pool and resource_pool != 'general': + cursor.execute(" ".join(query_fragments)) + if resource_pool and resource_pool != "general": cursor.execute(f"grant usage on resource pool {resource_pool} to {user}") update_roles(user_facts, cursor, user, [], [], roles) user_facts.update(get_user_facts(cursor, user)) @@ -223,42 +232,45 @@ def present(user_facts, cursor, user, profile, resource_pool, else: changed = False query_fragments = [f"alter user {user}"] - if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'): + if locked is not None and locked != (user_facts[user_key]["locked"] == "True"): if locked: - state = 'lock' + state = "lock" else: - state = 'unlock' + state = "unlock" query_fragments.append(f"account {state}") changed = True - if password and password != user_facts[user_key]['password']: + if password and password != user_facts[user_key]["password"]: query_fragments.append(f"identified by '{password}'") changed = True if ldap: - if ldap != (user_facts[user_key]['expired'] == 'True'): + if ldap != (user_facts[user_key]["expired"] == "True"): query_fragments.append("password expire") changed = True - elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'): + elif expired is not None and expired != (user_facts[user_key]["expired"] == "True"): if expired: query_fragments.append("password expire") changed = True else: raise NotSupportedError("Unexpiring user password is not supported.") - if profile and profile != user_facts[user_key]['profile']: + if profile and profile != user_facts[user_key]["profile"]: query_fragments.append(f"profile {profile}") changed = True - if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: + if resource_pool and resource_pool != 
user_facts[user_key]["resource_pool"]: query_fragments.append(f"resource pool {resource_pool}") - if user_facts[user_key]['resource_pool'] != 'general': + if user_facts[user_key]["resource_pool"] != "general": cursor.execute(f"revoke usage on resource pool {user_facts[user_key]['resource_pool']} from {user}") - if resource_pool != 'general': + if resource_pool != "general": cursor.execute(f"grant usage on resource pool {resource_pool} to {user}") changed = True if changed: - cursor.execute(' '.join(query_fragments)) - if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or - sorted(roles) != sorted(user_facts[user_key]['default_roles'])): - update_roles(user_facts, cursor, user, - user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles) + cursor.execute(" ".join(query_fragments)) + if roles and ( + sorted(roles) != sorted(user_facts[user_key]["roles"]) + or sorted(roles) != sorted(user_facts[user_key]["default_roles"]) + ): + update_roles( + user_facts, cursor, user, user_facts[user_key]["roles"], user_facts[user_key]["default_roles"], roles + ) changed = True if changed: user_facts.update(get_user_facts(cursor, user)) @@ -268,8 +280,7 @@ def present(user_facts, cursor, user, profile, resource_pool, def absent(user_facts, cursor, user, roles): user_key = user.lower() if user_key in user_facts: - update_roles(user_facts, cursor, user, - user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], []) + update_roles(user_facts, cursor, user, user_facts[user_key]["roles"], user_facts[user_key]["default_roles"], []) try: cursor.execute(f"drop user {user_facts[user_key]['name']}") except pyodbc.Error: @@ -279,53 +290,55 @@ def absent(user_facts, cursor, user, roles): else: return False + # module logic def main(): - module = AnsibleModule( argument_spec=dict( - user=dict(required=True, aliases=['name']), + user=dict(required=True, aliases=["name"]), profile=dict(), resource_pool=dict(), password=dict(no_log=True), - expired=dict(type='bool'), - ldap=dict(type='bool'), - roles=dict(aliases=['role']), - state=dict(default='present', choices=['absent', 'present', 'locked']), + expired=dict(type="bool"), + ldap=dict(type="bool"), + roles=dict(aliases=["role"]), + state=dict(default="present", choices=["absent", "present", "locked"]), db=dict(), - cluster=dict(default='localhost'), - port=dict(default='5433'), - login_user=dict(default='dbadmin'), + cluster=dict(default="localhost"), + port=dict(default="5433"), + login_user=dict(default="dbadmin"), login_password=dict(no_log=True), - ), supports_check_mode=True) + ), + supports_check_mode=True, + ) if not pyodbc_found: - module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) + module.fail_json(msg=missing_required_lib("pyodbc"), exception=PYODBC_IMP_ERR) - user = module.params['user'] - profile = module.params['profile'] + user = module.params["user"] + profile = module.params["profile"] if profile: profile = profile.lower() - resource_pool = module.params['resource_pool'] + resource_pool = module.params["resource_pool"] if resource_pool: resource_pool = resource_pool.lower() - password = module.params['password'] - expired = module.params['expired'] - ldap = module.params['ldap'] + password = module.params["password"] + expired = module.params["expired"] + ldap = module.params["ldap"] roles = [] - if module.params['roles']: - roles = module.params['roles'].split(',') + if module.params["roles"]: + roles = module.params["roles"].split(",") roles = [_f for _f in roles if _f] - 
state = module.params['state'] - if state == 'locked': + state = module.params["state"] + if state == "locked": locked = True else: locked = False - db = '' - if module.params['db']: - db = module.params['db'] + db = "" + if module.params["db"]: + db = module.params["db"] changed = False @@ -347,31 +360,31 @@ def main(): try: user_facts = get_user_facts(cursor) if module.check_mode: - changed = not check(user_facts, user, profile, resource_pool, - locked, password, expired, ldap, roles) - elif state == 'absent': + changed = not check(user_facts, user, profile, resource_pool, locked, password, expired, ldap, roles) + elif state == "absent": try: changed = absent(user_facts, cursor, user, roles) except pyodbc.Error as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - elif state in ['present', 'locked']: + elif state in ["present", "locked"]: try: - changed = present(user_facts, cursor, user, profile, resource_pool, - locked, password, expired, ldap, roles) + changed = present( + user_facts, cursor, user, profile, resource_pool, locked, password, expired, ldap, roles + ) except pyodbc.Error as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) except NotSupportedError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts}) + module.fail_json(msg=to_native(e), ansible_facts={"vertica_users": user_facts}) except CannotDropError as e: - module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts}) + module.fail_json(msg=to_native(e), ansible_facts={"vertica_users": user_facts}) except SystemExit: # avoid catching this on python 2.4 raise except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts}) + module.exit_json(changed=changed, user=user, ansible_facts={"vertica_users": user_facts}) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vexata_eg.py b/plugins/modules/vexata_eg.py index a644a329a96..134b3de1458 100644 --- a/plugins/modules/vexata_eg.py +++ b/plugins/modules/vexata_eg.py @@ -76,69 +76,72 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.vexata import ( - argument_spec, get_array, required_together) + argument_spec, + get_array, + required_together, +) def get_eg(module, array): """Retrieve a named eg if it exists, None if absent.""" - name = module.params['name'] + name = module.params["name"] try: egs = array.list_egs() - eg = [eg for eg in egs if eg['name'] == name] + eg = [eg for eg in egs if eg["name"] == name] if len(eg) == 1: return eg[0] else: return None except Exception: - module.fail_json(msg='Error while attempting to retrieve export groups.') + module.fail_json(msg="Error while attempting to retrieve export groups.") def get_vg_id(module, array): """Retrieve a named vg's id if it exists, error if absent.""" - name = module.params['vg'] + name = module.params["vg"] try: vgs = array.list_vgs() - vg = [vg for vg in vgs if vg['name'] == name] + vg = [vg for vg in vgs if vg["name"] == name] if len(vg) == 1: - return vg[0]['id'] + return vg[0]["id"] else: - module.fail_json(msg=f'Volume group {name} was not found.') + module.fail_json(msg=f"Volume group {name} was not found.") except Exception: - module.fail_json(msg='Error while attempting to retrieve volume groups.') + module.fail_json(msg="Error while attempting to retrieve volume groups.") def 
get_ig_id(module, array): """Retrieve a named ig's id if it exists, error if absent.""" - name = module.params['ig'] + name = module.params["ig"] try: igs = array.list_igs() - ig = [ig for ig in igs if ig['name'] == name] + ig = [ig for ig in igs if ig["name"] == name] if len(ig) == 1: - return ig[0]['id'] + return ig[0]["id"] else: - module.fail_json(msg=f'Initiator group {name} was not found.') + module.fail_json(msg=f"Initiator group {name} was not found.") except Exception: - module.fail_json(msg='Error while attempting to retrieve initiator groups.') + module.fail_json(msg="Error while attempting to retrieve initiator groups.") def get_pg_id(module, array): """Retrieve a named pg's id if it exists, error if absent.""" - name = module.params['pg'] + name = module.params["pg"] try: pgs = array.list_pgs() - pg = [pg for pg in pgs if pg['name'] == name] + pg = [pg for pg in pgs if pg["name"] == name] if len(pg) == 1: - return pg[0]['id'] + return pg[0]["id"] else: - module.fail_json(msg=f'Port group {name} was not found.') + module.fail_json(msg=f"Port group {name} was not found.") except Exception: - module.fail_json(msg='Error while attempting to retrieve port groups.') + module.fail_json(msg="Error while attempting to retrieve port groups.") def create_eg(module, array): - """"Create a new export group.""" + """Create a new export group.""" changed = False - eg_name = module.params['name'] + eg_name = module.params["name"] vg_id = get_vg_id(module, array) ig_id = get_ig_id(module, array) pg_id = get_pg_id(module, array) @@ -146,36 +149,32 @@ def create_eg(module, array): module.exit_json(changed=changed) try: - eg = array.create_eg( - eg_name, - 'Ansible export group', - (vg_id, ig_id, pg_id)) + eg = array.create_eg(eg_name, "Ansible export group", (vg_id, ig_id, pg_id)) if eg: - module.log(msg=f'Created export group {eg_name}') + module.log(msg=f"Created export group {eg_name}") changed = True else: raise Exception except Exception: - module.fail_json(msg=f'Export group {eg_name} create failed.') + module.fail_json(msg=f"Export group {eg_name} create failed.") module.exit_json(changed=changed) def delete_eg(module, array, eg): changed = False - eg_name = eg['name'] + eg_name = eg["name"] if module.check_mode: module.exit_json(changed=changed) try: - ok = array.delete_eg( - eg['id']) + ok = array.delete_eg(eg["id"]) if ok: - module.log(msg=f'Export group {eg_name} deleted.') + module.log(msg=f"Export group {eg_name} deleted.") changed = True else: raise Exception except Exception: - module.fail_json(msg=f'Export group {eg_name} delete failed.') + module.fail_json(msg=f"Export group {eg_name} delete failed.") module.exit_json(changed=changed) @@ -183,29 +182,27 @@ def main(): arg_spec = argument_spec() arg_spec.update( dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent']), - vg=dict(type='str'), - ig=dict(type='str'), - pg=dict(type='str') + name=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["present", "absent"]), + vg=dict(type="str"), + ig=dict(type="str"), + pg=dict(type="str"), ) ) - module = AnsibleModule(arg_spec, - supports_check_mode=True, - required_together=required_together()) + module = AnsibleModule(arg_spec, supports_check_mode=True, required_together=required_together()) - state = module.params['state'] + state = module.params["state"] array = get_array(module) eg = get_eg(module, array) - if state == 'present' and not eg: + if state == "present" and not eg: 
create_eg(module, array) - elif state == 'absent' and eg: + elif state == "absent" and eg: delete_eg(module, array, eg) else: module.exit_json(changed=False) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vexata_volume.py b/plugins/modules/vexata_volume.py index 5240b9a642d..443dd2af9c6 100644 --- a/plugins/modules/vexata_volume.py +++ b/plugins/modules/vexata_volume.py @@ -73,50 +73,51 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.vexata import ( - argument_spec, get_array, required_together, size_to_MiB) + argument_spec, + get_array, + required_together, + size_to_MiB, +) def get_volume(module, array): """Retrieve a named volume if it exists, None if absent.""" - name = module.params['name'] + name = module.params["name"] try: vols = array.list_volumes() - vol = [v for v in vols if v['name'] == name] + vol = [v for v in vols if v["name"] == name] if len(vol) == 1: return vol[0] else: return None except Exception: - module.fail_json(msg='Error while attempting to retrieve volumes.') + module.fail_json(msg="Error while attempting to retrieve volumes.") def validate_size(module, err_msg): - size = module.params.get('size', False) + size = module.params.get("size", False) if not size: module.fail_json(msg=err_msg) size = size_to_MiB(size) if size <= 0: - module.fail_json(msg='Invalid volume size, must be [MGT].') + module.fail_json(msg="Invalid volume size, must be [MGT].") return size def create_volume(module, array): - """"Create a new volume.""" + """Create a new volume.""" changed = False - size = validate_size(module, err_msg='Size is required to create volume.') + size = validate_size(module, err_msg="Size is required to create volume.") if module.check_mode: module.exit_json(changed=changed) try: - vol = array.create_volume( - module.params['name'], - 'Ansible volume', - size) + vol = array.create_volume(module.params["name"], "Ansible volume", size) if vol: module.log(msg=f"Created volume {vol['id']}") changed = True else: - module.fail_json(msg='Volume create failed.') + module.fail_json(msg="Volume create failed.") except Exception: pass module.exit_json(changed=changed) @@ -125,20 +126,15 @@ def create_volume(module, array): def update_volume(module, array, volume): """Expand the volume size.""" changed = False - size = validate_size(module, err_msg='Size is required to update volume') - prev_size = volume['volSize'] + size = validate_size(module, err_msg="Size is required to update volume") + prev_size = volume["volSize"] if size <= prev_size: - module.log(msg='Volume expanded size needs to be larger ' - 'than current size.') + module.log(msg="Volume expanded size needs to be larger than current size.") if module.check_mode: module.exit_json(changed=changed) try: - vol = array.grow_volume( - volume['name'], - volume['description'], - volume['id'], - size) + vol = array.grow_volume(volume["name"], volume["description"], volume["id"], size) if vol: changed = True except Exception: @@ -149,15 +145,14 @@ def update_volume(module, array, volume): def delete_volume(module, array, volume): changed = False - vol_name = volume['name'] + vol_name = volume["name"] if module.check_mode: module.exit_json(changed=changed) try: - ok = array.delete_volume( - volume['id']) + ok = array.delete_volume(volume["id"]) if ok: - module.log(msg=f'Volume {vol_name} deleted.') + module.log(msg=f"Volume {vol_name} deleted.") changed = True else: raise Exception @@ -170,30 +165,28 @@ def 
main(): arg_spec = argument_spec() arg_spec.update( dict( - name=dict(type='str', required=True), - state=dict(default='present', choices=['present', 'absent']), - size=dict(type='str') + name=dict(type="str", required=True), + state=dict(default="present", choices=["present", "absent"]), + size=dict(type="str"), ) ) - module = AnsibleModule(arg_spec, - supports_check_mode=True, - required_together=required_together()) + module = AnsibleModule(arg_spec, supports_check_mode=True, required_together=required_together()) - state = module.params['state'] + state = module.params["state"] array = get_array(module) volume = get_volume(module, array) - if state == 'present': + if state == "present": if not volume: create_volume(module, array) else: update_volume(module, array, volume) - elif state == 'absent' and volume: + elif state == "absent" and volume: delete_volume(module, array, volume) else: module.exit_json(changed=False) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmadm.py b/plugins/modules/vmadm.py index 5c8e7b73c5b..d9a9e3a9872 100644 --- a/plugins/modules/vmadm.py +++ b/plugins/modules/vmadm.py @@ -417,20 +417,21 @@ def get_vm_prop(module, uuid, prop): # Lookup a property for the given VM. # Returns the property, or None if not found. - cmd = [module.vmadm, 'lookup', '-j', '-o', prop, f'uuid={uuid}'] + cmd = [module.vmadm, "lookup", "-j", "-o", prop, f"uuid={uuid}"] (rc, stdout, stderr) = module.run_command(cmd) if rc != 0: - module.fail_json( - msg=f'Could not perform lookup of {prop} on {uuid}', exception=stderr) + module.fail_json(msg=f"Could not perform lookup of {prop} on {uuid}", exception=stderr) try: stdout_json = json.loads(stdout) except Exception as e: module.fail_json( - msg=f'Invalid JSON returned by vmadm for uuid lookup of {prop}', - details=to_native(e), exception=traceback.format_exc()) + msg=f"Invalid JSON returned by vmadm for uuid lookup of {prop}", + details=to_native(e), + exception=traceback.format_exc(), + ) if stdout_json: return stdout_json[0].get(prop) @@ -439,13 +440,12 @@ def get_vm_prop(module, uuid, prop): def get_vm_uuid(module, alias): # Lookup the uuid that goes with the given alias. # Returns the uuid or '' if not found. - cmd = [module.vmadm, 'lookup', '-j', '-o', 'uuid', f'alias={alias}'] + cmd = [module.vmadm, "lookup", "-j", "-o", "uuid", f"alias={alias}"] (rc, stdout, stderr) = module.run_command(cmd) if rc != 0: - module.fail_json( - msg=f'Could not retrieve UUID of {alias}', exception=stderr) + module.fail_json(msg=f"Could not retrieve UUID of {alias}", exception=stderr) # If no VM was found matching the given alias, we get back an empty array. # That is not an error condition as we might be explicitly checking for its @@ -454,28 +454,29 @@ def get_vm_uuid(module, alias): stdout_json = json.loads(stdout) except Exception as e: module.fail_json( - msg=f'Invalid JSON returned by vmadm for uuid lookup of {alias}', - details=to_native(e), exception=traceback.format_exc()) + msg=f"Invalid JSON returned by vmadm for uuid lookup of {alias}", + details=to_native(e), + exception=traceback.format_exc(), + ) if stdout_json: - return stdout_json[0].get('uuid') + return stdout_json[0].get("uuid") def get_all_vm_uuids(module): # Retrieve the UUIDs for all VMs. 
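# For context on these lookups: 'vmadm lookup -j' emits a JSON array with
# one object per matching VM, which is why the helpers index stdout_json[0]
# or comprehend over the list. A hedged, standalone sketch of that parsing
# (the sample output below is invented for illustration):
import json
sample_stdout = '[{"uuid": "11111111-2222-3333-4444-555555555555"}]'
print([v["uuid"] for v in json.loads(sample_stdout)])
# -> ['11111111-2222-3333-4444-555555555555']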
- cmd = [module.vmadm, 'lookup', '-j', '-o', 'uuid'] + cmd = [module.vmadm, "lookup", "-j", "-o", "uuid"] (rc, stdout, stderr) = module.run_command(cmd) if rc != 0: - module.fail_json(msg='Failed to get VMs list', exception=stderr) + module.fail_json(msg="Failed to get VMs list", exception=stderr) try: stdout_json = json.loads(stdout) - return [v['uuid'] for v in stdout_json] + return [v["uuid"] for v in stdout_json] except Exception as e: - module.fail_json(msg='Could not retrieve VM UUIDs', details=to_native(e), - exception=traceback.format_exc()) + module.fail_json(msg="Could not retrieve VM UUIDs", details=to_native(e), exception=traceback.format_exc()) def new_vm(module, uuid, vm_state): @@ -485,38 +486,39 @@ def new_vm(module, uuid, vm_state): if rc != 0: changed = False - module.fail_json(msg='Could not create VM', exception=stderr) + module.fail_json(msg="Could not create VM", exception=stderr) else: changed = True # 'vmadm create' returns all output to stderr... - match = re.match('Successfully created VM (.*)', stderr) + match = re.match("Successfully created VM (.*)", stderr) if match: vm_uuid = match.groups()[0] if not is_valid_uuid(vm_uuid): - module.fail_json(msg=f'Invalid UUID for VM {vm_uuid}?') + module.fail_json(msg=f"Invalid UUID for VM {vm_uuid}?") else: - module.fail_json(msg='Could not retrieve UUID of newly created(?) VM') + module.fail_json(msg="Could not retrieve UUID of newly created(?) VM") # Now that the VM is created, ensure it is in the desired state (if not 'running') - if vm_state != 'running': + if vm_state != "running": ret = set_vm_state(module, vm_uuid, vm_state) if not ret: - module.fail_json(msg=f'Could not set VM {vm_uuid} to state {vm_state}') + module.fail_json(msg=f"Could not set VM {vm_uuid} to state {vm_state}") try: os.unlink(payload_file) except Exception as e: # Since the payload may contain sensitive information, fail hard # if we cannot remove the file so the operator knows about it. - module.fail_json(msg=f'Could not remove temporary JSON payload file {payload_file}: {e}', - exception=traceback.format_exc()) + module.fail_json( + msg=f"Could not remove temporary JSON payload file {payload_file}: {e}", exception=traceback.format_exc() + ) return changed, vm_uuid def vmadm_create_vm(module, payload_file): # Create a new VM using the provided payload. - cmd = [module.vmadm, 'create', '-f', payload_file] + cmd = [module.vmadm, "create", "-f", payload_file] return module.run_command(cmd) @@ -525,27 +527,27 @@ def set_vm_state(module, vm_uuid, vm_state): p = module.params # Check if the VM is already in the desired state. - state = get_vm_prop(module, vm_uuid, 'state') + state = get_vm_prop(module, vm_uuid, "state") if state and (state == vm_state): return None # Lookup table for the state to be in, and which command to use for that. # vm_state: [vmadm command, forceable?] 
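# Reading the table below: looking up 'deleted' yields ['delete', True], so
# with force=true the module runs 'vmadm delete -F <uuid>', while 'running'
# yields ['start', False] and never takes -F. A tiny sketch mirroring that
# lookup (toy dict, same shape as cmds below):
command, forceable = {"deleted": ["delete", True]}["deleted"]
print(command, forceable)  # -> delete True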
cmds = { - 'stopped': ['stop', True], - 'running': ['start', False], - 'deleted': ['delete', True], - 'rebooted': ['reboot', False] + "stopped": ["stop", True], + "running": ["start", False], + "deleted": ["delete", True], + "rebooted": ["reboot", False], } command, forceable = cmds[vm_state] - force = ['-F'] if p['force'] and forceable else [] + force = ["-F"] if p["force"] and forceable else [] cmd = [module.vmadm, command] + force + [vm_uuid] (dummy, dummy, stderr) = module.run_command(cmd) - match = re.match('^Successfully.*', stderr) + match = re.match("^Successfully.*", stderr) return match is not None @@ -553,18 +555,13 @@ def create_payload(module, uuid): # Create the JSON payload (vmdef) and return the filename. # Filter out the few options that are not valid VM properties. - module_options = ['force', 'state'] - vmdef = { - k: v - for k, v in module.params.items() - if k not in module_options and v - } + module_options = ["force", "state"] + vmdef = {k: v for k, v in module.params.items() if k not in module_options and v} try: vmdef_json = json.dumps(vmdef) except Exception as e: - module.fail_json( - msg='Could not create valid JSON payload', exception=traceback.format_exc()) + module.fail_json(msg="Could not create valid JSON payload", exception=traceback.format_exc()) # Create the temporary file that contains our payload, and set tight # permissions, as it may contain sensitive information. @@ -574,10 +571,10 @@ def create_payload(module, uuid): # the payload (thus removing the `save_payload` option). fname = tempfile.mkstemp()[1] os.chmod(fname, 0o400) - with open(fname, 'w') as fh: + with open(fname, "w") as fh: fh.write(vmdef_json) except Exception as e: - module.fail_json(msg=f'Could not save JSON payload: {e}', exception=traceback.format_exc()) + module.fail_json(msg=f"Could not save JSON payload: {e}", exception=traceback.format_exc()) return fname @@ -591,18 +588,18 @@ def vm_state_transition(module, uuid, vm_state): elif ret: return True else: - module.fail_json(msg=f'Failed to set VM {uuid} to state {vm_state}') + module.fail_json(msg=f"Failed to set VM {uuid} to state {vm_state}") def is_valid_uuid(uuid): - return re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', uuid, re.IGNORECASE) is not None + return re.match("^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$", uuid, re.IGNORECASE) is not None def validate_uuids(module): failed = [ name - for name, pvalue in [(x, module.params[x]) for x in ['uuid', 'image_uuid']] - if pvalue and pvalue != '*' and not is_valid_uuid(pvalue) + for name, pvalue in [(x, module.params[x]) for x in ["uuid", "image_uuid"]] + if pvalue and pvalue != "*" and not is_valid_uuid(pvalue) ] if failed: @@ -612,9 +609,9 @@ def validate_uuids(module): def manage_all_vms(module, vm_state): # Handle operations for all VMs, which can by definition only # be state transitions. - state = module.params['state'] + state = module.params["state"] - if state == 'created': + if state == "created": module.fail_json(msg='State "created" is only valid for tasks with a single VM') # If any of the VMs has a change, the task as a whole has a change. @@ -622,12 +619,12 @@ # First get all VM uuids and for each check their state, and adjust it if needed. 
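# Standalone check of the is_valid_uuid() regex defined above (example
# values are made up; the helper is restated here only so the snippet runs
# on its own):
import re
def is_valid_uuid(uuid):
    return re.match("^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$", uuid, re.IGNORECASE) is not None
print(is_valid_uuid("B06D2EEB-3A09-4F9D-A9F6-8C5CDD1BB255"))  # True (case-insensitive)
print(is_valid_uuid("not-a-uuid"))  # False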
for uuid in get_all_vm_uuids(module): - current_vm_state = get_vm_prop(module, uuid, 'state') - if not current_vm_state and vm_state == 'deleted': + current_vm_state = get_vm_prop(module, uuid, "state") + if not current_vm_state and vm_state == "deleted": any_changed = False else: if module.check_mode: - if (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != state): + if (not current_vm_state) or (get_vm_prop(module, uuid, "state") != state): any_changed = True else: any_changed = vm_state_transition(module, uuid, vm_state) or any_changed @@ -641,57 +638,81 @@ def main(): # Dict of all options that are simple to define based on their type. # They're not required and have a default of None. properties = { - 'str': [ - 'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname', - 'image_uuid', 'internal_metadata_namespace', 'kernel_version', - 'limit_priv', 'nic_driver', 'owner_uuid', 'qemu_opts', - 'qemu_extra_opts', 'spice_opts', 'uuid', 'vga', - 'zfs_data_compression', 'zfs_root_compression', 'zpool' + "str": [ + "boot", + "disk_driver", + "dns_domain", + "fs_allowed", + "hostname", + "image_uuid", + "internal_metadata_namespace", + "kernel_version", + "limit_priv", + "nic_driver", + "owner_uuid", + "qemu_opts", + "qemu_extra_opts", + "spice_opts", + "uuid", + "vga", + "zfs_data_compression", + "zfs_root_compression", + "zpool", ], - 'bool': [ - 'archive_on_delete', 'autoboot', 'delegate_dataset', - 'docker', 'firewall_enabled', 'force', 'indestructible_delegated', - 'indestructible_zoneroot', 'maintain_resolvers', 'nowait' + "bool": [ + "archive_on_delete", + "autoboot", + "delegate_dataset", + "docker", + "firewall_enabled", + "force", + "indestructible_delegated", + "indestructible_zoneroot", + "maintain_resolvers", + "nowait", ], - 'int': [ - 'cpu_cap', 'cpu_shares', 'flexible_disk_size', - 'max_locked_memory', 'max_lwps', 'max_physical_memory', - 'max_swap', 'mdata_exec_timeout', 'quota', 'ram', - 'tmpfs', 'vcpus', 'virtio_txburst', 'virtio_txtimer', - 'vnc_port', 'zfs_data_recsize', 'zfs_filesystem_limit', - 'zfs_io_priority', 'zfs_root_recsize', 'zfs_snapshot_limit' + "int": [ + "cpu_cap", + "cpu_shares", + "flexible_disk_size", + "max_locked_memory", + "max_lwps", + "max_physical_memory", + "max_swap", + "mdata_exec_timeout", + "quota", + "ram", + "tmpfs", + "vcpus", + "virtio_txburst", + "virtio_txtimer", + "vnc_port", + "zfs_data_recsize", + "zfs_filesystem_limit", + "zfs_io_priority", + "zfs_root_recsize", + "zfs_snapshot_limit", ], - 'dict': ['customer_metadata', 'internal_metadata', 'routes'], + "dict": ["customer_metadata", "internal_metadata", "routes"], } # Start with the options that are not as trivial as those above. options = dict( state=dict( - default='running', - type='str', - choices=['present', 'running', 'absent', 'deleted', 'stopped', 'created', 'restarted', 'rebooted'] - ), - name=dict( - type='str', - aliases=['alias'] - ), - brand=dict( - default='joyent', - type='str', - choices=['joyent', 'joyent-minimal', 'lx', 'kvm', 'bhyve'] - ), - cpu_type=dict( - default='qemu64', - type='str', - choices=['host', 'qemu64'] + default="running", + type="str", + choices=["present", "running", "absent", "deleted", "stopped", "created", "restarted", "rebooted"], ), + name=dict(type="str", aliases=["alias"]), + brand=dict(default="joyent", type="str", choices=["joyent", "joyent-minimal", "lx", "kvm", "bhyve"]), + cpu_type=dict(default="qemu64", type="str", choices=["host", "qemu64"]), # Regular strings, however these require additional options. 
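# The properties mapping above keeps argument_spec compact: the 'Add our
# simple options' loop further below turns each listed name into
# dict(type=...). A standalone sketch of that expansion (toy subset, not
# the full option list):
props = {"bool": ["autoboot", "docker"], "int": ["ram"]}
opts = {}
for ptype, names in props.items():
    for name in names:
        opts[name] = dict(type=ptype)
print(opts)  # {'autoboot': {'type': 'bool'}, 'docker': {'type': 'bool'}, 'ram': {'type': 'int'}}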
- spice_password=dict(type='str', no_log=True), - vnc_password=dict(type='str', no_log=True), - disks=dict(type='list', elements='dict'), - nics=dict(type='list', elements='dict'), - resolvers=dict(type='list', elements='str'), - filesystems=dict(type='list', elements='dict'), + spice_password=dict(type="str", no_log=True), + vnc_password=dict(type="str", no_log=True), + disks=dict(type="list", elements="dict"), + nics=dict(type="list", elements="dict"), + resolvers=dict(type="list", elements="str"), + filesystems=dict(type="list", elements="dict"), ) # Add our 'simple' options to options dict. @@ -700,52 +721,48 @@ def main(): option = dict(type=type) options[p] = option - module = AnsibleModule( - argument_spec=options, - supports_check_mode=True, - required_one_of=[['name', 'uuid']] - ) + module = AnsibleModule(argument_spec=options, supports_check_mode=True, required_one_of=[["name", "uuid"]]) - module.vmadm = module.get_bin_path('vmadm', required=True) + module.vmadm = module.get_bin_path("vmadm", required=True) p = module.params - uuid = p['uuid'] - state = p['state'] + uuid = p["uuid"] + state = p["state"] # Translate the state parameter into something we can use later on. - if state in ['present', 'running']: - vm_state = 'running' - elif state in ['stopped', 'created']: - vm_state = 'stopped' - elif state in ['absent', 'deleted']: - vm_state = 'deleted' - elif state in ['restarted', 'rebooted']: - vm_state = 'rebooted' + if state in ["present", "running"]: + vm_state = "running" + elif state in ["stopped", "created"]: + vm_state = "stopped" + elif state in ["absent", "deleted"]: + vm_state = "deleted" + elif state in ["restarted", "rebooted"]: + vm_state = "rebooted" - result = {'state': state} + result = {"state": state} # While it is possible to refer to a given VM by its `alias`, it is easier # to operate on VMs by their UUID. So if we're not given a `uuid`, look # it up. if not uuid: - uuid = get_vm_uuid(module, p['name']) + uuid = get_vm_uuid(module, p["name"]) # Bit of a chicken and egg problem here for VMs with state == deleted. # If they're going to be removed in this play, we have to lookup the # uuid. If they're already deleted there's nothing to lookup. # So if state == deleted and get_vm_uuid() returned '', the VM is already # deleted and there's nothing else to do. - if uuid is None and vm_state == 'deleted': - result['name'] = p['name'] + if uuid is None and vm_state == "deleted": + result["name"] = p["name"] module.exit_json(**result) validate_uuids(module) - if p['name']: - result['name'] = p['name'] - result['uuid'] = uuid + if p["name"]: + result["name"] = p["name"] + result["uuid"] = uuid - if uuid == '*': - result['changed'] = manage_all_vms(module, vm_state) + if uuid == "*": + result["changed"] = manage_all_vms(module, vm_state) module.exit_json(**result) # The general flow is as follows: @@ -763,24 +780,24 @@ def main(): # Managing VM snapshots should be part of a standalone module. # First obtain the VM state to determine what needs to be done with it. - current_vm_state = get_vm_prop(module, uuid, 'state') + current_vm_state = get_vm_prop(module, uuid, "state") # First handle the case where the VM should be deleted and is not present. - if not current_vm_state and vm_state == 'deleted': - result['changed'] = False + if not current_vm_state and vm_state == "deleted": + result["changed"] = False elif module.check_mode: # Shortcut for check mode, if there is no VM yet, it will need to be created. 
# Or, if the VM is not in the desired state yet, it needs to transition. - result['changed'] = (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != state) + result["changed"] = (not current_vm_state) or (get_vm_prop(module, uuid, "state") != state) elif not current_vm_state: # No VM was found that matched the given ID (alias or uuid), so we create it. - result['changed'], result['uuid'] = new_vm(module, uuid, vm_state) + result["changed"], result["uuid"] = new_vm(module, uuid, vm_state) else: # VM was found, operate on its state directly. - result['changed'] = vm_state_transition(module, uuid, vm_state) + result["changed"] = vm_state_transition(module, uuid, vm_state) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/wakeonlan.py b/plugins/modules/wakeonlan.py index 3d9c401571d..d6265e3984f 100644 --- a/plugins/modules/wakeonlan.py +++ b/plugins/modules/wakeonlan.py @@ -74,13 +74,13 @@ def wakeonlan(module, mac, broadcast, port): - """ Send a magic Wake-on-LAN packet. """ + """Send a magic Wake-on-LAN packet.""" mac_orig = mac # Remove possible separator from MAC address if len(mac) == 12 + 5: - mac = mac.replace(mac[2], '') + mac = mac.replace(mac[2], "") # If we don't end up with 12 hexadecimal characters, fail if len(mac) != 12: @@ -93,17 +93,16 @@ def wakeonlan(module, mac, broadcast, port): module.fail_json(msg=f"Incorrect MAC address format: {mac_orig}") # Create payload for magic packet - data = b'' + data = b"" padding = f"FFFFFFFFFFFF{mac * 20}" for i in range(0, len(padding), 2): - data = b''.join([data, struct.pack('B', int(padding[i: i + 2], 16))]) + data = b"".join([data, struct.pack("B", int(padding[i : i + 2], 16))]) # Broadcast payload to network sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) if not module.check_mode: - try: sock.sendto(data, (broadcast, port)) except socket.error as e: @@ -116,21 +115,21 @@ def wakeonlan(module, mac, broadcast, port): def main(): module = AnsibleModule( argument_spec=dict( - mac=dict(type='str', required=True), - broadcast=dict(type='str', default='255.255.255.255'), - port=dict(type='int', default=7), + mac=dict(type="str", required=True), + broadcast=dict(type="str", default="255.255.255.255"), + port=dict(type="int", default=7), ), supports_check_mode=True, ) - mac = module.params['mac'] - broadcast = module.params['broadcast'] - port = module.params['port'] + mac = module.params["mac"] + broadcast = module.params["broadcast"] + port = module.params["port"] wakeonlan(module, mac, broadcast, port) module.exit_json(changed=True) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/wdc_redfish_command.py b/plugins/modules/wdc_redfish_command.py index 387af7340bf..43d4e490f18 100644 --- a/plugins/modules/wdc_redfish_command.py +++ b/plugins/modules/wdc_redfish_command.py @@ -205,125 +205,106 @@ from ansible.module_utils.common.text.converters import to_native CATEGORY_COMMANDS_ALL = { - "Update": [ - "FWActivate", - "UpdateAndActivate" - ], + "Update": ["FWActivate", "UpdateAndActivate"], "Chassis": [ "IndicatorLedOn", "IndicatorLedOff", "PowerModeLow", "PowerModeNormal", - ] + ], } def main(): argument_spec = dict( category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - ioms=dict(type='list', elements='str'), + command=dict(required=True, type="list", elements="str"), + ioms=dict(type="list", elements="str"), 
baseuri=dict(), username=dict(), password=dict(no_log=True), auth_token=dict(no_log=True), - update_creds=dict( - type='dict', - options=dict( - username=dict(), - password=dict(no_log=True) - ) - ), + update_creds=dict(type="dict", options=dict(username=dict(), password=dict(no_log=True))), resource_id=dict(), update_image_uri=dict(), - timeout=dict(type='int', default=10) + timeout=dict(type="int", default=10), ) argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( argument_spec, required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ('baseuri', 'ioms') + ("username", "password"), ], + required_one_of=[("username", "auth_token"), ("baseuri", "ioms")], mutually_exclusive=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], - supports_check_mode=True + supports_check_mode=True, ) - category = module.params['category'] - command_list = module.params['command'] + category = module.params["category"] + command_list = module.params["command"] # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} + creds = {"user": module.params["username"], "pswd": module.params["password"], "token": module.params["auth_token"]} # timeout - timeout = module.params['timeout'] + timeout = module.params["timeout"] # Resource to modify - resource_id = module.params['resource_id'] + resource_id = module.params["resource_id"] # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native(f"Invalid Category '{category}'. Valid Categories = {sorted(CATEGORY_COMMANDS_ALL.keys())}")) + module.fail_json( + msg=to_native(f"Invalid Category '{category}'. Valid Categories = {sorted(CATEGORY_COMMANDS_ALL.keys())}") + ) # Check that all commands are valid for cmd in command_list: # Fail if even one command given is invalid if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}")) + module.fail_json( + msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}") + ) # Build root URI(s) if module.params.get("baseuri") is not None: root_uris = [f"https://{module.params['baseuri']}"] else: - root_uris = [ - f"https://{iom}" for iom in module.params['ioms'] - ] - rf_utils = WdcRedfishUtils(creds, root_uris, timeout, module, - resource_id=resource_id, data_modification=True) + root_uris = [f"https://{iom}" for iom in module.params["ioms"]] + rf_utils = WdcRedfishUtils(creds, root_uris, timeout, module, resource_id=resource_id, data_modification=True) # Organize by Categories / Commands if category == "Update": # execute only if we find UpdateService resources resource = rf_utils._find_updateservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) + if resource["ret"] is False: + module.fail_json(msg=resource["msg"]) # update options - update_opts = { - 'update_creds': module.params['update_creds'] - } + update_opts = {"update_creds": module.params["update_creds"]} for command in command_list: if command == "FWActivate": if module.check_mode: - result = { - 'ret': True, - 'changed': True, - 'msg': 'FWActivate not performed in check mode.' 
- } + result = {"ret": True, "changed": True, "msg": "FWActivate not performed in check mode."} else: result = rf_utils.firmware_activate(update_opts) elif command == "UpdateAndActivate": - update_opts["update_image_uri"] = module.params['update_image_uri'] + update_opts["update_image_uri"] = module.params["update_image_uri"] result = rf_utils.update_and_activate(update_opts) elif category == "Chassis": result = rf_utils._find_chassis_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) led_commands = ["IndicatorLedOn", "IndicatorLedOff"] # Check if more than one led_command is present num_led_commands = sum([command in led_commands for command in command_list]) if num_led_commands > 1: - result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."} + result = {"ret": False, "msg": "Only one IndicatorLed command should be sent at a time."} else: for command in command_list: if command.startswith("IndicatorLed"): @@ -331,18 +312,20 @@ def main(): elif command.startswith("PowerMode"): result = rf_utils.manage_chassis_power_mode(command) - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) else: - del result['ret'] - changed = result.get('changed', True) - session = result.get('session', dict()) - module.exit_json(changed=changed, - session=session, - msg='Action was successful' if not module.check_mode else result.get( - 'msg', "No action performed in check mode." - )) - - -if __name__ == '__main__': + del result["ret"] + changed = result.get("changed", True) + session = result.get("session", dict()) + module.exit_json( + changed=changed, + session=session, + msg="Action was successful" + if not module.check_mode + else result.get("msg", "No action performed in check mode."), + ) + + +if __name__ == "__main__": main() diff --git a/plugins/modules/wdc_redfish_info.py b/plugins/modules/wdc_redfish_info.py index 0f5313fa571..64606c8cf5c 100644 --- a/plugins/modules/wdc_redfish_info.py +++ b/plugins/modules/wdc_redfish_info.py @@ -125,89 +125,81 @@ from ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils import WdcRedfishUtils from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC -CATEGORY_COMMANDS_ALL = { - "Update": ["SimpleUpdateStatus"] -} +CATEGORY_COMMANDS_ALL = {"Update": ["SimpleUpdateStatus"]} def main(): result = {} argument_spec = dict( category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - ioms=dict(type='list', elements='str'), + command=dict(required=True, type="list", elements="str"), + ioms=dict(type="list", elements="str"), baseuri=dict(), username=dict(), password=dict(no_log=True), auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) + timeout=dict(type="int", default=10), ) argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( argument_spec, required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ('baseuri', 'ioms') + ("username", "password"), ], + required_one_of=[("username", "auth_token"), ("baseuri", "ioms")], mutually_exclusive=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], - supports_check_mode=True + supports_check_mode=True, ) - category = module.params['category'] - command_list = module.params['command'] 
+ category = module.params["category"] + command_list = module.params["command"] # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} + creds = {"user": module.params["username"], "pswd": module.params["password"], "token": module.params["auth_token"]} # timeout - timeout = module.params['timeout'] + timeout = module.params["timeout"] # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native(f"Invalid Category '{category}'. Valid Categories = {sorted(CATEGORY_COMMANDS_ALL.keys())}")) + module.fail_json( + msg=to_native(f"Invalid Category '{category}'. Valid Categories = {sorted(CATEGORY_COMMANDS_ALL.keys())}") + ) # Check that all commands are valid for cmd in command_list: # Fail if even one command given is invalid if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}")) + module.fail_json( + msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}") + ) # Build root URI(s) if module.params.get("baseuri") is not None: root_uris = [f"https://{module.params['baseuri']}"] else: - root_uris = [ - f"https://{iom}" for iom in module.params['ioms'] - ] - rf_utils = WdcRedfishUtils(creds, root_uris, timeout, module, - resource_id=None, - data_modification=False - ) + root_uris = [f"https://{iom}" for iom in module.params["ioms"]] + rf_utils = WdcRedfishUtils(creds, root_uris, timeout, module, resource_id=None, data_modification=False) # Organize by Categories / Commands if category == "Update": # execute only if we find UpdateService resources resource = rf_utils._find_updateservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) + if resource["ret"] is False: + module.fail_json(msg=resource["msg"]) for command in command_list: if command == "SimpleUpdateStatus": simple_update_status_result = rf_utils.get_simple_update_status() - if simple_update_status_result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if simple_update_status_result["ret"] is False: + module.fail_json(msg=to_native(simple_update_status_result["msg"])) else: - del simple_update_status_result['ret'] + del simple_update_status_result["ret"] result["simple_update_status"] = simple_update_status_result module.exit_json(changed=False, redfish_facts=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/xattr.py b/plugins/modules/xattr.py index 18cc4bb0ad1..b8843fb2ea1 100644 --- a/plugins/modules/xattr.py +++ b/plugins/modules/xattr.py @@ -99,24 +99,24 @@ def get_xattr_keys(module, path, follow): - cmd = [module.get_bin_path('getfattr', True), '--absolute-names'] + cmd = [module.get_bin_path("getfattr", True), "--absolute-names"] if not follow: - cmd.append('-h') + cmd.append("-h") cmd.append(path) return _run_xattr(module, cmd) def get_xattr(module, path, key, follow): - cmd = [module.get_bin_path('getfattr', True), '--absolute-names'] + cmd = [module.get_bin_path("getfattr", True), "--absolute-names"] if not follow: - cmd.append('-h') + cmd.append("-h") if key is None: - cmd.append('-d') + cmd.append("-d") else: - cmd.append('-n') + cmd.append("-n") cmd.append(key) cmd.append(path) @@ -124,13 +124,12 @@ def get_xattr(module, path, key, follow): def set_xattr(module, path, key, value, follow): - - cmd = [module.get_bin_path('setfattr', True)] + cmd =
[module.get_bin_path("setfattr", True)] if not follow: - cmd.append('-h') - cmd.append('-n') + cmd.append("-h") + cmd.append("-n") cmd.append(key) - cmd.append('-v') + cmd.append("-v") cmd.append(value) cmd.append(path) @@ -138,11 +137,10 @@ def set_xattr(module, path, key, value, follow): def rm_xattr(module, path, key, follow): - - cmd = [module.get_bin_path('setfattr', True)] + cmd = [module.get_bin_path("setfattr", True)] if not follow: - cmd.append('-h') - cmd.append('-x') + cmd.append("-h") + cmd.append("-x") cmd.append(key) cmd.append(path) @@ -150,7 +148,6 @@ def rm_xattr(module, path, key, follow): def _run_xattr(module, cmd, check_rc=True): - try: (rc, out, err) = module.run_command(cmd, check_rc=check_rc) except Exception as e: @@ -159,34 +156,34 @@ def _run_xattr(module, cmd, check_rc=True): # result = {'raw': out} result = {} for line in out.splitlines(): - if line.startswith('#') or line == '': + if line.startswith("#") or line == "": pass - elif '=' in line: - (key, val) = line.split('=', 1) + elif "=" in line: + (key, val) = line.split("=", 1) result[key] = val.strip('"') else: - result[line] = '' + result[line] = "" return result def main(): module = AnsibleModule( argument_spec=dict( - path=dict(type='path', required=True, aliases=['name']), - namespace=dict(type='str', default='user'), - key=dict(type='str', no_log=False), - value=dict(type='str'), - state=dict(type='str', default='read', choices=['absent', 'all', 'keys', 'present', 'read']), - follow=dict(type='bool', default=True), + path=dict(type="path", required=True, aliases=["name"]), + namespace=dict(type="str", default="user"), + key=dict(type="str", no_log=False), + value=dict(type="str"), + state=dict(type="str", default="read", choices=["absent", "all", "keys", "present", "read"]), + follow=dict(type="bool", default=True), ), supports_check_mode=True, ) - path = module.params.get('path') - namespace = module.params.get('namespace') - key = module.params.get('key') - value = module.params.get('value') - state = module.params.get('state') - follow = module.params.get('follow') + path = module.params.get("path") + namespace = module.params.get("namespace") + key = module.params.get("key") + value = module.params.get("value") + state = module.params.get("state") + follow = module.params.get("follow") if not os.path.exists(path): module.fail_json(msg="path not found or not accessible!") @@ -195,18 +192,19 @@ def main(): msg = "" res = {} - if key is None and state in ['absent', 'present']: + if key is None and state in ["absent", "present"]: module.fail_json(msg=f"{state} needs a key parameter") # Prepend the key with the namespace if defined if ( - key is not None and - namespace is not None and - len(namespace) > 0 and - not (namespace == 'user' and key.startswith('user.'))): - key = f'{namespace}.{key}' - - if state == 'present' or value is not None: + key is not None + and namespace is not None + and len(namespace) > 0 + and not (namespace == "user" and key.startswith("user.")) + ): + key = f"{namespace}.{key}" + + if state == "present" or value is not None: current = get_xattr(module, path, key, follow) if current is None or key not in current or value != current[key]: if not module.check_mode: @@ -214,7 +212,7 @@ def main(): changed = True res = current msg = f"{key} set to {value}" - elif state == 'absent': + elif state == "absent": current = get_xattr(module, path, key, follow) if current is not None and key in current: if not module.check_mode: @@ -222,10 +220,10 @@ def main(): changed = True res = 
current msg = f"{key} removed" - elif state == 'keys': + elif state == "keys": res = get_xattr_keys(module, path, follow) msg = "returning all keys" - elif state == 'all': + elif state == "all": res = get_xattr(module, path, None, follow) msg = "dumping all" else: @@ -235,5 +233,5 @@ module.exit_json(changed=changed, msg=msg, xattr=res) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/xbps.py b/plugins/modules/xbps.py index c66ce1b4760..ab76bbf326f 100644 --- a/plugins/modules/xbps.py +++ b/plugins/modules/xbps.py @@ -183,14 +183,14 @@ def append_flags(module, xbps_path, cmd, skip_repo=False): def query_package(module, xbps_path, name, state="present"): """Returns Package info""" if state == "present": - lcmd = [xbps_path['query'], name] + lcmd = [xbps_path["query"], name] lcmd = append_flags(module, xbps_path, lcmd, skip_repo=True) lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) if not is_installed(lstdout): # package is not installed locally return False, False - rcmd = [xbps_path['install'], "-Sun"] + rcmd = [xbps_path["install"], "-Sun"] rcmd = append_flags(module, xbps_path, rcmd) rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) if rrc == 0 or rrc == 17: @@ -204,9 +204,9 @@ def query_package(module, xbps_path, name, state="present"): def update_package_db(module, xbps_path): """Returns True if update_package_db changed""" - cmd = [xbps_path['install'], "-S"] + cmd = [xbps_path["install"], "-S"] cmd = append_flags(module, xbps_path, cmd) - if module.params['accept_pubkey']: + if module.params["accept_pubkey"]: stdin = "y\n" else: stdin = "n\n" @@ -223,34 +223,34 @@ def update_package_db(module, xbps_path): def upgrade_xbps(module, xbps_path, exit_on_success=False): - cmdupgradexbps = [xbps_path['install'], "-uy", "xbps"] + cmdupgradexbps = [xbps_path["install"], "-uy", "xbps"] cmdupgradexbps = append_flags(module, xbps_path, cmdupgradexbps) rc, stdout, stderr = module.run_command(cmdupgradexbps, check_rc=False) if rc != 0: - module.fail_json(msg='Could not upgrade xbps itself') + module.fail_json(msg="Could not upgrade xbps itself") def upgrade(module, xbps_path): """Returns true if full upgrade succeeds""" - cmdupgrade = [xbps_path['install'], "-uy"] - cmdneedupgrade = [xbps_path['install'], "-un"] + cmdupgrade = [xbps_path["install"], "-uy"] + cmdneedupgrade = [xbps_path["install"], "-un"] cmdupgrade = append_flags(module, xbps_path, cmdupgrade) cmdneedupgrade = append_flags(module, xbps_path, cmdneedupgrade) rc, stdout, stderr = module.run_command(cmdneedupgrade, check_rc=False) if rc == 0: if len(stdout.splitlines()) == 0: - module.exit_json(changed=False, msg='Nothing to upgrade') + module.exit_json(changed=False, msg="Nothing to upgrade") elif module.check_mode: - module.exit_json(changed=True, msg='Would have performed upgrade') + module.exit_json(changed=True, msg="Would have performed upgrade") else: rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False) if rc == 0: - module.exit_json(changed=True, msg='System upgraded') - elif rc == 16 and module.params['upgrade_xbps']: + module.exit_json(changed=True, msg="System upgraded") + elif rc == 16 and module.params["upgrade_xbps"]: upgrade_xbps(module, xbps_path) # avoid loops by not trying self-upgrade again - module.params['upgrade_xbps'] = False + module.params["upgrade_xbps"] = False upgrade(module, xbps_path) else: module.fail_json(msg="Could not upgrade") @@ -268,7 +268,7 @@ def remove_packages(module, xbps_path, packages): if
not installed: continue - cmd = [xbps_path['remove'], "-y", package] + cmd = [xbps_path["remove"], "-y", package] cmd = append_flags(module, xbps_path, cmd, skip_repo=True) rc, stdout, stderr = module.run_command(cmd, check_rc=False) @@ -278,7 +278,6 @@ def remove_packages(module, xbps_path, packages): changed_packages.append(package) if len(changed_packages) > 0: - module.exit_json(changed=True, msg=f"removed {len(changed_packages)} package(s)", packages=changed_packages) module.exit_json(changed=False, msg="package(s) already absent") @@ -291,8 +290,7 @@ def install_packages(module, xbps_path, state, packages): """If the package is installed and state == present or state == latest and is up-to-date then skip""" installed, updated = query_package(module, xbps_path, package) - if installed and (state == 'present' or - (state == 'latest' and updated)): + if installed and (state == "present" or (state == "latest" and updated)): continue toInstall.append(package) @@ -300,21 +298,19 @@ if len(toInstall) == 0: module.exit_json(changed=False, msg="Nothing to Install") - cmd = [xbps_path['install'], "-y"] + toInstall + cmd = [xbps_path["install"], "-y"] + toInstall cmd = append_flags(module, xbps_path, cmd) rc, stdout, stderr = module.run_command(cmd, check_rc=False) - if rc == 16 and module.params['upgrade_xbps']: + if rc == 16 and module.params["upgrade_xbps"]: upgrade_xbps(module, xbps_path) # avoid loops by not trying self-update again - module.params['upgrade_xbps'] = False + module.params["upgrade_xbps"] = False install_packages(module, xbps_path, state, packages) - elif rc != 0 and not (state == 'latest' and rc == 17): - module.fail_json(msg=f"failed to install {len(toInstall)} packages(s)", - packages=toInstall) + elif rc != 0 and not (state == "latest" and rc == 17): + module.fail_json(msg=f"failed to install {len(toInstall)} package(s)", packages=toInstall) - module.exit_json(changed=True, msg=f"installed {len(toInstall)} package(s)", - packages=toInstall) + module.exit_json(changed=True, msg=f"installed {len(toInstall)} package(s)", packages=toInstall) def check_packages(module, xbps_path, packages, state): @@ -322,17 +318,20 @@ would_be_changed = [] for package in packages: installed, updated = query_package(module, xbps_path, package) - if ((state in ["present", "latest"] and not installed) or - (state == "absent" and installed) or - (state == "latest" and not updated)): + if ( + (state in ["present", "latest"] and not installed) + or (state == "absent" and installed) + or (state == "latest" and not updated) + ): would_be_changed.append(package) if would_be_changed: if state == "absent": state = "removed" - module.exit_json(changed=True, msg=f"{len(would_be_changed)} package(s) would be {state}", packages=would_be_changed) + module.exit_json( + changed=True, msg=f"{len(would_be_changed)} package(s) would be {state}", packages=would_be_changed + ) else: - module.exit_json(changed=False, msg=f"package(s) already {state}", - packages=[]) + module.exit_json(changed=False, msg=f"package(s) already {state}", packages=[]) def update_cache(module, xbps_path, upgrade_planned): @@ -340,15 +339,12 @@ if module.check_mode: if upgrade_planned: return - module.exit_json( - changed=True, msg='Would have updated the package cache' - ) + module.exit_json(changed=True, msg="Would have updated the package cache") changed =
update_package_db(module, xbps_path) if not upgrade_planned: - module.exit_json(changed=changed, msg=( - 'Updated the package master lists' if changed - else 'Package list already up to date' - )) + module.exit_json( + changed=changed, msg=("Updated the package master lists" if changed else "Package list already up to date") + ) def main(): @@ -356,52 +352,51 @@ module = AnsibleModule( argument_spec=dict( - name=dict(aliases=['pkg', 'package'], type='list', elements='str'), - state=dict(default='present', choices=['present', 'installed', - 'latest', 'absent', - 'removed']), - recurse=dict(default=False, type='bool'), - upgrade=dict(default=False, type='bool'), - update_cache=dict(default=True, type='bool'), - upgrade_xbps=dict(default=True, type='bool'), - root=dict(type='path'), - repositories=dict(type='list', elements='str'), - accept_pubkey=dict(default=False, type='bool') + name=dict(aliases=["pkg", "package"], type="list", elements="str"), + state=dict(default="present", choices=["present", "installed", "latest", "absent", "removed"]), + recurse=dict(default=False, type="bool"), + upgrade=dict(default=False, type="bool"), + update_cache=dict(default=True, type="bool"), + upgrade_xbps=dict(default=True, type="bool"), + root=dict(type="path"), + repositories=dict(type="list", elements="str"), + accept_pubkey=dict(default=False, type="bool"), ), - required_one_of=[['name', 'update_cache', 'upgrade']], - supports_check_mode=True) + required_one_of=[["name", "update_cache", "upgrade"]], + supports_check_mode=True, + ) xbps_path = dict() - xbps_path['install'] = module.get_bin_path('xbps-install', True) - xbps_path['query'] = module.get_bin_path('xbps-query', True) - xbps_path['remove'] = module.get_bin_path('xbps-remove', True) + xbps_path["install"] = module.get_bin_path("xbps-install", True) + xbps_path["query"] = module.get_bin_path("xbps-query", True) + xbps_path["remove"] = module.get_bin_path("xbps-remove", True) - if not os.path.exists(xbps_path['install']): + if not os.path.exists(xbps_path["install"]): module.fail_json(msg=f"cannot find xbps in path {xbps_path['install']}") p = module.params # normalize the state parameter - if p['state'] in ['present', 'installed']: - p['state'] = 'present' - elif p['state'] in ['absent', 'removed']: - p['state'] = 'absent' + if p["state"] in ["present", "installed"]: + p["state"] = "present" + elif p["state"] in ["absent", "removed"]: + p["state"] = "absent" - if p['update_cache']: - update_cache(module, xbps_path, (p['name'] or p['upgrade'])) + if p["update_cache"]: + update_cache(module, xbps_path, (p["name"] or p["upgrade"])) - if p['upgrade']: + if p["upgrade"]: upgrade(module, xbps_path) - if p['name']: - pkgs = p['name'] + if p["name"]: + pkgs = p["name"] if module.check_mode: - check_packages(module, xbps_path, pkgs, p['state']) + check_packages(module, xbps_path, pkgs, p["state"]) - if p['state'] in ['present', 'latest']: - install_packages(module, xbps_path, p['state'], pkgs) - elif p['state'] == 'absent': + if p["state"] in ["present", "latest"]: + install_packages(module, xbps_path, p["state"], pkgs) + elif p["state"] == "absent": remove_packages(module, xbps_path, pkgs) diff --git a/plugins/modules/xcc_redfish_command.py b/plugins/modules/xcc_redfish_command.py index aa20d5f6a54..ee09b4f3ebd 100644 --- a/plugins/modules/xcc_redfish_command.py +++ b/plugins/modules/xcc_redfish_command.py @@ -303,376 +303,368 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import
to_native -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC +from ansible_collections.community.general.plugins.module_utils.redfish_utils import ( + RedfishUtils, + REDFISH_COMMON_ARGUMENT_SPEC, +) class XCCRedfishUtils(RedfishUtils): @staticmethod - def _find_empty_virt_media_slot(resources, media_types, - media_match_strict=True): + def _find_empty_virt_media_slot(resources, media_types, media_match_strict=True): for uri, data in resources.items(): # check MediaTypes - if 'MediaTypes' in data and media_types: - if not set(media_types).intersection(set(data['MediaTypes'])): + if "MediaTypes" in data and media_types: + if not set(media_types).intersection(set(data["MediaTypes"])): continue else: if media_match_strict: continue - if 'RDOC' in uri: + if "RDOC" in uri: continue - if 'Remote' in uri: + if "Remote" in uri: continue # if ejected, 'Inserted' should be False and 'ImageName' cleared - if (not data.get('Inserted', False) and - not data.get('ImageName')): + if not data.get("Inserted", False) and not data.get("ImageName"): return uri, data return None, None def virtual_media_eject_one(self, image_url): # read the VirtualMedia resources from systems response = self.get_request(self.root_uri + self.systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'VirtualMedia' not in data: + data = response["data"] + if "VirtualMedia" not in data: # read the VirtualMedia resources from manager response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'VirtualMedia' not in data: - return {'ret': False, 'msg': "VirtualMedia resource not found"} + data = response["data"] + if "VirtualMedia" not in data: + return {"ret": False, "msg": "VirtualMedia resource not found"} virt_media_uri = data["VirtualMedia"]["@odata.id"] response = self.get_request(self.root_uri + virt_media_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] virt_media_list = [] - for member in data['Members']: - virt_media_list.append(member['@odata.id']) + for member in data["Members"]: + virt_media_list.append(member["@odata.id"]) resources, headers = self._read_virt_media_resources(virt_media_list) # find the VirtualMedia resource to eject uri, data, eject = self._find_virt_media_to_eject(resources, image_url) if uri and eject: - if ('Actions' not in data or - '#VirtualMedia.EjectMedia' not in data['Actions']): + if "Actions" not in data or "#VirtualMedia.EjectMedia" not in data["Actions"]: # try to eject via PATCH if no EjectMedia action found h = headers[uri] - if 'allow' in h: - methods = [m.strip() for m in h.get('allow').split(',')] - if 'PATCH' not in methods: + if "allow" in h: + methods = [m.strip() for m in h.get("allow").split(",")] + if "PATCH" not in methods: # if Allow header present and PATCH missing, return error - return {'ret': False, - 'msg': "#VirtualMedia.EjectMedia action not found and PATCH not allowed"} + return {"ret": False, "msg": "#VirtualMedia.EjectMedia action not found and PATCH not allowed"} return self.virtual_media_eject_via_patch(uri) else: # POST to the EjectMedia Action - action = data['Actions']['#VirtualMedia.EjectMedia'] - if 'target' not in action: - return {'ret': False, - 'msg': "target URI property missing from Action " - "#VirtualMedia.EjectMedia"} - 
action_uri = action['target'] + action = data["Actions"]["#VirtualMedia.EjectMedia"] + if "target" not in action: + return {"ret": False, "msg": "target URI property missing from Action #VirtualMedia.EjectMedia"} + action_uri = action["target"] # empty payload for Eject action payload = {} # POST to action - response = self.post_request(self.root_uri + action_uri, - payload) - if response['ret'] is False: + response = self.post_request(self.root_uri + action_uri, payload) + if response["ret"] is False: return response - return {'ret': True, 'changed': True, - 'msg': "VirtualMedia ejected"} + return {"ret": True, "changed": True, "msg": "VirtualMedia ejected"} elif uri and not eject: # already ejected: return success but changed=False - return {'ret': True, 'changed': False, - 'msg': f"VirtualMedia image '{image_url}' already ejected"} + return {"ret": True, "changed": False, "msg": f"VirtualMedia image '{image_url}' already ejected"} else: # return failure (no resources matching image_url found) - return {'ret': False, 'changed': False, - 'msg': f"No VirtualMedia resource found with image '{image_url}' inserted"} + return { + "ret": False, + "changed": False, + "msg": f"No VirtualMedia resource found with image '{image_url}' inserted", + } def virtual_media_eject(self, options): if options: - image_url = options.get('image_url') + image_url = options.get("image_url") if image_url: # eject specified one media return self.virtual_media_eject_one(image_url) # eject all inserted media when no image_url specified # read the VirtualMedia resources from systems response = self.get_request(self.root_uri + self.systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'VirtualMedia' not in data: + data = response["data"] + if "VirtualMedia" not in data: # read the VirtualMedia resources from manager response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'VirtualMedia' not in data: - return {'ret': False, 'msg': "VirtualMedia resource not found"} + data = response["data"] + if "VirtualMedia" not in data: + return {"ret": False, "msg": "VirtualMedia resource not found"} # read all the VirtualMedia resources virt_media_uri = data["VirtualMedia"]["@odata.id"] response = self.get_request(self.root_uri + virt_media_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] virt_media_list = [] - for member in data['Members']: - virt_media_list.append(member['@odata.id']) + for member in data["Members"]: + virt_media_list.append(member["@odata.id"]) resources, headers = self._read_virt_media_resources(virt_media_list) # eject all inserted media one by one ejected_media_list = [] for uri, data in resources.items(): - if data.get('Image') and data.get('Inserted', True): - returndict = self.virtual_media_eject_one(data.get('Image')) - if not returndict['ret']: + if data.get("Image") and data.get("Inserted", True): + returndict = self.virtual_media_eject_one(data.get("Image")) + if not returndict["ret"]: return returndict - ejected_media_list.append(data.get('Image')) + ejected_media_list.append(data.get("Image")) if len(ejected_media_list) == 0: # no media inserted: return success but changed=False - return {'ret': True, 'changed': False, - 'msg': "No VirtualMedia image inserted"} + return {"ret": True, "changed": False, "msg": "No VirtualMedia image 
inserted"} else: - return {'ret': True, 'changed': True, - 'msg': f"VirtualMedia {ejected_media_list!s} ejected"} + return {"ret": True, "changed": True, "msg": f"VirtualMedia {ejected_media_list!s} ejected"} def virtual_media_insert(self, options): param_map = { - 'Inserted': 'inserted', - 'WriteProtected': 'write_protected', - 'UserName': 'username', - 'Password': 'password', - 'TransferProtocolType': 'transfer_protocol_type', - 'TransferMethod': 'transfer_method' + "Inserted": "inserted", + "WriteProtected": "write_protected", + "UserName": "username", + "Password": "password", + "TransferProtocolType": "transfer_protocol_type", + "TransferMethod": "transfer_method", } - image_url = options.get('image_url') + image_url = options.get("image_url") if not image_url: - return {'ret': False, - 'msg': "image_url option required for VirtualMediaInsert"} - media_types = options.get('media_types') + return {"ret": False, "msg": "image_url option required for VirtualMediaInsert"} + media_types = options.get("media_types") # read the VirtualMedia resources from systems response = self.get_request(self.root_uri + self.systems_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'VirtualMedia' not in data: + data = response["data"] + if "VirtualMedia" not in data: # read the VirtualMedia resources from manager response = self.get_request(self.root_uri + self.manager_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - if 'VirtualMedia' not in data: - return {'ret': False, 'msg': "VirtualMedia resource not found"} + data = response["data"] + if "VirtualMedia" not in data: + return {"ret": False, "msg": "VirtualMedia resource not found"} virt_media_uri = data["VirtualMedia"]["@odata.id"] response = self.get_request(self.root_uri + virt_media_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] virt_media_list = [] - for member in data['Members']: - virt_media_list.append(member['@odata.id']) + for member in data["Members"]: + virt_media_list.append(member["@odata.id"]) resources, headers = self._read_virt_media_resources(virt_media_list) # see if image already inserted; if so, nothing to do if self._virt_media_image_inserted(resources, image_url): - return {'ret': True, 'changed': False, - 'msg': f"VirtualMedia '{image_url}' already inserted"} + return {"ret": True, "changed": False, "msg": f"VirtualMedia '{image_url}' already inserted"} # find an empty slot to insert the media # try first with strict media_type matching - uri, data = self._find_empty_virt_media_slot( - resources, media_types, media_match_strict=True) + uri, data = self._find_empty_virt_media_slot(resources, media_types, media_match_strict=True) if not uri: # if not found, try without strict media_type matching - uri, data = self._find_empty_virt_media_slot( - resources, media_types, media_match_strict=False) + uri, data = self._find_empty_virt_media_slot(resources, media_types, media_match_strict=False) if not uri: - return {'ret': False, - 'msg': f'Unable to find an available VirtualMedia resource {f"supporting {media_types}" if media_types else ""}'} + return { + "ret": False, + "msg": f"Unable to find an available VirtualMedia resource {f'supporting {media_types}' if media_types else ''}", + } # confirm InsertMedia action found - if ('Actions' not in data or - '#VirtualMedia.InsertMedia' not in data['Actions']): + if "Actions" not in 
data or "#VirtualMedia.InsertMedia" not in data["Actions"]: # try to insert via PATCH if no InsertMedia action found h = headers[uri] - if 'allow' in h: - methods = [m.strip() for m in h.get('allow').split(',')] - if 'PATCH' not in methods: + if "allow" in h: + methods = [m.strip() for m in h.get("allow").split(",")] + if "PATCH" not in methods: # if Allow header present and PATCH missing, return error - return {'ret': False, - 'msg': "#VirtualMedia.InsertMedia action not found and PATCH not allowed"} - return self.virtual_media_insert_via_patch(options, param_map, - uri, data) + return {"ret": False, "msg": "#VirtualMedia.InsertMedia action not found and PATCH not allowed"} + return self.virtual_media_insert_via_patch(options, param_map, uri, data) # get the action property - action = data['Actions']['#VirtualMedia.InsertMedia'] - if 'target' not in action: - return {'ret': False, - 'msg': "target URI missing from Action " - "#VirtualMedia.InsertMedia"} - action_uri = action['target'] + action = data["Actions"]["#VirtualMedia.InsertMedia"] + if "target" not in action: + return {"ret": False, "msg": "target URI missing from Action #VirtualMedia.InsertMedia"} + action_uri = action["target"] # get ActionInfo or AllowableValues ai = self._get_all_action_info_values(action) # construct payload payload = self._insert_virt_media_payload(options, param_map, data, ai) # POST to action response = self.post_request(self.root_uri + action_uri, payload) - if response['ret'] is False: + if response["ret"] is False: return response - return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"} + return {"ret": True, "changed": True, "msg": "VirtualMedia inserted"} def raw_get_resource(self, resource_uri): if resource_uri is None: - return {'ret': False, 'msg': "resource_uri is missing"} + return {"ret": False, "msg": "resource_uri is missing"} response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] - return {'ret': True, 'data': data} + data = response["data"] + return {"ret": True, "data": data} def raw_get_collection_resource(self, resource_uri): if resource_uri is None: - return {'ret': False, 'msg': "resource_uri is missing"} + return {"ret": False, "msg": "resource_uri is missing"} response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: + if response["ret"] is False: return response - if 'Members' not in response['data']: - return {'ret': False, 'msg': "Specified resource_uri doesn't have Members property"} - member_list = [i['@odata.id'] for i in response['data'].get('Members', [])] + if "Members" not in response["data"]: + return {"ret": False, "msg": "Specified resource_uri doesn't have Members property"} + member_list = [i["@odata.id"] for i in response["data"].get("Members", [])] # get member resource one by one data_list = [] for member_uri in member_list: uri = self.root_uri + member_uri response = self.get_request(uri) - if response['ret'] is False: + if response["ret"] is False: return response - data = response['data'] + data = response["data"] data_list.append(data) - return {'ret': True, 'data_list': data_list} + return {"ret": True, "data_list": data_list} def raw_patch_resource(self, resource_uri, request_body): if resource_uri is None: - return {'ret': False, 'msg': "resource_uri is missing"} + return {"ret": False, "msg": "resource_uri is missing"} if request_body is None: - return {'ret': False, 'msg': "request_body is missing"} + 
return {"ret": False, "msg": "request_body is missing"} # check whether resource_uri existing or not response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: + if response["ret"] is False: return response - original_etag = response['data']['@odata.etag'] + original_etag = response["data"]["@odata.etag"] # check validity of keys in request_body - data = response['data'] + data = response["data"] for key in request_body.keys(): if key not in data: - return {'ret': False, 'msg': f"Key {key} not found. Supported key list: {data.keys()}"} + return {"ret": False, "msg": f"Key {key} not found. Supported key list: {data.keys()}"} # perform patch response = self.patch_request(self.root_uri + resource_uri, request_body) - if response['ret'] is False: + if response["ret"] is False: return response # check whether changed or not - current_etag = '' - if 'data' in response and '@odata.etag' in response['data']: - current_etag = response['data']['@odata.etag'] + current_etag = "" + if "data" in response and "@odata.etag" in response["data"]: + current_etag = response["data"]["@odata.etag"] if current_etag != original_etag: - return {'ret': True, 'changed': True} + return {"ret": True, "changed": True} else: - return {'ret': True, 'changed': False} + return {"ret": True, "changed": False} def raw_post_resource(self, resource_uri, request_body): if resource_uri is None: - return {'ret': False, 'msg': "resource_uri is missing"} + return {"ret": False, "msg": "resource_uri is missing"} resource_uri_has_actions = True - if '/Actions/' not in resource_uri: + if "/Actions/" not in resource_uri: resource_uri_has_actions = False if request_body is None: - return {'ret': False, 'msg': "request_body is missing"} + return {"ret": False, "msg": "request_body is missing"} # get action base uri data for further checking - action_base_uri = resource_uri.split('/Actions/')[0] + action_base_uri = resource_uri.split("/Actions/")[0] response = self.get_request(self.root_uri + action_base_uri) - if response['ret'] is False: + if response["ret"] is False: return response - if 'Actions' not in response['data']: + if "Actions" not in response["data"]: if resource_uri_has_actions: - return {'ret': False, 'msg': f"Actions property not found in {action_base_uri}"} + return {"ret": False, "msg": f"Actions property not found in {action_base_uri}"} else: - response['data']['Actions'] = {} + response["data"]["Actions"] = {} # check resouce_uri with target uri found in action base uri data action_found = False action_info_uri = None action_target_uri_list = [] - for key in response['data']['Actions'].keys(): + for key in response["data"]["Actions"].keys(): if action_found: break - if not key.startswith('#'): + if not key.startswith("#"): continue - if 'target' in response['data']['Actions'][key]: - if resource_uri == response['data']['Actions'][key]['target']: + if "target" in response["data"]["Actions"][key]: + if resource_uri == response["data"]["Actions"][key]["target"]: action_found = True - if '@Redfish.ActionInfo' in response['data']['Actions'][key]: - action_info_uri = response['data']['Actions'][key]['@Redfish.ActionInfo'] + if "@Redfish.ActionInfo" in response["data"]["Actions"][key]: + action_info_uri = response["data"]["Actions"][key]["@Redfish.ActionInfo"] else: - action_target_uri_list.append(response['data']['Actions'][key]['target']) - if not action_found and 'Oem' in response['data']['Actions']: - for key in response['data']['Actions']['Oem'].keys(): + 
action_target_uri_list.append(response["data"]["Actions"][key]["target"]) + if not action_found and "Oem" in response["data"]["Actions"]: + for key in response["data"]["Actions"]["Oem"].keys(): if action_found: break - if not key.startswith('#'): + if not key.startswith("#"): continue - if 'target' in response['data']['Actions']['Oem'][key]: - if resource_uri == response['data']['Actions']['Oem'][key]['target']: + if "target" in response["data"]["Actions"]["Oem"][key]: + if resource_uri == response["data"]["Actions"]["Oem"][key]["target"]: action_found = True - if '@Redfish.ActionInfo' in response['data']['Actions']['Oem'][key]: - action_info_uri = response['data']['Actions']['Oem'][key]['@Redfish.ActionInfo'] + if "@Redfish.ActionInfo" in response["data"]["Actions"]["Oem"][key]: + action_info_uri = response["data"]["Actions"]["Oem"][key]["@Redfish.ActionInfo"] else: - action_target_uri_list.append(response['data']['Actions']['Oem'][key]['target']) + action_target_uri_list.append(response["data"]["Actions"]["Oem"][key]["target"]) if not action_found and resource_uri_has_actions: - return {'ret': False, - 'msg': (f'Specified resource_uri is not a supported action target uri, please specify a supported target uri instead. ' - f'Supported uri: {action_target_uri_list}')} + return { + "ret": False, + "msg": ( + f"Specified resource_uri is not a supported action target uri, please specify a supported target uri instead. " + f"Supported uri: {action_target_uri_list}" + ), + } # check request_body with parameter name defined by @Redfish.ActionInfo if action_info_uri is not None: response = self.get_request(self.root_uri + action_info_uri) - if response['ret'] is False: + if response["ret"] is False: return response for key in request_body.keys(): key_found = False - for para in response['data']['Parameters']: - if key == para['Name']: + for para in response["data"]["Parameters"]: + if key == para["Name"]: key_found = True break if not key_found: - return {'ret': False, - 'msg': (f"Invalid property {key} found in request_body. " - f"Please refer to @Redfish.ActionInfo Parameters: {response['data']['Parameters']}")} + return { + "ret": False, + "msg": ( + f"Invalid property {key} found in request_body. 
" + f"Please refer to @Redfish.ActionInfo Parameters: {response['data']['Parameters']}" + ), + } # perform post response = self.post_request(self.root_uri + resource_uri, request_body) - if response['ret'] is False: + if response["ret"] is False: return response - return {'ret': True, 'changed': True} + return {"ret": True, "changed": True} # More will be added as module features are expanded CATEGORY_COMMANDS_ALL = { - "Manager": ["VirtualMediaInsert", - "VirtualMediaEject"], - "Raw": ["GetResource", - "GetCollectionResource", - "PatchResource", - "PostResource"] + "Manager": ["VirtualMediaInsert", "VirtualMediaEject"], + "Raw": ["GetResource", "GetCollectionResource", "PatchResource", "PostResource"], } @@ -680,68 +672,66 @@ def main(): result = {} argument_spec = dict( category=dict(required=True), - command=dict(required=True, type='list', elements='str'), + command=dict(required=True, type="list", elements="str"), baseuri=dict(required=True), username=dict(), password=dict(no_log=True), auth_token=dict(no_log=True), - timeout=dict(type='int', default=10), + timeout=dict(type="int", default=10), resource_id=dict(), virtual_media=dict( - type='dict', + type="dict", options=dict( - media_types=dict(type='list', elements='str', default=[]), + media_types=dict(type="list", elements="str", default=[]), image_url=dict(), - inserted=dict(type='bool', default=True), - write_protected=dict(type='bool', default=True), + inserted=dict(type="bool", default=True), + write_protected=dict(type="bool", default=True), username=dict(), password=dict(no_log=True), transfer_protocol_type=dict(), transfer_method=dict(), - ) + ), ), resource_uri=dict(), request_body=dict( - type='dict', + type="dict", ), ) argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( argument_spec, required_together=[ - ('username', 'password'), + ("username", "password"), ], required_one_of=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], mutually_exclusive=[ - ('username', 'auth_token'), + ("username", "auth_token"), ], - supports_check_mode=False + supports_check_mode=False, ) - category = module.params['category'] - command_list = module.params['command'] + category = module.params["category"] + command_list = module.params["command"] # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} + creds = {"user": module.params["username"], "pswd": module.params["password"], "token": module.params["auth_token"]} # timeout - timeout = module.params['timeout'] + timeout = module.params["timeout"] # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] + resource_id = module.params["resource_id"] # VirtualMedia options - virtual_media = module.params['virtual_media'] + virtual_media = module.params["virtual_media"] # resource_uri - resource_uri = module.params['resource_uri'] + resource_uri = module.params["resource_uri"] # request_body - request_body = module.params['request_body'] + request_body = module.params["request_body"] # Build root URI root_uri = f"https://{module.params['baseuri']}" @@ -749,52 +739,56 @@ def main(): # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native(f"Invalid Category '{category}'. Valid Categories = {CATEGORY_COMMANDS_ALL.keys()}")) + module.fail_json( + msg=to_native(f"Invalid Category '{category}'. 
Valid Categories = {CATEGORY_COMMANDS_ALL.keys()}") + ) # Check that all commands are valid for cmd in command_list: # Fail if even one command given is invalid if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}")) + module.fail_json( + msg=to_native(f"Invalid Command '{cmd}'. Valid Commands = {CATEGORY_COMMANDS_ALL[category]}") + ) # Organize by Categories / Commands if category == "Manager": # For virtual media resources located on the Systems service result = rf_utils._find_systems_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) # For virtual media resources located on the Managers service result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) + if result["ret"] is False: + module.fail_json(msg=to_native(result["msg"])) for command in command_list: - if command == 'VirtualMediaInsert': + if command == "VirtualMediaInsert": result = rf_utils.virtual_media_insert(virtual_media) - elif command == 'VirtualMediaEject': + elif command == "VirtualMediaEject": result = rf_utils.virtual_media_eject(virtual_media) elif category == "Raw": for command in command_list: - if command == 'GetResource': + if command == "GetResource": result = rf_utils.raw_get_resource(resource_uri) - elif command == 'GetCollectionResource': + elif command == "GetCollectionResource": result = rf_utils.raw_get_collection_resource(resource_uri) - elif command == 'PatchResource': + elif command == "PatchResource": result = rf_utils.raw_patch_resource(resource_uri, request_body) - elif command == 'PostResource': + elif command == "PostResource": result = rf_utils.raw_post_resource(resource_uri, request_body) # Return data back or fail with proper message - if result['ret'] is True: - if command == 'GetResource' or command == 'GetCollectionResource': + if result["ret"] is True: + if command == "GetResource" or command == "GetCollectionResource": module.exit_json(redfish_facts=result) else: - changed = result.get('changed', True) - msg = result.get('msg', 'Action was successful') + changed = result.get("changed", True) + msg = result.get("msg", "Action was successful") module.exit_json(changed=changed, msg=msg) else: - module.fail_json(msg=to_native(result['msg'])) + module.fail_json(msg=to_native(result["msg"])) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/xdg_mime.py b/plugins/modules/xdg_mime.py index 98f1ef9aecf..95d83677852 100644 --- a/plugins/modules/xdg_mime.py +++ b/plugins/modules/xdg_mime.py @@ -91,12 +91,12 @@ class XdgMime(ModuleHelper): - output_params = ['handler'] + output_params = ["handler"] module = dict( argument_spec=dict( - mime_types=dict(type='list', elements='str', required=True), - handler=dict(type='str', required=True), + mime_types=dict(type="list", elements="str", required=True), + handler=dict(type="str", required=True), ), supports_check_mode=True, ) @@ -115,17 +115,19 @@ def __init_module__(self): for mime in self.vars.mime_types: handler_value = xdg_mime_get(self.runner, mime) if not handler_value: - handler_value = '' + handler_value = "" self.vars.current_handlers.append(handler_value) def __run__(self): - check_mode_return = (0, 'Module executed in check mode', '') + check_mode_return = (0, "Module executed in check mode", "") if any(h != self.vars.handler for h in
self.vars.current_handlers): self.changed = True if self.has_changed(): - with self.runner.context(args_order="default handler mime_types", check_mode_skip=True, check_mode_return=check_mode_return) as ctx: + with self.runner.context( + args_order="default handler mime_types", check_mode_skip=True, check_mode_return=check_mode_return + ) as ctx: rc, out, err = ctx.run() self.vars.stdout = out self.vars.stderr = err @@ -136,5 +138,5 @@ def main(): XdgMime.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/xenserver_facts.py b/plugins/modules/xenserver_facts.py index 36fe92538c8..20253c77a6b 100644 --- a/plugins/modules/xenserver_facts.py +++ b/plugins/modules/xenserver_facts.py @@ -51,6 +51,7 @@ HAVE_XENAPI = False try: import XenAPI + HAVE_XENAPI = True except ImportError: pass @@ -62,11 +63,11 @@ class XenServerFacts: def __init__(self): self.codes = { - '5.5.0': 'george', - '5.6.100': 'oxford', - '6.0.0': 'boston', - '6.1.0': 'tampa', - '6.2.0': 'clearwater' + "5.5.0": "george", + "5.6.100": "oxford", + "6.0.0": "boston", + "6.1.0": "tampa", + "6.2.0": "clearwater", } @property @@ -86,38 +87,38 @@ def codename(self): def get_xenapi_session(): session = XenAPI.xapi_local() - session.xenapi.login_with_password('', '') + session.xenapi.login_with_password("", "") return session def get_networks(session): recs = session.xenapi.network.get_all_records() - networks = change_keys(recs, key='name_label') + networks = change_keys(recs, key="name_label") return networks def get_pifs(session): recs = session.xenapi.PIF.get_all_records() - pifs = change_keys(recs, key='uuid') + pifs = change_keys(recs, key="uuid") xs_pifs = {} devicenums = range(0, 7) for pif in pifs.values(): for eth in devicenums: interface_name = f"eth{eth}" - bond_name = interface_name.replace('eth', 'bond') - if pif['device'] == interface_name: + bond_name = interface_name.replace("eth", "bond") + if pif["device"] == interface_name: xs_pifs[interface_name] = pif - elif pif['device'] == bond_name: + elif pif["device"] == bond_name: xs_pifs[bond_name] = pif return xs_pifs def get_vlans(session): recs = session.xenapi.VLAN.get_all_records() - return change_keys(recs, key='tag') + return change_keys(recs, key="tag") -def change_keys(recs, key='uuid', filter_func=None): +def change_keys(recs, key="uuid", filter_func=None): """ Take a xapi dict, and make the keys the value of recs[ref][key]. 
@@ -138,7 +139,7 @@ def change_keys(recs, key='uuid', filter_func=None): if hasattr(param_value, "value"): rec[param_name] = param_value.value new_recs[rec[key]] = rec - new_recs[rec[key]]['ref'] = ref + new_recs[rec[key]]["ref"] = ref return new_recs @@ -154,7 +155,7 @@ def get_vms(session): recs = session.xenapi.VM.get_all_records() if not recs: return None - vms = change_keys(recs, key='name_label') + vms = change_keys(recs, key="name_label") return vms @@ -162,7 +163,7 @@ def get_srs(session): recs = session.xenapi.SR.get_all_records() if not recs: return None - srs = change_keys(recs, key='name_label') + srs = change_keys(recs, key="name_label") return srs @@ -178,10 +179,7 @@ def main(): except XenAPI.Failure as e: module.fail_json(msg=str(e)) - data = { - 'xenserver_version': obj.version, - 'xenserver_codename': obj.codename - } + data = {"xenserver_version": obj.version, "xenserver_codename": obj.codename} xs_networks = get_networks(session) xs_pifs = get_pifs(session) @@ -190,20 +188,20 @@ def main(): xs_srs = get_srs(session) if xs_vlans: - data['xs_vlans'] = xs_vlans + data["xs_vlans"] = xs_vlans if xs_pifs: - data['xs_pifs'] = xs_pifs + data["xs_pifs"] = xs_pifs if xs_networks: - data['xs_networks'] = xs_networks + data["xs_networks"] = xs_networks if xs_vms: - data['xs_vms'] = xs_vms + data["xs_vms"] = xs_vms if xs_srs: - data['xs_srs'] = xs_srs + data["xs_srs"] = xs_srs module.exit_json(ansible_facts=data) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/xenserver_guest.py b/plugins/modules/xenserver_guest.py index f7ad4baf54a..6e816e0577e 100644 --- a/plugins/modules/xenserver_guest.py +++ b/plugins/modules/xenserver_guest.py @@ -536,6 +536,7 @@ HAS_XENAPI = False try: import XenAPI + HAS_XENAPI = True except ImportError: pass @@ -543,11 +544,21 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.network import is_mac from ansible_collections.community.general.plugins.module_utils.xenserver import ( - xenserver_common_argument_spec, XenServerObject, get_object_ref, - gather_vm_params, gather_vm_facts, set_vm_power_state, - wait_for_vm_ip_address, is_valid_ip_addr, is_valid_ip_netmask, - is_valid_ip_prefix, ip_prefix_to_netmask, ip_netmask_to_prefix, - is_valid_ip6_addr, is_valid_ip6_prefix) + xenserver_common_argument_spec, + XenServerObject, + get_object_ref, + gather_vm_params, + gather_vm_facts, + set_vm_power_state, + wait_for_vm_ip_address, + is_valid_ip_addr, + is_valid_ip_netmask, + is_valid_ip_prefix, + ip_prefix_to_netmask, + ip_netmask_to_prefix, + is_valid_ip6_addr, + is_valid_ip6_prefix, +) class XenServerVM(XenServerObject): @@ -567,7 +578,14 @@ def __init__(self, module): """ super().__init__(module) - self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=False, msg_prefix="VM search: ") + self.vm_ref = get_object_ref( + self.module, + self.module.params["name"], + self.module.params["uuid"], + obj_type="VM", + fail=False, + msg_prefix="VM search: ", + ) self.gather_params() def exists(self): @@ -584,17 +602,21 @@ def gather_facts(self): def set_power_state(self, power_state): """Controls VM power state.""" - state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout']) + state_changed, current_state = set_vm_power_state( + self.module, self.vm_ref, power_state, self.module.params["state_change_timeout"] + ) # If state has changed, update vm_params. 
if state_changed: - self.vm_params['power_state'] = current_state.capitalize() + self.vm_params["power_state"] = current_state.capitalize() return state_changed def wait_for_ip_address(self): """Waits for VM to acquire an IP address.""" - self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout']) + self.vm_params["guest_metrics"] = wait_for_vm_ip_address( + self.module, self.vm_ref, self.module.params["state_change_timeout"] + ) def deploy(self): """Deploys new VM from template.""" @@ -603,37 +625,46 @@ def deploy(self): self.module.fail_json(msg="Called deploy on existing VM!") try: - templ_ref = get_object_ref(self.module, self.module.params['template'], self.module.params['template_uuid'], obj_type="template", fail=True, - msg_prefix="VM deploy: ") + templ_ref = get_object_ref( + self.module, + self.module.params["template"], + self.module.params["template_uuid"], + obj_type="template", + fail=True, + msg_prefix="VM deploy: ", + ) # Is this an existing running VM? - if self.xapi_session.xenapi.VM.get_power_state(templ_ref).lower() != 'halted': + if self.xapi_session.xenapi.VM.get_power_state(templ_ref).lower() != "halted": self.module.fail_json(msg="VM deploy: running VM cannot be used as a template!") # Find a SR we can use for VM.copy(). We use SR of the first disk # if specified or default SR if not specified. - disk_params_list = self.module.params['disks'] + disk_params_list = self.module.params["disks"] sr_ref = None if disk_params_list: disk_params = disk_params_list[0] - disk_sr_uuid = disk_params.get('sr_uuid') - disk_sr = disk_params.get('sr') + disk_sr_uuid = disk_params.get("sr_uuid") + disk_sr = disk_params.get("sr") if disk_sr_uuid is not None or disk_sr is not None: - sr_ref = get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True, - msg_prefix="VM deploy disks[0]: ") + sr_ref = get_object_ref( + self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True, msg_prefix="VM deploy disks[0]: " + ) if not sr_ref: if self.default_sr_ref != "OpaqueRef:NULL": sr_ref = self.default_sr_ref else: - self.module.fail_json(msg="VM deploy disks[0]: no default SR found! You must specify SR explicitly.") + self.module.fail_json( + msg="VM deploy disks[0]: no default SR found! You must specify SR explicitly." + ) # VM name could be an empty string which is bad. - if self.module.params['name'] is not None and not self.module.params['name']: + if self.module.params["name"] is not None and not self.module.params["name"]: self.module.fail_json(msg="VM deploy: VM name must not be an empty string!") # Support for Ansible check mode. @@ -642,10 +673,10 @@ def deploy(self): # Now we can instantiate VM. We use VM.clone for linked_clone and # VM.copy for non linked_clone. - if self.module.params['linked_clone']: - self.vm_ref = self.xapi_session.xenapi.VM.clone(templ_ref, self.module.params['name']) + if self.module.params["linked_clone"]: + self.vm_ref = self.xapi_session.xenapi.VM.clone(templ_ref, self.module.params["name"]) else: - self.vm_ref = self.xapi_session.xenapi.VM.copy(templ_ref, self.module.params['name'], sr_ref) + self.vm_ref = self.xapi_session.xenapi.VM.copy(templ_ref, self.module.params["name"], sr_ref) # Description is copied over from template so we reset it. self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, "") @@ -656,7 +687,7 @@ def deploy(self): # onward so we use an alternative way. 
             templ_other_config = self.xapi_session.xenapi.VM.get_other_config(templ_ref)

-            if "default_template" in templ_other_config and templ_other_config['default_template']:
+            if "default_template" in templ_other_config and templ_other_config["default_template"]:
                 # other_config of built-in XenServer templates have a key called
                 # 'disks' with the following content:
                 #   disks:
@@ -667,7 +698,7 @@ def deploy(self):
                 vm_other_config = self.xapi_session.xenapi.VM.get_other_config(self.vm_ref)

                 if "disks" in vm_other_config:
-                    del vm_other_config['disks']
+                    del vm_other_config["disks"]

                 self.xapi_session.xenapi.VM.set_other_config(self.vm_ref, vm_other_config)

@@ -681,7 +712,7 @@ def deploy(self):
             self.reconfigure()

             # Power on VM if needed.
-            if self.module.params['state'] == "poweredon":
+            if self.module.params["state"] == "poweredon":
                 self.set_power_state("poweredon")

         except XenAPI.Failure as f:
@@ -699,92 +730,129 @@ def reconfigure(self):

         config_changes = self.get_changes()

-        vm_power_state_save = self.vm_params['power_state'].lower()
+        vm_power_state_save = self.vm_params["power_state"].lower()

-        if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and not self.module.params['force']:
-            self.module.fail_json(msg="VM reconfigure: VM has to be in powered off state to reconfigure but force was not specified!")
+        if "need_poweredoff" in config_changes and vm_power_state_save != "halted" and not self.module.params["force"]:
+            self.module.fail_json(
+                msg="VM reconfigure: VM has to be in powered off state to reconfigure but force was not specified!"
+            )

         # Support for Ansible check mode.
         if self.module.check_mode:
             return config_changes

-        if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and self.module.params['force']:
+        if "need_poweredoff" in config_changes and vm_power_state_save != "halted" and self.module.params["force"]:
             self.set_power_state("shutdownguest")

         try:
             for change in config_changes:
                 if isinstance(change, str):
                     if change == "name":
-                        self.xapi_session.xenapi.VM.set_name_label(self.vm_ref, self.module.params['name'])
+                        self.xapi_session.xenapi.VM.set_name_label(self.vm_ref, self.module.params["name"])
                     elif change == "name_desc":
-                        self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, self.module.params['name_desc'])
+                        self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, self.module.params["name_desc"])
                     elif change == "folder":
-                        self.xapi_session.xenapi.VM.remove_from_other_config(self.vm_ref, 'folder')
+                        self.xapi_session.xenapi.VM.remove_from_other_config(self.vm_ref, "folder")

-                        if self.module.params['folder']:
-                            self.xapi_session.xenapi.VM.add_to_other_config(self.vm_ref, 'folder', self.module.params['folder'])
+                        if self.module.params["folder"]:
+                            self.xapi_session.xenapi.VM.add_to_other_config(
+                                self.vm_ref, "folder", self.module.params["folder"]
+                            )
                     elif change == "home_server":
-                        if self.module.params['home_server']:
-                            host_ref = self.xapi_session.xenapi.host.get_by_name_label(self.module.params['home_server'])[0]
+                        if self.module.params["home_server"]:
+                            host_ref = self.xapi_session.xenapi.host.get_by_name_label(
+                                self.module.params["home_server"]
+                            )[0]
                         else:
                             host_ref = "OpaqueRef:NULL"

                         self.xapi_session.xenapi.VM.set_affinity(self.vm_ref, host_ref)
                 elif isinstance(change, dict):
-                    if change.get('hardware'):
-                        for hardware_change in change['hardware']:
+                    if change.get("hardware"):
+                        for hardware_change in change["hardware"]:
                             if hardware_change == "num_cpus":
-                                num_cpus = int(self.module.params['hardware']['num_cpus'])
+                                num_cpus = int(self.module.params["hardware"]["num_cpus"])

-                                if num_cpus < int(self.vm_params['VCPUs_at_startup']):
+                                if num_cpus < int(self.vm_params["VCPUs_at_startup"]):
                                     self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
                                     self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
                                 else:
                                     self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
                                     self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
                             elif hardware_change == "num_cpu_cores_per_socket":
-                                self.xapi_session.xenapi.VM.remove_from_platform(self.vm_ref, 'cores-per-socket')
-                                num_cpu_cores_per_socket = int(self.module.params['hardware']['num_cpu_cores_per_socket'])
+                                self.xapi_session.xenapi.VM.remove_from_platform(self.vm_ref, "cores-per-socket")
+                                num_cpu_cores_per_socket = int(
+                                    self.module.params["hardware"]["num_cpu_cores_per_socket"]
+                                )

                                 if num_cpu_cores_per_socket > 1:
-                                    self.xapi_session.xenapi.VM.add_to_platform(self.vm_ref, 'cores-per-socket', str(num_cpu_cores_per_socket))
+                                    self.xapi_session.xenapi.VM.add_to_platform(
+                                        self.vm_ref, "cores-per-socket", str(num_cpu_cores_per_socket)
+                                    )
                             elif hardware_change == "memory_mb":
-                                memory_b = str(int(self.module.params['hardware']['memory_mb']) * 1048576)
-                                vm_memory_static_min_b = str(min(int(memory_b), int(self.vm_params['memory_static_min'])))
-
-                                self.xapi_session.xenapi.VM.set_memory_limits(self.vm_ref, vm_memory_static_min_b, memory_b, memory_b, memory_b)
-                    elif change.get('disks_changed'):
-                        vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+                                memory_b = str(int(self.module.params["hardware"]["memory_mb"]) * 1048576)
+                                vm_memory_static_min_b = str(
+                                    min(int(memory_b), int(self.vm_params["memory_static_min"]))
+                                )
+
+                                self.xapi_session.xenapi.VM.set_memory_limits(
+                                    self.vm_ref, vm_memory_static_min_b, memory_b, memory_b, memory_b
+                                )
+                    elif change.get("disks_changed"):
+                        vm_disk_params_list = [
+                            disk_params for disk_params in self.vm_params["VBDs"] if disk_params["type"] == "Disk"
+                        ]
                         position = 0

-                        for disk_change_list in change['disks_changed']:
+                        for disk_change_list in change["disks_changed"]:
                             for disk_change in disk_change_list:
-                                vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params_list[position]['VDI']['uuid'])
+                                vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(
+                                    vm_disk_params_list[position]["VDI"]["uuid"]
+                                )

                                 if disk_change == "name":
-                                    self.xapi_session.xenapi.VDI.set_name_label(vdi_ref, self.module.params['disks'][position]['name'])
+                                    self.xapi_session.xenapi.VDI.set_name_label(
+                                        vdi_ref, self.module.params["disks"][position]["name"]
+                                    )
                                 elif disk_change == "name_desc":
-                                    self.xapi_session.xenapi.VDI.set_name_description(vdi_ref, self.module.params['disks'][position]['name_desc'])
+                                    self.xapi_session.xenapi.VDI.set_name_description(
+                                        vdi_ref, self.module.params["disks"][position]["name_desc"]
+                                    )
                                 elif disk_change == "size":
-                                    self.xapi_session.xenapi.VDI.resize(vdi_ref, str(self.get_normalized_disk_size(self.module.params['disks'][position],
-                                                                                                                   f"VM reconfigure disks[{position}]: ")))
+                                    self.xapi_session.xenapi.VDI.resize(
+                                        vdi_ref,
+                                        str(
+                                            self.get_normalized_disk_size(
+                                                self.module.params["disks"][position],
+                                                f"VM reconfigure disks[{position}]: ",
+                                            )
+                                        ),
+                                    )

                             position += 1
-                    elif change.get('disks_new'):
-                        for position, disk_userdevice in change['disks_new']:
-                            disk_params = self.module.params['disks'][position]
-
-                            disk_name = disk_params['name'] if disk_params.get('name') else f"{self.vm_params['name_label']}-{position}"
-                            disk_name_desc = disk_params['name_desc'] if disk_params.get('name_desc') else ""
-
-                            if disk_params.get('sr_uuid'):
-                                sr_ref = self.xapi_session.xenapi.SR.get_by_uuid(disk_params['sr_uuid'])
-                            elif disk_params.get('sr'):
-                                sr_ref = self.xapi_session.xenapi.SR.get_by_name_label(disk_params['sr'])[0]
+                    elif change.get("disks_new"):
+                        for position, disk_userdevice in change["disks_new"]:
+                            disk_params = self.module.params["disks"][position]
+
+                            disk_name = (
+                                disk_params["name"]
+                                if disk_params.get("name")
+                                else f"{self.vm_params['name_label']}-{position}"
+                            )
+                            disk_name_desc = disk_params["name_desc"] if disk_params.get("name_desc") else ""
+
+                            if disk_params.get("sr_uuid"):
+                                sr_ref = self.xapi_session.xenapi.SR.get_by_uuid(disk_params["sr_uuid"])
+                            elif disk_params.get("sr"):
+                                sr_ref = self.xapi_session.xenapi.SR.get_by_name_label(disk_params["sr"])[0]
                             else:
                                 sr_ref = self.default_sr_ref

-                            disk_size = str(self.get_normalized_disk_size(self.module.params['disks'][position], f"VM reconfigure disks[{position}]: "))
+                            disk_size = str(
+                                self.get_normalized_disk_size(
+                                    self.module.params["disks"][position], f"VM reconfigure disks[{position}]: "
+                                )
+                            )

                             new_disk_vdi = {
                                 "name_label": disk_name,
@@ -810,14 +878,16 @@ def reconfigure(self):
                                 "qos_algorithm_params": {},
                             }

-                            new_disk_vbd['VDI'] = self.xapi_session.xenapi.VDI.create(new_disk_vdi)
+                            new_disk_vbd["VDI"] = self.xapi_session.xenapi.VDI.create(new_disk_vdi)

                             vbd_ref_new = self.xapi_session.xenapi.VBD.create(new_disk_vbd)

-                            if self.vm_params['power_state'].lower() == "running":
+                            if self.vm_params["power_state"].lower() == "running":
                                 self.xapi_session.xenapi.VBD.plug(vbd_ref_new)

-                    elif change.get('cdrom'):
-                        vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
+                    elif change.get("cdrom"):
+                        vm_cdrom_params_list = [
+                            cdrom_params for cdrom_params in self.vm_params["VBDs"] if cdrom_params["type"] == "CD"
+                        ]

                         # If there is no CD present, we have to create one.
if not vm_cdrom_params_list: @@ -846,13 +916,13 @@ def reconfigure(self): cdrom_vbd_ref = self.xapi_session.xenapi.VBD.create(cdrom_vbd) else: - cdrom_vbd_ref = self.xapi_session.xenapi.VBD.get_by_uuid(vm_cdrom_params_list[0]['uuid']) + cdrom_vbd_ref = self.xapi_session.xenapi.VBD.get_by_uuid(vm_cdrom_params_list[0]["uuid"]) cdrom_is_empty = self.xapi_session.xenapi.VBD.get_empty(cdrom_vbd_ref) - for cdrom_change in change['cdrom']: + for cdrom_change in change["cdrom"]: if cdrom_change == "type": - cdrom_type = self.module.params['cdrom']['type'] + cdrom_type = self.module.params["cdrom"]["type"] if cdrom_type == "none" and not cdrom_is_empty: self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref) @@ -864,18 +934,22 @@ def reconfigure(self): if not cdrom_is_empty: self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref) - cdrom_vdi_ref = self.xapi_session.xenapi.VDI.get_by_name_label(self.module.params['cdrom']['iso_name'])[0] + cdrom_vdi_ref = self.xapi_session.xenapi.VDI.get_by_name_label( + self.module.params["cdrom"]["iso_name"] + )[0] self.xapi_session.xenapi.VBD.insert(cdrom_vbd_ref, cdrom_vdi_ref) - elif change.get('networks_changed'): + elif change.get("networks_changed"): position = 0 - for network_change_list in change['networks_changed']: + for network_change_list in change["networks_changed"]: if network_change_list: - vm_vif_params = self.vm_params['VIFs'][position] - network_params = self.module.params['networks'][position] + vm_vif_params = self.vm_params["VIFs"][position] + network_params = self.module.params["networks"][position] - vif_ref = self.xapi_session.xenapi.VIF.get_by_uuid(vm_vif_params['uuid']) - network_ref = self.xapi_session.xenapi.network.get_by_uuid(vm_vif_params['network']['uuid']) + vif_ref = self.xapi_session.xenapi.VIF.get_by_uuid(vm_vif_params["uuid"]) + network_ref = self.xapi_session.xenapi.network.get_by_uuid( + vm_vif_params["network"]["uuid"] + ) vif_recreated = False @@ -886,110 +960,114 @@ def reconfigure(self): # Copy all old parameters to new VIF record. 
                                 vif = {
-                                    "device": vm_vif_params['device'],
+                                    "device": vm_vif_params["device"],
                                     "network": network_ref,
-                                    "VM": vm_vif_params['VM'],
-                                    "MAC": vm_vif_params['MAC'],
-                                    "MTU": vm_vif_params['MTU'],
-                                    "other_config": vm_vif_params['other_config'],
-                                    "qos_algorithm_type": vm_vif_params['qos_algorithm_type'],
-                                    "qos_algorithm_params": vm_vif_params['qos_algorithm_params'],
-                                    "locking_mode": vm_vif_params['locking_mode'],
-                                    "ipv4_allowed": vm_vif_params['ipv4_allowed'],
-                                    "ipv6_allowed": vm_vif_params['ipv6_allowed'],
+                                    "VM": vm_vif_params["VM"],
+                                    "MAC": vm_vif_params["MAC"],
+                                    "MTU": vm_vif_params["MTU"],
+                                    "other_config": vm_vif_params["other_config"],
+                                    "qos_algorithm_type": vm_vif_params["qos_algorithm_type"],
+                                    "qos_algorithm_params": vm_vif_params["qos_algorithm_params"],
+                                    "locking_mode": vm_vif_params["locking_mode"],
+                                    "ipv4_allowed": vm_vif_params["ipv4_allowed"],
+                                    "ipv6_allowed": vm_vif_params["ipv6_allowed"],
                                 }

                                 if "name" in network_change_list:
-                                    network_ref_new = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
-                                    vif['network'] = network_ref_new
-                                    vif['MTU'] = self.xapi_session.xenapi.network.get_MTU(network_ref_new)
+                                    network_ref_new = self.xapi_session.xenapi.network.get_by_name_label(
+                                        network_params["name"]
+                                    )[0]
+                                    vif["network"] = network_ref_new
+                                    vif["MTU"] = self.xapi_session.xenapi.network.get_MTU(network_ref_new)

                                 if "mac" in network_change_list:
-                                    vif['MAC'] = network_params['mac'].lower()
+                                    vif["MAC"] = network_params["mac"].lower()

-                                if self.vm_params['power_state'].lower() == "running":
+                                if self.vm_params["power_state"].lower() == "running":
                                     self.xapi_session.xenapi.VIF.unplug(vif_ref)

                                 self.xapi_session.xenapi.VIF.destroy(vif_ref)
                                 vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)

-                                if self.vm_params['power_state'].lower() == "running":
+                                if self.vm_params["power_state"].lower() == "running":
                                     self.xapi_session.xenapi.VIF.plug(vif_ref_new)

                                 vif_ref = vif_ref_new
                                 vif_recreated = True

-                            if self.vm_params['customization_agent'] == "native":
+                            if self.vm_params["customization_agent"] == "native":
                                 vif_reconfigure_needed = False

                                 if "type" in network_change_list:
-                                    network_type = network_params['type'].capitalize()
+                                    network_type = network_params["type"].capitalize()
                                     vif_reconfigure_needed = True
                                 else:
-                                    network_type = vm_vif_params['ipv4_configuration_mode']
+                                    network_type = vm_vif_params["ipv4_configuration_mode"]

                                 if "ip" in network_change_list:
-                                    network_ip = network_params['ip']
+                                    network_ip = network_params["ip"]
                                     vif_reconfigure_needed = True
-                                elif vm_vif_params['ipv4_addresses']:
-                                    network_ip = vm_vif_params['ipv4_addresses'][0].split('/')[0]
+                                elif vm_vif_params["ipv4_addresses"]:
+                                    network_ip = vm_vif_params["ipv4_addresses"][0].split("/")[0]
                                 else:
                                     network_ip = ""

                                 if "prefix" in network_change_list:
                                     network_prefix = f"/{network_params['prefix']}"
                                     vif_reconfigure_needed = True
-                                elif vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
+                                elif vm_vif_params["ipv4_addresses"] and vm_vif_params["ipv4_addresses"][0]:
                                     network_prefix = f"/{vm_vif_params['ipv4_addresses'][0].split('/')[1]}"
                                 else:
                                     network_prefix = ""

                                 if "gateway" in network_change_list:
-                                    network_gateway = network_params['gateway']
+                                    network_gateway = network_params["gateway"]
                                     vif_reconfigure_needed = True
                                 else:
-                                    network_gateway = vm_vif_params['ipv4_gateway']
+                                    network_gateway = vm_vif_params["ipv4_gateway"]

                                 if vif_recreated or vif_reconfigure_needed:
-                                    self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref, network_type,
-                                                                                f"{network_ip}{network_prefix}", network_gateway)
+                                    self.xapi_session.xenapi.VIF.configure_ipv4(
+                                        vif_ref, network_type, f"{network_ip}{network_prefix}", network_gateway
+                                    )

                                 vif_reconfigure_needed = False

                                 if "type6" in network_change_list:
-                                    network_type6 = network_params['type6'].capitalize()
+                                    network_type6 = network_params["type6"].capitalize()
                                     vif_reconfigure_needed = True
                                 else:
-                                    network_type6 = vm_vif_params['ipv6_configuration_mode']
+                                    network_type6 = vm_vif_params["ipv6_configuration_mode"]

                                 if "ip6" in network_change_list:
-                                    network_ip6 = network_params['ip6']
+                                    network_ip6 = network_params["ip6"]
                                     vif_reconfigure_needed = True
-                                elif vm_vif_params['ipv6_addresses']:
-                                    network_ip6 = vm_vif_params['ipv6_addresses'][0].split('/')[0]
+                                elif vm_vif_params["ipv6_addresses"]:
+                                    network_ip6 = vm_vif_params["ipv6_addresses"][0].split("/")[0]
                                 else:
                                     network_ip6 = ""

                                 if "prefix6" in network_change_list:
                                     network_prefix6 = f"/{network_params['prefix6']}"
                                     vif_reconfigure_needed = True
-                                elif vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
+                                elif vm_vif_params["ipv6_addresses"] and vm_vif_params["ipv6_addresses"][0]:
                                     network_prefix6 = f"/{vm_vif_params['ipv6_addresses'][0].split('/')[1]}"
                                 else:
                                     network_prefix6 = ""

                                 if "gateway6" in network_change_list:
-                                    network_gateway6 = network_params['gateway6']
+                                    network_gateway6 = network_params["gateway6"]
                                     vif_reconfigure_needed = True
                                 else:
-                                    network_gateway6 = vm_vif_params['ipv6_gateway']
+                                    network_gateway6 = vm_vif_params["ipv6_gateway"]

                                 if vif_recreated or vif_reconfigure_needed:
-                                    self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref, network_type6,
-                                                                                f"{network_ip6}{network_prefix6}", network_gateway6)
+                                    self.xapi_session.xenapi.VIF.configure_ipv6(
+                                        vif_ref, network_type6, f"{network_ip6}{network_prefix6}", network_gateway6
+                                    )

-                            elif self.vm_params['customization_agent'] == "custom":
-                                vif_device = vm_vif_params['device']
+                            elif self.vm_params["customization_agent"] == "custom":
+                                vif_device = vm_vif_params["device"]

                                 # A user could have manually changed network
                                 # or mac e.g. through XenCenter and then also
@@ -1002,51 +1080,58 @@ def reconfigure(self):
                                 # Since we handle name and mac differently,
                                 # we have to remove them from
                                 # network_change_list.
-                                network_change_list_tmp = [net_chg for net_chg in network_change_list if net_chg not in ['name', 'mac']]
+                                network_change_list_tmp = [
+                                    net_chg for net_chg in network_change_list if net_chg not in ["name", "mac"]
+                                ]

-                                for network_change in network_change_list_tmp + ['name', 'mac']:
-                                    self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref,
-                                                                                          f"vm-data/networks/{vif_device}/{network_change}")
+                                for network_change in network_change_list_tmp + ["name", "mac"]:
+                                    self.xapi_session.xenapi.VM.remove_from_xenstore_data(
+                                        self.vm_ref, f"vm-data/networks/{vif_device}/{network_change}"
+                                    )

-                                if network_params.get('name'):
-                                    network_name = network_params['name']
+                                if network_params.get("name"):
+                                    network_name = network_params["name"]
                                 else:
-                                    network_name = vm_vif_params['network']['name_label']
+                                    network_name = vm_vif_params["network"]["name_label"]

-                                self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                                 f"vm-data/networks/{vif_device}/name", network_name)
+                                self.xapi_session.xenapi.VM.add_to_xenstore_data(
+                                    self.vm_ref, f"vm-data/networks/{vif_device}/name", network_name
+                                )

-                                if network_params.get('mac'):
-                                    network_mac = network_params['mac'].lower()
+                                if network_params.get("mac"):
+                                    network_mac = network_params["mac"].lower()
                                 else:
-                                    network_mac = vm_vif_params['MAC'].lower()
+                                    network_mac = vm_vif_params["MAC"].lower()

-                                self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                                 f"vm-data/networks/{vif_device}/mac", network_mac)
+                                self.xapi_session.xenapi.VM.add_to_xenstore_data(
+                                    self.vm_ref, f"vm-data/networks/{vif_device}/mac", network_mac
+                                )

                                 for network_change in network_change_list_tmp:
-                                    self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                                     f"vm-data/networks/{vif_device}/{network_change}",
-                                                                                     network_params[network_change])
+                                    self.xapi_session.xenapi.VM.add_to_xenstore_data(
+                                        self.vm_ref,
+                                        f"vm-data/networks/{vif_device}/{network_change}",
+                                        network_params[network_change],
+                                    )

                         position += 1

-                    elif change.get('networks_new'):
-                        for position, vif_device in change['networks_new']:
-                            network_params = self.module.params['networks'][position]
-
-                            network_ref = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
-
-                            network_name = network_params['name']
-                            network_mac = network_params['mac'] if network_params.get('mac') else ""
-                            network_type = network_params.get('type')
-                            network_ip = network_params['ip'] if network_params.get('ip') else ""
-                            network_prefix = network_params['prefix'] if network_params.get('prefix') else ""
-                            network_netmask = network_params['netmask'] if network_params.get('netmask') else ""
-                            network_gateway = network_params['gateway'] if network_params.get('gateway') else ""
-                            network_type6 = network_params.get('type6')
-                            network_ip6 = network_params['ip6'] if network_params.get('ip6') else ""
-                            network_prefix6 = network_params['prefix6'] if network_params.get('prefix6') else ""
-                            network_gateway6 = network_params['gateway6'] if network_params.get('gateway6') else ""
+                    elif change.get("networks_new"):
+                        for position, vif_device in change["networks_new"]:
+                            network_params = self.module.params["networks"][position]
+
+                            network_ref = self.xapi_session.xenapi.network.get_by_name_label(network_params["name"])[0]
+
+                            network_name = network_params["name"]
+                            network_mac = network_params["mac"] if network_params.get("mac") else ""
+                            network_type = network_params.get("type")
+                            network_ip = network_params["ip"] if network_params.get("ip") else ""
+                            network_prefix = network_params["prefix"] if network_params.get("prefix") else ""
+                            network_netmask = network_params["netmask"] if network_params.get("netmask") else ""
+                            network_gateway = network_params["gateway"] if network_params.get("gateway") else ""
+                            network_type6 = network_params.get("type6")
+                            network_ip6 = network_params["ip6"] if network_params.get("ip6") else ""
+                            network_prefix6 = network_params["prefix6"] if network_params.get("prefix6") else ""
+                            network_gateway6 = network_params["gateway6"] if network_params.get("gateway6") else ""

                             vif = {
                                 "device": vif_device,
@@ -1061,67 +1146,102 @@ def reconfigure(self):

                             vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)

-                            if self.vm_params['power_state'].lower() == "running":
+                            if self.vm_params["power_state"].lower() == "running":
                                 self.xapi_session.xenapi.VIF.plug(vif_ref_new)

-                            if self.vm_params['customization_agent'] == "native":
+                            if self.vm_params["customization_agent"] == "native":
                                 if network_type and network_type == "static":
-                                    self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref_new, "Static",
-                                                                                f"{network_ip}/{network_prefix}", network_gateway)
+                                    self.xapi_session.xenapi.VIF.configure_ipv4(
+                                        vif_ref_new, "Static", f"{network_ip}/{network_prefix}", network_gateway
+                                    )

                                 if network_type6 and network_type6 == "static":
-                                    self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref_new, "Static",
-                                                                                f"{network_ip6}/{network_prefix6}", network_gateway6)
-                            elif self.vm_params['customization_agent'] == "custom":
+                                    self.xapi_session.xenapi.VIF.configure_ipv6(
+                                        vif_ref_new, "Static", f"{network_ip6}/{network_prefix6}", network_gateway6
+                                    )
+                            elif self.vm_params["customization_agent"] == "custom":
                                 # We first have to remove any existing data
                                 # from xenstore_data because there could be
                                 # some old leftover data from some interface
                                 # that once occupied same device location as
                                 # our new interface.
-                                for network_param in ['name', 'mac', 'type', 'ip', 'prefix', 'netmask', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
-                                    self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, f"vm-data/networks/{vif_device}/{network_param}")
-
-                                self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, f"vm-data/networks/{vif_device}/name", network_name)
+                                for network_param in [
+                                    "name",
+                                    "mac",
+                                    "type",
+                                    "ip",
+                                    "prefix",
+                                    "netmask",
+                                    "gateway",
+                                    "type6",
+                                    "ip6",
+                                    "prefix6",
+                                    "gateway6",
+                                ]:
+                                    self.xapi_session.xenapi.VM.remove_from_xenstore_data(
+                                        self.vm_ref, f"vm-data/networks/{vif_device}/{network_param}"
+                                    )
+
+                                self.xapi_session.xenapi.VM.add_to_xenstore_data(
+                                    self.vm_ref, f"vm-data/networks/{vif_device}/name", network_name
+                                )

                                 # We get MAC from VIF itself instead of
                                 # networks.mac because it could be
                                 # autogenerated.
                                 vm_vif_mac = self.xapi_session.xenapi.VIF.get_MAC(vif_ref_new)
-                                self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, f"vm-data/networks/{vif_device}/mac", vm_vif_mac)
+                                self.xapi_session.xenapi.VM.add_to_xenstore_data(
+                                    self.vm_ref, f"vm-data/networks/{vif_device}/mac", vm_vif_mac
+                                )

                                 if network_type:
-                                    self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, f"vm-data/networks/{vif_device}/type", network_type)
+                                    self.xapi_session.xenapi.VM.add_to_xenstore_data(
+                                        self.vm_ref, f"vm-data/networks/{vif_device}/type", network_type
+                                    )

                                     if network_type == "static":
-                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                                         f"vm-data/networks/{vif_device}/ip", network_ip)
-                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                                         f"vm-data/networks/{vif_device}/prefix", network_prefix)
-                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                                         f"vm-data/networks/{vif_device}/netmask", network_netmask)
-                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                                         f"vm-data/networks/{vif_device}/gateway", network_gateway)
+                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(
+                                            self.vm_ref, f"vm-data/networks/{vif_device}/ip", network_ip
+                                        )
+                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(
+                                            self.vm_ref, f"vm-data/networks/{vif_device}/prefix", network_prefix
+                                        )
+                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(
+                                            self.vm_ref, f"vm-data/networks/{vif_device}/netmask", network_netmask
+                                        )
+                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(
+                                            self.vm_ref, f"vm-data/networks/{vif_device}/gateway", network_gateway
+                                        )

                                 if network_type6:
-                                    self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, f"vm-data/networks/{vif_device}/type6", network_type6)
+                                    self.xapi_session.xenapi.VM.add_to_xenstore_data(
+                                        self.vm_ref, f"vm-data/networks/{vif_device}/type6", network_type6
+                                    )

                                     if network_type6 == "static":
-                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                                         f"vm-data/networks/{vif_device}/ip6", network_ip6)
-                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                                         f"vm-data/networks/{vif_device}/prefix6", network_prefix6)
-                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
-                                                                                         f"vm-data/networks/{vif_device}/gateway6", network_gateway6)
-
-                    elif change.get('custom_params'):
-                        for position in change['custom_params']:
-                            custom_param_key = self.module.params['custom_params'][position]['key']
-                            custom_param_value = self.module.params['custom_params'][position]['value']
-                            self.xapi_session.xenapi_request(f"VM.set_{custom_param_key}", (self.vm_ref, custom_param_value))
-
-            if self.module.params['is_template']:
+                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(
+                                            self.vm_ref, f"vm-data/networks/{vif_device}/ip6", network_ip6
+                                        )
+                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(
+                                            self.vm_ref, f"vm-data/networks/{vif_device}/prefix6", network_prefix6
+                                        )
+                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(
+                                            self.vm_ref, f"vm-data/networks/{vif_device}/gateway6", network_gateway6
+                                        )
+
+                    elif change.get("custom_params"):
+                        for position in change["custom_params"]:
+                            custom_param_key = self.module.params["custom_params"][position]["key"]
+                            custom_param_value = self.module.params["custom_params"][position]["value"]
+                            self.xapi_session.xenapi_request(
+                                f"VM.set_{custom_param_key}", (self.vm_ref, custom_param_value)
+                            )
+
+            if self.module.params["is_template"]:
                 self.xapi_session.xenapi.VM.set_is_a_template(self.vm_ref, True)

-        elif "need_poweredoff" in config_changes and self.module.params['force'] and vm_power_state_save != 'halted':
+        elif (
+            "need_poweredoff" in config_changes and self.module.params["force"] and vm_power_state_save != "halted"
+        ):
            self.set_power_state("poweredon")

        # Gather new params after reconfiguration.
@@ -1138,8 +1258,10 @@ def destroy(self):
         if not self.exists():
             self.module.fail_json(msg="Called destroy on non existing VM!")

-        if self.vm_params['power_state'].lower() != 'halted' and not self.module.params['force']:
-            self.module.fail_json(msg="VM destroy: VM has to be in powered off state to destroy but force was not specified!")
+        if self.vm_params["power_state"].lower() != "halted" and not self.module.params["force"]:
+            self.module.fail_json(
+                msg="VM destroy: VM has to be in powered off state to destroy but force was not specified!"
+            )

         # Support for Ansible check mode.
         if self.module.check_mode:
@@ -1152,11 +1274,13 @@ def destroy(self):
             # Destroy VM!
             self.xapi_session.xenapi.VM.destroy(self.vm_ref)

-            vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+            vm_disk_params_list = [
+                disk_params for disk_params in self.vm_params["VBDs"] if disk_params["type"] == "Disk"
+            ]

             # Destroy all VDIs associated with VM!
             for vm_disk_params in vm_disk_params_list:
-                vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params['VDI']['uuid'])
+                vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params["VDI"]["uuid"])

                 self.xapi_session.xenapi.VDI.destroy(vdi_ref)
@@ -1179,55 +1303,69 @@ def get_changes(self):

         need_poweredoff = False

-        if self.module.params['is_template']:
+        if self.module.params["is_template"]:
             need_poweredoff = True

         try:
             # This VM could be a template or a snapshot. In that case we fail
             # because we can't reconfigure them or it would just be too
             # dangerous.
-            if self.vm_params['is_a_template'] and not self.vm_params['is_a_snapshot']:
-                self.module.fail_json(msg="VM check: targeted VM is a template! Template reconfiguration is not supported.")
+            if self.vm_params["is_a_template"] and not self.vm_params["is_a_snapshot"]:
+                self.module.fail_json(
+                    msg="VM check: targeted VM is a template! Template reconfiguration is not supported."
+                )

-            if self.vm_params['is_a_snapshot']:
-                self.module.fail_json(msg="VM check: targeted VM is a snapshot! Snapshot reconfiguration is not supported.")
+            if self.vm_params["is_a_snapshot"]:
+                self.module.fail_json(
+                    msg="VM check: targeted VM is a snapshot! Snapshot reconfiguration is not supported."
+                )

             # Let's build a list of parameters that changed.
             config_changes = []

             # Name could only differ if we found an existing VM by uuid.
-            if self.module.params['name'] is not None and self.module.params['name'] != self.vm_params['name_label']:
-                if self.module.params['name']:
-                    config_changes.append('name')
+            if self.module.params["name"] is not None and self.module.params["name"] != self.vm_params["name_label"]:
+                if self.module.params["name"]:
+                    config_changes.append("name")
                 else:
                     self.module.fail_json(msg="VM check name: VM name cannot be an empty string!")

-            if self.module.params['name_desc'] is not None and self.module.params['name_desc'] != self.vm_params['name_description']:
-                config_changes.append('name_desc')
+            if (
+                self.module.params["name_desc"] is not None
+                and self.module.params["name_desc"] != self.vm_params["name_description"]
+            ):
+                config_changes.append("name_desc")

             # Folder parameter is found in other_config.
- vm_other_config = self.vm_params['other_config'] - vm_folder = vm_other_config.get('folder', '') + vm_other_config = self.vm_params["other_config"] + vm_folder = vm_other_config.get("folder", "") - if self.module.params['folder'] is not None and self.module.params['folder'] != vm_folder: - config_changes.append('folder') - - if self.module.params['home_server'] is not None: - if (self.module.params['home_server'] and - (not self.vm_params['affinity'] or self.module.params['home_server'] != self.vm_params['affinity']['name_label'])): + if self.module.params["folder"] is not None and self.module.params["folder"] != vm_folder: + config_changes.append("folder") + if self.module.params["home_server"] is not None: + if self.module.params["home_server"] and ( + not self.vm_params["affinity"] + or self.module.params["home_server"] != self.vm_params["affinity"]["name_label"] + ): # Check existence only. Ignore return value. - get_object_ref(self.module, self.module.params['home_server'], uuid=None, obj_type="home server", fail=True, - msg_prefix="VM check home_server: ") - - config_changes.append('home_server') - elif not self.module.params['home_server'] and self.vm_params['affinity']: - config_changes.append('home_server') + get_object_ref( + self.module, + self.module.params["home_server"], + uuid=None, + obj_type="home server", + fail=True, + msg_prefix="VM check home_server: ", + ) + + config_changes.append("home_server") + elif not self.module.params["home_server"] and self.vm_params["affinity"]: + config_changes.append("home_server") config_changes_hardware = [] - if self.module.params['hardware']: - num_cpus = self.module.params['hardware'].get('num_cpus') + if self.module.params["hardware"]: + num_cpus = self.module.params["hardware"].get("num_cpus") if num_cpus is not None: # Kept for compatibility with older Ansible versions that @@ -1243,13 +1381,13 @@ def get_changes(self): # We can use VCPUs_at_startup or VCPUs_max parameter. I'd # say the former is the way to go but this needs # confirmation and testing. - if num_cpus != int(self.vm_params['VCPUs_at_startup']): - config_changes_hardware.append('num_cpus') + if num_cpus != int(self.vm_params["VCPUs_at_startup"]): + config_changes_hardware.append("num_cpus") # For now, we don't support hotpluging so VM has to be in # poweredoff state to reconfigure. need_poweredoff = True - num_cpu_cores_per_socket = self.module.params['hardware'].get('num_cpu_cores_per_socket') + num_cpu_cores_per_socket = self.module.params["hardware"].get("num_cpu_cores_per_socket") if num_cpu_cores_per_socket is not None: # Kept for compatibility with older Ansible versions that @@ -1257,24 +1395,30 @@ def get_changes(self): try: num_cpu_cores_per_socket = int(num_cpu_cores_per_socket) except ValueError as e: - self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be an integer value!") + self.module.fail_json( + msg="VM check hardware.num_cpu_cores_per_socket: parameter should be an integer value!" + ) if num_cpu_cores_per_socket < 1: - self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be greater than zero!") + self.module.fail_json( + msg="VM check hardware.num_cpu_cores_per_socket: parameter should be greater than zero!" 
+ ) if num_cpus and num_cpus % num_cpu_cores_per_socket != 0: - self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be a multiple of hardware.num_cpu_cores_per_socket!") + self.module.fail_json( + msg="VM check hardware.num_cpus: parameter should be a multiple of hardware.num_cpu_cores_per_socket!" + ) - vm_platform = self.vm_params['platform'] - vm_cores_per_socket = int(vm_platform.get('cores-per-socket', 1)) + vm_platform = self.vm_params["platform"] + vm_cores_per_socket = int(vm_platform.get("cores-per-socket", 1)) if num_cpu_cores_per_socket != vm_cores_per_socket: - config_changes_hardware.append('num_cpu_cores_per_socket') + config_changes_hardware.append("num_cpu_cores_per_socket") # For now, we don't support hotpluging so VM has to be # in poweredoff state to reconfigure. need_poweredoff = True - memory_mb = self.module.params['hardware'].get('memory_mb') + memory_mb = self.module.params["hardware"].get("memory_mb") if memory_mb is not None: # Kept for compatibility with older Ansible versions that @@ -1303,8 +1447,11 @@ def get_changes(self): # # XenServer stores memory size in bytes so we need to divide # it by 1024*1024 = 1048576. - if memory_mb != int(max(int(self.vm_params['memory_dynamic_max']), int(self.vm_params['memory_static_max'])) / 1048576): - config_changes_hardware.append('memory_mb') + if memory_mb != int( + max(int(self.vm_params["memory_dynamic_max"]), int(self.vm_params["memory_static_max"])) + / 1048576 + ): + config_changes_hardware.append("memory_mb") # For now, we don't support hotpluging so VM has to be in # poweredoff state to reconfigure. need_poweredoff = True @@ -1318,73 +1465,91 @@ def get_changes(self): # Find allowed userdevices. vbd_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref) - if self.module.params['disks']: + if self.module.params["disks"]: # Get the list of all disk. Filter out any CDs found. - vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"] + vm_disk_params_list = [ + disk_params for disk_params in self.vm_params["VBDs"] if disk_params["type"] == "Disk" + ] # Number of disks defined in module params have to be same or # higher than a number of existing disks attached to the VM. # We don't support removal or detachment of disks. - if len(self.module.params['disks']) < len(vm_disk_params_list): - self.module.fail_json(msg=f"VM check disks: provided disks configuration has less disks than the " - f"target VM ({len(self.module.params['disks'])} < {len(vm_disk_params_list)})!") + if len(self.module.params["disks"]) < len(vm_disk_params_list): + self.module.fail_json( + msg=f"VM check disks: provided disks configuration has less disks than the " + f"target VM ({len(self.module.params['disks'])} < {len(vm_disk_params_list)})!" + ) # Find the highest disk occupied userdevice. 
                 if not vm_disk_params_list:
                     vm_disk_userdevice_highest = "-1"
                 else:
-                    vm_disk_userdevice_highest = vm_disk_params_list[-1]['userdevice']
+                    vm_disk_userdevice_highest = vm_disk_params_list[-1]["userdevice"]

-                for position in range(len(self.module.params['disks'])):
+                for position in range(len(self.module.params["disks"])):
                     if position < len(vm_disk_params_list):
                         vm_disk_params = vm_disk_params_list[position]
                     else:
                         vm_disk_params = None

-                    disk_params = self.module.params['disks'][position]
+                    disk_params = self.module.params["disks"][position]

-                    disk_size = self.get_normalized_disk_size(self.module.params['disks'][position], f"VM check disks[{position}]: ")
+                    disk_size = self.get_normalized_disk_size(
+                        self.module.params["disks"][position], f"VM check disks[{position}]: "
+                    )

-                    disk_name = disk_params.get('name')
+                    disk_name = disk_params.get("name")

                     if disk_name is not None and not disk_name:
                         self.module.fail_json(msg=f"VM check disks[{position}]: disk name cannot be an empty string!")

                     # If this is an existing disk.
-                    if vm_disk_params and vm_disk_params['VDI']:
+                    if vm_disk_params and vm_disk_params["VDI"]:
                         disk_changes = []

-                        if disk_name and disk_name != vm_disk_params['VDI']['name_label']:
-                            disk_changes.append('name')
+                        if disk_name and disk_name != vm_disk_params["VDI"]["name_label"]:
+                            disk_changes.append("name")

-                        disk_name_desc = disk_params.get('name_desc')
+                        disk_name_desc = disk_params.get("name_desc")

-                        if disk_name_desc is not None and disk_name_desc != vm_disk_params['VDI']['name_description']:
-                            disk_changes.append('name_desc')
+                        if disk_name_desc is not None and disk_name_desc != vm_disk_params["VDI"]["name_description"]:
+                            disk_changes.append("name_desc")

                         if disk_size:
-                            if disk_size > int(vm_disk_params['VDI']['virtual_size']):
-                                disk_changes.append('size')
+                            if disk_size > int(vm_disk_params["VDI"]["virtual_size"]):
+                                disk_changes.append("size")
                                 need_poweredoff = True
-                            elif disk_size < int(vm_disk_params['VDI']['virtual_size']):
-                                self.module.fail_json(msg=f"VM check disks[{position}]: disk size is smaller than existing ({disk_size} bytes < "
-                                                          f"{vm_disk_params['VDI']['virtual_size']} bytes). Reducing disk size is not allowed!")
+                            elif disk_size < int(vm_disk_params["VDI"]["virtual_size"]):
+                                self.module.fail_json(
+                                    msg=f"VM check disks[{position}]: disk size is smaller than existing ({disk_size} bytes < "
+                                    f"{vm_disk_params['VDI']['virtual_size']} bytes). Reducing disk size is not allowed!"
+                                )

                         config_changes_disks.append(disk_changes)
                     # If this is a new disk.
                     else:
                         if not disk_size:
-                            self.module.fail_json(msg=f"VM check disks[{position}]: no valid disk size specification found!")
+                            self.module.fail_json(
+                                msg=f"VM check disks[{position}]: no valid disk size specification found!"
+                            )

-                        disk_sr_uuid = disk_params.get('sr_uuid')
-                        disk_sr = disk_params.get('sr')
+                        disk_sr_uuid = disk_params.get("sr_uuid")
+                        disk_sr = disk_params.get("sr")

                         if disk_sr_uuid is not None or disk_sr is not None:
                             # Check existence only. Ignore return value.
-                            get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
-                                           msg_prefix=f"VM check disks[{position}]: ")
-                        elif self.default_sr_ref == 'OpaqueRef:NULL':
-                            self.module.fail_json(msg=f"VM check disks[{position}]: no default SR found! You must specify SR explicitly.")
+                            get_object_ref(
+                                self.module,
+                                disk_sr,
+                                disk_sr_uuid,
+                                obj_type="SR",
+                                fail=True,
+                                msg_prefix=f"VM check disks[{position}]: ",
+                            )
+                        elif self.default_sr_ref == "OpaqueRef:NULL":
+                            self.module.fail_json(
+                                msg=f"VM check disks[{position}]: no default SR found! You must specify SR explicitly."
+                            )

                         if not vbd_userdevices_allowed:
                             self.module.fail_json(msg=f"VM check disks[{position}]: maximum number of devices reached!")
@@ -1408,8 +1573,10 @@ def get_changes(self):
                             # Highest occupied place could be a CD-ROM device
                             # so we have to include all devices regardless of
                             # type when calculating out-of-bound position.
-                            disk_userdevice = str(int(self.vm_params['VBDs'][-1]['userdevice']) + 1)
-                            self.module.fail_json(msg=f"VM check disks[{position}]: new disk position {disk_userdevice} is out of bounds!")
+                            disk_userdevice = str(int(self.vm_params["VBDs"][-1]["userdevice"]) + 1)
+                            self.module.fail_json(
+                                msg=f"VM check disks[{position}]: new disk position {disk_userdevice} is out of bounds!"
+                            )

                         # For new disks we only track their position.
                         config_new_disks.append((position, disk_userdevice))
@@ -1426,19 +1593,21 @@ def get_changes(self):

             config_changes_cdrom = []

-            if self.module.params['cdrom']:
+            if self.module.params["cdrom"]:
                 # Get the list of all CD-ROMs. Filter out any regular disks
                 # found. If we found no existing CD-ROM, we will create it
                 # later else take the first one found.
-                vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
+                vm_cdrom_params_list = [
+                    cdrom_params for cdrom_params in self.vm_params["VBDs"] if cdrom_params["type"] == "CD"
+                ]

                 # If no existing CD-ROM is found, we will need to add one.
                 # We need to check if there is any userdevice allowed.
                 if not vm_cdrom_params_list and not vbd_userdevices_allowed:
                     self.module.fail_json(msg="VM check cdrom: maximum number of devices reached!")

-                cdrom_type = self.module.params['cdrom'].get('type')
-                cdrom_iso_name = self.module.params['cdrom'].get('iso_name')
+                cdrom_type = self.module.params["cdrom"].get("type")
+                cdrom_iso_name = self.module.params["cdrom"].get("iso_name")

                 # If cdrom.iso_name is specified but cdrom.type is not,
                 # then set cdrom.type to 'iso', unless cdrom.iso_name is
@@ -1449,24 +1618,33 @@ def get_changes(self):
                 elif cdrom_iso_name is not None:
                     cdrom_type = "none"

-                self.module.params['cdrom']['type'] = cdrom_type
+                self.module.params["cdrom"]["type"] = cdrom_type

                 # If type changed.
-                if cdrom_type and (not vm_cdrom_params_list or cdrom_type != self.get_cdrom_type(vm_cdrom_params_list[0])):
-                    config_changes_cdrom.append('type')
+                if cdrom_type and (
+                    not vm_cdrom_params_list or cdrom_type != self.get_cdrom_type(vm_cdrom_params_list[0])
+                ):
+                    config_changes_cdrom.append("type")

                 if cdrom_type == "iso":
                     # Check if ISO exists.
                     # Check existence only. Ignore return value.
-                    get_object_ref(self.module, cdrom_iso_name, uuid=None, obj_type="ISO image", fail=True,
-                                   msg_prefix="VM check cdrom.iso_name: ")
+                    get_object_ref(
+                        self.module,
+                        cdrom_iso_name,
+                        uuid=None,
+                        obj_type="ISO image",
+                        fail=True,
+                        msg_prefix="VM check cdrom.iso_name: ",
+                    )

                     # Is ISO image changed?
- if (cdrom_iso_name and - (not vm_cdrom_params_list or - not vm_cdrom_params_list[0]['VDI'] or - cdrom_iso_name != vm_cdrom_params_list[0]['VDI']['name_label'])): - config_changes_cdrom.append('iso_name') + if cdrom_iso_name and ( + not vm_cdrom_params_list + or not vm_cdrom_params_list[0]["VDI"] + or cdrom_iso_name != vm_cdrom_params_list[0]["VDI"]["name_label"] + ): + config_changes_cdrom.append("iso_name") if config_changes_cdrom: config_changes.append({"cdrom": config_changes_cdrom}) @@ -1477,50 +1655,62 @@ def get_changes(self): # Find allowed devices. vif_devices_allowed = self.xapi_session.xenapi.VM.get_allowed_VIF_devices(self.vm_ref) - if self.module.params['networks']: + if self.module.params["networks"]: # Number of VIFs defined in module params have to be same or # higher than a number of existing VIFs attached to the VM. # We don't support removal of VIFs. - if len(self.module.params['networks']) < len(self.vm_params['VIFs']): - self.module.fail_json(msg=f"VM check networks: provided networks configuration has less interfaces than the target " - f"VM ({len(self.module.params['networks'])} < {len(self.vm_params['VIFs'])})!") + if len(self.module.params["networks"]) < len(self.vm_params["VIFs"]): + self.module.fail_json( + msg=f"VM check networks: provided networks configuration has less interfaces than the target " + f"VM ({len(self.module.params['networks'])} < {len(self.vm_params['VIFs'])})!" + ) # Find the highest occupied device. - if not self.vm_params['VIFs']: + if not self.vm_params["VIFs"]: vif_device_highest = "-1" else: - vif_device_highest = self.vm_params['VIFs'][-1]['device'] + vif_device_highest = self.vm_params["VIFs"][-1]["device"] - for position in range(len(self.module.params['networks'])): - if position < len(self.vm_params['VIFs']): - vm_vif_params = self.vm_params['VIFs'][position] + for position in range(len(self.module.params["networks"])): + if position < len(self.vm_params["VIFs"]): + vm_vif_params = self.vm_params["VIFs"][position] else: vm_vif_params = None - network_params = self.module.params['networks'][position] + network_params = self.module.params["networks"][position] - network_name = network_params.get('name') + network_name = network_params.get("name") if network_name is not None and not network_name: - self.module.fail_json(msg=f"VM check networks[{position}]: network name cannot be an empty string!") + self.module.fail_json( + msg=f"VM check networks[{position}]: network name cannot be an empty string!" + ) if network_name: # Check existence only. Ignore return value. - get_object_ref(self.module, network_name, uuid=None, obj_type="network", fail=True, - msg_prefix=f"VM check networks[{position}]: ") + get_object_ref( + self.module, + network_name, + uuid=None, + obj_type="network", + fail=True, + msg_prefix=f"VM check networks[{position}]: ", + ) - network_mac = network_params.get('mac') + network_mac = network_params.get("mac") if network_mac is not None: network_mac = network_mac.lower() if not is_mac(network_mac): - self.module.fail_json(msg=f"VM check networks[{position}]: specified MAC address '{network_mac}' is not valid!") + self.module.fail_json( + msg=f"VM check networks[{position}]: specified MAC address '{network_mac}' is not valid!" + ) # IPv4 reconfiguration. 
- network_type = network_params.get('type') - network_ip = network_params.get('ip') - network_netmask = network_params.get('netmask') + network_type = network_params.get("type") + network_ip = network_params.get("ip") + network_netmask = network_params.get("netmask") network_prefix = None # If networks.ip is specified and networks.type is not, @@ -1530,26 +1720,32 @@ def get_changes(self): # XenServer natively supports only 'none' and 'static' # type with 'none' being the same as 'dhcp'. - if self.vm_params['customization_agent'] == "native" and network_type and network_type == "dhcp": + if self.vm_params["customization_agent"] == "native" and network_type and network_type == "dhcp": network_type = "none" if network_type and network_type == "static": if network_ip is not None: - network_ip_split = network_ip.split('/') + network_ip_split = network_ip.split("/") network_ip = network_ip_split[0] if network_ip and not is_valid_ip_addr(network_ip): - self.module.fail_json(msg=f"VM check networks[{position}]: specified IPv4 address '{network_ip}' is not valid!") + self.module.fail_json( + msg=f"VM check networks[{position}]: specified IPv4 address '{network_ip}' is not valid!" + ) if len(network_ip_split) > 1: network_prefix = network_ip_split[1] if not is_valid_ip_prefix(network_prefix): - self.module.fail_json(msg=f"VM check networks[{position}]: specified IPv4 prefix '{network_prefix}' is not valid!") + self.module.fail_json( + msg=f"VM check networks[{position}]: specified IPv4 prefix '{network_prefix}' is not valid!" + ) if network_netmask is not None: if not is_valid_ip_netmask(network_netmask): - self.module.fail_json(msg=f"VM check networks[{position}]: specified IPv4 netmask '{network_netmask}' is not valid!") + self.module.fail_json( + msg=f"VM check networks[{position}]: specified IPv4 netmask '{network_netmask}' is not valid!" + ) network_prefix = ip_netmask_to_prefix(network_netmask, skip_check=True) elif network_prefix is not None: @@ -1557,27 +1753,29 @@ def get_changes(self): # If any parameter is overridden at this point, update it. if network_type: - network_params['type'] = network_type + network_params["type"] = network_type if network_ip: - network_params['ip'] = network_ip + network_params["ip"] = network_ip if network_netmask: - network_params['netmask'] = network_netmask + network_params["netmask"] = network_netmask if network_prefix: - network_params['prefix'] = network_prefix + network_params["prefix"] = network_prefix - network_gateway = network_params.get('gateway') + network_gateway = network_params.get("gateway") # Gateway can be an empty string (when removing gateway # configuration) but if it is not, it should be validated. if network_gateway and not is_valid_ip_addr(network_gateway): - self.module.fail_json(msg=f"VM check networks[{position}]: specified IPv4 gateway '{network_gateway}' is not valid!") + self.module.fail_json( + msg=f"VM check networks[{position}]: specified IPv4 gateway '{network_gateway}' is not valid!" + ) # IPv6 reconfiguration. - network_type6 = network_params.get('type6') - network_ip6 = network_params.get('ip6') + network_type6 = network_params.get("type6") + network_ip6 = network_params.get("ip6") network_prefix6 = None # If networks.ip6 is specified and networks.type6 is not, @@ -1587,149 +1785,187 @@ def get_changes(self): # XenServer natively supports only 'none' and 'static' # type with 'none' being the same as 'dhcp'. 
- if self.vm_params['customization_agent'] == "native" and network_type6 and network_type6 == "dhcp": + if self.vm_params["customization_agent"] == "native" and network_type6 and network_type6 == "dhcp": network_type6 = "none" if network_type6 and network_type6 == "static": if network_ip6 is not None: - network_ip6_split = network_ip6.split('/') + network_ip6_split = network_ip6.split("/") network_ip6 = network_ip6_split[0] if network_ip6 and not is_valid_ip6_addr(network_ip6): - self.module.fail_json(msg=f"VM check networks[{position}]: specified IPv6 address '{network_ip6}' is not valid!") + self.module.fail_json( + msg=f"VM check networks[{position}]: specified IPv6 address '{network_ip6}' is not valid!" + ) if len(network_ip6_split) > 1: network_prefix6 = network_ip6_split[1] if not is_valid_ip6_prefix(network_prefix6): - self.module.fail_json(msg=f"VM check networks[{position}]: specified IPv6 prefix '{network_prefix6}' is not valid!") + self.module.fail_json( + msg=f"VM check networks[{position}]: specified IPv6 prefix '{network_prefix6}' is not valid!" + ) # If any parameter is overridden at this point, update it. if network_type6: - network_params['type6'] = network_type6 + network_params["type6"] = network_type6 if network_ip6: - network_params['ip6'] = network_ip6 + network_params["ip6"] = network_ip6 if network_prefix6: - network_params['prefix6'] = network_prefix6 + network_params["prefix6"] = network_prefix6 - network_gateway6 = network_params.get('gateway6') + network_gateway6 = network_params.get("gateway6") # Gateway can be an empty string (when removing gateway # configuration) but if it is not, it should be validated. if network_gateway6 and not is_valid_ip6_addr(network_gateway6): - self.module.fail_json(msg=f"VM check networks[{position}]: specified IPv6 gateway '{network_gateway6}' is not valid!") + self.module.fail_json( + msg=f"VM check networks[{position}]: specified IPv6 gateway '{network_gateway6}' is not valid!" + ) # If this is an existing VIF. 
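
Note: the hunks above only re-wrap the static IPv4/IPv6 validation; the behaviour is unchanged. For readers tracing the logic, here is a minimal standalone sketch of the same "address[/prefix]" split-and-validate step and the netmask-to-prefix conversion. It uses the stdlib ipaddress module as a stand-in for the collection's is_valid_ip_addr/is_valid_ip_prefix/ip_netmask_to_prefix helpers, so the function names and error handling below are illustrative assumptions, not the module's code.

import ipaddress

def split_and_validate(ip_spec, version=4):
    # Split "address[/prefix]" the way the module does, validating both parts.
    addr_cls = ipaddress.IPv4Address if version == 4 else ipaddress.IPv6Address
    max_prefix = 32 if version == 4 else 128
    ip, _, prefix = ip_spec.partition("/")
    addr_cls(ip)  # raises ValueError for an invalid address
    if prefix and not 0 <= int(prefix) <= max_prefix:
        raise ValueError(f"invalid prefix '{prefix}'")
    return ip, prefix or None

def netmask_to_prefix(netmask):
    # "255.255.255.0" -> 24; a non-contiguous mask raises ValueError.
    return ipaddress.IPv4Network(f"0.0.0.0/{netmask}").prefixlen

print(split_and_validate("10.0.0.5/24"))            # ('10.0.0.5', '24')
print(split_and_validate("fd00::1/64", version=6))  # ('fd00::1', '64')
print(netmask_to_prefix("255.255.255.0"))           # 24
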
- if vm_vif_params and vm_vif_params['network']: + if vm_vif_params and vm_vif_params["network"]: network_changes = [] - if network_name and network_name != vm_vif_params['network']['name_label']: - network_changes.append('name') + if network_name and network_name != vm_vif_params["network"]["name_label"]: + network_changes.append("name") - if network_mac and network_mac != vm_vif_params['MAC'].lower(): - network_changes.append('mac') + if network_mac and network_mac != vm_vif_params["MAC"].lower(): + network_changes.append("mac") - if self.vm_params['customization_agent'] == "native": - if network_type and network_type != vm_vif_params['ipv4_configuration_mode'].lower(): - network_changes.append('type') + if self.vm_params["customization_agent"] == "native": + if network_type and network_type != vm_vif_params["ipv4_configuration_mode"].lower(): + network_changes.append("type") if network_type and network_type == "static": - if network_ip and (not vm_vif_params['ipv4_addresses'] or - not vm_vif_params['ipv4_addresses'][0] or - network_ip != vm_vif_params['ipv4_addresses'][0].split('/')[0]): - network_changes.append('ip') - - if network_prefix and (not vm_vif_params['ipv4_addresses'] or - not vm_vif_params['ipv4_addresses'][0] or - network_prefix != vm_vif_params['ipv4_addresses'][0].split('/')[1]): - network_changes.append('prefix') - network_changes.append('netmask') - - if network_gateway is not None and network_gateway != vm_vif_params['ipv4_gateway']: - network_changes.append('gateway') - - if network_type6 and network_type6 != vm_vif_params['ipv6_configuration_mode'].lower(): - network_changes.append('type6') + if network_ip and ( + not vm_vif_params["ipv4_addresses"] + or not vm_vif_params["ipv4_addresses"][0] + or network_ip != vm_vif_params["ipv4_addresses"][0].split("/")[0] + ): + network_changes.append("ip") + + if network_prefix and ( + not vm_vif_params["ipv4_addresses"] + or not vm_vif_params["ipv4_addresses"][0] + or network_prefix != vm_vif_params["ipv4_addresses"][0].split("/")[1] + ): + network_changes.append("prefix") + network_changes.append("netmask") + + if network_gateway is not None and network_gateway != vm_vif_params["ipv4_gateway"]: + network_changes.append("gateway") + + if network_type6 and network_type6 != vm_vif_params["ipv6_configuration_mode"].lower(): + network_changes.append("type6") if network_type6 and network_type6 == "static": - if network_ip6 and (not vm_vif_params['ipv6_addresses'] or - not vm_vif_params['ipv6_addresses'][0] or - network_ip6 != vm_vif_params['ipv6_addresses'][0].split('/')[0]): - network_changes.append('ip6') - - if network_prefix6 and (not vm_vif_params['ipv6_addresses'] or - not vm_vif_params['ipv6_addresses'][0] or - network_prefix6 != vm_vif_params['ipv6_addresses'][0].split('/')[1]): - network_changes.append('prefix6') - - if network_gateway6 is not None and network_gateway6 != vm_vif_params['ipv6_gateway']: - network_changes.append('gateway6') - - elif self.vm_params['customization_agent'] == "custom": - vm_xenstore_data = self.vm_params['xenstore_data'] - - if network_type and network_type != vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/type", "none"): - network_changes.append('type') + if network_ip6 and ( + not vm_vif_params["ipv6_addresses"] + or not vm_vif_params["ipv6_addresses"][0] + or network_ip6 != vm_vif_params["ipv6_addresses"][0].split("/")[0] + ): + network_changes.append("ip6") + + if network_prefix6 and ( + not vm_vif_params["ipv6_addresses"] + or not vm_vif_params["ipv6_addresses"][0] + or 
network_prefix6 != vm_vif_params["ipv6_addresses"][0].split("/")[1] + ): + network_changes.append("prefix6") + + if network_gateway6 is not None and network_gateway6 != vm_vif_params["ipv6_gateway"]: + network_changes.append("gateway6") + + elif self.vm_params["customization_agent"] == "custom": + vm_xenstore_data = self.vm_params["xenstore_data"] + + if network_type and network_type != vm_xenstore_data.get( + f"vm-data/networks/{vm_vif_params['device']}/type", "none" + ): + network_changes.append("type") need_poweredoff = True if network_type and network_type == "static": - if network_ip and network_ip != vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/ip", ""): - network_changes.append('ip') + if network_ip and network_ip != vm_xenstore_data.get( + f"vm-data/networks/{vm_vif_params['device']}/ip", "" + ): + network_changes.append("ip") need_poweredoff = True - if network_prefix and network_prefix != vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/prefix", ""): - network_changes.append('prefix') - network_changes.append('netmask') + if network_prefix and network_prefix != vm_xenstore_data.get( + f"vm-data/networks/{vm_vif_params['device']}/prefix", "" + ): + network_changes.append("prefix") + network_changes.append("netmask") need_poweredoff = True _device_gw_path = f"vm-data/networks/{vm_vif_params['device']}/gateway" - if network_gateway is not None and network_gateway != vm_xenstore_data.get(_device_gw_path, ""): - network_changes.append('gateway') + if network_gateway is not None and network_gateway != vm_xenstore_data.get( + _device_gw_path, "" + ): + network_changes.append("gateway") need_poweredoff = True - if network_type6 and network_type6 != vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/type6", "none"): - network_changes.append('type6') + if network_type6 and network_type6 != vm_xenstore_data.get( + f"vm-data/networks/{vm_vif_params['device']}/type6", "none" + ): + network_changes.append("type6") need_poweredoff = True if network_type6 and network_type6 == "static": - if network_ip6 and network_ip6 != vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/ip6", ""): - network_changes.append('ip6') + if network_ip6 and network_ip6 != vm_xenstore_data.get( + f"vm-data/networks/{vm_vif_params['device']}/ip6", "" + ): + network_changes.append("ip6") need_poweredoff = True - if network_prefix6 and network_prefix6 != vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/prefix6", ""): - network_changes.append('prefix6') + if network_prefix6 and network_prefix6 != vm_xenstore_data.get( + f"vm-data/networks/{vm_vif_params['device']}/prefix6", "" + ): + network_changes.append("prefix6") need_poweredoff = True _device_gw6_path = f"vm-data/networks/{vm_vif_params['device']}/gateway6" - if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get(_device_gw6_path, ""): - network_changes.append('gateway6') + if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get( + _device_gw6_path, "" + ): + network_changes.append("gateway6") need_poweredoff = True config_changes_networks.append(network_changes) # If this is a new VIF. else: if not network_name: - self.module.fail_json(msg=f"VM check networks[{position}]: network name is required for new network interface!") + self.module.fail_json( + msg=f"VM check networks[{position}]: network name is required for new network interface!" 
+ ) if network_type and network_type == "static" and network_ip and not network_netmask: - self.module.fail_json(msg=f"VM check networks[{position}]: IPv4 netmask or prefix is required for new network interface!") + self.module.fail_json( + msg=f"VM check networks[{position}]: IPv4 netmask or prefix is required for new network interface!" + ) if network_type6 and network_type6 == "static" and network_ip6 and not network_prefix6: - self.module.fail_json(msg=f"VM check networks[{position}]: IPv6 prefix is required for new network interface!") + self.module.fail_json( + msg=f"VM check networks[{position}]: IPv6 prefix is required for new network interface!" + ) # Restart is needed if we are adding new network # interface with IP/gateway parameters specified # and custom agent is used. - if self.vm_params['customization_agent'] == "custom": - for parameter in ['type', 'ip', 'prefix', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']: + if self.vm_params["customization_agent"] == "custom": + for parameter in ["type", "ip", "prefix", "gateway", "type6", "ip6", "prefix6", "gateway6"]: if network_params.get(parameter): need_poweredoff = True break if not vif_devices_allowed: - self.module.fail_json(msg=f"VM check networks[{position}]: maximum number of network interfaces reached!") + self.module.fail_json( + msg=f"VM check networks[{position}]: maximum number of network interfaces reached!" + ) # We need to place a new network interface right above the # highest placed existing interface to maintain relative @@ -1738,7 +1974,9 @@ def get_changes(self): vif_device = str(int(vif_device_highest) + 1) if vif_device not in vif_devices_allowed: - self.module.fail_json(msg=f"VM check networks[{position}]: new network interface position {vif_device} is out of bounds!") + self.module.fail_json( + msg=f"VM check networks[{position}]: new network interface position {vif_device} is out of bounds!" + ) vif_devices_allowed.remove(vif_device) vif_device_highest = vif_device @@ -1758,15 +1996,17 @@ def get_changes(self): config_changes_custom_params = [] - if self.module.params['custom_params']: - for position in range(len(self.module.params['custom_params'])): - custom_param = self.module.params['custom_params'][position] + if self.module.params["custom_params"]: + for position in range(len(self.module.params["custom_params"])): + custom_param = self.module.params["custom_params"][position] - custom_param_key = custom_param['key'] - custom_param_value = custom_param['value'] + custom_param_key = custom_param["key"] + custom_param_value = custom_param["value"] if custom_param_key not in self.vm_params: - self.module.fail_json(msg=f"VM check custom_params[{position}]: unknown VM param '{custom_param_key}'!") + self.module.fail_json( + msg=f"VM check custom_params[{position}]: unknown VM param '{custom_param_key}'!" + ) if custom_param_value != self.vm_params[custom_param_key]: # We only need to track custom param position. @@ -1776,7 +2016,7 @@ def get_changes(self): config_changes.append({"custom_params": config_changes_custom_params}) if need_poweredoff: - config_changes.append('need_poweredoff') + config_changes.append("need_poweredoff") return config_changes @@ -1802,14 +2042,16 @@ def get_normalized_disk_size(self, disk_params, msg_prefix=""): # specs just in case. Priority is given to 'size' but if not found, we # check for 'size_tb', 'size_gb', 'size_mb' etc. and use first one # found. 
- disk_size_spec = [x for x in disk_params.keys() if disk_params[x] is not None and (x.startswith('size_') or x == 'size')] + disk_size_spec = [ + x for x in disk_params.keys() if disk_params[x] is not None and (x.startswith("size_") or x == "size") + ] if disk_size_spec: try: # size if "size" in disk_size_spec: - size_regex = re.compile(r'(\d+(?:\.\d+)?)\s*(.*)') - disk_size_m = size_regex.match(disk_params['size']) + size_regex = re.compile(r"(\d+(?:\.\d+)?)\s*(.*)") + disk_size_m = size_regex.match(disk_params["size"]) if disk_size_m: size = disk_size_m.group(1) @@ -1819,14 +2061,14 @@ def get_normalized_disk_size(self, disk_params, msg_prefix=""): # size_tb, size_gb, size_mb, size_kb, size_b else: size = disk_params[disk_size_spec[0]] - unit = disk_size_spec[0].split('_')[-1] + unit = disk_size_spec[0].split("_")[-1] if not unit: unit = "b" else: unit = unit.lower() - if re.match(r'\d+\.\d+', size): + if re.match(r"\d+\.\d+", size): # We found float value in string, let's typecast it. if unit == "b": # If we found float but unit is bytes, we get the integer part only. @@ -1842,15 +2084,19 @@ def get_normalized_disk_size(self, disk_params, msg_prefix=""): except (TypeError, ValueError, NameError): # Common failure - self.module.fail_json(msg=f"{msg_prefix}failed to parse disk size! Please review value provided using documentation.") + self.module.fail_json( + msg=f"{msg_prefix}failed to parse disk size! Please review value provided using documentation." + ) disk_units = dict(tb=4, gb=3, mb=2, kb=1, b=0) if unit in disk_units: return int(size * (1024 ** disk_units[unit])) else: - self.module.fail_json(msg="%s'%s' is not a supported unit for disk size! Supported units are ['%s']." % - (msg_prefix, unit, "', '".join(sorted(disk_units.keys(), key=lambda key: disk_units[key])))) + self.module.fail_json( + msg="%s'%s' is not a supported unit for disk size! Supported units are ['%s']." + % (msg_prefix, unit, "', '".join(sorted(disk_units.keys(), key=lambda key: disk_units[key]))) + ) else: return None @@ -1859,7 +2105,7 @@ def get_cdrom_type(vm_cdrom_params): """Returns VM CD-ROM type.""" # TODO: implement support for detecting type host. No server to test # this on at the moment. 
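
Note: the reformatted get_normalized_disk_size() above keeps its original algorithm: pick the first non-empty size spec, split a free-form "size" string into number and unit with a regex, then scale by 1024**n. A condensed, runnable sketch of that algorithm follows; the function name and sample inputs are hypothetical, chosen only for illustration.

import re

UNITS = {"tb": 4, "gb": 3, "mb": 2, "kb": 1, "b": 0}

def normalized_size(params):
    """Return size in bytes from {'size': '10 GB'} or {'size_mb': '512'}-style params."""
    spec = [k for k, v in params.items() if v is not None and (k == "size" or k.startswith("size_"))]
    if not spec:
        return None
    if "size" in spec:
        m = re.match(r"(\d+(?:\.\d+)?)\s*(.*)", params["size"])
        if not m:
            raise ValueError("failed to parse disk size")
        size, unit = m.group(1), (m.group(2) or "b").lower()
    else:
        size, unit = params[spec[0]], spec[0].split("_")[-1].lower()
    if unit not in UNITS:
        raise ValueError(f"'{unit}' is not a supported unit")
    number = float(size) if "." in str(size) else int(size)
    return int(number * 1024 ** UNITS[unit])

print(normalized_size({"size": "10 GB"}))   # 10737418240
print(normalized_size({"size_mb": "512"}))  # 536870912
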
- if vm_cdrom_params['empty']: + if vm_cdrom_params["empty"]: return "none" else: return "iso" @@ -1868,124 +2114,124 @@ def get_cdrom_type(vm_cdrom_params): def main(): argument_spec = xenserver_common_argument_spec() argument_spec.update( - state=dict(type='str', default='present', - choices=['present', 'absent', 'poweredon']), - name=dict(type='str', aliases=['name_label']), - name_desc=dict(type='str'), - uuid=dict(type='str'), - template=dict(type='str', aliases=['template_src']), - template_uuid=dict(type='str'), - is_template=dict(type='bool', default=False), - folder=dict(type='str'), + state=dict(type="str", default="present", choices=["present", "absent", "poweredon"]), + name=dict(type="str", aliases=["name_label"]), + name_desc=dict(type="str"), + uuid=dict(type="str"), + template=dict(type="str", aliases=["template_src"]), + template_uuid=dict(type="str"), + is_template=dict(type="bool", default=False), + folder=dict(type="str"), hardware=dict( - type='dict', + type="dict", options=dict( - num_cpus=dict(type='int'), - num_cpu_cores_per_socket=dict(type='int'), - memory_mb=dict(type='int'), + num_cpus=dict(type="int"), + num_cpu_cores_per_socket=dict(type="int"), + memory_mb=dict(type="int"), ), ), disks=dict( - type='list', - elements='dict', + type="list", + elements="dict", options=dict( - size=dict(type='str'), - size_tb=dict(type='str'), - size_gb=dict(type='str'), - size_mb=dict(type='str'), - size_kb=dict(type='str'), - size_b=dict(type='str'), - name=dict(type='str', aliases=['name_label']), - name_desc=dict(type='str'), - sr=dict(type='str'), - sr_uuid=dict(type='str'), + size=dict(type="str"), + size_tb=dict(type="str"), + size_gb=dict(type="str"), + size_mb=dict(type="str"), + size_kb=dict(type="str"), + size_b=dict(type="str"), + name=dict(type="str", aliases=["name_label"]), + name_desc=dict(type="str"), + sr=dict(type="str"), + sr_uuid=dict(type="str"), ), - aliases=['disk'], + aliases=["disk"], mutually_exclusive=[ - ['size', 'size_tb', 'size_gb', 'size_mb', 'size_kb', 'size_b'], - ['sr', 'sr_uuid'], + ["size", "size_tb", "size_gb", "size_mb", "size_kb", "size_b"], + ["sr", "sr_uuid"], ], ), cdrom=dict( - type='dict', + type="dict", options=dict( - type=dict(type='str', choices=['none', 'iso']), - iso_name=dict(type='str'), + type=dict(type="str", choices=["none", "iso"]), + iso_name=dict(type="str"), ), required_if=[ - ['type', 'iso', ['iso_name']], + ["type", "iso", ["iso_name"]], ], ), networks=dict( - type='list', - elements='dict', + type="list", + elements="dict", options=dict( - name=dict(type='str', aliases=['name_label']), - mac=dict(type='str'), - type=dict(type='str', choices=['none', 'dhcp', 'static']), - ip=dict(type='str'), - netmask=dict(type='str'), - gateway=dict(type='str'), - type6=dict(type='str', choices=['none', 'dhcp', 'static']), - ip6=dict(type='str'), - gateway6=dict(type='str'), + name=dict(type="str", aliases=["name_label"]), + mac=dict(type="str"), + type=dict(type="str", choices=["none", "dhcp", "static"]), + ip=dict(type="str"), + netmask=dict(type="str"), + gateway=dict(type="str"), + type6=dict(type="str", choices=["none", "dhcp", "static"]), + ip6=dict(type="str"), + gateway6=dict(type="str"), ), - aliases=['network'], + aliases=["network"], required_if=[ - ['type', 'static', ['ip']], - ['type6', 'static', ['ip6']], + ["type", "static", ["ip"]], + ["type6", "static", ["ip6"]], ], ), - home_server=dict(type='str'), + home_server=dict(type="str"), custom_params=dict( - type='list', - elements='dict', + type="list", + 
elements="dict", options=dict( - key=dict(type='str', required=True, no_log=False), - value=dict(type='raw', required=True), + key=dict(type="str", required=True, no_log=False), + value=dict(type="raw", required=True), ), ), - wait_for_ip_address=dict(type='bool', default=False), - state_change_timeout=dict(type='int', default=0), - linked_clone=dict(type='bool', default=False), - force=dict(type='bool', default=False), + wait_for_ip_address=dict(type="bool", default=False), + state_change_timeout=dict(type="int", default=0), + linked_clone=dict(type="bool", default=False), + force=dict(type="bool", default=False), ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[ - ['name', 'uuid'], - ], - mutually_exclusive=[ - ['template', 'template_uuid'], - ], - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[ + ["name", "uuid"], + ], + mutually_exclusive=[ + ["template", "template_uuid"], + ], + ) - result = {'failed': False, 'changed': False} + result = {"failed": False, "changed": False} vm = XenServerVM(module) # Find existing VM if vm.exists(): - if module.params['state'] == "absent": + if module.params["state"] == "absent": vm.destroy() - result['changed'] = True - elif module.params['state'] == "present": + result["changed"] = True + elif module.params["state"] == "present": config_changes = vm.reconfigure() if config_changes: - result['changed'] = True + result["changed"] = True # Make new disk and network changes more user friendly # and informative. for change in config_changes: if isinstance(change, dict): - if change.get('disks_new'): + if change.get("disks_new"): disks_new = [] - for position, userdevice in change['disks_new']: + for position, userdevice in change["disks_new"]: disk_new_params = {"position": position, "vbd_userdevice": userdevice} - disk_params = module.params['disks'][position] + disk_params = module.params["disks"][position] for k in disk_params.keys(): if disk_params[k] is not None: @@ -1994,14 +2240,14 @@ def main(): disks_new.append(disk_new_params) if disks_new: - change['disks_new'] = disks_new + change["disks_new"] = disks_new - elif change.get('networks_new'): + elif change.get("networks_new"): networks_new = [] - for position, device in change['networks_new']: + for position, device in change["networks_new"]: network_new_params = {"position": position, "vif_device": device} - network_params = module.params['networks'][position] + network_params = module.params["networks"][position] for k in network_params.keys(): if network_params[k] is not None: @@ -2010,26 +2256,33 @@ def main(): networks_new.append(network_new_params) if networks_new: - change['networks_new'] = networks_new - - result['changes'] = config_changes - - elif module.params['state'] in ["poweredon", "poweredoff", "restarted", "shutdownguest", "rebootguest", "suspended"]: - result['changed'] = vm.set_power_state(module.params['state']) - elif module.params['state'] != "absent": + change["networks_new"] = networks_new + + result["changes"] = config_changes + + elif module.params["state"] in [ + "poweredon", + "poweredoff", + "restarted", + "shutdownguest", + "rebootguest", + "suspended", + ]: + result["changed"] = vm.set_power_state(module.params["state"]) + elif module.params["state"] != "absent": vm.deploy() - result['changed'] = True + result["changed"] = True - if module.params['wait_for_ip_address'] and module.params['state'] != "absent": + if module.params["wait_for_ip_address"] and 
module.params["state"] != "absent": vm.wait_for_ip_address() - result['instance'] = vm.gather_facts() + result["instance"] = vm.gather_facts() - if result['failed']: + if result["failed"]: module.fail_json(**result) else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/xenserver_guest_info.py b/plugins/modules/xenserver_guest_info.py index 989150e71da..7ec83835055 100644 --- a/plugins/modules/xenserver_guest_info.py +++ b/plugins/modules/xenserver_guest_info.py @@ -149,8 +149,13 @@ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XenServerObject, get_object_ref, - gather_vm_params, gather_vm_facts) +from ansible_collections.community.general.plugins.module_utils.xenserver import ( + xenserver_common_argument_spec, + XenServerObject, + get_object_ref, + gather_vm_params, + gather_vm_facts, +) class XenServerVM(XenServerObject): @@ -170,7 +175,14 @@ def __init__(self, module): """ super().__init__(module) - self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ") + self.vm_ref = get_object_ref( + self.module, + self.module.params["name"], + self.module.params["uuid"], + obj_type="VM", + fail=True, + msg_prefix="VM search: ", + ) self.gather_params() def gather_params(self): @@ -185,30 +197,31 @@ def gather_facts(self): def main(): argument_spec = xenserver_common_argument_spec() argument_spec.update( - name=dict(type='str', aliases=['name_label']), - uuid=dict(type='str'), + name=dict(type="str", aliases=["name_label"]), + uuid=dict(type="str"), ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[ - ['name', 'uuid'], - ], - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[ + ["name", "uuid"], + ], + ) - result = {'failed': False, 'changed': False} + result = {"failed": False, "changed": False} # Module will exit with an error message if no VM is found. vm = XenServerVM(module) # Gather facts. 
- result['instance'] = vm.gather_facts() + result["instance"] = vm.gather_facts() - if result['failed']: + if result["failed"]: module.fail_json(**result) else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/xenserver_guest_powerstate.py b/plugins/modules/xenserver_guest_powerstate.py index 3a74820b9b5..1a30ecff5fc 100644 --- a/plugins/modules/xenserver_guest_powerstate.py +++ b/plugins/modules/xenserver_guest_powerstate.py @@ -176,9 +176,15 @@ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XenServerObject, get_object_ref, - gather_vm_params, gather_vm_facts, set_vm_power_state, - wait_for_vm_ip_address) +from ansible_collections.community.general.plugins.module_utils.xenserver import ( + xenserver_common_argument_spec, + XenServerObject, + get_object_ref, + gather_vm_params, + gather_vm_facts, + set_vm_power_state, + wait_for_vm_ip_address, +) class XenServerVM(XenServerObject): @@ -198,7 +204,14 @@ def __init__(self, module): """ super().__init__(module) - self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ") + self.vm_ref = get_object_ref( + self.module, + self.module.params["name"], + self.module.params["uuid"], + obj_type="VM", + fail=True, + msg_prefix="VM search: ", + ) self.gather_params() def gather_params(self): @@ -211,56 +224,72 @@ def gather_facts(self): def set_power_state(self, power_state): """Controls VM power state.""" - state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout']) + state_changed, current_state = set_vm_power_state( + self.module, self.vm_ref, power_state, self.module.params["state_change_timeout"] + ) # If state has changed, update vm_params. 
if state_changed: - self.vm_params['power_state'] = current_state.capitalize() + self.vm_params["power_state"] = current_state.capitalize() return state_changed def wait_for_ip_address(self): """Waits for VM to acquire an IP address.""" - self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout']) + self.vm_params["guest_metrics"] = wait_for_vm_ip_address( + self.module, self.vm_ref, self.module.params["state_change_timeout"] + ) def main(): argument_spec = xenserver_common_argument_spec() argument_spec.update( - state=dict(type='str', default='present', - choices=['powered-on', 'powered-off', 'restarted', 'shutdown-guest', 'reboot-guest', 'suspended', 'present']), - name=dict(type='str', aliases=['name_label']), - uuid=dict(type='str'), - wait_for_ip_address=dict(type='bool', default=False), - state_change_timeout=dict(type='int', default=0), + state=dict( + type="str", + default="present", + choices=[ + "powered-on", + "powered-off", + "restarted", + "shutdown-guest", + "reboot-guest", + "suspended", + "present", + ], + ), + name=dict(type="str", aliases=["name_label"]), + uuid=dict(type="str"), + wait_for_ip_address=dict(type="bool", default=False), + state_change_timeout=dict(type="int", default=0), ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[ - ['name', 'uuid'], - ], - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[ + ["name", "uuid"], + ], + ) - result = {'failed': False, 'changed': False} + result = {"failed": False, "changed": False} # Module will exit with an error message if no VM is found. vm = XenServerVM(module) # Set VM power state. - if module.params['state'] != "present": - result['changed'] = vm.set_power_state(module.params['state']) + if module.params["state"] != "present": + result["changed"] = vm.set_power_state(module.params["state"]) - if module.params['wait_for_ip_address']: + if module.params["wait_for_ip_address"]: vm.wait_for_ip_address() - result['instance'] = vm.gather_facts() + result["instance"] = vm.gather_facts() - if result['failed']: + if result["failed"]: module.fail_json(**result) else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/xfconf.py b/plugins/modules/xfconf.py index 9918b3fccbd..1c33e6ecdfe 100644 --- a/plugins/modules/xfconf.py +++ b/plugins/modules/xfconf.py @@ -171,21 +171,24 @@ class XFConfProperty(StateModuleHelper): - change_params = ('value', ) - diff_params = ('value', ) - output_params = ('property', 'channel', 'value') + change_params = ("value",) + diff_params = ("value",) + output_params = ("property", "channel", "value") module = dict( argument_spec=dict( - state=dict(type='str', choices=('present', 'absent'), default='present'), - channel=dict(type='str', required=True), - property=dict(type='str', required=True), - value_type=dict(type='list', elements='str', - choices=('string', 'int', 'double', 'bool', 'uint', 'uchar', 'char', 'uint64', 'int64', 'float')), - value=dict(type='list', elements='raw'), - force_array=dict(type='bool', default=False, aliases=['array']), + state=dict(type="str", choices=("present", "absent"), default="present"), + channel=dict(type="str", required=True), + property=dict(type="str", required=True), + value_type=dict( + type="list", + elements="str", + choices=("string", "int", "double", "bool", "uint", "uchar", "char", "uint64", "int64", "float"), + 
), + value=dict(type="list", elements="raw"), + force_array=dict(type="bool", default=False, aliases=["array"]), ), - required_if=[('state', 'present', ['value', 'value_type'])], - required_together=[('value', 'value_type')], + required_if=[("state", "present", ["value", "value_type"])], + required_together=[("value", "value_type")], supports_check_mode=True, ) @@ -193,35 +196,35 @@ def __init_module__(self): self.runner = xfconf_runner(self.module) self.vars.version = get_xfconf_version(self.runner) self.does_not = f'Property "{self.vars.property}" does not exist on channel "{self.vars.channel}".' - self.vars.set('previous_value', self._get()) - self.vars.set('type', self.vars.value_type) - self.vars.set_meta('value', initial_value=self.vars.previous_value) + self.vars.set("previous_value", self._get()) + self.vars.set("type", self.vars.value_type) + self.vars.set_meta("value", initial_value=self.vars.previous_value) def process_command_output(self, rc, out, err): if err.rstrip() == self.does_not: return None if rc or len(err): - self.do_raise(f'xfconf-query failed with error (rc={rc}): {err}') + self.do_raise(f"xfconf-query failed with error (rc={rc}): {err}") result = out.rstrip() - if 'Value is an array with' in result: - result = result.split('\n') + if "Value is an array with" in result: + result = result.split("\n") result.pop(0) result.pop(0) return result def _get(self): - with self.runner('channel property', output_process=self.process_command_output) as ctx: + with self.runner("channel property", output_process=self.process_command_output) as ctx: return ctx.run() def state_absent(self): - with self.runner('channel property reset', check_mode_skip=True) as ctx: + with self.runner("channel property reset", check_mode_skip=True) as ctx: ctx.run(reset=True) self.vars.stdout = ctx.results_out self.vars.stderr = ctx.results_err self.vars.cmd = ctx.cmd - self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set("run_info", ctx.run_info, verbosity=4) self.vars.value = None def state_present(self): @@ -241,17 +244,14 @@ def state_present(self): self.do_raise('Number of elements in "value" and "value_type" must be the same') # calculates if it is an array - self.vars.is_array = \ - bool(self.vars.force_array) or \ - isinstance(self.vars.previous_value, list) or \ - values_len > 1 + self.vars.is_array = bool(self.vars.force_array) or isinstance(self.vars.previous_value, list) or values_len > 1 - with self.runner('channel property create force_array values_and_types', check_mode_skip=True) as ctx: + with self.runner("channel property create force_array values_and_types", check_mode_skip=True) as ctx: ctx.run(create=True, force_array=self.vars.is_array, values_and_types=(self.vars.value, value_type)) self.vars.stdout = ctx.results_out self.vars.stderr = ctx.results_err self.vars.cmd = ctx.cmd - self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set("run_info", ctx.run_info, verbosity=4) if not self.vars.is_array: self.vars.value = self.vars.value[0] @@ -264,5 +264,5 @@ def main(): XFConfProperty.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/xfconf_info.py b/plugins/modules/xfconf_info.py index f131072edd0..d4d6499dae3 100644 --- a/plugins/modules/xfconf_info.py +++ b/plugins/modules/xfconf_info.py @@ -132,12 +132,10 @@ class XFConfInfo(ModuleHelper): module = dict( argument_spec=dict( - channel=dict(type='str'), - property=dict(type='str'), - ), - required_by=dict( - property=['channel'] + 
channel=dict(type="str"), + property=dict(type="str"), ), + required_by=dict(property=["channel"]), supports_check_mode=True, ) @@ -168,16 +166,16 @@ def _process_list_channels(self, rc, out, err): def __run__(self): self.vars.list_arg = not (bool(self.vars.channel) and bool(self.vars.property)) - output = 'value' + output = "value" proc = self.process_command_output if self.vars.channel is None: - output = 'channels' + output = "channels" proc = self._process_list_channels elif self.vars.property is None: - output = 'properties' + output = "properties" proc = self._process_list_properties - with self.runner.context('list_arg channel property', output_process=proc) as ctx: + with self.runner.context("list_arg channel property", output_process=proc) as ctx: result = ctx.run(**self.vars.as_dict()) if not self.vars.list_arg and self.vars.is_array: @@ -189,5 +187,5 @@ def main(): XFConfInfo.execute() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/xfs_quota.py b/plugins/modules/xfs_quota.py index c3bacb99963..bc28460070e 100644 --- a/plugins/modules/xfs_quota.py +++ b/plugins/modules/xfs_quota.py @@ -200,8 +200,7 @@ def main(): mp = get_fs_by_mountpoint(mountpoint) if mp is None: module.fail_json( - msg=f"Path '{mountpoint}' is not a mount point or not located on an xfs file system.", - **result + msg=f"Path '{mountpoint}' is not a mount point or not located on an xfs file system.", **result ) if quota_type == "user": @@ -219,7 +218,7 @@ def main(): ): module.fail_json( msg=f"Path '{mountpoint}' is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option.", - **result + **result, ) try: pwd.getpwnam(name) @@ -232,14 +231,10 @@ def main(): if name is None: name = quota_default - if ( - "gquota" not in mp["mntopts"] - and "grpquota" not in mp["mntopts"] - and "gqnoenforce" not in mp["mntopts"] - ): + if "gquota" not in mp["mntopts"] and "grpquota" not in mp["mntopts"] and "gqnoenforce" not in mp["mntopts"]: module.fail_json( msg=f"Path '{mountpoint}' is not mounted with the gquota/grpquota/gqnoenforce option. (current options: {mp['mntopts']})", - **result + **result, ) try: grp.getgrnam(name) @@ -252,14 +247,9 @@ def main(): if name is None: name = quota_default - if ( - "pquota" not in mp["mntopts"] - and "prjquota" not in mp["mntopts"] - and "pqnoenforce" not in mp["mntopts"] - ): + if "pquota" not in mp["mntopts"] and "prjquota" not in mp["mntopts"] and "pqnoenforce" not in mp["mntopts"]: module.fail_json( - msg=f"Path '{mountpoint}' is not mounted with the pquota/prjquota/pqnoenforce option.", - **result + msg=f"Path '{mountpoint}' is not mounted with the pquota/prjquota/pqnoenforce option.", **result ) if name != quota_default and not os.path.isfile("/etc/projects"): @@ -269,9 +259,7 @@ def main(): module.fail_json(msg="Path '/etc/projid' does not exist.", **result) if name != quota_default and name is not None and get_project_id(name) is None: - module.fail_json( - msg=f"Entry '{name}' has not been defined in /etc/projid.", **result - ) + module.fail_json(msg=f"Entry '{name}' has not been defined in /etc/projid.", **result) prj_set = True if name != quota_default: @@ -285,10 +273,7 @@ def main(): module.fail_json(msg="Could not get project state.", **result) else: for line in stdout.split("\n"): - if ( - "Project Id '%s' - is not set." in line - or "project identifier is not set" in line - ): + if "Project Id '%s' - is not set." 
in line or "project identifier is not set" in line: prj_set = False break @@ -301,9 +286,7 @@ def main(): result["rc"] = rc result["stdout"] = stdout result["stderr"] = stderr - module.fail_json( - msg="Could not get quota realtime block report.", **result - ) + module.fail_json(msg="Could not get quota realtime block report.", **result) result["changed"] = True @@ -316,21 +299,13 @@ def main(): result["rc"] = rc result["stdout"] = stdout result["stderr"] = stderr - module.fail_json( - msg="Failed to clear managed tree from project quota control.", **result - ) + module.fail_json(msg="Failed to clear managed tree from project quota control.", **result) result["changed"] = True - current_bsoft, current_bhard = quota_report( - module, xfs_quota_bin, mountpoint, name, quota_type, "b" - ) - current_isoft, current_ihard = quota_report( - module, xfs_quota_bin, mountpoint, name, quota_type, "i" - ) - current_rtbsoft, current_rtbhard = quota_report( - module, xfs_quota_bin, mountpoint, name, quota_type, "rtb" - ) + current_bsoft, current_bhard = quota_report(module, xfs_quota_bin, mountpoint, name, quota_type, "b") + current_isoft, current_ihard = quota_report(module, xfs_quota_bin, mountpoint, name, quota_type, "i") + current_rtbsoft, current_rtbhard = quota_report(module, xfs_quota_bin, mountpoint, name, quota_type, "rtb") # Set limits if state == "absent": @@ -427,9 +402,7 @@ def quota_report(module, xfs_quota_bin, mountpoint, name, quota_type, used_type) used_name = "realtime blocks" factor = 1024 - rc, stdout, stderr = exec_quota( - module, xfs_quota_bin, f"report {type_arg} {used_arg}", mountpoint - ) + rc, stdout, stderr = exec_quota(module, xfs_quota_bin, f"report {type_arg} {used_arg}", mountpoint) if rc != 0: result = dict( @@ -456,12 +429,9 @@ def exec_quota(module, xfs_quota_bin, cmd, mountpoint): if ( "XFS_GETQUOTA: Operation not permitted" in stderr.split("\n") or rc == 1 - and "xfs_quota: cannot set limits: Operation not permitted" - in stderr.split("\n") + and "xfs_quota: cannot set limits: Operation not permitted" in stderr.split("\n") ): - module.fail_json( - msg="You need to be root or have CAP_SYS_ADMIN capability to perform this operation" - ) + module.fail_json(msg="You need to be root or have CAP_SYS_ADMIN capability to perform this operation") return rc, stdout, stderr @@ -472,9 +442,7 @@ def get_fs_by_mountpoint(mountpoint): for line in s.readlines(): mp = line.strip().split() if len(mp) == 6 and mp[1] == mountpoint and mp[2] == "xfs": - mpr = dict( - zip(["spec", "file", "vfstype", "mntopts", "freq", "passno"], mp) - ) + mpr = dict(zip(["spec", "file", "vfstype", "mntopts", "freq", "passno"], mp)) mpr["mntopts"] = mpr["mntopts"].split(",") break return mpr diff --git a/plugins/modules/xml.py b/plugins/modules/xml.py index f93980e1293..52fd2cbc056 100644 --- a/plugins/modules/xml.py +++ b/plugins/modules/xml.py @@ -369,6 +369,7 @@ LXML_IMP_ERR = None try: from lxml import etree, objectify + HAS_LXML = True except ImportError: LXML_IMP_ERR = traceback.format_exc() @@ -384,17 +385,17 @@ _XPSTR = "('(?:.*)'|\"(?:.*)\")" _RE_SPLITSIMPLELAST = re.compile(f"^(.*)/({_NSIDENT})$") -_RE_SPLITSIMPLELASTEQVALUE = re.compile(f'^(.*)/({_NSIDENT}' + ')/text\\(\\)=' + _XPSTR + '$') +_RE_SPLITSIMPLELASTEQVALUE = re.compile(f"^(.*)/({_NSIDENT}" + ")/text\\(\\)=" + _XPSTR + "$") _RE_SPLITSIMPLEATTRLAST = re.compile(f"^(.*)/(@(?:{_NSIDENT}))$") _RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile(f"^(.*)/(@(?:{_NSIDENT}))={_XPSTR}$") -_RE_SPLITSUBLAST = re.compile(f'^(.*)/({_NSIDENT}' + 
')\\[(.*)\\]$') +_RE_SPLITSUBLAST = re.compile(f"^(.*)/({_NSIDENT}" + ")\\[(.*)\\]$") _RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$") def has_changed(doc): orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc))) obj = etree.tostring(objectify.fromstring(etree.tostring(doc))) - return (orig_obj != obj) + return orig_obj != obj def do_print_match(module, tree, xpath, namespaces): @@ -408,14 +409,14 @@ def do_print_match(module, tree, xpath, namespaces): def count_nodes(module, tree, xpath, namespaces): - """ Return the count of nodes matching the xpath """ + """Return the count of nodes matching the xpath""" hits = tree.xpath(f"count(/{xpath})", namespaces=namespaces) msg = f"found {hits} nodes" finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits)) def is_node(tree, xpath, namespaces): - """ Test if a given xpath matches anything and if that match is a node. + """Test if a given xpath matches anything and if that match is a node. For now we just assume you're only searching for one specific thing.""" if xpath_matches(tree, xpath, namespaces): @@ -428,13 +429,13 @@ def is_node(tree, xpath, namespaces): def is_attribute(tree, xpath, namespaces): - """ Test if a given xpath matches and that match is an attribute + """Test if a given xpath matches and that match is an attribute An xpath attribute search will only match one item""" # lxml 5.1.1 removed etree._ElementStringResult, so we can no longer simply assume it is there # (https://github.com/lxml/lxml/commit/eba79343d0e7ad1ce40169f60460cdd4caa29eb3) - ElementStringResult = getattr(etree, '_ElementStringResult', None) + ElementStringResult = getattr(etree, "_ElementStringResult", None) if xpath_matches(tree, xpath, namespaces): match = tree.xpath(xpath, namespaces=namespaces) @@ -446,14 +447,14 @@ def is_attribute(tree, xpath, namespaces): def xpath_matches(tree, xpath, namespaces): - """ Test if a node exists """ + """Test if a node exists""" if tree.xpath(xpath, namespaces=namespaces): return True return False def delete_xpath_target(module, tree, xpath, namespaces): - """ Delete an attribute or element from a tree """ + """Delete an attribute or element from a tree""" changed = False try: for result in tree.xpath(xpath, namespaces=namespaces): @@ -593,7 +594,9 @@ def nsnameToClark(name, namespaces): def check_or_make_target(module, tree, xpath, namespaces): (inner_xpath, changes) = split_xpath_last(xpath) if (inner_xpath == xpath) or (changes is None): - module.fail_json(msg=f"Can't process Xpath {xpath} in order to spawn nodes! tree is {etree.tostring(tree, pretty_print=True)}") + module.fail_json( + msg=f"Can't process Xpath {xpath} in order to spawn nodes! 
tree is {etree.tostring(tree, pretty_print=True)}" + ) return False changed = False @@ -603,8 +606,8 @@ def check_or_make_target(module, tree, xpath, namespaces): # we test again after calling check_or_make_target if is_node(tree, inner_xpath, namespaces) and changes: - for (eoa, eoa_value) in changes: - if eoa and eoa[0] != '@' and eoa[0] != '/': + for eoa, eoa_value in changes: + if eoa and eoa[0] != "@" and eoa[0] != "/": # implicitly creating an element new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml") if eoa_value: @@ -615,7 +618,7 @@ def check_or_make_target(module, tree, xpath, namespaces): node.extend(new_kids) changed = True # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True)) - elif eoa and eoa[0] == '/': + elif eoa and eoa[0] == "/": element = eoa[1:] new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml") for node in tree.xpath(inner_xpath, namespaces=namespaces): @@ -634,11 +637,11 @@ def check_or_make_target(module, tree, xpath, namespaces): node.text = eoa_value changed = True - elif eoa and eoa[0] == '@': + elif eoa and eoa[0] == "@": attribute = nsnameToClark(eoa[1:], namespaces) for element in tree.xpath(inner_xpath, namespaces=namespaces): - changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value) + changing = attribute not in element.attrib or element.attrib[attribute] != eoa_value if changing: changed = changed or changing @@ -678,11 +681,15 @@ def set_target_inner(module, tree, xpath, namespaces, attribute, value): # TODO: Implement a more robust check to check for child namespaces' existence if tree.getroot().nsmap and ":" not in xpath: missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n" - module.fail_json(msg=f"{missing_namespace}Xpath {xpath} causes a failure: {e}\n -- tree is {etree.tostring(tree, pretty_print=True)}", - exception=traceback.format_exc()) + module.fail_json( + msg=f"{missing_namespace}Xpath {xpath} causes a failure: {e}\n -- tree is {etree.tostring(tree, pretty_print=True)}", + exception=traceback.format_exc(), + ) if not is_node(tree, xpath, namespaces): - module.fail_json(msg=f"Xpath {xpath} does not reference a node! tree is {etree.tostring(tree, pretty_print=True)}") + module.fail_json( + msg=f"Xpath {xpath} does not reference a node! 
tree is {etree.tostring(tree, pretty_print=True)}" + ) for element in tree.xpath(xpath, namespaces=namespaces): if not attribute: @@ -733,8 +740,8 @@ def get_element_attr(module, tree, xpath, namespaces): def child_to_element(module, child, in_type): - if in_type == 'xml': - infile = BytesIO(to_bytes(child, errors='surrogate_or_strict')) + if in_type == "xml": + infile = BytesIO(to_bytes(child, errors="surrogate_or_strict")) try: parser = etree.XMLParser() @@ -742,7 +749,7 @@ def child_to_element(module, child, in_type): return node.getroot() except etree.XMLSyntaxError as e: module.fail_json(msg=f"Error while parsing child element: {e}") - elif in_type == 'yaml': + elif in_type == "yaml": if isinstance(child, str): return etree.Element(child) elif isinstance(child, MutableMapping): @@ -751,8 +758,8 @@ def child_to_element(module, child, in_type): (key, value) = list(child.items())[0] if isinstance(value, MutableMapping): - children = value.pop('_', None) - child_value = value.pop('+value', None) + children = value.pop("_", None) + child_value = value.pop("+value", None) node = etree.Element(key, value) @@ -775,7 +782,7 @@ def child_to_element(module, child, in_type): module.fail_json(msg=f"Invalid child input type: {in_type}. Type must be either xml or yaml.") -def children_to_nodes(module=None, children=None, type='yaml'): +def children_to_nodes(module=None, children=None, type="yaml"): """turn a str/hash/list of str&hash into a list of elements""" children = [] if children is None else children @@ -783,65 +790,71 @@ def children_to_nodes(module=None, children=None, type='yaml'): def make_pretty(module, tree): - xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + xml_string = etree.tostring( + tree, xml_declaration=True, encoding="UTF-8", pretty_print=module.params["pretty_print"] + ) result = dict( changed=False, ) - if module.params['path']: - xml_file = module.params['path'] - with open(xml_file, 'rb') as xml_content: + if module.params["path"]: + xml_file = module.params["path"] + with open(xml_file, "rb") as xml_content: if xml_string != xml_content.read(): - result['changed'] = True + result["changed"] = True if not module.check_mode: - if module.params['backup']: - result['backup_file'] = module.backup_local(module.params['path']) - tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) - - elif module.params['xmlstring']: - result['xmlstring'] = xml_string + if module.params["backup"]: + result["backup_file"] = module.backup_local(module.params["path"]) + tree.write( + xml_file, xml_declaration=True, encoding="UTF-8", pretty_print=module.params["pretty_print"] + ) + + elif module.params["xmlstring"]: + result["xmlstring"] = xml_string # NOTE: Modifying a string is not considered a change ! 
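
Note: make_pretty() and finish() in this file report changed status via the objectify round-trip seen in has_changed() earlier in the diff: both documents are re-serialized through lxml.objectify so purely cosmetic differences do not count as changes. A small sketch of that comparison; the helper name and sample documents are assumed for illustration.

from lxml import etree, objectify

def semantically_equal(doc_a, doc_b):
    # Serialize both documents through objectify so formatting-only
    # differences (extra spacing, <b></b> vs <b/>) compare equal.
    canonical = lambda doc: etree.tostring(objectify.fromstring(doc))
    return canonical(doc_a) == canonical(doc_b)

print(semantically_equal(b"<a><b/></a>", b"<a ><b></b></a>"))  # True
print(semantically_equal(b"<a><b/></a>", b"<a><c/></a>"))      # False
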
- if xml_string != module.params['xmlstring']: - result['changed'] = True + if xml_string != module.params["xmlstring"]: + result["changed"] = True module.exit_json(**result) -def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()): - +def finish(module, tree, xpath, namespaces, changed=False, msg="", hitcount=0, matches=tuple()): result = dict( - actions=dict( - xpath=xpath, - namespaces=namespaces, - state=module.params['state'] - ), + actions=dict(xpath=xpath, namespaces=namespaces, state=module.params["state"]), changed=has_changed(tree), ) - if module.params['count'] or hitcount: - result['count'] = hitcount + if module.params["count"] or hitcount: + result["count"] = hitcount - if module.params['print_match'] or matches: - result['matches'] = matches + if module.params["print_match"] or matches: + result["matches"] = matches if msg: - result['msg'] = msg + result["msg"] = msg - if result['changed']: + if result["changed"]: if module._diff: - result['diff'] = dict( - before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True), - after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True), + result["diff"] = dict( + before=etree.tostring(orig_doc, xml_declaration=True, encoding="UTF-8", pretty_print=True), + after=etree.tostring(tree, xml_declaration=True, encoding="UTF-8", pretty_print=True), ) - if module.params['path'] and not module.check_mode: - if module.params['backup']: - result['backup_file'] = module.backup_local(module.params['path']) - tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + if module.params["path"] and not module.check_mode: + if module.params["backup"]: + result["backup_file"] = module.backup_local(module.params["path"]) + tree.write( + module.params["path"], + xml_declaration=True, + encoding="UTF-8", + pretty_print=module.params["pretty_print"], + ) - if module.params['xmlstring']: - result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + if module.params["xmlstring"]: + result["xmlstring"] = etree.tostring( + tree, xml_declaration=True, encoding="UTF-8", pretty_print=module.params["pretty_print"] + ) module.exit_json(**result) @@ -849,84 +862,84 @@ def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, m def main(): module = AnsibleModule( argument_spec=dict( - path=dict(type='path', aliases=['dest', 'file']), - xmlstring=dict(type='str'), - xpath=dict(type='str'), - namespaces=dict(type='dict', default={}), - state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']), - value=dict(type='raw'), - attribute=dict(type='raw'), - add_children=dict(type='list', elements='raw'), - set_children=dict(type='list', elements='raw'), - count=dict(type='bool', default=False), - print_match=dict(type='bool', default=False), - pretty_print=dict(type='bool', default=False), - content=dict(type='str', choices=['attribute', 'text']), - input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']), - backup=dict(type='bool', default=False), - strip_cdata_tags=dict(type='bool', default=False), - insertbefore=dict(type='bool', default=False), - insertafter=dict(type='bool', default=False), + path=dict(type="path", aliases=["dest", "file"]), + xmlstring=dict(type="str"), + xpath=dict(type="str"), + namespaces=dict(type="dict", default={}), + state=dict(type="str", 
default="present", choices=["absent", "present"], aliases=["ensure"]), + value=dict(type="raw"), + attribute=dict(type="raw"), + add_children=dict(type="list", elements="raw"), + set_children=dict(type="list", elements="raw"), + count=dict(type="bool", default=False), + print_match=dict(type="bool", default=False), + pretty_print=dict(type="bool", default=False), + content=dict(type="str", choices=["attribute", "text"]), + input_type=dict(type="str", default="yaml", choices=["xml", "yaml"]), + backup=dict(type="bool", default=False), + strip_cdata_tags=dict(type="bool", default=False), + insertbefore=dict(type="bool", default=False), + insertafter=dict(type="bool", default=False), ), supports_check_mode=True, required_by=dict( - add_children=['xpath'], - attribute=['value'], - content=['xpath'], - set_children=['xpath'], - value=['xpath'], + add_children=["xpath"], + attribute=["value"], + content=["xpath"], + set_children=["xpath"], + value=["xpath"], ), required_if=[ - ['count', True, ['xpath']], - ['print_match', True, ['xpath']], - ['insertbefore', True, ['xpath']], - ['insertafter', True, ['xpath']], + ["count", True, ["xpath"]], + ["print_match", True, ["xpath"]], + ["insertbefore", True, ["xpath"]], + ["insertafter", True, ["xpath"]], ], required_one_of=[ - ['path', 'xmlstring'], - ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'], + ["path", "xmlstring"], + ["add_children", "content", "count", "pretty_print", "print_match", "set_children", "value"], ], mutually_exclusive=[ - ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'], - ['path', 'xmlstring'], - ['insertbefore', 'insertafter'], + ["add_children", "content", "count", "print_match", "set_children", "value"], + ["path", "xmlstring"], + ["insertbefore", "insertafter"], ], ) - xml_file = module.params['path'] - xml_string = module.params['xmlstring'] - xpath = module.params['xpath'] - namespaces = module.params['namespaces'] - state = module.params['state'] - value = json_dict_bytes_to_unicode(module.params['value']) - attribute = module.params['attribute'] - set_children = json_dict_bytes_to_unicode(module.params['set_children']) - add_children = json_dict_bytes_to_unicode(module.params['add_children']) - pretty_print = module.params['pretty_print'] - content = module.params['content'] - input_type = module.params['input_type'] - print_match = module.params['print_match'] - count = module.params['count'] - backup = module.params['backup'] - strip_cdata_tags = module.params['strip_cdata_tags'] - insertbefore = module.params['insertbefore'] - insertafter = module.params['insertafter'] + xml_file = module.params["path"] + xml_string = module.params["xmlstring"] + xpath = module.params["xpath"] + namespaces = module.params["namespaces"] + state = module.params["state"] + value = json_dict_bytes_to_unicode(module.params["value"]) + attribute = module.params["attribute"] + set_children = json_dict_bytes_to_unicode(module.params["set_children"]) + add_children = json_dict_bytes_to_unicode(module.params["add_children"]) + pretty_print = module.params["pretty_print"] + content = module.params["content"] + input_type = module.params["input_type"] + print_match = module.params["print_match"] + count = module.params["count"] + backup = module.params["backup"] + strip_cdata_tags = module.params["strip_cdata_tags"] + insertbefore = module.params["insertbefore"] + insertafter = module.params["insertafter"] # Check if we have lxml 2.3.0 or newer installed if not HAS_LXML: 
module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_IMP_ERR) - elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'): - module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine') - elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'): - module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.') + elif LooseVersion(".".join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion("2.3.0"): + module.fail_json(msg="The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine") + elif LooseVersion(".".join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion("3.0.0"): + module.warn("Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.") infile = None try: # Check if the file exists if xml_string: - infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict')) + infile = BytesIO(to_bytes(xml_string, errors="surrogate_or_strict")) elif os.path.isfile(xml_file): - infile = open(xml_file, 'rb') + infile = open(xml_file, "rb") else: module.fail_json(msg=f"The target XML source '{xml_file}' does not exist.") @@ -959,13 +972,13 @@ def main(): if count: count_nodes(module, doc, xpath, namespaces) - if content == 'attribute': + if content == "attribute": get_element_attr(module, doc, xpath, namespaces) - elif content == 'text': + elif content == "text": get_element_text(module, doc, xpath, namespaces) # File exists: - if state == 'absent': + if state == "absent": # - absent: delete xpath target delete_xpath_target(module, doc, xpath, namespaces) @@ -999,5 +1012,5 @@ def main(): module.fail_json(msg="Don't know what to do") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/yarn.py b/plugins/modules/yarn.py index 119288dc06a..04703fa1922 100644 --- a/plugins/modules/yarn.py +++ b/plugins/modules/yarn.py @@ -140,43 +140,41 @@ class Yarn: - def __init__(self, module, **kwargs): self.module = module - self.globally = kwargs['globally'] - self.name = kwargs['name'] - self.version = kwargs['version'] - self.path = kwargs['path'] - self.registry = kwargs['registry'] - self.production = kwargs['production'] - self.ignore_scripts = kwargs['ignore_scripts'] - self.executable = kwargs['executable'] + self.globally = kwargs["globally"] + self.name = kwargs["name"] + self.version = kwargs["version"] + self.path = kwargs["path"] + self.registry = kwargs["registry"] + self.production = kwargs["production"] + self.ignore_scripts = kwargs["ignore_scripts"] + self.executable = kwargs["executable"] # Specify a version of package if version arg passed in self.name_version = None - if kwargs['version'] and self.name is not None: + if kwargs["version"] and self.name is not None: self.name_version = f"{self.name}@{self.version!s}" elif self.name is not None: self.name_version = self.name def _exec(self, args, run_in_check_mode=False, check_rc=True, unsupported_with_global=False): if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - with_global_arg = self.globally and not unsupported_with_global if with_global_arg: # Yarn global arg is inserted before the command (e.g. 
`yarn global {some-command}`) - args.insert(0, 'global') + args.insert(0, "global") cmd = self.executable + args if self.production: - cmd.append('--production') + cmd.append("--production") if self.ignore_scripts: - cmd.append('--ignore-scripts') + cmd.append("--ignore-scripts") if self.registry: - cmd.append('--registry') + cmd.append("--registry") cmd.append(self.registry) # If path is specified, cd into that path and run the command. @@ -189,7 +187,7 @@ def _exec(self, args, run_in_check_mode=False, check_rc=True, unsupported_with_g self.module.fail_json(msg=f"Path provided {self.path} is not a directory") cwd = self.path - if not os.path.isfile(os.path.join(self.path, 'package.json')): + if not os.path.isfile(os.path.join(self.path, "package.json")): self.module.fail_json(msg="Package.json does not exist in provided path.") rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) @@ -201,18 +199,18 @@ def _process_yarn_error(self, err): try: # We need to filter for errors, since Yarn warnings are included in stderr for line in err.splitlines(): - if json.loads(line)['type'] == 'error': + if json.loads(line)["type"] == "error": self.module.fail_json(msg=err) except Exception: self.module.fail_json(msg=f"Unexpected stderr output from Yarn: {err}", stderr=err) def list(self): - cmd = ['list', '--depth=0', '--json'] + cmd = ["list", "--depth=0", "--json"] installed = list() missing = list() - if not os.path.isfile(os.path.join(self.path, 'yarn.lock')): + if not os.path.isfile(os.path.join(self.path, "yarn.lock")): missing.append(self.name) return installed, missing @@ -222,13 +220,13 @@ def list(self): self._process_yarn_error(error) - for json_line in result.strip().split('\n'): + for json_line in result.strip().split("\n"): data = json.loads(json_line) - if data['type'] == 'tree': - dependencies = data['data']['trees'] + if data["type"] == "tree": + dependencies = data["data"]["trees"] for dep in dependencies: - name, version = dep['name'].rsplit('@', 1) + name, version = dep["name"].rsplit("@", 1) installed.append(name) if self.name not in installed: @@ -239,23 +237,23 @@ def list(self): def install(self): if self.name_version: # Yarn has a separate command for installing packages by name... 
- return self._exec(['add', self.name_version]) + return self._exec(["add", self.name_version]) # And one for installing all packages in package.json - return self._exec(['install', '--non-interactive']) + return self._exec(["install", "--non-interactive"]) def update(self): - return self._exec(['upgrade', '--latest']) + return self._exec(["upgrade", "--latest"]) def uninstall(self): - return self._exec(['remove', self.name]) + return self._exec(["remove", self.name]) def list_outdated(self): outdated = list() - if not os.path.isfile(os.path.join(self.path, 'yarn.lock')): + if not os.path.isfile(os.path.join(self.path, "yarn.lock")): return outdated - cmd_result, err = self._exec(['outdated', '--json'], True, False, unsupported_with_global=True) + cmd_result, err = self._exec(["outdated", "--json"], True, False, unsupported_with_global=True) # the package.json in the global dir is missing a license field, so warnings are expected on stderr self._process_yarn_error(err) @@ -268,7 +266,7 @@ def list_outdated(self): data = json.loads(outdated_packages_data) try: - outdated_dependencies = data['data']['body'] + outdated_dependencies = data["data"]["body"] except KeyError: return outdated @@ -282,66 +280,64 @@ def list_outdated(self): def main(): arg_spec = dict( name=dict(), - path=dict(type='path'), + path=dict(type="path"), version=dict(), - production=dict(default=False, type='bool'), - executable=dict(type='path'), + production=dict(default=False, type="bool"), + executable=dict(type="path"), registry=dict(), - state=dict(default='present', choices=['present', 'absent', 'latest']), - ignore_scripts=dict(default=False, type='bool'), + state=dict(default="present", choices=["present", "absent", "latest"]), + ignore_scripts=dict(default=False, type="bool"), ) - arg_spec['global'] = dict(default=False, type='bool') - module = AnsibleModule( - argument_spec=arg_spec, - supports_check_mode=True - ) - - name = module.params['name'] - path = module.params['path'] - version = module.params['version'] - globally = module.params['global'] - production = module.params['production'] - registry = module.params['registry'] - state = module.params['state'] - ignore_scripts = module.params['ignore_scripts'] + arg_spec["global"] = dict(default=False, type="bool") + module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) + + name = module.params["name"] + path = module.params["path"] + version = module.params["version"] + globally = module.params["global"] + production = module.params["production"] + registry = module.params["registry"] + state = module.params["state"] + ignore_scripts = module.params["ignore_scripts"] # When installing globally, users should not be able to define a path for installation. # Require a path if global is False, though! 
if path is None and globally is False: - module.fail_json(msg='Path must be specified when not using global arg') + module.fail_json(msg="Path must be specified when not using global arg") elif path and globally is True: - module.fail_json(msg='Cannot specify path if doing global installation') + module.fail_json(msg="Cannot specify path if doing global installation") - if state == 'absent' and not name: - module.fail_json(msg='Package must be explicitly named when uninstalling.') - if state == 'latest': - version = 'latest' + if state == "absent" and not name: + module.fail_json(msg="Package must be explicitly named when uninstalling.") + if state == "latest": + version = "latest" - if module.params['executable']: - executable = module.params['executable'].split(' ') + if module.params["executable"]: + executable = module.params["executable"].split(" ") else: - executable = [module.get_bin_path('yarn', True)] + executable = [module.get_bin_path("yarn", True)] # When installing globally, use the defined path for global node_modules if globally: - _rc, out, _err = module.run_command(executable + ['global', 'dir'], check_rc=True) + _rc, out, _err = module.run_command(executable + ["global", "dir"], check_rc=True) path = out.strip() - yarn = Yarn(module, - name=name, - path=path, - version=version, - globally=globally, - production=production, - executable=executable, - registry=registry, - ignore_scripts=ignore_scripts) + yarn = Yarn( + module, + name=name, + path=path, + version=version, + globally=globally, + production=production, + executable=executable, + registry=registry, + ignore_scripts=ignore_scripts, + ) changed = False - out = '' - err = '' - if state == 'present': - + out = "" + err = "" + if state == "present": if not name: changed = True out, err = yarn.install() @@ -351,8 +347,7 @@ def main(): changed = True out, err = yarn.install() - elif state == 'latest': - + elif state == "latest": if not name: changed = True out, err = yarn.install() @@ -375,5 +370,5 @@ def main(): module.exit_json(changed=changed, out=out, err=err) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/yum_versionlock.py b/plugins/modules/yum_versionlock.py index 3a0e8375255..dbf2229f440 100644 --- a/plugins/modules/yum_versionlock.py +++ b/plugins/modules/yum_versionlock.py @@ -92,32 +92,38 @@ from fnmatch import fnmatch # on DNF-based distros, yum is a symlink to dnf, so we try to handle their different entry formats. 
-NEVRA_RE_YUM = re.compile(r'^(?P<exclude>!)?(?P<epoch>\d+):(?P<name>.+)-' - r'(?P<version>.+)-(?P<release>.+)\.(?P<arch>.+)$') -NEVRA_RE_DNF = re.compile(r"^(?P<exclude>!)?(?P<name>.+)-(?P<epoch>\d+):(?P<version>.+)-" - r"(?P<release>.+)\.(?P<arch>.+)$") +NEVRA_RE_YUM = re.compile( + r"^(?P<exclude>!)?(?P<epoch>\d+):(?P<name>.+)-" + r"(?P<version>.+)-(?P<release>.+)\.(?P<arch>.+)$" +) +NEVRA_RE_DNF = re.compile( + r"^(?P<exclude>!)?(?P<name>.+)-(?P<epoch>\d+):(?P<version>.+)-" + r"(?P<release>.+)\.(?P<arch>.+)$" +) class YumVersionLock: def __init__(self, module): self.module = module self.params = module.params - self.yum_bin = module.get_bin_path('yum', required=True) + self.yum_bin = module.get_bin_path("yum", required=True) def get_versionlock_packages(self): - """ Get an overview of all packages on yum versionlock """ + """Get an overview of all packages on yum versionlock""" rc, out, err = self.module.run_command([self.yum_bin, "versionlock", "list"]) if rc == 0: return out - elif rc == 1 and 'o such command:' in err: - self.module.fail_json(msg=f"Error: Please install rpm package yum-plugin-versionlock : {to_native(err)}{to_native(out)}") + elif rc == 1 and "o such command:" in err: + self.module.fail_json( + msg=f"Error: Please install rpm package yum-plugin-versionlock : {to_native(err)}{to_native(out)}" + ) self.module.fail_json(msg=f"Error: {to_native(err)}{to_native(out)}") def ensure_state(self, packages, command): - """ Ensure packages state """ + """Ensure packages state""" rc, out, err = self.module.run_command([self.yum_bin, "-q", "versionlock", command] + packages) # If no package can be found this will be written on stdout with rc 0 - if 'No package found for' in out: + if "No package found for" in out: self.module.fail_json(msg=out) if rc == 0: return True @@ -133,23 +139,23 @@ def match(entry, name): return False if fnmatch(m.group("name"), name): match = True - if entry.rstrip('.*') == name: + if entry.rstrip(".*") == name: match = True return match def main(): - """ start main program to add/delete a package to yum versionlock """ + """start main program to add/delete a package to yum versionlock""" module = AnsibleModule( argument_spec=dict( - state=dict(default='present', choices=['present', 'absent']), - name=dict(required=True, type='list', elements='str'), + state=dict(default="present", choices=["present", "absent"]), + name=dict(required=True, type="list", elements="str"), ), - supports_check_mode=True + supports_check_mode=True, ) - state = module.params['state'] - packages = module.params['name'] + state = module.params["state"] + packages = module.params["name"] changed = False yum_v = YumVersionLock(module) @@ -159,8 +165,8 @@ def main(): # Ensure versionlock state of packages packages_list = [] - if state in ('present', ): - command = 'add' + if state in ("present",): + command = "add" for single_pkg in packages: if not any(match(pkg, single_pkg) for pkg in versionlock_packages.split()): packages_list.append(single_pkg) @@ -169,8 +175,8 @@ def main(): changed = True else: changed = yum_v.ensure_state(packages_list, command) - elif state in ('absent', ): - command = 'delete' + elif state in ("absent",): + command = "delete" for single_pkg in packages: if any(match(pkg, single_pkg) for pkg in versionlock_packages.split()): packages_list.append(single_pkg) @@ -180,14 +186,8 @@ def main(): else: changed = yum_v.ensure_state(packages_list, command) - module.exit_json( - changed=changed, - meta={ - "packages": packages, - "state": state - } - ) + module.exit_json(changed=changed, meta={"packages": packages, "state": state}) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/zfs.py
b/plugins/modules/zfs.py index 38aea68a6bf..8c9cef8497c 100644 --- a/plugins/modules/zfs.py +++ b/plugins/modules/zfs.py @@ -95,26 +95,25 @@ class Zfs: - def __init__(self, module, name, extra_zfs_properties): self.module = module self.name = name self.extra_zfs_properties = extra_zfs_properties self.changed = False - self.zfs_cmd = module.get_bin_path('zfs', True) - self.zpool_cmd = module.get_bin_path('zpool', True) - self.pool = name.split('/')[0].split('@')[0] - self.is_solaris = os.uname()[0] == 'SunOS' + self.zfs_cmd = module.get_bin_path("zfs", True) + self.zpool_cmd = module.get_bin_path("zpool", True) + self.pool = name.split("/")[0].split("@")[0] + self.is_solaris = os.uname()[0] == "SunOS" self.is_openzfs = self.check_openzfs() self.enhanced_sharing = self.check_enhanced_sharing() def check_openzfs(self): cmd = [self.zpool_cmd] - cmd.extend(['get', 'version']) + cmd.extend(["get", "version"]) cmd.append(self.pool) (rc, out, err) = self.module.run_command(cmd, check_rc=True) version = out.splitlines()[-1].split()[2] - if version == '-': + if version == "-": return True if int(version) == 5000: return True @@ -123,7 +122,7 @@ def check_openzfs(self): def check_enhanced_sharing(self): if self.is_solaris and not self.is_openzfs: cmd = [self.zpool_cmd] - cmd.extend(['get', 'version']) + cmd.extend(["get", "version"]) cmd.append(self.pool) (rc, out, err) = self.module.run_command(cmd, check_rc=True) version = out.splitlines()[-1].split()[2] @@ -132,7 +131,7 @@ def check_enhanced_sharing(self): return False def exists(self): - cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name] + cmd = [self.zfs_cmd, "list", "-t", "all", self.name] rc, dummy, dummy = self.module.run_command(cmd) return rc == 0 @@ -141,30 +140,30 @@ def create(self): self.changed = True return extra_zfs_properties = self.extra_zfs_properties - origin = self.module.params.get('origin') + origin = self.module.params.get("origin") cmd = [self.zfs_cmd] if "@" in self.name: - action = 'snapshot' + action = "snapshot" elif origin: - action = 'clone' + action = "clone" else: - action = 'create' + action = "create" cmd.append(action) - if action in ['create', 'clone']: - cmd += ['-p'] + if action in ["create", "clone"]: + cmd += ["-p"] if extra_zfs_properties: for prop, value in extra_zfs_properties.items(): - if prop == 'volsize': - cmd += ['-V', value] - elif prop == 'volblocksize': - cmd += ['-b', value] + if prop == "volsize": + cmd += ["-V", value] + elif prop == "volblocksize": + cmd += ["-b", value] else: - cmd += ['-o', f'{prop}={value}'] - if origin and action == 'clone': + cmd += ["-o", f"{prop}={value}"] + if origin and action == "clone": cmd.append(origin) cmd.append(self.name) self.module.run_command(cmd, check_rc=True) @@ -174,7 +173,7 @@ def destroy(self): if self.module.check_mode: self.changed = True return - cmd = [self.zfs_cmd, 'destroy', '-R', self.name] + cmd = [self.zfs_cmd, "destroy", "-R", self.name] self.module.run_command(cmd, check_rc=True) self.changed = True @@ -182,18 +181,18 @@ def set_property(self, prop, value): if self.module.check_mode: self.changed = True return - cmd = [self.zfs_cmd, 'set', f"{prop}={value!s}", self.name] + cmd = [self.zfs_cmd, "set", f"{prop}={value!s}", self.name] self.module.run_command(cmd, check_rc=True) def set_properties_if_changed(self): - diff = {'before': {'extra_zfs_properties': {}}, 'after': {'extra_zfs_properties': {}}} + diff = {"before": {"extra_zfs_properties": {}}, "after": {"extra_zfs_properties": {}}} current_properties = self.list_properties() for prop, 
value in self.extra_zfs_properties.items(): current_value = self.get_property(prop, current_properties) if current_value != value: self.set_property(prop, value) - diff['before']['extra_zfs_properties'][prop] = current_value - diff['after']['extra_zfs_properties'][prop] = value + diff["before"]["extra_zfs_properties"][prop] = current_value + diff["after"]["extra_zfs_properties"][prop] = value if self.module.check_mode: return diff updated_properties = self.list_properties() @@ -203,38 +202,38 @@ def set_properties_if_changed(self): self.module.fail_json(msg=f"zfsprop was not present after being successfully set: {prop}") if self.get_property(prop, current_properties) != value: self.changed = True - if prop in diff['after']['extra_zfs_properties']: - diff['after']['extra_zfs_properties'][prop] = value + if prop in diff["after"]["extra_zfs_properties"]: + diff["after"]["extra_zfs_properties"][prop] = value return diff def list_properties(self): - cmd = [self.zfs_cmd, 'get', '-H', '-p', '-o', "property,source"] + cmd = [self.zfs_cmd, "get", "-H", "-p", "-o", "property,source"] if self.enhanced_sharing: - cmd += ['-e'] - cmd += ['all', self.name] + cmd += ["-e"] + cmd += ["all", self.name] rc, out, err = self.module.run_command(cmd) properties = [] for line in out.splitlines(): - prop, source = line.split('\t') + prop, source = line.split("\t") # include source '-' so that creation-only properties are not removed # to avoids errors when the dataset already exists and the property is not changed # this scenario is most likely when the same playbook is run more than once - if source in ('local', 'received', '-'): + if source in ("local", "received", "-"): properties.append(prop) return properties def get_property(self, name, list_of_properties): # Add alias for enhanced sharing properties if self.enhanced_sharing: - if name == 'sharenfs': - name = 'share.nfs' - elif name == 'sharesmb': - name = 'share.smb' + if name == "sharenfs": + name = "share.nfs" + elif name == "sharesmb": + name = "share.smb" if name not in list_of_properties: return None - cmd = [self.zfs_cmd, 'get', '-H', '-p', '-o', "value"] + cmd = [self.zfs_cmd, "get", "-H", "-p", "-o", "value"] if self.enhanced_sharing: - cmd += ['-e'] + cmd += ["-e"] cmd += [name, self.name] rc, out, err = self.module.run_command(cmd) if rc != 0: @@ -246,61 +245,60 @@ def get_property(self, name, list_of_properties): def main(): - module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', required=True, choices=['absent', 'present']), - origin=dict(type='str'), - extra_zfs_properties=dict(type='dict', default={}), + name=dict(type="str", required=True), + state=dict(type="str", required=True, choices=["absent", "present"]), + origin=dict(type="str"), + extra_zfs_properties=dict(type="dict", default={}), ), supports_check_mode=True, ) - state = module.params.get('state') - name = module.params.get('name') + state = module.params.get("state") + name = module.params.get("name") - if module.params.get('origin') and '@' in name: - module.fail_json(msg='cannot specify origin when operating on a snapshot') + if module.params.get("origin") and "@" in name: + module.fail_json(msg="cannot specify origin when operating on a snapshot") # Reverse the boolification of zfs properties - for prop, value in module.params['extra_zfs_properties'].items(): + for prop, value in module.params["extra_zfs_properties"].items(): if isinstance(value, bool): if value is True: - module.params['extra_zfs_properties'][prop] = 'on' 
+ module.params["extra_zfs_properties"][prop] = "on" else: - module.params['extra_zfs_properties'][prop] = 'off' + module.params["extra_zfs_properties"][prop] = "off" else: - module.params['extra_zfs_properties'][prop] = value + module.params["extra_zfs_properties"][prop] = value result = dict( name=name, state=state, ) - zfs = Zfs(module, name, module.params['extra_zfs_properties']) + zfs = Zfs(module, name, module.params["extra_zfs_properties"]) - if state == 'present': + if state == "present": if zfs.exists(): - result['diff'] = zfs.set_properties_if_changed() + result["diff"] = zfs.set_properties_if_changed() else: zfs.create() - result['diff'] = {'before': {'state': 'absent'}, 'after': {'state': state}} + result["diff"] = {"before": {"state": "absent"}, "after": {"state": state}} - elif state == 'absent': + elif state == "absent": if zfs.exists(): zfs.destroy() - result['diff'] = {'before': {'state': 'present'}, 'after': {'state': state}} + result["diff"] = {"before": {"state": "present"}, "after": {"state": state}} else: - result['diff'] = {} + result["diff"] = {} - result['diff']['before_header'] = name - result['diff']['after_header'] = name + result["diff"]["before_header"] = name + result["diff"]["after_header"] = name result.update(zfs.extra_zfs_properties) - result['changed'] = zfs.changed + result["changed"] = zfs.changed module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/zfs_delegate_admin.py b/plugins/modules/zfs_delegate_admin.py index c2de96f21f5..231f00e88ed 100644 --- a/plugins/modules/zfs_delegate_admin.py +++ b/plugins/modules/zfs_delegate_admin.py @@ -119,85 +119,82 @@ class ZfsDelegateAdmin: def __init__(self, module): self.module = module - self.name = module.params.get('name') - self.state = module.params.get('state') - self.users = module.params.get('users') - self.groups = module.params.get('groups') - self.everyone = module.params.get('everyone') - self.perms = module.params.get('permissions') + self.name = module.params.get("name") + self.state = module.params.get("state") + self.users = module.params.get("users") + self.groups = module.params.get("groups") + self.everyone = module.params.get("everyone") + self.perms = module.params.get("permissions") self.scope = None self.changed = False self.initial_perms = None - self.subcommand = 'allow' + self.subcommand = "allow" self.recursive_opt = [] self.run_method = self.update self.setup(module) def setup(self, module): - """ Validate params and set up for run. 
- """ - if self.state == 'absent': - self.subcommand = 'unallow' - if module.params.get('recursive'): - self.recursive_opt = ['-r'] + """Validate params and set up for run.""" + if self.state == "absent": + self.subcommand = "unallow" + if module.params.get("recursive"): + self.recursive_opt = ["-r"] - local = module.params.get('local') - descendents = module.params.get('descendents') + local = module.params.get("local") + descendents = module.params.get("descendents") if (local and descendents) or (not local and not descendents): - self.scope = 'ld' + self.scope = "ld" elif local: - self.scope = 'l' + self.scope = "l" elif descendents: - self.scope = 'd' + self.scope = "d" else: - self.module.fail_json(msg='Impossible value for local and descendents') + self.module.fail_json(msg="Impossible value for local and descendents") if not (self.users or self.groups or self.everyone): - if self.state == 'present': - self.module.fail_json(msg='One of `users`, `groups`, or `everyone` must be set') - elif self.state == 'absent': + if self.state == "present": + self.module.fail_json(msg="One of `users`, `groups`, or `everyone` must be set") + elif self.state == "absent": self.run_method = self.clear # ansible ensures the else cannot happen here - self.zfs_path = module.get_bin_path('zfs', True) + self.zfs_path = module.get_bin_path("zfs", True) @property def current_perms(self): - """ Parse the output of `zfs allow ` to retrieve current permissions. - """ - out = self.run_zfs_raw(subcommand='allow') + """Parse the output of `zfs allow ` to retrieve current permissions.""" + out = self.run_zfs_raw(subcommand="allow") perms = { - 'l': {'u': {}, 'g': {}, 'e': []}, - 'd': {'u': {}, 'g': {}, 'e': []}, - 'ld': {'u': {}, 'g': {}, 'e': []}, + "l": {"u": {}, "g": {}, "e": []}, + "d": {"u": {}, "g": {}, "e": []}, + "ld": {"u": {}, "g": {}, "e": []}, } linemap = { - 'Local permissions:': 'l', - 'Descendent permissions:': 'd', - 'Local+Descendent permissions:': 'ld', + "Local permissions:": "l", + "Descendent permissions:": "d", + "Local+Descendent permissions:": "ld", } scope = None for line in out.splitlines(): scope = linemap.get(line, scope) if not scope: continue - if ' (unknown: ' in line: - line = line.replace('(unknown: ', '', 1) - line = line.replace(')', '', 1) + if " (unknown: " in line: + line = line.replace("(unknown: ", "", 1) + line = line.replace(")", "", 1) try: - if line.startswith('\tuser ') or line.startswith('\tgroup '): + if line.startswith("\tuser ") or line.startswith("\tgroup "): ent_type, ent, cur_perms = line.split() - perms[scope][ent_type[0]][ent] = cur_perms.split(',') - elif line.startswith('\teveryone '): - perms[scope]['e'] = line.split()[1].split(',') + perms[scope][ent_type[0]][ent] = cur_perms.split(",") + elif line.startswith("\teveryone "): + perms[scope]["e"] = line.split()[1].split(",") except ValueError: self.module.fail_json(msg=f"Cannot parse user/group permission output by `zfs allow`: '{line}'") return perms def run_zfs_raw(self, subcommand=None, args=None): - """ Run a raw zfs command, fail on error. - """ + """Run a raw zfs command, fail on error.""" cmd = [self.zfs_path, subcommand or self.subcommand] + (args or []) + [self.name] rc, out, err = self.module.run_command(cmd) if rc: @@ -205,70 +202,66 @@ def run_zfs_raw(self, subcommand=None, args=None): return out def run_zfs(self, args): - """ Run zfs allow/unallow with appropriate options as per module arguments. 
- """ + """Run zfs allow/unallow with appropriate options as per module arguments.""" args = self.recursive_opt + [f"-{self.scope}"] + args if self.perms: - args.append(','.join(self.perms)) + args.append(",".join(self.perms)) return self.run_zfs_raw(args=args) def clear(self): - """ Called by run() to clear all permissions. - """ + """Called by run() to clear all permissions.""" changed = False - stdout = '' - for scope, ent_type in product(('ld', 'l', 'd'), ('u', 'g')): + stdout = "" + for scope, ent_type in product(("ld", "l", "d"), ("u", "g")): for ent in self.initial_perms[scope][ent_type].keys(): - stdout += self.run_zfs([f'-{ent_type}', ent]) + stdout += self.run_zfs([f"-{ent_type}", ent]) changed = True - for scope in ('ld', 'l', 'd'): - if self.initial_perms[scope]['e']: - stdout += self.run_zfs(['-e']) + for scope in ("ld", "l", "d"): + if self.initial_perms[scope]["e"]: + stdout += self.run_zfs(["-e"]) changed = True return (changed, stdout) def update(self): - """ Update permissions as per module arguments. - """ - stdout = '' - for ent_type, entities in (('u', self.users), ('g', self.groups)): + """Update permissions as per module arguments.""" + stdout = "" + for ent_type, entities in (("u", self.users), ("g", self.groups)): if entities: - stdout += self.run_zfs([f'-{ent_type}', ','.join(entities)]) + stdout += self.run_zfs([f"-{ent_type}", ",".join(entities)]) if self.everyone: - stdout += self.run_zfs(['-e']) + stdout += self.run_zfs(["-e"]) return (self.initial_perms != self.current_perms, stdout) def run(self): - """ Run an operation, return results for Ansible. - """ - exit_args = {'state': self.state} + """Run an operation, return results for Ansible.""" + exit_args = {"state": self.state} self.initial_perms = self.current_perms - exit_args['changed'], stdout = self.run_method() - if exit_args['changed']: - exit_args['msg'] = 'ZFS delegated admin permissions updated' - exit_args['stdout'] = stdout + exit_args["changed"], stdout = self.run_method() + if exit_args["changed"]: + exit_args["msg"] = "ZFS delegated admin permissions updated" + exit_args["stdout"] = stdout self.module.exit_json(**exit_args) def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), - users=dict(type='list', elements='str'), - groups=dict(type='list', elements='str'), - everyone=dict(type='bool', default=False), - permissions=dict(type='list', elements='str'), - local=dict(type='bool'), - descendents=dict(type='bool'), - recursive=dict(type='bool', default=False), + name=dict(type="str", required=True), + state=dict(type="str", default="present", choices=["absent", "present"]), + users=dict(type="list", elements="str"), + groups=dict(type="list", elements="str"), + everyone=dict(type="bool", default=False), + permissions=dict(type="list", elements="str"), + local=dict(type="bool"), + descendents=dict(type="bool"), + recursive=dict(type="bool", default=False), ), supports_check_mode=False, - required_if=[('state', 'present', ['permissions'])], + required_if=[("state", "present", ["permissions"])], ) zfs_delegate_admin = ZfsDelegateAdmin(module) zfs_delegate_admin.run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/zfs_facts.py b/plugins/modules/zfs_facts.py index cc37ecbfb96..f6f4ecf8b4c 100644 --- a/plugins/modules/zfs_facts.py +++ b/plugins/modules/zfs_facts.py @@ -155,99 +155,98 @@ from ansible.module_utils.basic import AnsibleModule 
-SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark'] +SUPPORTED_TYPES = ["all", "filesystem", "volume", "snapshot", "bookmark"] class ZFSFacts: def __init__(self, module): - self.module = module - self.name = module.params['name'] - self.recurse = module.params['recurse'] - self.parsable = module.params['parsable'] - self.properties = module.params['properties'] - self.type = module.params['type'] - self.depth = module.params['depth'] + self.name = module.params["name"] + self.recurse = module.params["recurse"] + self.parsable = module.params["parsable"] + self.properties = module.params["properties"] + self.type = module.params["type"] + self.depth = module.params["depth"] self._datasets = defaultdict(dict) self.facts = [] def dataset_exists(self): - cmd = [self.module.get_bin_path('zfs'), 'list', self.name] + cmd = [self.module.get_bin_path("zfs"), "list", self.name] (rc, out, err) = self.module.run_command(cmd) return rc == 0 def get_facts(self): - cmd = [self.module.get_bin_path('zfs'), 'get', '-H'] + cmd = [self.module.get_bin_path("zfs"), "get", "-H"] if self.parsable: - cmd.append('-p') + cmd.append("-p") if self.recurse: - cmd.append('-r') + cmd.append("-r") if self.depth != 0: - cmd.append('-d') - cmd.append(f'{self.depth}') + cmd.append("-d") + cmd.append(f"{self.depth}") if self.type: - cmd.append('-t') - cmd.append(','.join(self.type)) - cmd.extend(['-o', 'name,property,value', self.properties, self.name]) + cmd.append("-t") + cmd.append(",".join(self.type)) + cmd.extend(["-o", "name,property,value", self.properties, self.name]) (rc, out, err) = self.module.run_command(cmd) if rc != 0: - self.module.fail_json(msg=f'Error while trying to get facts about ZFS dataset: {self.name}', - stderr=err, - rc=rc) + self.module.fail_json( + msg=f"Error while trying to get facts about ZFS dataset: {self.name}", stderr=err, rc=rc + ) for line in out.splitlines(): - dataset, property, value = line.split('\t') + dataset, property, value = line.split("\t") self._datasets[dataset].update({property: value}) for k, v in self._datasets.items(): - v.update({'name': k}) + v.update({"name": k}) self.facts.append(v) - return {'ansible_zfs_datasets': self.facts} + return {"ansible_zfs_datasets": self.facts} def main(): module = AnsibleModule( argument_spec=dict( - name=dict(required=True, aliases=['ds', 'dataset'], type='str'), - recurse=dict(default=False, type='bool'), - parsable=dict(default=False, type='bool'), - properties=dict(default='all', type='str'), - type=dict(default='all', type='list', elements='str', choices=SUPPORTED_TYPES), - depth=dict(default=0, type='int') + name=dict(required=True, aliases=["ds", "dataset"], type="str"), + recurse=dict(default=False, type="bool"), + parsable=dict(default=False, type="bool"), + properties=dict(default="all", type="str"), + type=dict(default="all", type="list", elements="str", choices=SUPPORTED_TYPES), + depth=dict(default=0, type="int"), ), - supports_check_mode=True + supports_check_mode=True, ) - if 'all' in module.params['type'] and len(module.params['type']) > 1: + if "all" in module.params["type"] and len(module.params["type"]) > 1: module.fail_json(msg="Value 'all' for parameter 'type' is mutually exclusive with other values") zfs_facts = ZFSFacts(module) result = {} - result['changed'] = False - result['name'] = zfs_facts.name + result["changed"] = False + result["name"] = zfs_facts.name if zfs_facts.parsable: - result['parsable'] = zfs_facts.parsable + result["parsable"] = zfs_facts.parsable if zfs_facts.recurse: - 
result['recurse'] = zfs_facts.recurse + result["recurse"] = zfs_facts.recurse if not zfs_facts.dataset_exists(): - module.fail_json(msg=f'ZFS dataset {zfs_facts.name} does not exist!') + module.fail_json(msg=f"ZFS dataset {zfs_facts.name} does not exist!") - result['ansible_facts'] = zfs_facts.get_facts() + result["ansible_facts"] = zfs_facts.get_facts() module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/znode.py b/plugins/modules/znode.py index b87c94dfd28..c0cef494eea 100644 --- a/plugins/modules/znode.py +++ b/plugins/modules/znode.py @@ -136,6 +136,7 @@ try: from kazoo.client import KazooClient from kazoo.handlers.threading import KazooTimeoutError + KAZOO_INSTALLED = True except ImportError: KAZOO_IMP_ERR = traceback.format_exc() @@ -148,46 +149,39 @@ def main(): module = AnsibleModule( argument_spec=dict( - hosts=dict(required=True, type='str'), - name=dict(required=True, type='str'), - value=dict(type='str'), - op=dict(choices=['get', 'wait', 'list']), - state=dict(choices=['present', 'absent']), - timeout=dict(default=300, type='int'), - recursive=dict(default=False, type='bool'), - auth_scheme=dict(default='digest', choices=['digest', 'sasl']), - auth_credential=dict(type='str', no_log=True), - use_tls=dict(default=False, type='bool'), + hosts=dict(required=True, type="str"), + name=dict(required=True, type="str"), + value=dict(type="str"), + op=dict(choices=["get", "wait", "list"]), + state=dict(choices=["present", "absent"]), + timeout=dict(default=300, type="int"), + recursive=dict(default=False, type="bool"), + auth_scheme=dict(default="digest", choices=["digest", "sasl"]), + auth_credential=dict(type="str", no_log=True), + use_tls=dict(default=False, type="bool"), ), - supports_check_mode=False + supports_check_mode=False, ) if not KAZOO_INSTALLED: - module.fail_json(msg=missing_required_lib('kazoo >= 2.1'), exception=KAZOO_IMP_ERR) + module.fail_json(msg=missing_required_lib("kazoo >= 2.1"), exception=KAZOO_IMP_ERR) check = check_params(module.params) - if not check['success']: - module.fail_json(msg=check['msg']) + if not check["success"]: + module.fail_json(msg=check["msg"]) zoo = KazooCommandProxy(module) try: zoo.start() except KazooTimeoutError: - module.fail_json(msg='The connection to the ZooKeeper ensemble timed out.') + module.fail_json(msg="The connection to the ZooKeeper ensemble timed out.") command_dict = { - 'op': { - 'get': zoo.get, - 'list': zoo.list, - 'wait': zoo.wait - }, - 'state': { - 'present': zoo.present, - 'absent': zoo.absent - } + "op": {"get": zoo.get, "list": zoo.list, "wait": zoo.wait}, + "state": {"present": zoo.present, "absent": zoo.absent}, } - command_type = 'op' if 'op' in module.params and module.params['op'] is not None else 'state' + command_type = "op" if "op" in module.params and module.params["op"] is not None else "state" method = module.params[command_type] result, result_dict = command_dict[command_type][method]() zoo.shutdown() @@ -199,36 +193,40 @@ def main(): def check_params(params): - if not params['state'] and not params['op']: - return {'success': False, 'msg': 'Please define an operation (op) or a state.'} + if not params["state"] and not params["op"]: + return {"success": False, "msg": "Please define an operation (op) or a state."} - if params['state'] and params['op']: - return {'success': False, 'msg': 'Please choose an operation (op) or a state, but not both.'} + if params["state"] and params["op"]: + return {"success": False, "msg": "Please choose an 
operation (op) or a state, but not both."} - return {'success': True} + return {"success": True} -class KazooCommandProxy(): +class KazooCommandProxy: def __init__(self, module): self.module = module - self.zk = KazooClient(module.params['hosts'], use_ssl=module.params['use_tls']) + self.zk = KazooClient(module.params["hosts"], use_ssl=module.params["use_tls"]) def absent(self): - return self._absent(self.module.params['name']) + return self._absent(self.module.params["name"]) def exists(self, znode): return self.zk.exists(znode) def list(self): - children = self.zk.get_children(self.module.params['name']) - return True, {'count': len(children), 'items': children, 'msg': 'Retrieved znodes in path.', - 'znode': self.module.params['name']} + children = self.zk.get_children(self.module.params["name"]) + return True, { + "count": len(children), + "items": children, + "msg": "Retrieved znodes in path.", + "znode": self.module.params["name"], + } def present(self): - return self._present(self.module.params['name'], self.module.params['value']) + return self._present(self.module.params["name"], self.module.params["value"]) def get(self): - return self._get(self.module.params['name']) + return self._get(self.module.params["name"]) def shutdown(self): self.zk.stop() @@ -236,32 +234,31 @@ def shutdown(self): def start(self): self.zk.start() - if self.module.params['auth_credential']: - self.zk.add_auth(self.module.params['auth_scheme'], self.module.params['auth_credential']) + if self.module.params["auth_credential"]: + self.zk.add_auth(self.module.params["auth_scheme"], self.module.params["auth_credential"]) def wait(self): - return self._wait(self.module.params['name'], self.module.params['timeout']) + return self._wait(self.module.params["name"], self.module.params["timeout"]) def _absent(self, znode): if self.exists(znode): - self.zk.delete(znode, recursive=self.module.params['recursive']) - return True, {'changed': True, 'msg': 'The znode was deleted.'} + self.zk.delete(znode, recursive=self.module.params["recursive"]) + return True, {"changed": True, "msg": "The znode was deleted."} else: - return True, {'changed': False, 'msg': 'The znode does not exist.'} + return True, {"changed": False, "msg": "The znode does not exist."} def _get(self, path): if self.exists(path): value, zstat = self.zk.get(path) stat_dict = {} for i in dir(zstat): - if not i.startswith('_'): + if not i.startswith("_"): attr = getattr(zstat, i) if isinstance(attr, (int, str)): stat_dict[i] = attr - result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value, - 'stat': stat_dict} + result = True, {"msg": "The node was retrieved.", "znode": path, "value": value, "stat": stat_dict} else: - result = False, {'msg': 'The requested node does not exist.'} + result = False, {"msg": "The requested node does not exist."} return result @@ -270,27 +267,32 @@ def _present(self, path, value): (current_value, zstat) = self.zk.get(path) if value != current_value: self.zk.set(path, to_bytes(value)) - return True, {'changed': True, 'msg': 'Updated the znode value.', 'znode': path, - 'value': value} + return True, {"changed": True, "msg": "Updated the znode value.", "znode": path, "value": value} else: - return True, {'changed': False, 'msg': 'No changes were necessary.', 'znode': path, 'value': value} + return True, {"changed": False, "msg": "No changes were necessary.", "znode": path, "value": value} else: self.zk.create(path, to_bytes(value), makepath=True) - return True, {'changed': True, 'msg': 'Created a new 
znode.', 'znode': path, 'value': value} + return True, {"changed": True, "msg": "Created a new znode.", "znode": path, "value": value} def _wait(self, path, timeout, interval=5): lim = time.time() + timeout while time.time() < lim: if self.exists(path): - return True, {'msg': 'The node appeared before the configured timeout.', - 'znode': path, 'timeout': timeout} + return True, { + "msg": "The node appeared before the configured timeout.", + "znode": path, + "timeout": timeout, + } else: time.sleep(interval) - return False, {'msg': 'The node did not appear before the operation timed out.', 'timeout': timeout, - 'znode': path} + return False, { + "msg": "The node did not appear before the operation timed out.", + "timeout": timeout, + "znode": path, + } -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/zpool.py b/plugins/modules/zpool.py index ea308561e9c..f7f8f0994fb 100644 --- a/plugins/modules/zpool.py +++ b/plugins/modules/zpool.py @@ -141,8 +141,19 @@ class Zpool: - - def __init__(self, module, name, disable_new_features, force, pool_properties, filesystem_properties, mountpoint, altroot, temp_name, vdevs): + def __init__( + self, + module, + name, + disable_new_features, + force, + pool_properties, + filesystem_properties, + mountpoint, + altroot, + temp_name, + vdevs, + ): self.module = module self.name = name self.disable_new_features = disable_new_features @@ -153,8 +164,8 @@ def __init__(self, module, name, disable_new_features, force, pool_properties, f self.altroot = altroot self.temp_name = temp_name self.vdevs = vdevs - self.zpool_cmd = module.get_bin_path('zpool', required=True) - self.zfs_cmd = module.get_bin_path('zfs', required=True) + self.zpool_cmd = module.get_bin_path("zpool", required=True) + self.zfs_cmd = module.get_bin_path("zfs", required=True) self.changed = False self.zpool_runner = CmdRunner( @@ -162,39 +173,39 @@ def __init__(self, module, name, disable_new_features, force, pool_properties, f command=self.zpool_cmd, arg_formats=dict( subcommand=cmd_runner_fmt.as_list(), - disable_new_features=cmd_runner_fmt.as_bool('-d'), - force=cmd_runner_fmt.as_bool('-f'), - dry_run=cmd_runner_fmt.as_bool('-n'), + disable_new_features=cmd_runner_fmt.as_bool("-d"), + force=cmd_runner_fmt.as_bool("-f"), + dry_run=cmd_runner_fmt.as_bool("-n"), pool_properties=cmd_runner_fmt.as_func( - lambda props: sum([['-o', f'{prop}={value}'] for prop, value in (props or {}).items()], []) + lambda props: sum([["-o", f"{prop}={value}"] for prop, value in (props or {}).items()], []) ), filesystem_properties=cmd_runner_fmt.as_func( - lambda props: sum([['-O', f'{prop}={value}'] for prop, value in (props or {}).items()], []) + lambda props: sum([["-O", f"{prop}={value}"] for prop, value in (props or {}).items()], []) ), - mountpoint=cmd_runner_fmt.as_opt_val('-m'), - altroot=cmd_runner_fmt.as_opt_val('-R'), - temp_name=cmd_runner_fmt.as_opt_val('-t'), + mountpoint=cmd_runner_fmt.as_opt_val("-m"), + altroot=cmd_runner_fmt.as_opt_val("-R"), + temp_name=cmd_runner_fmt.as_opt_val("-t"), name=cmd_runner_fmt.as_list(), vdevs=cmd_runner_fmt.as_func( lambda vdevs: sum( [ - ([vdev['role']] if vdev.get('role') else []) - + ([] if vdev.get('type', 'stripe') == 'stripe' else [vdev['type']]) - + vdev.get('disks', []) + ([vdev["role"]] if vdev.get("role") else []) + + ([] if vdev.get("type", "stripe") == "stripe" else [vdev["type"]]) + + vdev.get("disks", []) for vdev in (vdevs or []) ], [], ) ), vdev_name=cmd_runner_fmt.as_list(), - 
scripted=cmd_runner_fmt.as_bool('-H'), - parsable=cmd_runner_fmt.as_bool('-p'), - columns=cmd_runner_fmt.as_opt_val('-o'), + scripted=cmd_runner_fmt.as_bool("-H"), + parsable=cmd_runner_fmt.as_bool("-p"), + columns=cmd_runner_fmt.as_opt_val("-o"), properties=cmd_runner_fmt.as_list(), assignment=cmd_runner_fmt.as_list(), - full_paths=cmd_runner_fmt.as_bool('-P'), - real_paths=cmd_runner_fmt.as_bool('-L'), - ) + full_paths=cmd_runner_fmt.as_bool("-P"), + real_paths=cmd_runner_fmt.as_bool("-L"), + ), ) self.zfs_runner = CmdRunner( @@ -202,49 +213,49 @@ def __init__(self, module, name, disable_new_features, force, pool_properties, f command=self.zfs_cmd, arg_formats=dict( subcommand=cmd_runner_fmt.as_list(), - scripted=cmd_runner_fmt.as_bool('-H'), - columns=cmd_runner_fmt.as_opt_val('-o'), + scripted=cmd_runner_fmt.as_bool("-H"), + columns=cmd_runner_fmt.as_opt_val("-o"), properties=cmd_runner_fmt.as_list(), assignment=cmd_runner_fmt.as_list(), - name=cmd_runner_fmt.as_list() - ) + name=cmd_runner_fmt.as_list(), + ), ) def exists(self): - with self.zpool_runner('subcommand name') as ctx: - rc, stdout, stderr = ctx.run(subcommand='list', name=self.name) + with self.zpool_runner("subcommand name") as ctx: + rc, stdout, stderr = ctx.run(subcommand="list", name=self.name) return rc == 0 def create(self): with self.zpool_runner( - 'subcommand disable_new_features force dry_run pool_properties filesystem_properties mountpoint altroot temp_name name vdevs', - check_rc=True + "subcommand disable_new_features force dry_run pool_properties filesystem_properties mountpoint altroot temp_name name vdevs", + check_rc=True, ) as ctx: - rc, stdout, stderr = ctx.run(subcommand='create', dry_run=self.module.check_mode) + rc, stdout, stderr = ctx.run(subcommand="create", dry_run=self.module.check_mode) self.changed = True if self.module.check_mode: - return {'prepared': stdout} + return {"prepared": stdout} def destroy(self): if self.module.check_mode: self.changed = True return - with self.zpool_runner('subcommand name', check_rc=True) as ctx: - rc, stdout, stderr = ctx.run(subcommand='destroy') + with self.zpool_runner("subcommand name", check_rc=True) as ctx: + rc, stdout, stderr = ctx.run(subcommand="destroy") self.changed = True def list_pool_properties(self): - with self.zpool_runner('subcommand scripted columns properties name', check_rc=True) as ctx: + with self.zpool_runner("subcommand scripted columns properties name", check_rc=True) as ctx: rc, stdout, stderr = ctx.run( - subcommand='get', + subcommand="get", scripted=True, - columns='property,value', - properties='all', + columns="property,value", + properties="all", ) props = {} for line in stdout.splitlines(): - prop, value = line.split('\t', 1) + prop, value = line.split("\t", 1) props[prop] = value return props @@ -256,24 +267,24 @@ def set_pool_properties_if_changed(self): if current.get(prop) != str(value): before[prop] = current.get(prop) if not self.module.check_mode: - with self.zpool_runner('subcommand assignment name', check_rc=True) as ctx: - rc, stdout, stderr = ctx.run(subcommand='set', assignment=f'{prop}={value}') + with self.zpool_runner("subcommand assignment name", check_rc=True) as ctx: + rc, stdout, stderr = ctx.run(subcommand="set", assignment=f"{prop}={value}") after[prop] = str(value) self.changed = True - return {'before': {'pool_properties': before}, 'after': {'pool_properties': after}} + return {"before": {"pool_properties": before}, "after": {"pool_properties": after}} def list_filesystem_properties(self): - with 
self.zfs_runner('subcommand scripted columns properties name', check_rc=True) as ctx: + with self.zfs_runner("subcommand scripted columns properties name", check_rc=True) as ctx: rc, stdout, stderr = ctx.run( - subcommand='get', + subcommand="get", scripted=True, - columns='property,value', - properties='all', + columns="property,value", + properties="all", ) props = {} for line in stdout.splitlines(): - prop, value = line.split('\t', 1) + prop, value = line.split("\t", 1) props[prop] = value return props @@ -285,36 +296,36 @@ def set_filesystem_properties_if_changed(self): if current.get(prop) != str(value): before[prop] = current.get(prop) if not self.module.check_mode: - with self.zfs_runner('subcommand assignment name', check_rc=True) as ctx: - rc, stdout, stderr = ctx.run(subcommand='set', assignment=f'{prop}={value}') + with self.zfs_runner("subcommand assignment name", check_rc=True) as ctx: + rc, stdout, stderr = ctx.run(subcommand="set", assignment=f"{prop}={value}") after[prop] = str(value) self.changed = True - return {'before': {'filesystem_properties': before}, 'after': {'filesystem_properties': after}} + return {"before": {"filesystem_properties": before}, "after": {"filesystem_properties": after}} def base_device(self, device): - if not device.startswith('/dev/'): + if not device.startswith("/dev/"): return device # loop devices - match = re.match(r'^(/dev/loop\d+)$', device) + match = re.match(r"^(/dev/loop\d+)$", device) if match: return match.group(1) # nvme drives - match = re.match(r'^(.*?)(p\d+)$', device) + match = re.match(r"^(.*?)(p\d+)$", device) if match: return match.group(1) # sata/scsi drives - match = re.match(r'^(/dev/(?:sd|vd)[a-z])\d+$', device) + match = re.match(r"^(/dev/(?:sd|vd)[a-z])\d+$", device) if match: return match.group(1) return device def get_current_layout(self): - with self.zpool_runner('subcommand full_paths real_paths name', check_rc=True) as ctx: - rc, stdout, stderr = ctx.run(subcommand='status', full_paths=True, real_paths=True) + with self.zpool_runner("subcommand full_paths real_paths name", check_rc=True) as ctx: + rc, stdout, stderr = ctx.run(subcommand="status", full_paths=True, real_paths=True) vdevs = [] current = None @@ -322,18 +333,18 @@ def get_current_layout(self): def flush_current(current): if current: - if current.get('role') is None: - current.pop('role', None) + if current.get("role") is None: + current.pop("role", None) vdevs.append(current) return None for line in stdout.splitlines(): if not in_config: - if line.strip().startswith('config:'): + if line.strip().startswith("config:"): in_config = True continue - if not line.strip() or line.strip().startswith('NAME'): + if not line.strip() or line.strip().startswith("NAME"): continue partitions = line.split() @@ -342,109 +353,106 @@ def flush_current(current): if device == self.name: continue - if device in ('logs', 'cache', 'spares'): + if device in ("logs", "cache", "spares"): current = flush_current(current) - role = 'spare' if device == 'spares' else device.rstrip('s') - current = {'role': role, 'type': None, 'disks': []} + role = "spare" if device == "spares" else device.rstrip("s") + current = {"role": role, "type": None, "disks": []} continue - match_group = re.match(r'^(mirror|raidz\d?)-\d+$', device) + match_group = re.match(r"^(mirror|raidz\d?)-\d+$", device) if match_group: - if current and current.get('type') is not None: + if current and current.get("type") is not None: current = flush_current(current) kind = match_group.group(1) - role = current.get('role') 
if current and current.get('type') is None else None - current = {'role': role, 'type': kind, 'disks': []} + role = current.get("role") if current and current.get("type") is None else None + current = {"role": role, "type": kind, "disks": []} continue - if device.startswith('/'): + if device.startswith("/"): base_device = self.base_device(device) if current: - if current.get('type') is None: - entry = { - 'type': 'stripe', - 'disks': [base_device] - } - if current.get('role'): - entry['role'] = current['role'] + if current.get("type") is None: + entry = {"type": "stripe", "disks": [base_device]} + if current.get("role"): + entry["role"] = current["role"] vdevs.append(entry) current = None else: - current['disks'].append(base_device) + current["disks"].append(base_device) else: - vdevs.append({'type': 'stripe', 'disks': [base_device]}) + vdevs.append({"type": "stripe", "disks": [base_device]}) continue - if current and current.get('type') is not None: + if current and current.get("type") is not None: current = flush_current(current) return vdevs def normalize_vdevs(self, vdevs): - alias = {'raidz': 'raidz1'} + alias = {"raidz": "raidz1"} normalized = [] for vdev in vdevs: - normalized_type = alias.get(vdev.get('type', 'stripe'), vdev.get('type', 'stripe')) + normalized_type = alias.get(vdev.get("type", "stripe"), vdev.get("type", "stripe")) entry = { - 'type': normalized_type, - 'disks': sorted(vdev['disks']), + "type": normalized_type, + "disks": sorted(vdev["disks"]), } - role = vdev.get('role') + role = vdev.get("role") if role is not None: - entry['role'] = role + entry["role"] = role normalized.append(entry) - return sorted(normalized, key=lambda x: (x.get('role', ''), x['type'], x['disks'])) + return sorted(normalized, key=lambda x: (x.get("role", ""), x["type"], x["disks"])) def diff_layout(self): current = self.normalize_vdevs(self.get_current_layout()) desired = self.normalize_vdevs(self.vdevs) - before = {'vdevs': current} - after = {'vdevs': desired} + before = {"vdevs": current} + after = {"vdevs": desired} if current != desired: self.changed = True - return {'before': before, 'after': after} + return {"before": before, "after": after} def add_vdevs(self): - invalid_properties = [k for k in self.pool_properties if k != 'ashift'] + invalid_properties = [k for k in self.pool_properties if k != "ashift"] if invalid_properties: self.module.warn(f"zpool add only supports 'ashift', ignoring: {invalid_properties}") diff = self.diff_layout() - before_vdevs = diff['before']['vdevs'] - after_vdevs = diff['after']['vdevs'] + before_vdevs = diff["before"]["vdevs"] + after_vdevs = diff["after"]["vdevs"] to_add = [vdev for vdev in after_vdevs if vdev not in before_vdevs] if not to_add: return {} - with self.zpool_runner('subcommand force dry_run pool_properties name vdevs', check_rc=True) as ctx: + with self.zpool_runner("subcommand force dry_run pool_properties name vdevs", check_rc=True) as ctx: rc, stdout, stderr = ctx.run( - subcommand='add', + subcommand="add", dry_run=self.module.check_mode, - pool_properties={'ashift': self.pool_properties['ashift']} if 'ashift' in self.pool_properties else {}, + pool_properties={"ashift": self.pool_properties["ashift"]} if "ashift" in self.pool_properties else {}, vdevs=to_add, ) self.changed = True if self.module.check_mode: - return {'prepared': stdout} + return {"prepared": stdout} def list_vdevs_with_names(self): - with self.zpool_runner('subcommand full_paths real_paths name', check_rc=True) as ctx: - rc, stdout, stderr = 
ctx.run(subcommand='status', full_paths=True, real_paths=True) + with self.zpool_runner("subcommand full_paths real_paths name", check_rc=True) as ctx: + rc, stdout, stderr = ctx.run(subcommand="status", full_paths=True, real_paths=True) in_cfg = False saw_pool = False vdevs = [] current = None for line in stdout.splitlines(): if not in_cfg: - if line.strip().startswith('config:'): + if line.strip().startswith("config:"): in_cfg = True continue - if not line.strip() or line.strip().startswith('NAME'): + if not line.strip() or line.strip().startswith("NAME"): continue partitions = line.strip().split() device = partitions[0] @@ -452,97 +460,100 @@ def list_vdevs_with_names(self): if device == self.name: saw_pool = True continue - if re.match(r'^(mirror|raidz\d?)\-\d+$', device) or device in ('cache', 'logs', 'spares'): + if re.match(r"^(mirror|raidz\d?)\-\d+$", device) or device in ("cache", "logs", "spares"): if current: vdevs.append(current) - vdev_type = ('stripe' if device in ('cache', 'logs', 'spares') else ('mirror' if device.startswith('mirror') else 'raidz')) - current = {'name': device, 'type': vdev_type, 'disks': []} + vdev_type = ( + "stripe" + if device in ("cache", "logs", "spares") + else ("mirror" if device.startswith("mirror") else "raidz") + ) + current = {"name": device, "type": vdev_type, "disks": []} continue - if device.startswith('/') and current: - current['disks'].append(self.base_device(device)) + if device.startswith("/") and current: + current["disks"].append(self.base_device(device)) continue - if device.startswith('/'): + if device.startswith("/"): base_device = self.base_device(device) - vdevs.append({'name': base_device, 'type': 'stripe', 'disks': [base_device]}) + vdevs.append({"name": base_device, "type": "stripe", "disks": [base_device]}) if current: vdevs.append(current) return vdevs def remove_vdevs(self): current = self.list_vdevs_with_names() - current_disks = {disk for vdev in current for disk in vdev['disks']} - desired_disks = {disk for vdev in self.vdevs for disk in vdev.get('disks', [])} + current_disks = {disk for vdev in current for disk in vdev["disks"]} + desired_disks = {disk for vdev in self.vdevs for disk in vdev.get("disks", [])} gone = current_disks - desired_disks - to_remove = [vdev['name'] for vdev in current if any(disk in gone for disk in vdev['disks'])] + to_remove = [vdev["name"] for vdev in current if any(disk in gone for disk in vdev["disks"])] if not to_remove: return {} - with self.zpool_runner('subcommand dry_run name vdev_name', check_rc=True) as ctx: - rc, stdout, stderr = ctx.run( - subcommand='remove', dry_run=self.module.check_mode, vdev_name=to_remove) + with self.zpool_runner("subcommand dry_run name vdev_name", check_rc=True) as ctx: + rc, stdout, stderr = ctx.run(subcommand="remove", dry_run=self.module.check_mode, vdev_name=to_remove) self.changed = True if self.module.check_mode: - return {'prepared': stdout} - before = [vdev['name'] for vdev in current] + return {"prepared": stdout} + before = [vdev["name"] for vdev in current] after = [name for name in before if name not in to_remove] - return {'before': {'vdevs': before}, 'after': {'vdevs': after}} + return {"before": {"vdevs": before}, "after": {"vdevs": after}} def main(): module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), - state=dict(type='str', choices=['present', 'absent'], default='present'), - disable_new_features=dict(type='bool', default=False), - force=dict(type='bool', default=False), - pool_properties=dict(type='dict', 
default={}), - filesystem_properties=dict(type='dict', default={}), - mountpoint=dict(type='str'), - altroot=dict(type='str'), - temp_name=dict(type='str'), + name=dict(type="str", required=True), + state=dict(type="str", choices=["present", "absent"], default="present"), + disable_new_features=dict(type="bool", default=False), + force=dict(type="bool", default=False), + pool_properties=dict(type="dict", default={}), + filesystem_properties=dict(type="dict", default={}), + mountpoint=dict(type="str"), + altroot=dict(type="str"), + temp_name=dict(type="str"), vdevs=dict( - type='list', - elements='dict', + type="list", + elements="dict", options=dict( role=dict( - type='str', - choices=['log', 'cache', 'spare', 'dedup', 'special'], + type="str", + choices=["log", "cache", "spare", "dedup", "special"], ), type=dict( - type='str', - choices=['stripe', 'mirror', 'raidz', 'raidz1', 'raidz2', 'raidz3'], - default='stripe', + type="str", + choices=["stripe", "mirror", "raidz", "raidz1", "raidz2", "raidz3"], + default="stripe", ), disks=dict( - type='list', - elements='path', + type="list", + elements="path", required=True, ), ), ), ), supports_check_mode=True, - required_if=[('state', 'present', ['vdevs'])] + required_if=[("state", "present", ["vdevs"])], ) - name = module.params.get('name') - state = module.params.get('state') - disable_new_features = module.params.get('disable_new_features') - force = module.params.get('force') - pool_properties = module.params.get('pool_properties') - filesystem_properties = module.params.get('filesystem_properties') - mountpoint = module.params.get('mountpoint') - altroot = module.params.get('altroot') - temp_name = module.params.get('temp_name') - vdevs = module.params.get('vdevs') - - for property_key in ('pool_properties', 'filesystem_properties'): + name = module.params.get("name") + state = module.params.get("state") + disable_new_features = module.params.get("disable_new_features") + force = module.params.get("force") + pool_properties = module.params.get("pool_properties") + filesystem_properties = module.params.get("filesystem_properties") + mountpoint = module.params.get("mountpoint") + altroot = module.params.get("altroot") + temp_name = module.params.get("temp_name") + vdevs = module.params.get("vdevs") + + for property_key in ("pool_properties", "filesystem_properties"): for key, value in list(module.params.get(property_key, {}).items()): if isinstance(value, bool): - module.params[property_key][key] = 'on' if value else 'off' + module.params[property_key][key] = "on" if value else "off" - if state != 'absent': + if state != "absent": for idx, vdev in enumerate(vdevs, start=1): - disks = vdev.get('disks') + disks = vdev.get("disks") if not isinstance(disks, list) or len(disks) == 0: module.fail_json(msg=f"vdev #{idx}: at least one disk is required (got: {disks!r})") @@ -551,9 +562,20 @@ def main(): state=state, ) - zpool = Zpool(module, name, disable_new_features, force, pool_properties, filesystem_properties, mountpoint, altroot, temp_name, vdevs) + zpool = Zpool( + module, + name, + disable_new_features, + force, + pool_properties, + filesystem_properties, + mountpoint, + altroot, + temp_name, + vdevs, + ) - if state == 'present': + if state == "present": if zpool.exists(): vdev_layout_diff = zpool.diff_layout() @@ -565,46 +587,46 @@ def main(): before = {} after = {} for diff in (vdev_layout_diff, pool_properties_diff, filesystem_properties_diff): - before.update(diff.get('before', {})) - after.update(diff.get('after', {})) + 
before.update(diff.get("before", {})) + after.update(diff.get("after", {})) - result['diff'] = {'before': before, 'after': after} + result["diff"] = {"before": before, "after": after} if module.check_mode: - prepared = '' + prepared = "" for diff in (add_vdev_diff, remove_vdev_diff): - if 'prepared' in diff: - prepared += (diff['prepared'] if not prepared else f"\n{diff['prepared']}") - result['diff']['prepared'] = prepared + if "prepared" in diff: + prepared += diff["prepared"] if not prepared else f"\n{diff['prepared']}" + result["diff"]["prepared"] = prepared else: if module.check_mode: - result['diff'] = zpool.create() + result["diff"] = zpool.create() else: before_vdevs = [] desired_vdevs = zpool.normalize_vdevs(zpool.vdevs) zpool.create() - result['diff'] = { - 'before': {'state': 'absent', 'vdevs': before_vdevs}, - 'after': {'state': state, 'vdevs': desired_vdevs}, + result["diff"] = { + "before": {"state": "absent", "vdevs": before_vdevs}, + "after": {"state": state, "vdevs": desired_vdevs}, } - elif state == 'absent': + elif state == "absent": if zpool.exists(): before_vdevs = zpool.get_current_layout() zpool.destroy() - result['diff'] = { - 'before': {'state': 'present', 'vdevs': before_vdevs}, - 'after': {'state': state, 'vdevs': []}, + result["diff"] = { + "before": {"state": "present", "vdevs": before_vdevs}, + "after": {"state": state, "vdevs": []}, } else: - result['diff'] = {} + result["diff"] = {} - result['diff']['before_header'] = name - result['diff']['after_header'] = name + result["diff"]["before_header"] = name + result["diff"]["after_header"] = name - result['changed'] = zpool.changed + result["changed"] = zpool.changed module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/zpool_facts.py b/plugins/modules/zpool_facts.py index 326e8c22afd..3722941919c 100644 --- a/plugins/modules/zpool_facts.py +++ b/plugins/modules/zpool_facts.py @@ -121,25 +121,24 @@ class ZPoolFacts: def __init__(self, module): - self.module = module - self.name = module.params['name'] - self.parsable = module.params['parsable'] - self.properties = module.params['properties'] + self.name = module.params["name"] + self.parsable = module.params["parsable"] + self.properties = module.params["properties"] self._pools = defaultdict(dict) self.facts = [] def pool_exists(self): - cmd = [self.module.get_bin_path('zpool'), 'list', self.name] + cmd = [self.module.get_bin_path("zpool"), "list", self.name] rc, dummy, dummy = self.module.run_command(cmd) return rc == 0 def get_facts(self): - cmd = [self.module.get_bin_path('zpool'), 'get', '-H'] + cmd = [self.module.get_bin_path("zpool"), "get", "-H"] if self.parsable: - cmd.append('-p') - cmd.append('-o') - cmd.append('name,property,value') + cmd.append("-p") + cmd.append("-o") + cmd.append("name,property,value") cmd.append(self.properties) if self.name: cmd.append(self.name) @@ -147,46 +146,46 @@ def get_facts(self): rc, out, err = self.module.run_command(cmd, check_rc=True) for line in out.splitlines(): - pool, prop, value = line.split('\t') + pool, prop, value = line.split("\t") self._pools[pool].update({prop: value}) for k, v in self._pools.items(): - v.update({'name': k}) + v.update({"name": k}) self.facts.append(v) - return {'ansible_zfs_pools': self.facts} + return {"ansible_zfs_pools": self.facts} def main(): module = AnsibleModule( argument_spec=dict( - name=dict(aliases=['pool', 'zpool'], type='str'), - parsable=dict(default=False, type='bool'), - properties=dict(default='all', 
type='str'), + name=dict(aliases=["pool", "zpool"], type="str"), + parsable=dict(default=False, type="bool"), + properties=dict(default="all", type="str"), ), - supports_check_mode=True + supports_check_mode=True, ) zpool_facts = ZPoolFacts(module) result = { - 'changed': False, - 'name': zpool_facts.name, + "changed": False, + "name": zpool_facts.name, } if zpool_facts.parsable: - result['parsable'] = zpool_facts.parsable + result["parsable"] = zpool_facts.parsable if zpool_facts.name is not None: if zpool_facts.pool_exists(): - result['ansible_facts'] = zpool_facts.get_facts() + result["ansible_facts"] = zpool_facts.get_facts() else: - module.fail_json(msg=f'ZFS pool {zpool_facts.name} does not exist!') + module.fail_json(msg=f"ZFS pool {zpool_facts.name} does not exist!") else: - result['ansible_facts'] = zpool_facts.get_facts() + result["ansible_facts"] = zpool_facts.get_facts() module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/zypper.py b/plugins/modules/zypper.py index 31ec1d01908..b24b1507322 100644 --- a/plugins/modules/zypper.py +++ b/plugins/modules/zypper.py @@ -283,7 +283,7 @@ def __init__(self, name, prefix, version): self.name = name self.prefix = prefix self.version = version - self.shouldinstall = (prefix == '+') + self.shouldinstall = prefix == "+" def __str__(self): return self.prefix + self.name + self.version @@ -302,37 +302,37 @@ def split_name_version(name): Also allows a prefix indicating remove "-", "~" or install "+" """ - prefix = '' - if name[0] in ['-', '~', '+']: + prefix = "" + if name[0] in ["-", "~", "+"]: prefix = name[0] name = name[1:] - if prefix == '~': - prefix = '-' + if prefix == "~": + prefix = "-" - version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$') + version_check = re.compile("^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$") try: reres = version_check.match(name) name, version = reres.groups() if version is None: - version = '' + version = "" return prefix, name, version except Exception: - return prefix, name, '' + return prefix, name, "" def get_want_state(names, remove=False): packages = [] urls = [] for name in names: - if '://' in name or name.endswith('.rpm'): + if "://" in name or name.endswith(".rpm"): urls.append(name) else: prefix, pname, version = split_name_version(name) - if prefix not in ['-', '+']: + if prefix not in ["-", "+"]: if remove: - prefix = '-' + prefix = "-" else: - prefix = '+' + prefix = "+" packages.append(Package(pname, prefix, version)) return packages, urls @@ -340,8 +340,8 @@ def get_want_state(names, remove=False): def get_installed_state(m, packages): "get installed state of packages" - cmd = get_cmd(m, 'search') - cmd.extend(['--match-exact', '--details', '--installed-only']) + cmd = get_cmd(m, "search") + cmd.extend(["--match-exact", "--details", "--installed-only"]) cmd.extend([p.name for p in packages]) return parse_zypper_xml(m, cmd, fail_not_found=False)[0] @@ -352,13 +352,12 @@ def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None): try: dom = parseXML(stdout) except xml.parsers.expat.ExpatError as exc: - m.fail_json(msg=f"Failed to parse zypper xml output: {exc}", - rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) + m.fail_json(msg=f"Failed to parse zypper xml output: {exc}", rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) if rc == 104: # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found) if fail_not_found: - errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data + errmsg = 
dom.getElementsByTagName("message")[-1].childNodes[0].data m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) else: return {}, rc, stdout, stderr @@ -374,40 +373,40 @@ def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None): packages = {} else: firstrun = False - solvable_list = dom.getElementsByTagName('solvable') + solvable_list = dom.getElementsByTagName("solvable") for solvable in solvable_list: - name = solvable.getAttribute('name') + name = solvable.getAttribute("name") packages[name] = {} - packages[name]['version'] = solvable.getAttribute('edition') - packages[name]['oldversion'] = solvable.getAttribute('edition-old') - status = solvable.getAttribute('status') - packages[name]['installed'] = status == "installed" - packages[name]['group'] = solvable.parentNode.nodeName + packages[name]["version"] = solvable.getAttribute("edition") + packages[name]["oldversion"] = solvable.getAttribute("edition-old") + status = solvable.getAttribute("status") + packages[name]["installed"] = status == "installed" + packages[name]["group"] = solvable.parentNode.nodeName if rc == 103 and firstrun: # if this was the first run and it failed with 103 # run zypper again with the same command to complete update return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages) - if rc == 107 and m.params['skip_post_errors'] and firstrun: + if rc == 107 and m.params["skip_post_errors"] and firstrun: # if this was the first run and it failed with 107 with skip_post_errors flag # run zypper again with the same command to complete update return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages) # apply simple_errors logic to rc 0,102,103,106,107 - if m.params['simple_errors']: + if m.params["simple_errors"]: stdout = get_simple_errors(dom) or stdout return packages, rc, stdout, stderr # apply simple_errors logic to rc other than 0,102,103,106,107 - if m.params['simple_errors']: + if m.params["simple_errors"]: stdout = get_simple_errors(dom) or stdout - m.fail_json(msg=f'Zypper run command failed with return code {rc}.', rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) + m.fail_json(msg=f"Zypper run command failed with return code {rc}.", rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) def get_simple_errors(dom): simple_errors = [] - message_xml_tags = dom.getElementsByTagName('message') + message_xml_tags = dom.getElementsByTagName("message") if message_xml_tags is None: return None @@ -419,49 +418,55 @@ def get_simple_errors(dom): def get_cmd(m, subcommand): "puts together the basic zypper command arguments with those passed to the module" - is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade'] - is_refresh = subcommand == 'refresh' - cmd = [m.get_bin_path('zypper', required=True), '--non-interactive', '--xmlout'] - if m.params['quiet']: - cmd.append('--quiet') + is_install = subcommand in ["install", "update", "patch", "dist-upgrade"] + is_refresh = subcommand == "refresh" + cmd = [m.get_bin_path("zypper", required=True), "--non-interactive", "--xmlout"] + if m.params["quiet"]: + cmd.append("--quiet") if transactional_updates(): - cmd = [m.get_bin_path('transactional-update', required=True), '--continue', '--drop-if-no-change', '--quiet', 'run'] + cmd - if m.params['extra_args_precommand']: - args_list = m.params['extra_args_precommand'].split() + cmd = [ + m.get_bin_path("transactional-update", required=True), + "--continue", + "--drop-if-no-change", + "--quiet", + "run", + ] + cmd + if m.params["extra_args_precommand"]: + 
args_list = m.params["extra_args_precommand"].split() cmd.extend(args_list) # add global options before zypper command - if (is_install or is_refresh) and m.params['disable_gpg_check']: - cmd.append('--no-gpg-checks') - if is_install and m.params['auto_import_keys']: - cmd.append('--gpg-auto-import-keys') + if (is_install or is_refresh) and m.params["disable_gpg_check"]: + cmd.append("--no-gpg-checks") + if is_install and m.params["auto_import_keys"]: + cmd.append("--gpg-auto-import-keys") - if subcommand == 'search': - cmd.append('--disable-repositories') + if subcommand == "search": + cmd.append("--disable-repositories") cmd.append(subcommand) - if subcommand not in ['patch', 'dist-upgrade'] and not is_refresh: - cmd.extend(['--type', m.params['type']]) - if m.check_mode and subcommand != 'search': - cmd.append('--dry-run') + if subcommand not in ["patch", "dist-upgrade"] and not is_refresh: + cmd.extend(["--type", m.params["type"]]) + if m.check_mode and subcommand != "search": + cmd.append("--dry-run") if is_install: - cmd.append('--auto-agree-with-licenses') - if m.params['disable_recommends']: - cmd.append('--no-recommends') - if m.params['force']: - cmd.append('--force') - if m.params['force_resolution']: - cmd.append('--force-resolution') - if m.params['oldpackage']: - cmd.append('--oldpackage') - if m.params['replacefiles']: - cmd.append('--replacefiles') - if subcommand == 'remove': - if m.params['clean_deps']: - cmd.append('--clean-deps') - if subcommand == 'dist-upgrade' and m.params['allow_vendor_change']: - cmd.append('--allow-vendor-change') - if m.params['extra_args']: - args_list = m.params['extra_args'].split(' ') + cmd.append("--auto-agree-with-licenses") + if m.params["disable_recommends"]: + cmd.append("--no-recommends") + if m.params["force"]: + cmd.append("--force") + if m.params["force_resolution"]: + cmd.append("--force-resolution") + if m.params["oldpackage"]: + cmd.append("--oldpackage") + if m.params["replacefiles"]: + cmd.append("--replacefiles") + if subcommand == "remove": + if m.params["clean_deps"]: + cmd.append("--clean-deps") + if subcommand == "dist-upgrade" and m.params["allow_vendor_change"]: + cmd.append("--allow-vendor-change") + if m.params["extra_args"]: + args_list = m.params["extra_args"].split(" ") cmd.extend(args_list) return cmd @@ -469,38 +474,38 @@ def get_cmd(m, subcommand): def set_diff(m, retvals, result): # TODO: if there is only one package, set before/after to version numbers - packages = {'installed': [], 'removed': [], 'upgraded': []} + packages = {"installed": [], "removed": [], "upgraded": []} if result: for p in result: - group = result[p]['group'] - if group == 'to-upgrade': + group = result[p]["group"] + if group == "to-upgrade": versions = f" ({result[p]['oldversion']} => {result[p]['version']})" - packages['upgraded'].append(p + versions) - elif group == 'to-install': - packages['installed'].append(p) - elif group == 'to-remove': - packages['removed'].append(p) + packages["upgraded"].append(p + versions) + elif group == "to-install": + packages["installed"].append(p) + elif group == "to-remove": + packages["removed"].append(p) - output = '' + output = "" for state in packages: if packages[state]: output += f"{state}: {', '.join(packages[state])}\n" - if 'diff' not in retvals: - retvals['diff'] = {} - if 'prepared' not in retvals['diff']: - retvals['diff']['prepared'] = output + if "diff" not in retvals: + retvals["diff"] = {} + if "prepared" not in retvals["diff"]: + retvals["diff"]["prepared"] = output else: - 
retvals['diff']['prepared'] += f"\n{output}" + retvals["diff"]["prepared"] += f"\n{output}" def package_present(m, name, want_latest): "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove" - retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + retvals = {"rc": 0, "stdout": "", "stderr": ""} packages, urls = get_want_state(name) # add oldpackage flag when a version is given to allow downgrades if any(p.version for p in packages): - m.params['oldpackage'] = True + m.params["oldpackage"] = True if not want_latest: # for state=present: filter out already installed packages @@ -518,8 +523,8 @@ def package_present(m, name, want_latest): return None, retvals # zypper install also updates packages - cmd = get_cmd(m, 'install') - cmd.append('--') + cmd = get_cmd(m, "install") + cmd.append("--") cmd.extend(urls) # pass packages to zypper # allow for + or - prefixes in install/remove lists @@ -528,8 +533,8 @@ def package_present(m, name, want_latest): # for example "-exim postfix" runs without removing packages depending on mailserver cmd.extend([str(p) for p in packages]) - retvals['cmd'] = cmd - result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + retvals["cmd"] = cmd + result, retvals["rc"], retvals["stdout"], retvals["stderr"] = parse_zypper_xml(m, cmd) return result, retvals @@ -537,30 +542,30 @@ def package_present(m, name, want_latest): def package_update_all(m): "run update or patch on all available packages" - retvals = {'rc': 0, 'stdout': '', 'stderr': ''} - if m.params['type'] == 'patch': - cmdname = 'patch' - elif m.params['state'] == 'dist-upgrade': - cmdname = 'dist-upgrade' + retvals = {"rc": 0, "stdout": "", "stderr": ""} + if m.params["type"] == "patch": + cmdname = "patch" + elif m.params["state"] == "dist-upgrade": + cmdname = "dist-upgrade" else: - cmdname = 'update' + cmdname = "update" cmd = get_cmd(m, cmdname) - retvals['cmd'] = cmd - result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + retvals["cmd"] = cmd + result, retvals["rc"], retvals["stdout"], retvals["stderr"] = parse_zypper_xml(m, cmd) return result, retvals def package_absent(m, name): "remove the packages in name" - retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + retvals = {"rc": 0, "stdout": "", "stderr": ""} # Get package state packages, urls = get_want_state(name, remove=True) - if any(p.prefix == '+' for p in packages): + if any(p.prefix == "+" for p in packages): m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.") if urls: m.fail_json(msg="Can not remove via URL.") - if m.params['type'] == 'patch': + if m.params["type"] == "patch": m.fail_json(msg="Can not remove patches.") prerun_state = get_installed_state(m, packages) packages = [p for p in packages if p.name in prerun_state] @@ -568,40 +573,40 @@ def package_absent(m, name): if not packages: return None, retvals - cmd = get_cmd(m, 'remove') + cmd = get_cmd(m, "remove") cmd.extend([p.name + p.version for p in packages]) - retvals['cmd'] = cmd - result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + retvals["cmd"] = cmd + result, retvals["rc"], retvals["stdout"], retvals["stderr"] = parse_zypper_xml(m, cmd) return result, retvals def repo_refresh(m): "update the repositories" - retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + retvals = {"rc": 0, "stdout": "", "stderr": ""} - cmd = get_cmd(m, 'refresh') + cmd = get_cmd(m, "refresh") - retvals['cmd'] = cmd - result, retvals['rc'], 
retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + retvals["cmd"] = cmd + result, retvals["rc"], retvals["stdout"], retvals["stderr"] = parse_zypper_xml(m, cmd) return retvals def get_fs_type_and_readonly_state(mount_point): - with open('/proc/mounts', 'r') as file: + with open("/proc/mounts", "r") as file: for line in file.readlines(): fields = line.split() path = fields[1] if path == mount_point: fs = fields[2] opts = fields[3] - return fs, 'ro' in opts.split(',') + return fs, "ro" in opts.split(",") return None def transactional_updates(): - return os.path.exists('/usr/sbin/transactional-update') and get_fs_type_and_readonly_state('/') == ('btrfs', True) + return os.path.exists("/usr/sbin/transactional-update") and get_fs_type_and_readonly_state("/") == ("btrfs", True) # =========================================== @@ -611,33 +616,37 @@ def transactional_updates(): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(required=True, aliases=['pkg'], type='list', elements='str'), - state=dict(default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']), - type=dict(default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']), + name=dict(required=True, aliases=["pkg"], type="list", elements="str"), + state=dict( + default="present", choices=["absent", "installed", "latest", "present", "removed", "dist-upgrade"] + ), + type=dict( + default="package", choices=["package", "patch", "pattern", "product", "srcpackage", "application"] + ), extra_args_precommand=dict(), - disable_gpg_check=dict(default=False, type='bool'), - auto_import_keys=dict(default=False, type='bool'), - disable_recommends=dict(default=True, type='bool'), - force=dict(default=False, type='bool'), - force_resolution=dict(default=False, type='bool'), - update_cache=dict(aliases=['refresh'], default=False, type='bool'), - oldpackage=dict(default=False, type='bool'), + disable_gpg_check=dict(default=False, type="bool"), + auto_import_keys=dict(default=False, type="bool"), + disable_recommends=dict(default=True, type="bool"), + force=dict(default=False, type="bool"), + force_resolution=dict(default=False, type="bool"), + update_cache=dict(aliases=["refresh"], default=False, type="bool"), + oldpackage=dict(default=False, type="bool"), extra_args=dict(), - allow_vendor_change=dict(default=False, type='bool'), - replacefiles=dict(default=False, type='bool'), - clean_deps=dict(default=False, type='bool'), - simple_errors=dict(default=False, type='bool'), - quiet=dict(default=True, type='bool'), - skip_post_errors=dict(default=False, type='bool'), + allow_vendor_change=dict(default=False, type="bool"), + replacefiles=dict(default=False, type="bool"), + clean_deps=dict(default=False, type="bool"), + simple_errors=dict(default=False, type="bool"), + quiet=dict(default=True, type="bool"), + skip_post_errors=dict(default=False, type="bool"), ), - supports_check_mode=True + supports_check_mode=True, ) - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C") - name = module.params['name'] - state = module.params['state'] - update_cache = module.params['update_cache'] + name = module.params["name"] + state = module.params["state"] + update_cache = module.params["update_cache"] # remove empty strings from package list name = [_f for _f in name if _f] @@ -646,31 +655,31 @@ def main(): if update_cache and not module.check_mode: 
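        # Refreshing rewrites the on-disk repository cache, so it is skipped
        # in check mode; a non-zero rc from the refresh fails the run right
        # below instead of proceeding with stale metadata.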
retvals = repo_refresh(module) - if retvals['rc'] != 0: + if retvals["rc"] != 0: module.fail_json(msg="Zypper refresh run failed.", **retvals) # Perform requested action - if name == ['*'] and state in ['latest', 'dist-upgrade']: + if name == ["*"] and state in ["latest", "dist-upgrade"]: packages_changed, retvals = package_update_all(module) - elif name != ['*'] and state == 'dist-upgrade': + elif name != ["*"] and state == "dist-upgrade": module.fail_json(msg="Can not dist-upgrade specific packages.") else: - if state in ['absent', 'removed']: + if state in ["absent", "removed"]: packages_changed, retvals = package_absent(module, name) - elif state in ['installed', 'present', 'latest']: - packages_changed, retvals = package_present(module, name, state == 'latest') + elif state in ["installed", "present", "latest"]: + packages_changed, retvals = package_present(module, name, state == "latest") - retvals['changed'] = retvals['rc'] in [0, 102] and bool(packages_changed) + retvals["changed"] = retvals["rc"] in [0, 102] and bool(packages_changed) if module._diff: set_diff(module, retvals, packages_changed) - if retvals['rc'] not in [0, 102]: + if retvals["rc"] not in [0, 102]: module.fail_json(msg="Zypper run failed.", **retvals) - if not retvals['changed']: - del retvals['stdout'] - del retvals['stderr'] + if not retvals["changed"]: + del retvals["stdout"] + del retvals["stderr"] module.exit_json(name=name, state=state, update_cache=update_cache, **retvals) diff --git a/plugins/modules/zypper_repository.py b/plugins/modules/zypper_repository.py index 98630edc000..9968617ae71 100644 --- a/plugins/modules/zypper_repository.py +++ b/plugins/modules/zypper_repository.py @@ -132,6 +132,7 @@ XML_IMP_ERR = None try: from xml.dom.minidom import parseString as parseXML + HAS_XML = True except ImportError: XML_IMP_ERR = traceback.format_exc() @@ -145,12 +146,12 @@ from ansible_collections.community.general.plugins.module_utils.version import LooseVersion -REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck'] +REPO_OPTS = ["alias", "name", "priority", "enabled", "autorefresh", "gpgcheck"] def _get_cmd(module, *args): """Combines the non-interactive zypper command with arguments/subcommands""" - cmd = [module.get_bin_path('zypper', required=True), '--quiet', '--non-interactive'] + cmd = [module.get_bin_path("zypper", required=True), "--quiet", "--non-interactive"] cmd.extend(args) return cmd @@ -158,7 +159,7 @@ def _get_cmd(module, *args): def _parse_repos(module): """parses the output of zypper --xmlout repos and return a parse repo dictionary""" - cmd = _get_cmd(module, '--xmlout', 'repos') + cmd = _get_cmd(module, "--xmlout", "repos") if not HAS_XML: module.fail_json(msg=missing_required_lib("python-xml"), exception=XML_IMP_ERR) @@ -166,15 +167,15 @@ def _parse_repos(module): if rc == 0: repos = [] dom = parseXML(stdout) - repo_list = dom.getElementsByTagName('repo') + repo_list = dom.getElementsByTagName("repo") for repo in repo_list: opts = {} for o in REPO_OPTS: opts[o] = repo.getAttribute(o) try: - opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data + opts["url"] = repo.getElementsByTagName("url")[0].firstChild.data except IndexError: - opts['url'] = repo.getAttribute('metalink') + opts["url"] = repo.getAttribute("metalink") # A repo can be uniquely identified by an alias + url repos.append(opts) return repos @@ -196,16 +197,16 @@ def _repo_changes(module, realrepo, repocmp): valold = str(repocmp[k] or "") valnew = v or "" if k == "url": - if '$releasever' 
in valold or '$releasever' in valnew: - cmd = ['rpm', '-q', '--qf', '%{version}', '-f', '/etc/os-release'] + if "$releasever" in valold or "$releasever" in valnew: + cmd = ["rpm", "-q", "--qf", "%{version}", "-f", "/etc/os-release"] rc, stdout, stderr = module.run_command(cmd, check_rc=True) - valnew = valnew.replace('$releasever', stdout) - valold = valold.replace('$releasever', stdout) - if '$basearch' in valold or '$basearch' in valnew: - cmd = ['rpm', '-q', '--qf', '%{arch}', '-f', '/etc/os-release'] + valnew = valnew.replace("$releasever", stdout) + valold = valold.replace("$releasever", stdout) + if "$basearch" in valold or "$basearch" in valnew: + cmd = ["rpm", "-q", "--qf", "%{arch}", "-f", "/etc/os-release"] rc, stdout, stderr = module.run_command(cmd, check_rc=True) - valnew = valnew.replace('$basearch', stdout) - valold = valold.replace('$basearch', stdout) + valnew = valnew.replace("$basearch", stdout) + valold = valold.replace("$basearch", stdout) valold, valnew = valold.rstrip("/"), valnew.rstrip("/") if valold != valnew: return True @@ -215,16 +216,16 @@ def _repo_changes(module, realrepo, repocmp): def repo_exists(module, repodata, overwrite_multiple): """Check whether the repository already exists. - returns (exists, mod, old_repos) - exists: whether a matching (name, URL) repo exists - mod: whether there are changes compared to the existing repo - old_repos: list of matching repos + returns (exists, mod, old_repos) + exists: whether a matching (name, URL) repo exists + mod: whether there are changes compared to the existing repo + old_repos: list of matching repos """ existing_repos = _parse_repos(module) # look for repos that have matching alias or url to the one searched repos = [] - for kw in ['alias', 'url']: + for kw in ["alias", "url"]: name = repodata[kw] for oldr in existing_repos: if repodata[kw] == oldr[kw] and oldr not in repos: @@ -243,50 +244,50 @@ def repo_exists(module, repodata, overwrite_multiple): return (True, True, repos) else: errmsg = f'More than one repo matched "{name}": "{repos}".' - errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten' + errmsg += " Use overwrite_multiple to allow more than one repo to be overwritten" module.fail_json(msg=errmsg) def addmodify_repo(module, repodata, old_repos, zypper_version): "Adds the repo, removes old repos before, that would conflict." - repo = repodata['url'] - cmd = _get_cmd(module, 'addrepo', '--check') - if repodata['name']: - cmd.extend(['--name', repodata['name']]) + repo = repodata["url"] + cmd = _get_cmd(module, "addrepo", "--check") + if repodata["name"]: + cmd.extend(["--name", repodata["name"]]) # priority on addrepo available since 1.12.25 # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336 - if repodata['priority']: - if zypper_version >= LooseVersion('1.12.25'): - cmd.extend(['--priority', str(repodata['priority'])]) + if repodata["priority"]: + if zypper_version >= LooseVersion("1.12.25"): + cmd.extend(["--priority", str(repodata["priority"])]) else: module.warn("Setting priority only available for zypper >= 1.12.25. 
Ignoring priority argument.") - if repodata['enabled'] == '0': - cmd.append('--disable') + if repodata["enabled"] == "0": + cmd.append("--disable") # gpgcheck available since 1.6.2 # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449 # the default changed in the past, so don't assume a default here and show warning for old zypper versions - if zypper_version >= LooseVersion('1.6.2'): - if repodata['gpgcheck'] == '1': - cmd.append('--gpgcheck') + if zypper_version >= LooseVersion("1.6.2"): + if repodata["gpgcheck"] == "1": + cmd.append("--gpgcheck") else: - cmd.append('--no-gpgcheck') + cmd.append("--no-gpgcheck") else: module.warn("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.") - if repodata['autorefresh'] == '1': - cmd.append('--refresh') + if repodata["autorefresh"] == "1": + cmd.append("--refresh") cmd.append(repo) - if not repo.endswith('.repo'): - cmd.append(repodata['alias']) + if not repo.endswith(".repo"): + cmd.append(repodata["alias"]) if old_repos is not None: for oldrepo in old_repos: - remove_repo(module, oldrepo['url']) + remove_repo(module, oldrepo["url"]) rc, stdout, stderr = module.run_command(cmd, check_rc=False) return rc, stdout, stderr @@ -294,27 +295,27 @@ def addmodify_repo(module, repodata, old_repos, zypper_version): def remove_repo(module, repo): "Removes the repo." - cmd = _get_cmd(module, 'removerepo', repo) + cmd = _get_cmd(module, "removerepo", repo) rc, stdout, stderr = module.run_command(cmd, check_rc=True) return rc, stdout, stderr def get_zypper_version(module): - rc, stdout, stderr = module.run_command([module.get_bin_path('zypper', required=True), '--version']) - if rc != 0 or not stdout.startswith('zypper '): - return LooseVersion('1.0') + rc, stdout, stderr = module.run_command([module.get_bin_path("zypper", required=True), "--version"]) + if rc != 0 or not stdout.startswith("zypper "): + return LooseVersion("1.0") return LooseVersion(stdout.split()[1]) def runrefreshrepo(module, auto_import_keys=False, shortname=None): "Forces zypper to refresh repo metadata." 
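    # Rough sketch of the assembled command (the alias "repo-oss" is only an
    # illustrative value): with auto_import_keys=True and shortname="repo-oss",
    # _get_cmd() above yields approximately
    #   zypper --quiet --non-interactive --gpg-auto-import-keys refresh --force -r repo-oss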
if auto_import_keys: - cmd = _get_cmd(module, '--gpg-auto-import-keys', 'refresh', '--force') + cmd = _get_cmd(module, "--gpg-auto-import-keys", "refresh", "--force") else: - cmd = _get_cmd(module, 'refresh', '--force') + cmd = _get_cmd(module, "refresh", "--force") if shortname is not None: - cmd.extend(['-r', shortname]) + cmd.extend(["-r", shortname]) rc, stdout, stderr = module.run_command(cmd, check_rc=True) return rc, stdout, stderr @@ -325,91 +326,91 @@ def main(): argument_spec=dict( name=dict(), repo=dict(), - state=dict(choices=['present', 'absent'], default='present'), - runrefresh=dict(default=False, type='bool'), + state=dict(choices=["present", "absent"], default="present"), + runrefresh=dict(default=False, type="bool"), description=dict(), - disable_gpg_check=dict(default=False, type='bool'), - autorefresh=dict(default=True, type='bool', aliases=['refresh']), - priority=dict(type='int'), - enabled=dict(default=True, type='bool'), - overwrite_multiple=dict(default=False, type='bool'), - auto_import_keys=dict(default=False, type='bool'), + disable_gpg_check=dict(default=False, type="bool"), + autorefresh=dict(default=True, type="bool", aliases=["refresh"]), + priority=dict(type="int"), + enabled=dict(default=True, type="bool"), + overwrite_multiple=dict(default=False, type="bool"), + auto_import_keys=dict(default=False, type="bool"), ), supports_check_mode=False, - required_one_of=[['state', 'runrefresh']], + required_one_of=[["state", "runrefresh"]], ) - repo = module.params['repo'] - alias = module.params['name'] - state = module.params['state'] - overwrite_multiple = module.params['overwrite_multiple'] - auto_import_keys = module.params['auto_import_keys'] - runrefresh = module.params['runrefresh'] + repo = module.params["repo"] + alias = module.params["name"] + state = module.params["state"] + overwrite_multiple = module.params["overwrite_multiple"] + auto_import_keys = module.params["auto_import_keys"] + runrefresh = module.params["runrefresh"] zypper_version = get_zypper_version(module) repodata = { - 'url': repo, - 'alias': alias, - 'name': module.params['description'], - 'priority': module.params['priority'], + "url": repo, + "alias": alias, + "name": module.params["description"], + "priority": module.params["priority"], } # rewrite bools in the language that zypper lr -x provides for easier comparison - if module.params['enabled']: - repodata['enabled'] = '1' + if module.params["enabled"]: + repodata["enabled"] = "1" else: - repodata['enabled'] = '0' - if module.params['disable_gpg_check']: - repodata['gpgcheck'] = '0' + repodata["enabled"] = "0" + if module.params["disable_gpg_check"]: + repodata["gpgcheck"] = "0" else: - repodata['gpgcheck'] = '1' - if module.params['autorefresh']: - repodata['autorefresh'] = '1' + repodata["gpgcheck"] = "1" + if module.params["autorefresh"]: + repodata["autorefresh"] = "1" else: - repodata['autorefresh'] = '0' + repodata["autorefresh"] = "0" def exit_unchanged(): module.exit_json(changed=False, repodata=repodata, state=state) # Check run-time module parameters - if repo == '*' or alias == '*': + if repo == "*" or alias == "*": if runrefresh: runrefreshrepo(module, auto_import_keys) module.exit_json(changed=False, runrefresh=True) else: - module.fail_json(msg='repo=* can only be used with the runrefresh option.') + module.fail_json(msg="repo=* can only be used with the runrefresh option.") - if state == 'present' and not repo: - module.fail_json(msg='Module option state=present requires repo') - if state == 'absent' and not repo and 
not alias: - module.fail_json(msg='Alias or repo parameter required when state=absent') + if state == "present" and not repo: + module.fail_json(msg="Module option state=present requires repo") + if state == "absent" and not repo and not alias: + module.fail_json(msg="Alias or repo parameter required when state=absent") - if repo and repo.endswith('.repo'): + if repo and repo.endswith(".repo"): if alias: - module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding .repo files') + module.fail_json(msg="Incompatible option: 'name'. Do not use name when adding .repo files") else: if not alias and state == "present": - module.fail_json(msg='Name required when adding non-repo files.') + module.fail_json(msg="Name required when adding non-repo files.") # Download / Open and parse .repo file to ensure idempotency - if repo and repo.endswith('.repo'): - if repo.startswith(('http://', 'https://')): + if repo and repo.endswith(".repo"): + if repo.startswith(("http://", "https://")): response, info = fetch_url(module=module, url=repo, force=True) - if not response or info['status'] != 200: - module.fail_json(msg='Error downloading .repo file from provided URL') - repofile_text = to_text(response.read(), errors='surrogate_or_strict') + if not response or info["status"] != 200: + module.fail_json(msg="Error downloading .repo file from provided URL") + repofile_text = to_text(response.read(), errors="surrogate_or_strict") else: try: - with open(repo, encoding='utf-8') as file: + with open(repo, encoding="utf-8") as file: repofile_text = file.read() except IOError: - module.fail_json(msg='Error opening .repo file from provided path') + module.fail_json(msg="Error opening .repo file from provided path") repofile = configparser.ConfigParser() try: repofile.read_file(StringIO(repofile_text)) except configparser.Error: - module.fail_json(msg='Invalid format, .repo file could not be parsed') + module.fail_json(msg="Invalid format, .repo file could not be parsed") # No support for .repo file with zero or more than one repository if len(repofile.sections()) != 1: @@ -419,27 +420,27 @@ def exit_unchanged(): section = repofile.sections()[0] repofile_items = dict(repofile.items(section)) # Only proceed if at least baseurl is available - if 'baseurl' not in repofile_items: - module.fail_json(msg='No baseurl found in .repo file') + if "baseurl" not in repofile_items: + module.fail_json(msg="No baseurl found in .repo file") # Set alias (name) and url based on values from .repo file alias = section - repodata['alias'] = section - repodata['url'] = repofile_items['baseurl'] + repodata["alias"] = section + repodata["url"] = repofile_items["baseurl"] # If gpgkey is part of the .repo file, auto import key - if 'gpgkey' in repofile_items: + if "gpgkey" in repofile_items: auto_import_keys = True # Map additional values, if available - if 'name' in repofile_items: - repodata['name'] = repofile_items['name'] - if 'enabled' in repofile_items: - repodata['enabled'] = repofile_items['enabled'] - if 'autorefresh' in repofile_items: - repodata['autorefresh'] = repofile_items['autorefresh'] - if 'gpgcheck' in repofile_items: - repodata['gpgcheck'] = repofile_items['gpgcheck'] + if "name" in repofile_items: + repodata["name"] = repofile_items["name"] + if "enabled" in repofile_items: + repodata["enabled"] = repofile_items["enabled"] + if "autorefresh" in repofile_items: + repodata["autorefresh"] = repofile_items["autorefresh"] + if "gpgcheck" in repofile_items: + repodata["gpgcheck"] = 
repofile_items["gpgcheck"] exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple) @@ -448,7 +449,7 @@ def exit_unchanged(): else: shortname = repo - if state == 'present': + if state == "present": if exists and not mod: if runrefresh: runrefreshrepo(module, auto_import_keys, shortname) @@ -456,7 +457,7 @@ def exit_unchanged(): rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version) if rc == 0 and (runrefresh or auto_import_keys): runrefreshrepo(module, auto_import_keys, shortname) - elif state == 'absent': + elif state == "absent": if not exists: exit_unchanged() rc, stdout, stderr = remove_repo(module, shortname) @@ -464,8 +465,10 @@ def exit_unchanged(): if rc == 0: module.exit_json(changed=True, repodata=repodata, state=state) else: - module.fail_json(msg=f"Zypper failed with rc {rc}", rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state) + module.fail_json( + msg=f"Zypper failed with rc {rc}", rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/zypper_repository_info.py b/plugins/modules/zypper_repository_info.py index a7911bbb215..49348f75718 100644 --- a/plugins/modules/zypper_repository_info.py +++ b/plugins/modules/zypper_repository_info.py @@ -78,12 +78,12 @@ from ansible.module_utils.basic import AnsibleModule -REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck'] +REPO_OPTS = ["alias", "name", "priority", "enabled", "autorefresh", "gpgcheck"] def _get_cmd(module, *args): """Combines the non-interactive zypper command with arguments/subcommands""" - cmd = [module.get_bin_path('zypper', required=True), '--quiet', '--non-interactive'] + cmd = [module.get_bin_path("zypper", required=True), "--quiet", "--non-interactive"] cmd.extend(args) return cmd @@ -91,18 +91,18 @@ def _get_cmd(module, *args): def _parse_repos(module): """parses the output of zypper --xmlout repos and return a parse repo dictionary""" - cmd = _get_cmd(module, '--xmlout', 'repos') + cmd = _get_cmd(module, "--xmlout", "repos") rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc == 0: repos = [] dom = parseXML(stdout) - repo_list = dom.getElementsByTagName('repo') + repo_list = dom.getElementsByTagName("repo") for repo in repo_list: opts = {} for o in REPO_OPTS: opts[o] = repo.getAttribute(o) - opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data + opts["url"] = repo.getElementsByTagName("url")[0].firstChild.data # A repo can be uniquely identified by an alias + url repos.append(opts) return repos @@ -114,11 +114,7 @@ def _parse_repos(module): def main(): - module = AnsibleModule( - argument_spec=dict( - ), - supports_check_mode=True - ) + module = AnsibleModule(argument_spec=dict(), supports_check_mode=True) deps.validate(parseXML) @@ -126,5 +122,5 @@ def main(): module.exit_json(changed=False, repodatalist=repodatalist) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/plugin_utils/ansible_type.py b/plugins/plugin_utils/ansible_type.py index 9b413dbe165..d27adefe6cd 100644 --- a/plugins/plugin_utils/ansible_type.py +++ b/plugins/plugin_utils/ansible_type.py @@ -10,6 +10,7 @@ try: # Introduced with Data Tagging (https://github.com/ansible/ansible/pull/84621): from ansible.module_utils.datatag import native_type_name as _native_type_name + HAS_NATIVE_TYPE_NAME = True except ImportError: HAS_NATIVE_TYPE_NAME = False @@ -46,16 +47,16 @@ def _ansible_type(data, 
alias, *, use_native_type: bool = False): data_type = _atype(data, alias, use_native_type=use_native_type) - if data_type == 'list' and len(data) > 0: + if data_type == "list" and len(data) > 0: items = [_atype(i, alias, use_native_type=use_native_type) for i in data] - items_type = '|'.join(sorted(set(items))) + items_type = "|".join(sorted(set(items))) return f"{data_type}[{items_type}]" - if data_type == 'dict' and len(data) > 0: + if data_type == "dict" and len(data) > 0: keys = [_atype(i, alias, use_native_type=use_native_type) for i in data.keys()] vals = [_atype(i, alias, use_native_type=use_native_type) for i in data.values()] - keys_type = '|'.join(sorted(set(keys))) - vals_type = '|'.join(sorted(set(vals))) + keys_type = "|".join(sorted(set(keys))) + vals_type = "|".join(sorted(set(vals))) return f"{data_type}[{keys_type}, {vals_type}]" return data_type diff --git a/plugins/plugin_utils/keys_filter.py b/plugins/plugin_utils/keys_filter.py index 46e3d2cc633..2f12e9e04c0 100644 --- a/plugins/plugin_utils/keys_filter.py +++ b/plugins/plugin_utils/keys_filter.py @@ -13,12 +13,12 @@ def _keys_filter_params(data, matching_parameter): """test parameters: - * data must be a list of dictionaries. All keys must be strings. - * matching_parameter is member of a list. + * data must be a list of dictionaries. All keys must be strings. + * matching_parameter is member of a list. """ mp = matching_parameter - ml = ['equal', 'starts_with', 'ends_with', 'regex'] + ml = ["equal", "starts_with", "ends_with", "regex"] if not isinstance(data, Sequence): msg = "First argument must be a list. %s is %s" @@ -43,14 +43,14 @@ def _keys_filter_params(data, matching_parameter): def _keys_filter_target_str(target, matching_parameter): """ - Test: - * target is a non-empty string or list. - * If target is list all items are strings. - * target is a string or list with single string if matching_parameter=regex. - Convert target and return: - * tuple of unique target items, or - * tuple with single item, or - * compiled regex if matching_parameter=regex. + Test: + * target is a non-empty string or list. + * If target is list all items are strings. + * target is a string or list with single string if matching_parameter=regex. + Convert target and return: + * tuple of unique target items, or + * tuple with single item, or + * compiled regex if matching_parameter=regex. """ if not isinstance(target, Sequence): @@ -67,7 +67,7 @@ def _keys_filter_target_str(target, matching_parameter): msg = "The target items must be strings. %s is %s" raise AnsibleFilterError(msg % (elem, type(elem))) - if matching_parameter == 'regex': + if matching_parameter == "regex": if isinstance(target, str): r = target else: @@ -82,7 +82,7 @@ def _keys_filter_target_str(target, matching_parameter): msg = "The target must be a valid regex if matching_parameter=regex. target is %s" raise AnsibleFilterError(msg % r) elif isinstance(target, str): - tt = (target, ) + tt = (target,) else: tt = tuple(set(target)) @@ -91,13 +91,13 @@ def _keys_filter_target_str(target, matching_parameter): def _keys_filter_target_dict(target, matching_parameter): """ - Test: - * target is a list of dictionaries with attributes 'after' and 'before'. - * Attributes 'before' must be valid regex if matching_parameter=regex. - * Otherwise, the attributes 'before' must be strings. 
- Convert target and return: - * iterator that aggregates attributes 'before' and 'after', or - * iterator that aggregates compiled regex of attributes 'before' and 'after' if matching_parameter=regex. + Test: + * target is a list of dictionaries with attributes 'after' and 'before'. + * Attributes 'before' must be valid regex if matching_parameter=regex. + * Otherwise, the attributes 'before' must be strings. + Convert target and return: + * iterator that aggregates attributes 'before' and 'after', or + * iterator that aggregates compiled regex of attributes 'before' and 'after' if matching_parameter=regex. """ if not isinstance(target, list): @@ -112,26 +112,28 @@ def _keys_filter_target_dict(target, matching_parameter): if not isinstance(elem, Mapping): msg = "The target items must be dictionaries. %s is %s" raise AnsibleFilterError(msg % (elem, type(elem))) - if not all(k in elem for k in ('before', 'after')): + if not all(k in elem for k in ("before", "after")): msg = "All dictionaries in target must include attributes: after, before." raise AnsibleFilterError(msg) - if not isinstance(elem['before'], str): + if not isinstance(elem["before"], str): msg = "The attributes before must be strings. %s is %s" - raise AnsibleFilterError(msg % (elem['before'], type(elem['before']))) - if not isinstance(elem['after'], str): + raise AnsibleFilterError(msg % (elem["before"], type(elem["before"]))) + if not isinstance(elem["after"], str): msg = "The attributes after must be strings. %s is %s" - raise AnsibleFilterError(msg % (elem['after'], type(elem['after']))) + raise AnsibleFilterError(msg % (elem["after"], type(elem["after"]))) - before = [d['before'] for d in target] - after = [d['after'] for d in target] + before = [d["before"] for d in target] + after = [d["after"] for d in target] - if matching_parameter == 'regex': + if matching_parameter == "regex": try: tr = map(re.compile, before) tz = list(zip(tr, after)) except re.error: - msg = ("The attributes before must be valid regex if matching_parameter=regex." - " Not all items are valid regex in: %s") + msg = ( + "The attributes before must be valid regex if matching_parameter=regex." + " Not all items are valid regex in: %s" + ) raise AnsibleFilterError(msg % before) else: tz = list(zip(before, after)) diff --git a/plugins/plugin_utils/unsafe.py b/plugins/plugin_utils/unsafe.py index 7b2325dcbfe..a60e5d939ba 100644 --- a/plugins/plugin_utils/unsafe.py +++ b/plugins/plugin_utils/unsafe.py @@ -13,8 +13,8 @@ wrap_var as _make_unsafe, ) -_RE_TEMPLATE_CHARS = re.compile('[{}]') -_RE_TEMPLATE_CHARS_BYTES = re.compile(b'[{}]') +_RE_TEMPLATE_CHARS = re.compile("[{}]") +_RE_TEMPLATE_CHARS_BYTES = re.compile(b"[{}]") def make_unsafe(value): diff --git a/plugins/test/a_module.py b/plugins/test/a_module.py index e8b5f7b73b6..44285f0d15d 100644 --- a/plugins/test/a_module.py +++ b/plugins/test/a_module.py @@ -4,7 +4,7 @@ from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = """ name: a_module short_description: Test whether a given string refers to an existing module or action plugin version_added: 4.0.0 @@ -17,9 +17,9 @@ description: A string denoting a fully qualified collection name (FQCN) of a module or action plugin. 
type: string required: true -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Make sure that community.aws.route53 is available ansible.builtin.assert: that: @@ -30,13 +30,13 @@ ansible.builtin.assert: that: - "'community.general.does_not_exist' is not community.general.a_module" -''' +""" -RETURN = ''' +RETURN = """ _value: description: Whether the module or action plugin denoted by the input exists. type: boolean -''' +""" from ansible.plugins.loader import action_loader, module_loader @@ -63,9 +63,9 @@ def a_module(term): class TestModule: - ''' Ansible jinja2 tests ''' + """Ansible jinja2 tests""" def tests(self): return { - 'a_module': a_module, + "a_module": a_module, } diff --git a/plugins/test/ansible_type.py b/plugins/test/ansible_type.py index 898252fbbb2..d298fd82453 100644 --- a/plugins/test/ansible_type.py +++ b/plugins/test/ansible_type.py @@ -4,7 +4,7 @@ from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = """ name: ansible_type short_description: Validate input type version_added: "9.2.0" @@ -23,9 +23,9 @@ description: Data type aliases. default: {} type: dictionary -''' +""" -EXAMPLES = ''' +EXAMPLES = """ # Substitution converts str to AnsibleUnicode or _AnsibleTaggedStr # ---------------------------------------------------------------- @@ -214,13 +214,13 @@ data: 123.45 result: '{{ data is community.general.ansible_type(dtype, alias) }}' # result => true -''' +""" -RETURN = ''' +RETURN = """ _value: description: Whether the data type is valid. type: bool -''' +""" from collections.abc import Sequence @@ -245,8 +245,5 @@ def ansible_type(data, dtype, alias=None): class TestModule: - def tests(self): - return { - 'ansible_type': ansible_type - } + return {"ansible_type": ansible_type} diff --git a/plugins/test/fqdn_valid.py b/plugins/test/fqdn_valid.py index b962dfe1085..c46d55d36f3 100644 --- a/plugins/test/fqdn_valid.py +++ b/plugins/test/fqdn_valid.py @@ -5,7 +5,7 @@ from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = """ name: fqdn_valid short_description: Validates fully-qualified domain names against RFC 1123 version_added: 8.1.0 @@ -41,9 +41,9 @@ default: false type: bool required: false -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Make sure that hostname is valid ansible.builtin.assert: that: hostname is community.general.fqdn_valid @@ -55,13 +55,13 @@ - name: Make sure that hostname is at least 2 labels long (a.b). Allow '_' ansible.builtin.assert: that: hostname is community.general.fqdn_valid(min_labels=2, allow_underscores=True) -''' +""" -RETURN = ''' +RETURN = """ _value: description: Whether the name is valid. type: bool -''' +""" from ansible.errors import AnsibleError @@ -82,18 +82,18 @@ def fqdn_valid(name, min_labels=1, allow_underscores=False): """ if ANOTHER_LIBRARY_IMPORT_ERROR: - raise AnsibleError('Python package fqdn must be installed to use this test.') from ANOTHER_LIBRARY_IMPORT_ERROR + raise AnsibleError("Python package fqdn must be installed to use this test.") from ANOTHER_LIBRARY_IMPORT_ERROR fobj = FQDN(name, min_labels=min_labels, allow_underscores=allow_underscores) - return (fobj.is_valid) + return fobj.is_valid class TestModule: - ''' Ansible test hostname validity. - https://pypi.org/project/fqdn/ - ''' + """Ansible test hostname validity. 
+ https://pypi.org/project/fqdn/ + """ def tests(self): return { - 'fqdn_valid': fqdn_valid, + "fqdn_valid": fqdn_valid, } diff --git a/ruff.toml b/ruff.toml index 2b9d85ba974..b4d3ac62689 100644 --- a/ruff.toml +++ b/ruff.toml @@ -2,7 +2,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later # SPDX-FileCopyrightText: 2025 Felix Fontein -line-length = 160 +line-length = 120 [lint] # https://docs.astral.sh/ruff/rules/ @@ -80,3 +80,24 @@ unfixable = [] # Allow unused variables when underscore-prefixed or starting with dummy dummy-variable-rgx = "^(_|dummy).*$" + +[lint.pycodestyle] +max-line-length = 160 + +[format] +# https://docs.astral.sh/ruff/formatter/#configuration + +# Like Black, use double quotes for strings. +quote-style = "double" + +# Like Black, indent with spaces, rather than tabs. +indent-style = "space" + +# Like Black, respect magic trailing commas. +skip-magic-trailing-comma = false + +line-ending = "lf" + +# Disable auto-formatting of code examples in docstrings. Markdown, +# reStructuredText code/literal blocks and doctests are all supported. +docstring-code-format = false diff --git a/tests/unit/plugins/become/conftest.py b/tests/unit/plugins/become/conftest.py index f21e1fd0f65..2bc412fe943 100644 --- a/tests/unit/plugins/become/conftest.py +++ b/tests/unit/plugins/become/conftest.py @@ -15,7 +15,7 @@ @pytest.fixture def parser(): - parser = opt_help.create_base_parser('testparser') + parser = opt_help.create_base_parser("testparser") opt_help.add_runas_options(parser) opt_help.add_meta_options(parser) diff --git a/tests/unit/plugins/become/helper.py b/tests/unit/plugins/become/helper.py index e09d5a7707f..b1ad116c834 100644 --- a/tests/unit/plugins/become/helper.py +++ b/tests/unit/plugins/become/helper.py @@ -1,4 +1,3 @@ - # Copyright (c) 2012-2014, Michael DeHaan # # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -11,7 +10,7 @@ def call_become_plugin(task, var_options, cmd, executable=None): """Helper function to call become plugin similarly on how Ansible itself handles this.""" - plugin = become_loader.get(task['become_method']) + plugin = become_loader.get(task["become_method"]) plugin.set_options(task_keys=task, var_options=var_options) shell = get_shell_plugin(executable=executable) return plugin.build_become_command(cmd, shell) diff --git a/tests/unit/plugins/become/test_doas.py b/tests/unit/plugins/become/test_doas.py index 92dcc04c159..5f3222d3546 100644 --- a/tests/unit/plugins/become/test_doas.py +++ b/tests/unit/plugins/become/test_doas.py @@ -20,18 +20,18 @@ def test_doas_basic(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - doas_exe = 'doas' - doas_flags = '-n' + doas_exe = "doas" + doas_flags = "-n" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" 
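    # The assertion below expects a command of roughly this shape (the
    # BECOME-SUCCESS suffix is randomized per run):
    #   doas -n /bin/bash -c 'echo BECOME-SUCCESS-...; /bin/foo'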
task = { - 'become_method': 'community.general.doas', + "become_method": "community.general.doas", } var_options = {} cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) print(cmd) - assert (re.match(f"""{doas_exe} {doas_flags} {default_exe} -c 'echo {success}; {default_cmd}'""", cmd) is not None) + assert re.match(f"""{doas_exe} {doas_flags} {default_exe} -c 'echo {success}; {default_cmd}'""", cmd) is not None def test_doas(mocker, parser, reset_cli_args): @@ -40,20 +40,26 @@ def test_doas(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - doas_exe = 'doas' - doas_flags = '-n' + doas_exe = "doas" + doas_flags = "-n" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" task = { - 'become_user': 'foo', - 'become_method': 'community.general.doas', - 'become_flags': doas_flags, + "become_user": "foo", + "become_method": "community.general.doas", + "become_flags": doas_flags, } var_options = {} cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) print(cmd) - assert (re.match(f"""{doas_exe} {doas_flags} -u {task['become_user']} {default_exe} -c 'echo {success}; {default_cmd}'""", cmd) is not None) + assert ( + re.match( + f"""{doas_exe} {doas_flags} -u {task["become_user"]} {default_exe} -c 'echo {success}; {default_cmd}'""", + cmd, + ) + is not None + ) def test_doas_varoptions(mocker, parser, reset_cli_args): @@ -62,20 +68,26 @@ def test_doas_varoptions(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - doas_exe = 'doas' - doas_flags = '-n' + doas_exe = "doas" + doas_flags = "-n" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" task = { - 'become_user': 'foo', - 'become_method': 'community.general.doas', - 'become_flags': 'xxx', + "become_user": "foo", + "become_method": "community.general.doas", + "become_flags": "xxx", } var_options = { - 'ansible_become_user': 'bar', - 'ansible_become_flags': doas_flags, + "ansible_become_user": "bar", + "ansible_become_flags": doas_flags, } cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) print(cmd) - assert (re.match(f"""{doas_exe} {doas_flags} -u {var_options['ansible_become_user']} {default_exe} -c 'echo {success}; {default_cmd}'""", cmd) is not None) + assert ( + re.match( + f"""{doas_exe} {doas_flags} -u {var_options["ansible_become_user"]} {default_exe} -c 'echo {success}; {default_cmd}'""", + cmd, + ) + is not None + ) diff --git a/tests/unit/plugins/become/test_dzdo.py b/tests/unit/plugins/become/test_dzdo.py index ff5f1f6b18d..614ba4b864a 100644 --- a/tests/unit/plugins/become/test_dzdo.py +++ b/tests/unit/plugins/become/test_dzdo.py @@ -20,13 +20,13 @@ def test_dzdo_basic(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - dzdo_exe = 'dzdo' - dzdo_flags = '-H -S -n' + dzdo_exe = "dzdo" + dzdo_flags = "-H -S -n" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" task = { - 'become_method': 'community.general.dzdo', + "become_method": "community.general.dzdo", } var_options = {} cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) @@ -40,25 +40,45 @@ def test_dzdo(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - dzdo_exe = 'dzdo' - dzdo_flags = '' + dzdo_exe = "dzdo" + dzdo_flags = "" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" 
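    # Same as the basic case above, but with an explicit become_user, so the
    # expected command shape gains "-u foo":
    #   doas -n -u foo /bin/bash -c 'echo BECOME-SUCCESS-...; /bin/foo'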
task = { - 'become_user': 'foo', - 'become_method': 'community.general.dzdo', - 'become_flags': dzdo_flags, + "become_user": "foo", + "become_method": "community.general.dzdo", + "become_flags": dzdo_flags, } var_options = {} cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) print(cmd) - assert re.match(f"""{dzdo_exe} {dzdo_flags} -u {task['become_user']} {default_exe} -c 'echo {success}; {default_cmd}'""", cmd) is not None - task['become_pass'] = 'testpass' + assert ( + re.match( + f"""{dzdo_exe} {dzdo_flags} -u {task["become_user"]} {default_exe} -c 'echo {success}; {default_cmd}'""", + cmd, + ) + is not None + ) + task["become_pass"] = "testpass" cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) print(cmd) - assert re.match("""%s %s -p %s -u %s %s -c 'echo %s; %s'""" % (dzdo_exe, dzdo_flags, r'\"\[dzdo via ansible, key=.+?\] password:\"', - task['become_user'], default_exe, success, default_cmd), cmd) is not None + assert ( + re.match( + """%s %s -p %s -u %s %s -c 'echo %s; %s'""" + % ( + dzdo_exe, + dzdo_flags, + r"\"\[dzdo via ansible, key=.+?\] password:\"", + task["become_user"], + default_exe, + success, + default_cmd, + ), + cmd, + ) + is not None + ) def test_dzdo_varoptions(mocker, parser, reset_cli_args): @@ -67,25 +87,45 @@ def test_dzdo_varoptions(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - dzdo_exe = 'dzdo' - dzdo_flags = '' + dzdo_exe = "dzdo" + dzdo_flags = "" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" task = { - 'become_user': 'foo', - 'become_method': 'community.general.dzdo', - 'become_flags': 'xxx', + "become_user": "foo", + "become_method": "community.general.dzdo", + "become_flags": "xxx", } var_options = { - 'ansible_become_user': 'bar', - 'ansible_become_flags': dzdo_flags, + "ansible_become_user": "bar", + "ansible_become_flags": dzdo_flags, } cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) print(cmd) - assert re.match(f"""{dzdo_exe} {dzdo_flags} -u {var_options['ansible_become_user']} {default_exe} -c 'echo {success}; {default_cmd}'""", cmd) is not None - var_options['ansible_become_pass'] = 'testpass' + assert ( + re.match( + f"""{dzdo_exe} {dzdo_flags} -u {var_options["ansible_become_user"]} {default_exe} -c 'echo {success}; {default_cmd}'""", + cmd, + ) + is not None + ) + var_options["ansible_become_pass"] = "testpass" cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) print(cmd) - assert re.match("""%s %s -p %s -u %s %s -c 'echo %s; %s'""" % (dzdo_exe, dzdo_flags, r'\"\[dzdo via ansible, key=.+?\] password:\"', - var_options['ansible_become_user'], default_exe, success, default_cmd), cmd) is not None + assert ( + re.match( + """%s %s -p %s -u %s %s -c 'echo %s; %s'""" + % ( + dzdo_exe, + dzdo_flags, + r"\"\[dzdo via ansible, key=.+?\] password:\"", + var_options["ansible_become_user"], + default_exe, + success, + default_cmd, + ), + cmd, + ) + is not None + ) diff --git a/tests/unit/plugins/become/test_ksu.py b/tests/unit/plugins/become/test_ksu.py index 45234443fb4..a0aa1b76a1a 100644 --- a/tests/unit/plugins/become/test_ksu.py +++ b/tests/unit/plugins/become/test_ksu.py @@ -20,19 +20,24 @@ def test_ksu_basic(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - ksu_exe = 'ksu' - ksu_flags = '' + ksu_exe = "ksu" + ksu_flags = "" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" 
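    # Unlike the sudo-style plugins, ksu takes the target user as a positional
    # argument ahead of the flags and uses -e to run the command, so the
    # expected shape is roughly:
    #   ksu foo  -e /bin/bash -c 'echo BECOME-SUCCESS-<key>; /bin/foo'
    # (the doubled space comes from the empty ksu_flags in these tests).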
task = { - 'become_user': 'foo', - 'become_method': 'community.general.ksu', + "become_user": "foo", + "become_method": "community.general.ksu", } var_options = {} cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) print(cmd) - assert (re.match(f"""{ksu_exe} {task['become_user']} {ksu_flags} -e {default_exe} -c 'echo {success}; {default_cmd}'""", cmd) is not None) + assert ( + re.match( + f"""{ksu_exe} {task["become_user"]} {ksu_flags} -e {default_exe} -c 'echo {success}; {default_cmd}'""", cmd + ) + is not None + ) def test_ksu(mocker, parser, reset_cli_args): @@ -41,20 +46,25 @@ def test_ksu(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - ksu_exe = 'ksu' - ksu_flags = '' + ksu_exe = "ksu" + ksu_flags = "" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" task = { - 'become_user': 'foo', - 'become_method': 'community.general.ksu', - 'become_flags': ksu_flags, + "become_user": "foo", + "become_method": "community.general.ksu", + "become_flags": ksu_flags, } var_options = {} cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) print(cmd) - assert (re.match(f"""{ksu_exe} {task['become_user']} {ksu_flags} -e {default_exe} -c 'echo {success}; {default_cmd}'""", cmd) is not None) + assert ( + re.match( + f"""{ksu_exe} {task["become_user"]} {ksu_flags} -e {default_exe} -c 'echo {success}; {default_cmd}'""", cmd + ) + is not None + ) def test_ksu_varoptions(mocker, parser, reset_cli_args): @@ -63,20 +73,26 @@ def test_ksu_varoptions(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - ksu_exe = 'ksu' - ksu_flags = '' + ksu_exe = "ksu" + ksu_flags = "" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" task = { - 'become_user': 'foo', - 'become_method': 'community.general.ksu', - 'become_flags': 'xxx', + "become_user": "foo", + "become_method": "community.general.ksu", + "become_flags": "xxx", } var_options = { - 'ansible_become_user': 'bar', - 'ansible_become_flags': ksu_flags, + "ansible_become_user": "bar", + "ansible_become_flags": ksu_flags, } cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) print(cmd) - assert (re.match(f"""{ksu_exe} {var_options['ansible_become_user']} {ksu_flags} -e {default_exe} -c 'echo {success}; {default_cmd}'""", cmd) is not None) + assert ( + re.match( + f"""{ksu_exe} {var_options["ansible_become_user"]} {ksu_flags} -e {default_exe} -c 'echo {success}; {default_cmd}'""", + cmd, + ) + is not None + ) diff --git a/tests/unit/plugins/become/test_pbrun.py b/tests/unit/plugins/become/test_pbrun.py index 5975c8df31e..2d587d28843 100644 --- a/tests/unit/plugins/become/test_pbrun.py +++ b/tests/unit/plugins/become/test_pbrun.py @@ -20,13 +20,13 @@ def test_pbrun_basic(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - pbrun_exe = 'pbrun' - pbrun_flags = '' + pbrun_exe = "pbrun" + pbrun_flags = "" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" task = { - 'become_method': 'community.general.pbrun', + "become_method": "community.general.pbrun", } var_options = {} cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) @@ -40,20 +40,23 @@ def test_pbrun(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - pbrun_exe = 'pbrun' - pbrun_flags = '' + pbrun_exe = "pbrun" + pbrun_flags = "" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" 
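    # pbrun does not wrap the command in a shell: the regexes below expect the
    # echo marker and command directly after -u, without "/bin/bash -c", so
    # the expected shape is roughly:
    #   pbrun  -u foo 'echo BECOME-SUCCESS-<key>; /bin/foo'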
task = { - 'become_user': 'foo', - 'become_method': 'community.general.pbrun', - 'become_flags': pbrun_flags, + "become_user": "foo", + "become_method": "community.general.pbrun", + "become_flags": pbrun_flags, } var_options = {} cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) print(cmd) - assert re.match(f"""{pbrun_exe} {pbrun_flags} -u {task['become_user']} 'echo {success}; {default_cmd}'""", cmd) is not None + assert ( + re.match(f"""{pbrun_exe} {pbrun_flags} -u {task["become_user"]} 'echo {success}; {default_cmd}'""", cmd) + is not None + ) def test_pbrun_var_varoptions(mocker, parser, reset_cli_args): @@ -62,20 +65,26 @@ def test_pbrun_var_varoptions(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - pbrun_exe = 'pbrun' - pbrun_flags = '' + pbrun_exe = "pbrun" + pbrun_flags = "" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" task = { - 'become_user': 'foo', - 'become_method': 'community.general.pbrun', - 'become_flags': 'xxx', + "become_user": "foo", + "become_method": "community.general.pbrun", + "become_flags": "xxx", } var_options = { - 'ansible_become_user': 'bar', - 'ansible_become_flags': pbrun_flags, + "ansible_become_user": "bar", + "ansible_become_flags": pbrun_flags, } cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) print(cmd) - assert re.match(f"""{pbrun_exe} {pbrun_flags} -u {var_options['ansible_become_user']} 'echo {success}; {default_cmd}'""", cmd) is not None + assert ( + re.match( + f"""{pbrun_exe} {pbrun_flags} -u {var_options["ansible_become_user"]} 'echo {success}; {default_cmd}'""", + cmd, + ) + is not None + ) diff --git a/tests/unit/plugins/become/test_pfexec.py b/tests/unit/plugins/become/test_pfexec.py index cb4e8e14603..9ccddabd613 100644 --- a/tests/unit/plugins/become/test_pfexec.py +++ b/tests/unit/plugins/become/test_pfexec.py @@ -20,13 +20,13 @@ def test_pfexec_basic(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - pfexec_exe = 'pfexec' - pfexec_flags = '-H -S -n' + pfexec_exe = "pfexec" + pfexec_flags = "-H -S -n" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" task = { - 'become_method': 'community.general.pfexec', + "become_method": "community.general.pfexec", } var_options = {} cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) @@ -40,15 +40,15 @@ def test_pfexec(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - pfexec_exe = 'pfexec' - pfexec_flags = '' + pfexec_exe = "pfexec" + pfexec_flags = "" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" task = { - 'become_user': 'foo', - 'become_method': 'community.general.pfexec', - 'become_flags': pfexec_flags, + "become_user": "foo", + "become_method": "community.general.pfexec", + "become_flags": pfexec_flags, } var_options = {} cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) @@ -62,19 +62,19 @@ def test_pfexec_varoptions(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - pfexec_exe = 'pfexec' - pfexec_flags = '' + pfexec_exe = "pfexec" + pfexec_flags = "" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" 
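    # As in the other *_varoptions tests, connection variables take precedence
    # over task keywords: ansible_become_user and ansible_become_flags ("bar"
    # and the empty flags) override the task's "foo"/"xxx" values in the
    # command that ends up asserted.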
task = { - 'become_user': 'foo', - 'become_method': 'community.general.pfexec', - 'become_flags': 'xxx', + "become_user": "foo", + "become_method": "community.general.pfexec", + "become_flags": "xxx", } var_options = { - 'ansible_become_user': 'bar', - 'ansible_become_flags': pfexec_flags, + "ansible_become_user": "bar", + "ansible_become_flags": pfexec_flags, } cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) print(cmd) diff --git a/tests/unit/plugins/become/test_sudosu.py b/tests/unit/plugins/become/test_sudosu.py index c9b8cc2ddfe..20b25f042ee 100644 --- a/tests/unit/plugins/become/test_sudosu.py +++ b/tests/unit/plugins/become/test_sudosu.py @@ -20,30 +20,49 @@ def test_sudosu(mocker, parser, reset_cli_args): default_cmd = "/bin/foo" default_exe = "/bin/bash" - sudo_exe = 'sudo' - sudo_flags = '-H -s -n' + sudo_exe = "sudo" + sudo_flags = "-H -s -n" - success = 'BECOME-SUCCESS-.+?' + success = "BECOME-SUCCESS-.+?" task = { - 'become_user': 'foo', - 'become_method': 'community.general.sudosu', - 'become_flags': sudo_flags, + "become_user": "foo", + "become_method": "community.general.sudosu", + "become_flags": sudo_flags, } var_options = {} cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) print(cmd) - assert (re.match(f"""{sudo_exe} {sudo_flags} su -l {task['become_user']} {default_exe} -c 'echo {success}; {default_cmd}'""", cmd) is not None) + assert ( + re.match( + f"""{sudo_exe} {sudo_flags} su -l {task["become_user"]} {default_exe} -c 'echo {success}; {default_cmd}'""", + cmd, + ) + is not None + ) task = { - 'become_user': 'foo', - 'become_method': 'community.general.sudosu', - 'become_flags': sudo_flags, - 'become_pass': 'testpass', + "become_user": "foo", + "become_method": "community.general.sudosu", + "become_flags": sudo_flags, + "become_pass": "testpass", } var_options = {} cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) print(cmd) - assert (re.match("""%s %s -p "%s" su -l %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags.replace('-n', ''), - r"\[sudo via ansible, key=.+?\] password:", task['become_user'], - default_exe, success, default_cmd), cmd) is not None) + assert ( + re.match( + """%s %s -p "%s" su -l %s %s -c 'echo %s; %s'""" + % ( + sudo_exe, + sudo_flags.replace("-n", ""), + r"\[sudo via ansible, key=.+?\] password:", + task["become_user"], + default_exe, + success, + default_cmd, + ), + cmd, + ) + is not None + ) diff --git a/tests/unit/plugins/cache/test_memcached.py b/tests/unit/plugins/cache/test_memcached.py index c0701aa995d..c3b7253d800 100644 --- a/tests/unit/plugins/cache/test_memcached.py +++ b/tests/unit/plugins/cache/test_memcached.py @@ -7,11 +7,11 @@ import pytest -pytest.importorskip('memcache') +pytest.importorskip("memcache") from ansible.plugins.loader import cache_loader from ansible_collections.community.general.plugins.cache.memcached import CacheModule as MemcachedCache def test_memcached_cachemodule(): - assert isinstance(cache_loader.get('community.general.memcached'), MemcachedCache) + assert isinstance(cache_loader.get("community.general.memcached"), MemcachedCache) diff --git a/tests/unit/plugins/cache/test_redis.py b/tests/unit/plugins/cache/test_redis.py index 6d4744be666..426438251b6 100644 --- a/tests/unit/plugins/cache/test_redis.py +++ b/tests/unit/plugins/cache/test_redis.py @@ -7,7 +7,7 @@ import pytest -pytest.importorskip('redis') +pytest.importorskip("redis") from ansible.plugins.loader import cache_loader from 
ansible_collections.community.general.plugins.cache.redis import CacheModule as RedisCache @@ -15,11 +15,11 @@ def test_redis_cachemodule(): # The _uri option is required for the redis plugin - connection = '127.0.0.1:6379:1' - assert isinstance(cache_loader.get('community.general.redis', **{'_uri': connection}), RedisCache) + connection = "127.0.0.1:6379:1" + assert isinstance(cache_loader.get("community.general.redis", **{"_uri": connection}), RedisCache) def test_redis_cachemodule_2(): # The _uri option is required for the redis plugin - connection = '[::1]:6379:1' - assert isinstance(cache_loader.get('community.general.redis', **{'_uri': connection}), RedisCache) + connection = "[::1]:6379:1" + assert isinstance(cache_loader.get("community.general.redis", **{"_uri": connection}), RedisCache) diff --git a/tests/unit/plugins/callback/test_elastic.py b/tests/unit/plugins/callback/test_elastic.py index c3bc6aacf2d..d06781d44fa 100644 --- a/tests/unit/plugins/callback/test_elastic.py +++ b/tests/unit/plugins/callback/test_elastic.py @@ -17,84 +17,73 @@ class TestOpentelemetry(unittest.TestCase): - @patch('ansible_collections.community.general.plugins.callback.elastic.socket') + @patch("ansible_collections.community.general.plugins.callback.elastic.socket") def setUp(self, mock_socket): if sys.version_info < ELASTIC_MINIMUM_PYTHON_VERSION: self.skipTest(f"Python {'.'.join(map(str, ELASTIC_MINIMUM_PYTHON_VERSION))}+ is needed for Elastic") - mock_socket.gethostname.return_value = 'my-host' - mock_socket.gethostbyname.return_value = '1.2.3.4' + mock_socket.gethostname.return_value = "my-host" + mock_socket.gethostbyname.return_value = "1.2.3.4" self.elastic = ElasticSource(display=None) - self.task_fields = {'args': {}} - self.mock_host = Mock('MockHost') - self.mock_host.name = 'myhost' - self.mock_host._uuid = 'myhost_uuid' + self.task_fields = {"args": {}} + self.mock_host = Mock("MockHost") + self.mock_host.name = "myhost" + self.mock_host._uuid = "myhost_uuid" self.mock_task = Task() - self.mock_task.action = 'myaction' + self.mock_task.action = "myaction" self.mock_task.no_log = False - self.mock_task._role = 'myrole' - self.mock_task._uuid = 'myuuid' + self.mock_task._role = "myrole" + self.mock_task._uuid = "myuuid" self.mock_task.args = {} - self.mock_task.get_name = MagicMock(return_value='mytask') - self.mock_task.get_path = MagicMock(return_value='/mypath') - self.my_task = TaskData('myuuid', 'mytask', '/mypath', 'myplay', 'myaction', '') - self.my_task_result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) + self.mock_task.get_name = MagicMock(return_value="mytask") + self.mock_task.get_path = MagicMock(return_value="/mypath") + self.my_task = TaskData("myuuid", "mytask", "/mypath", "myplay", "myaction", "") + self.my_task_result = TaskResult( + host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields + ) def test_start_task(self): tasks_data = OrderedDict() - self.elastic.start_task( - tasks_data, - False, - 'myplay', - self.mock_task - ) + self.elastic.start_task(tasks_data, False, "myplay", self.mock_task) - task_data = tasks_data['myuuid'] - self.assertEqual(task_data.uuid, 'myuuid') - self.assertEqual(task_data.name, 'mytask') - self.assertEqual(task_data.path, '/mypath') - self.assertEqual(task_data.play, 'myplay') - self.assertEqual(task_data.action, 'myaction') - self.assertEqual(task_data.args, '') + task_data = tasks_data["myuuid"] + self.assertEqual(task_data.uuid, "myuuid") + 
self.assertEqual(task_data.name, "mytask") + self.assertEqual(task_data.path, "/mypath") + self.assertEqual(task_data.play, "myplay") + self.assertEqual(task_data.action, "myaction") + self.assertEqual(task_data.args, "") def test_finish_task_with_a_host_match(self): tasks_data = OrderedDict() - tasks_data['myuuid'] = self.my_task + tasks_data["myuuid"] = self.my_task - self.elastic.finish_task( - tasks_data, - 'ok', - self.my_task_result - ) + self.elastic.finish_task(tasks_data, "ok", self.my_task_result) - task_data = tasks_data['myuuid'] - host_data = task_data.host_data['myhost_uuid'] - self.assertEqual(host_data.uuid, 'myhost_uuid') - self.assertEqual(host_data.name, 'myhost') - self.assertEqual(host_data.status, 'ok') + task_data = tasks_data["myuuid"] + host_data = task_data.host_data["myhost_uuid"] + self.assertEqual(host_data.uuid, "myhost_uuid") + self.assertEqual(host_data.name, "myhost") + self.assertEqual(host_data.status, "ok") def test_finish_task_without_a_host_match(self): result = TaskResult(host=None, task=self.mock_task, return_data={}, task_fields=self.task_fields) tasks_data = OrderedDict() - tasks_data['myuuid'] = self.my_task + tasks_data["myuuid"] = self.my_task - self.elastic.finish_task( - tasks_data, - 'ok', - result - ) + self.elastic.finish_task(tasks_data, "ok", result) - task_data = tasks_data['myuuid'] - host_data = task_data.host_data['include'] - self.assertEqual(host_data.uuid, 'include') - self.assertEqual(host_data.name, 'include') - self.assertEqual(host_data.status, 'ok') + task_data = tasks_data["myuuid"] + host_data = task_data.host_data["include"] + self.assertEqual(host_data.uuid, "include") + self.assertEqual(host_data.name, "include") + self.assertEqual(host_data.status, "ok") def test_get_error_message(self): test_cases = ( - ('my-exception', 'my-msg', None, 'my-exception'), - (None, 'my-msg', None, 'my-msg'), - (None, None, None, 'failed'), + ("my-exception", "my-msg", None, "my-exception"), + (None, "my-msg", None, "my-msg"), + (None, None, None, "failed"), ) for tc in test_cases: @@ -103,11 +92,21 @@ def test_get_error_message(self): def test_enrich_error_message(self): test_cases = ( - ('my-exception', 'my-msg', 'my-stderr', 'message: "my-msg"\nexception: "my-exception"\nstderr: "my-stderr"'), - ('my-exception', None, 'my-stderr', 'message: "failed"\nexception: "my-exception"\nstderr: "my-stderr"'), - (None, 'my-msg', 'my-stderr', 'message: "my-msg"\nexception: "None"\nstderr: "my-stderr"'), - ('my-exception', 'my-msg', None, 'message: "my-msg"\nexception: "my-exception"\nstderr: "None"'), - ('my-exception', 'my-msg', '\nline1\nline2', 'message: "my-msg"\nexception: "my-exception"\nstderr: "\nline1\nline2"') + ( + "my-exception", + "my-msg", + "my-stderr", + 'message: "my-msg"\nexception: "my-exception"\nstderr: "my-stderr"', + ), + ("my-exception", None, "my-stderr", 'message: "failed"\nexception: "my-exception"\nstderr: "my-stderr"'), + (None, "my-msg", "my-stderr", 'message: "my-msg"\nexception: "None"\nstderr: "my-stderr"'), + ("my-exception", "my-msg", None, 'message: "my-msg"\nexception: "my-exception"\nstderr: "None"'), + ( + "my-exception", + "my-msg", + "\nline1\nline2", + 'message: "my-msg"\nexception: "my-exception"\nstderr: "\nline1\nline2"', + ), ) for tc in test_cases: @@ -118,9 +117,9 @@ def test_enrich_error_message(self): def generate_test_data(exception=None, msg=None, stderr=None): res_data = OrderedDict() if exception: - res_data['exception'] = exception + res_data["exception"] = exception if msg: - res_data['msg'] = 
msg + res_data["msg"] = msg if stderr: - res_data['stderr'] = stderr + res_data["stderr"] = stderr return res_data diff --git a/tests/unit/plugins/callback/test_loganalytics.py b/tests/unit/plugins/callback/test_loganalytics.py index e435b272ad8..e2c40e38f2b 100644 --- a/tests/unit/plugins/callback/test_loganalytics.py +++ b/tests/unit/plugins/callback/test_loganalytics.py @@ -14,52 +14,59 @@ class TestAzureLogAnalytics(unittest.TestCase): - @patch('ansible_collections.community.general.plugins.callback.loganalytics.socket') + @patch("ansible_collections.community.general.plugins.callback.loganalytics.socket") def setUp(self, mock_socket): - mock_socket.gethostname.return_value = 'my-host' - mock_socket.gethostbyname.return_value = '1.2.3.4' + mock_socket.gethostname.return_value = "my-host" + mock_socket.gethostbyname.return_value = "1.2.3.4" self.loganalytics = AzureLogAnalyticsSource() - self.mock_task = Mock('MockTask') - self.mock_task._role = 'myrole' - self.mock_task._uuid = 'myuuid' - self.task_fields = {'args': {}} - self.mock_host = Mock('MockHost') - self.mock_host.name = 'myhost' + self.mock_task = Mock("MockTask") + self.mock_task._role = "myrole" + self.mock_task._uuid = "myuuid" + self.task_fields = {"args": {}} + self.mock_host = Mock("MockHost") + self.mock_host.name = "myhost" - @patch('ansible_collections.community.general.plugins.callback.loganalytics.now') - @patch('ansible_collections.community.general.plugins.callback.loganalytics.open_url') + @patch("ansible_collections.community.general.plugins.callback.loganalytics.now") + @patch("ansible_collections.community.general.plugins.callback.loganalytics.open_url") def test_overall(self, open_url_mock, mock_now): mock_now.return_value = datetime(2020, 12, 1) result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) - self.loganalytics.send_event(workspace_id='01234567-0123-0123-0123-01234567890a', - shared_key='dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==', - state='OK', - result=result, - runtime=100) + self.loganalytics.send_event( + workspace_id="01234567-0123-0123-0123-01234567890a", + shared_key="dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==", + state="OK", + result=result, + runtime=100, + ) args, kwargs = open_url_mock.call_args sent_data = json.loads(args[1]) - self.assertEqual(sent_data['event']['timestamp'], 'Tue, 01 Dec 2020 00:00:00 GMT') - self.assertEqual(sent_data['event']['host'], 'my-host') - self.assertEqual(sent_data['event']['uuid'], 'myuuid') - self.assertEqual(args[0], 'https://01234567-0123-0123-0123-01234567890a.ods.opinsights.azure.com/api/logs?api-version=2016-04-01') + self.assertEqual(sent_data["event"]["timestamp"], "Tue, 01 Dec 2020 00:00:00 GMT") + self.assertEqual(sent_data["event"]["host"], "my-host") + self.assertEqual(sent_data["event"]["uuid"], "myuuid") + self.assertEqual( + args[0], + "https://01234567-0123-0123-0123-01234567890a.ods.opinsights.azure.com/api/logs?api-version=2016-04-01", + ) - @patch('ansible_collections.community.general.plugins.callback.loganalytics.now') - @patch('ansible_collections.community.general.plugins.callback.loganalytics.open_url') + @patch("ansible_collections.community.general.plugins.callback.loganalytics.now") + @patch("ansible_collections.community.general.plugins.callback.loganalytics.open_url") def test_auth_headers(self, open_url_mock, mock_now): mock_now.return_value = datetime(2020, 12, 1) result = 
TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) - self.loganalytics.send_event(workspace_id='01234567-0123-0123-0123-01234567890a', - shared_key='dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==', - state='OK', - result=result, - runtime=100) + self.loganalytics.send_event( + workspace_id="01234567-0123-0123-0123-01234567890a", + shared_key="dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==", + state="OK", + result=result, + runtime=100, + ) args, kwargs = open_url_mock.call_args - headers = kwargs['headers'] + headers = kwargs["headers"] - self.assertRegex(headers['Authorization'], r'^SharedKey 01234567-0123-0123-0123-01234567890a:.*=$') - self.assertEqual(headers['Log-Type'], 'ansible_playbook') + self.assertRegex(headers["Authorization"], r"^SharedKey 01234567-0123-0123-0123-01234567890a:.*=$") + self.assertEqual(headers["Log-Type"], "ansible_playbook") diff --git a/tests/unit/plugins/callback/test_opentelemetry.py b/tests/unit/plugins/callback/test_opentelemetry.py index 6c7d0390264..11ba2fcb1f8 100644 --- a/tests/unit/plugins/callback/test_opentelemetry.py +++ b/tests/unit/plugins/callback/test_opentelemetry.py @@ -17,88 +17,77 @@ class TestOpentelemetry(unittest.TestCase): - @patch('ansible_collections.community.general.plugins.callback.opentelemetry.socket') + @patch("ansible_collections.community.general.plugins.callback.opentelemetry.socket") def setUp(self, mock_socket): # TODO: this python version validation won't be needed as long as the _time_ns call is mocked. if sys.version_info < OPENTELEMETRY_MINIMUM_PYTHON_VERSION: - self.skipTest(f"Python {'.'.join(map(str, OPENTELEMETRY_MINIMUM_PYTHON_VERSION))}+ is needed for OpenTelemetry") + self.skipTest( + f"Python {'.'.join(map(str, OPENTELEMETRY_MINIMUM_PYTHON_VERSION))}+ is needed for OpenTelemetry" + ) - mock_socket.gethostname.return_value = 'my-host' - mock_socket.gethostbyname.return_value = '1.2.3.4' + mock_socket.gethostname.return_value = "my-host" + mock_socket.gethostbyname.return_value = "1.2.3.4" self.opentelemetry = OpenTelemetrySource(display=None) - self.task_fields = {'args': {}} - self.mock_host = Mock('MockHost') - self.mock_host.name = 'myhost' - self.mock_host._uuid = 'myhost_uuid' + self.task_fields = {"args": {}} + self.mock_host = Mock("MockHost") + self.mock_host.name = "myhost" + self.mock_host._uuid = "myhost_uuid" self.mock_task = Task() - self.mock_task.action = 'myaction' + self.mock_task.action = "myaction" self.mock_task.no_log = False - self.mock_task._role = 'myrole' - self.mock_task._uuid = 'myuuid' + self.mock_task._role = "myrole" + self.mock_task._uuid = "myuuid" self.mock_task.args = {} - self.mock_task.get_name = MagicMock(return_value='mytask') - self.mock_task.get_path = MagicMock(return_value='/mypath') - self.my_task = TaskData('myuuid', 'mytask', '/mypath', 'myplay', 'myaction', '') - self.my_task_result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) + self.mock_task.get_name = MagicMock(return_value="mytask") + self.mock_task.get_path = MagicMock(return_value="/mypath") + self.my_task = TaskData("myuuid", "mytask", "/mypath", "myplay", "myaction", "") + self.my_task_result = TaskResult( + host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields + ) def test_start_task(self): tasks_data = OrderedDict() - self.opentelemetry.start_task( - tasks_data, - False, - 'myplay', - 
self.mock_task - ) + self.opentelemetry.start_task(tasks_data, False, "myplay", self.mock_task) - task_data = tasks_data['myuuid'] - self.assertEqual(task_data.uuid, 'myuuid') - self.assertEqual(task_data.name, 'mytask') - self.assertEqual(task_data.path, '/mypath') - self.assertEqual(task_data.play, 'myplay') - self.assertEqual(task_data.action, 'myaction') + task_data = tasks_data["myuuid"] + self.assertEqual(task_data.uuid, "myuuid") + self.assertEqual(task_data.name, "mytask") + self.assertEqual(task_data.path, "/mypath") + self.assertEqual(task_data.play, "myplay") + self.assertEqual(task_data.action, "myaction") self.assertEqual(task_data.args, {}) def test_finish_task_with_a_host_match(self): tasks_data = OrderedDict() - tasks_data['myuuid'] = self.my_task + tasks_data["myuuid"] = self.my_task - self.opentelemetry.finish_task( - tasks_data, - 'ok', - self.my_task_result, - "" - ) + self.opentelemetry.finish_task(tasks_data, "ok", self.my_task_result, "") - task_data = tasks_data['myuuid'] - host_data = task_data.host_data['myhost_uuid'] - self.assertEqual(host_data.uuid, 'myhost_uuid') - self.assertEqual(host_data.name, 'myhost') - self.assertEqual(host_data.status, 'ok') + task_data = tasks_data["myuuid"] + host_data = task_data.host_data["myhost_uuid"] + self.assertEqual(host_data.uuid, "myhost_uuid") + self.assertEqual(host_data.name, "myhost") + self.assertEqual(host_data.status, "ok") def test_finish_task_without_a_host_match(self): result = TaskResult(host=None, task=self.mock_task, return_data={}, task_fields=self.task_fields) tasks_data = OrderedDict() - tasks_data['myuuid'] = self.my_task + tasks_data["myuuid"] = self.my_task - self.opentelemetry.finish_task( - tasks_data, - 'ok', - result, - "" - ) + self.opentelemetry.finish_task(tasks_data, "ok", result, "") - task_data = tasks_data['myuuid'] - host_data = task_data.host_data['include'] - self.assertEqual(host_data.uuid, 'include') - self.assertEqual(host_data.name, 'include') - self.assertEqual(host_data.status, 'ok') + task_data = tasks_data["myuuid"] + host_data = task_data.host_data["include"] + self.assertEqual(host_data.uuid, "include") + self.assertEqual(host_data.name, "include") + self.assertEqual(host_data.status, "ok") def test_get_error_message(self): test_cases = ( - ('my-exception', 'my-msg', None, 'my-exception'), - (None, 'my-msg', None, 'my-msg'), - (None, None, None, 'failed'), + ("my-exception", "my-msg", None, "my-exception"), + (None, "my-msg", None, "my-msg"), + (None, None, None, "failed"), ) for tc in test_cases: @@ -107,25 +96,37 @@ def test_get_error_message(self): def test_get_error_message_from_results(self): test_cases = ( - ('my-exception', 'my-msg', None, False, None), - (None, 'my-msg', None, False, None), + ("my-exception", "my-msg", None, False, None), + (None, "my-msg", None, False, None), (None, None, None, False, None), - ('my-exception', 'my-msg', None, True, 'shell(none) - my-exception'), - (None, 'my-msg', None, True, 'shell(none) - my-msg'), - (None, None, None, True, 'shell(none) - failed'), + ("my-exception", "my-msg", None, True, "shell(none) - my-exception"), + (None, "my-msg", None, True, "shell(none) - my-msg"), + (None, None, None, True, "shell(none) - failed"), ) for tc in test_cases: - result = self.opentelemetry.get_error_message_from_results([generate_test_data(tc[0], tc[1], tc[2], tc[3])], 'shell') + result = self.opentelemetry.get_error_message_from_results( + [generate_test_data(tc[0], tc[1], tc[2], tc[3])], "shell" + ) self.assertEqual(result, tc[4]) def 
test_enrich_error_message(self): test_cases = ( - ('my-exception', 'my-msg', 'my-stderr', 'message: "my-msg"\nexception: "my-exception"\nstderr: "my-stderr"'), - ('my-exception', None, 'my-stderr', 'message: "failed"\nexception: "my-exception"\nstderr: "my-stderr"'), - (None, 'my-msg', 'my-stderr', 'message: "my-msg"\nexception: "None"\nstderr: "my-stderr"'), - ('my-exception', 'my-msg', None, 'message: "my-msg"\nexception: "my-exception"\nstderr: "None"'), - ('my-exception', 'my-msg', '\nline1\nline2', 'message: "my-msg"\nexception: "my-exception"\nstderr: "\nline1\nline2"') + ( + "my-exception", + "my-msg", + "my-stderr", + 'message: "my-msg"\nexception: "my-exception"\nstderr: "my-stderr"', + ), + ("my-exception", None, "my-stderr", 'message: "failed"\nexception: "my-exception"\nstderr: "my-stderr"'), + (None, "my-msg", "my-stderr", 'message: "my-msg"\nexception: "None"\nstderr: "my-stderr"'), + ("my-exception", "my-msg", None, 'message: "my-msg"\nexception: "my-exception"\nstderr: "None"'), + ( + "my-exception", + "my-msg", + "\nline1\nline2", + 'message: "my-msg"\nexception: "my-exception"\nstderr: "\nline1\nline2"', + ), ) for tc in test_cases: @@ -134,29 +135,61 @@ def test_enrich_error_message(self): def test_enrich_error_message_from_results(self): test_cases = ( - ('my-exception', 'my-msg', 'my-stderr', False, ''), - ('my-exception', None, 'my-stderr', False, ''), - (None, 'my-msg', 'my-stderr', False, ''), - ('my-exception', 'my-msg', None, False, ''), - ('my-exception', 'my-msg', '\nline1\nline2', False, ''), - ('my-exception', 'my-msg', 'my-stderr', True, 'shell(none) - message: "my-msg"\nexception: "my-exception"\nstderr: "my-stderr"\n'), - ('my-exception', None, 'my-stderr', True, 'shell(none) - message: "failed"\nexception: "my-exception"\nstderr: "my-stderr"\n'), - (None, 'my-msg', 'my-stderr', True, 'shell(none) - message: "my-msg"\nexception: "None"\nstderr: "my-stderr"\n'), - ('my-exception', 'my-msg', None, True, 'shell(none) - message: "my-msg"\nexception: "my-exception"\nstderr: "None"\n'), - ('my-exception', 'my-msg', '\nline1\nline2', True, 'shell(none) - message: "my-msg"\nexception: "my-exception"\nstderr: "\nline1\nline2"\n') + ("my-exception", "my-msg", "my-stderr", False, ""), + ("my-exception", None, "my-stderr", False, ""), + (None, "my-msg", "my-stderr", False, ""), + ("my-exception", "my-msg", None, False, ""), + ("my-exception", "my-msg", "\nline1\nline2", False, ""), + ( + "my-exception", + "my-msg", + "my-stderr", + True, + 'shell(none) - message: "my-msg"\nexception: "my-exception"\nstderr: "my-stderr"\n', + ), + ( + "my-exception", + None, + "my-stderr", + True, + 'shell(none) - message: "failed"\nexception: "my-exception"\nstderr: "my-stderr"\n', + ), + ( + None, + "my-msg", + "my-stderr", + True, + 'shell(none) - message: "my-msg"\nexception: "None"\nstderr: "my-stderr"\n', + ), + ( + "my-exception", + "my-msg", + None, + True, + 'shell(none) - message: "my-msg"\nexception: "my-exception"\nstderr: "None"\n', + ), + ( + "my-exception", + "my-msg", + "\nline1\nline2", + True, + 'shell(none) - message: "my-msg"\nexception: "my-exception"\nstderr: "\nline1\nline2"\n', + ), ) for tc in test_cases: - result = self.opentelemetry.enrich_error_message_from_results([generate_test_data(tc[0], tc[1], tc[2], tc[3])], 'shell') + result = self.opentelemetry.enrich_error_message_from_results( + [generate_test_data(tc[0], tc[1], tc[2], tc[3])], "shell" + ) self.assertEqual(result, tc[4]) def test_url_from_args(self): test_cases = ( ({}, ""), - ({'url': 'my-url'}, 
'my-url'), - ({'url': 'my-url', 'api_url': 'my-api_url'}, 'my-url'), - ({'api_url': 'my-api_url'}, 'my-api_url'), - ({'api_url': 'my-api_url', 'chart_repo_url': 'my-chart_repo_url'}, 'my-api_url') + ({"url": "my-url"}, "my-url"), + ({"url": "my-url", "api_url": "my-api_url"}, "my-url"), + ({"api_url": "my-api_url"}, "my-api_url"), + ({"api_url": "my-api_url", "chart_repo_url": "my-chart_repo_url"}, "my-api_url"), ) for tc in test_cases: @@ -166,12 +199,12 @@ def test_url_from_args(self): def test_parse_and_redact_url_if_possible(self): test_cases = ( ({}, None), - ({'url': 'wrong'}, None), - ({'url': 'https://my-url'}, 'https://my-url'), - ({'url': 'https://user:pass@my-url'}, 'https://my-url'), - ({'url': 'https://my-url:{{ my_port }}'}, 'https://my-url:{{ my_port }}'), - ({'url': 'https://{{ my_hostname }}:{{ my_port }}'}, None), - ({'url': '{{my_schema}}{{ my_hostname }}:{{ my_port }}'}, None) + ({"url": "wrong"}, None), + ({"url": "https://my-url"}, "https://my-url"), + ({"url": "https://user:pass@my-url"}, "https://my-url"), + ({"url": "https://my-url:{{ my_port }}"}, "https://my-url:{{ my_port }}"), + ({"url": "https://{{ my_hostname }}:{{ my_port }}"}, None), + ({"url": "{{my_schema}}{{ my_hostname }}:{{ my_port }}"}, None), ) for tc in test_cases: @@ -185,10 +218,10 @@ def test_parse_and_redact_url_if_possible(self): def generate_test_data(exception=None, msg=None, stderr=None, failed=False): res_data = OrderedDict() if exception: - res_data['exception'] = exception + res_data["exception"] = exception if msg: - res_data['msg'] = msg + res_data["msg"] = msg if stderr: - res_data['stderr'] = stderr - res_data['failed'] = failed + res_data["stderr"] = stderr + res_data["failed"] = failed return res_data diff --git a/tests/unit/plugins/callback/test_splunk.py b/tests/unit/plugins/callback/test_splunk.py index 719870fbcf6..1a89d6c631b 100644 --- a/tests/unit/plugins/callback/test_splunk.py +++ b/tests/unit/plugins/callback/test_splunk.py @@ -14,50 +14,62 @@ class TestSplunkClient(unittest.TestCase): - @patch('ansible_collections.community.general.plugins.callback.splunk.socket') + @patch("ansible_collections.community.general.plugins.callback.splunk.socket") def setUp(self, mock_socket): - mock_socket.gethostname.return_value = 'my-host' - mock_socket.gethostbyname.return_value = '1.2.3.4' + mock_socket.gethostname.return_value = "my-host" + mock_socket.gethostbyname.return_value = "1.2.3.4" self.splunk = SplunkHTTPCollectorSource() - self.mock_task = Mock('MockTask') - self.mock_task._role = 'myrole' - self.mock_task._uuid = 'myuuid' - self.task_fields = {'args': {}} - self.mock_host = Mock('MockHost') - self.mock_host.name = 'myhost' - - @patch('ansible_collections.community.general.plugins.callback.splunk.now') - @patch('ansible_collections.community.general.plugins.callback.splunk.open_url') + self.mock_task = Mock("MockTask") + self.mock_task._role = "myrole" + self.mock_task._uuid = "myuuid" + self.task_fields = {"args": {}} + self.mock_host = Mock("MockHost") + self.mock_host.name = "myhost" + + @patch("ansible_collections.community.general.plugins.callback.splunk.now") + @patch("ansible_collections.community.general.plugins.callback.splunk.open_url") def test_timestamp_with_milliseconds(self, open_url_mock, mock_now): mock_now.return_value = datetime(2020, 12, 1) result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) self.splunk.send_event( - url='endpoint', authtoken='token', validate_certs=False, include_milliseconds=True, - 
batch="abcefghi-1234-5678-9012-abcdefghijkl", state='OK', result=result, runtime=100 + url="endpoint", + authtoken="token", + validate_certs=False, + include_milliseconds=True, + batch="abcefghi-1234-5678-9012-abcdefghijkl", + state="OK", + result=result, + runtime=100, ) args, kwargs = open_url_mock.call_args sent_data = json.loads(args[1]) - self.assertEqual(sent_data['event']['timestamp'], '2020-12-01 00:00:00.000000 +0000') - self.assertEqual(sent_data['event']['host'], 'my-host') - self.assertEqual(sent_data['event']['ip_address'], '1.2.3.4') + self.assertEqual(sent_data["event"]["timestamp"], "2020-12-01 00:00:00.000000 +0000") + self.assertEqual(sent_data["event"]["host"], "my-host") + self.assertEqual(sent_data["event"]["ip_address"], "1.2.3.4") - @patch('ansible_collections.community.general.plugins.callback.splunk.now') - @patch('ansible_collections.community.general.plugins.callback.splunk.open_url') + @patch("ansible_collections.community.general.plugins.callback.splunk.now") + @patch("ansible_collections.community.general.plugins.callback.splunk.open_url") def test_timestamp_without_milliseconds(self, open_url_mock, mock_now): mock_now.return_value = datetime(2020, 12, 1) result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) self.splunk.send_event( - url='endpoint', authtoken='token', validate_certs=False, include_milliseconds=False, - batch="abcefghi-1234-5678-9012-abcdefghijkl", state='OK', result=result, runtime=100 + url="endpoint", + authtoken="token", + validate_certs=False, + include_milliseconds=False, + batch="abcefghi-1234-5678-9012-abcdefghijkl", + state="OK", + result=result, + runtime=100, ) args, kwargs = open_url_mock.call_args sent_data = json.loads(args[1]) - self.assertEqual(sent_data['event']['timestamp'], '2020-12-01 00:00:00 +0000') - self.assertEqual(sent_data['event']['host'], 'my-host') - self.assertEqual(sent_data['event']['ip_address'], '1.2.3.4') + self.assertEqual(sent_data["event"]["timestamp"], "2020-12-01 00:00:00 +0000") + self.assertEqual(sent_data["event"]["host"], "my-host") + self.assertEqual(sent_data["event"]["ip_address"], "1.2.3.4") diff --git a/tests/unit/plugins/connection/test_lxc.py b/tests/unit/plugins/connection/test_lxc.py index 2d257843a99..6d5d206f3d2 100644 --- a/tests/unit/plugins/connection/test_lxc.py +++ b/tests/unit/plugins/connection/test_lxc.py @@ -24,9 +24,9 @@ def lxc(request): When true (default), a mocked liblxc module is injected. If False, no liblxc will be present. 
""" - liblxc_present = getattr(request, 'param', True) + liblxc_present = getattr(request, "param", True) - class ContainerMock(): + class ContainerMock: # dict of container name to its state _container_states = {} @@ -36,56 +36,55 @@ def __init__(self, name): @property def state(self): - return ContainerMock._container_states.get(self.name, 'STARTED') + return ContainerMock._container_states.get(self.name, "STARTED") liblxc_module_mock = mock.MagicMock() liblxc_module_mock.Container = ContainerMock - with mock.patch.dict('sys.modules'): + with mock.patch.dict("sys.modules"): if liblxc_present: - sys.modules['lxc'] = liblxc_module_mock - elif 'lxc' in sys.modules: - del sys.modules['lxc'] + sys.modules["lxc"] = liblxc_module_mock + elif "lxc" in sys.modules: + del sys.modules["lxc"] from ansible_collections.community.general.plugins.connection import lxc as lxc_plugin_module assert lxc_plugin_module.HAS_LIBLXC == liblxc_present - assert bool(getattr(lxc_plugin_module, '_lxc', None)) == liblxc_present + assert bool(getattr(lxc_plugin_module, "_lxc", None)) == liblxc_present yield lxc_plugin_module -class TestLXCConnectionClass(): - - @pytest.mark.parametrize('lxc', [True, False], indirect=True) +class TestLXCConnectionClass: + @pytest.mark.parametrize("lxc", [True, False], indirect=True) def test_lxc_connection_module(self, lxc): """Test that a connection can be created with the plugin.""" play_context = PlayContext() in_stream = StringIO() - conn = connection_loader.get('lxc', play_context, in_stream) + conn = connection_loader.get("lxc", play_context, in_stream) assert conn assert isinstance(conn, lxc.Connection) - @pytest.mark.parametrize('lxc', [False], indirect=True) + @pytest.mark.parametrize("lxc", [False], indirect=True) def test_lxc_connection_liblxc_error(self, lxc): """Test that on connect an error is thrown if liblxc is not present.""" play_context = PlayContext() in_stream = StringIO() - conn = connection_loader.get('lxc', play_context, in_stream) + conn = connection_loader.get("lxc", play_context, in_stream) - with pytest.raises(AnsibleError, match='lxc python bindings are not installed'): + with pytest.raises(AnsibleError, match="lxc python bindings are not installed"): conn._connect() def test_remote_addr_option(self): """Test that the remote_addr option is used""" play_context = PlayContext() in_stream = StringIO() - conn = connection_loader.get('lxc', play_context, in_stream) + conn = connection_loader.get("lxc", play_context, in_stream) - container_name = 'my-container' - conn.set_option('remote_addr', container_name) - assert conn.get_option('remote_addr') == container_name + container_name = "my-container" + conn.set_option("remote_addr", container_name) + assert conn.get_option("remote_addr") == container_name conn._connect() assert conn.container_name == container_name @@ -94,23 +93,23 @@ def test_error_when_stopped(self, lxc): """Test that on connect an error is thrown if the container is stopped.""" play_context = PlayContext() in_stream = StringIO() - conn = connection_loader.get('lxc', play_context, in_stream) - conn.set_option('remote_addr', 'my-container') + conn = connection_loader.get("lxc", play_context, in_stream) + conn.set_option("remote_addr", "my-container") - lxc._lxc.Container._container_states['my-container'] = 'STOPPED' + lxc._lxc.Container._container_states["my-container"] = "STOPPED" - with pytest.raises(AnsibleError, match='my-container is not running'): + with pytest.raises(AnsibleError, match="my-container is not running"): conn._connect() def 
test_container_name_change(self): """Test connect method reconnects when remote_addr changes""" play_context = PlayContext() in_stream = StringIO() - conn = connection_loader.get('lxc', play_context, in_stream) + conn = connection_loader.get("lxc", play_context, in_stream) # setting the option does nothing - container1_name = 'my-container' - conn.set_option('remote_addr', container1_name) + container1_name = "my-container" + conn.set_option("remote_addr", container1_name) assert conn.container_name is None assert conn.container is None @@ -128,8 +127,8 @@ def test_container_name_change(self): assert conn.container.name == container1_name # setting the option does again nothing - container2_name = 'my-other-container' - conn.set_option('remote_addr', container2_name) + container2_name = "my-other-container" + conn.set_option("remote_addr", container2_name) assert conn.container_name == container1_name assert conn.container is container1 assert conn.container.name == container1_name diff --git a/tests/unit/plugins/connection/test_wsl.py b/tests/unit/plugins/connection/test_wsl.py index caad0ce4407..3060ce9c469 100644 --- a/tests/unit/plugins/connection/test_wsl.py +++ b/tests/unit/plugins/connection/test_wsl.py @@ -18,126 +18,126 @@ from unittest.mock import patch, MagicMock, mock_open -paramiko = pytest.importorskip('paramiko') +paramiko = pytest.importorskip("paramiko") @pytest.fixture def connection(): play_context = PlayContext() in_stream = StringIO() - conn = connection_loader.get('community.general.wsl', play_context, in_stream) - conn.set_option('remote_addr', '192.168.1.100') - conn.set_option('remote_user', 'root') - conn.set_option('password', 'password') - conn.set_option('wsl_distribution', 'test') + conn = connection_loader.get("community.general.wsl", play_context, in_stream) + conn.set_option("remote_addr", "192.168.1.100") + conn.set_option("remote_user", "root") + conn.set_option("password", "password") + conn.set_option("wsl_distribution", "test") return conn def test_connection_options(connection): - """ Test that connection options are properly set """ - assert connection.get_option('remote_addr') == '192.168.1.100' - assert connection.get_option('remote_user') == 'root' - assert connection.get_option('password') == 'password' - assert connection.get_option('wsl_distribution') == 'test' + """Test that connection options are properly set""" + assert connection.get_option("remote_addr") == "192.168.1.100" + assert connection.get_option("remote_user") == "root" + assert connection.get_option("password") == "password" + assert connection.get_option("wsl_distribution") == "test" def test_authenticity_msg(): - """ Test authenticity message formatting """ - msg = authenticity_msg('test.host', 'ssh-rsa', 'AA:BB:CC:DD') - assert 'test.host' in msg - assert 'ssh-rsa' in msg - assert 'AA:BB:CC:DD' in msg + """Test authenticity message formatting""" + msg = authenticity_msg("test.host", "ssh-rsa", "AA:BB:CC:DD") + assert "test.host" in msg + assert "ssh-rsa" in msg + assert "AA:BB:CC:DD" in msg def test_missing_host_key(connection): - """ Test MyAddPolicy missing_host_key method """ + """Test MyAddPolicy missing_host_key method""" client = MagicMock() key = MagicMock() - key.get_fingerprint.return_value = b'fingerprint' - key.get_name.return_value = 'ssh-rsa' + key.get_fingerprint.return_value = b"fingerprint" + key.get_name.return_value = "ssh-rsa" policy = MyAddPolicy(connection) - connection.set_option('host_key_auto_add', True) - policy.missing_host_key(client, 'test.host', 
key) - assert hasattr(key, '_added_by_ansible_this_time') + connection.set_option("host_key_auto_add", True) + policy.missing_host_key(client, "test.host", key) + assert hasattr(key, "_added_by_ansible_this_time") - connection.set_option('host_key_auto_add', False) - connection.set_option('host_key_checking', False) - policy.missing_host_key(client, 'test.host', key) + connection.set_option("host_key_auto_add", False) + connection.set_option("host_key_checking", False) + policy.missing_host_key(client, "test.host", key) - connection.set_option('host_key_checking', True) - connection.set_option('host_key_auto_add', False) - connection.set_option('use_persistent_connections', False) + connection.set_option("host_key_checking", True) + connection.set_option("host_key_auto_add", False) + connection.set_option("use_persistent_connections", False) - with patch('ansible.utils.display.Display.prompt_until', return_value='yes'): - policy.missing_host_key(client, 'test.host', key) + with patch("ansible.utils.display.Display.prompt_until", return_value="yes"): + policy.missing_host_key(client, "test.host", key) - with patch('ansible.utils.display.Display.prompt_until', return_value='no'): - with pytest.raises(AnsibleError, match='host connection rejected by user'): - policy.missing_host_key(client, 'test.host', key) + with patch("ansible.utils.display.Display.prompt_until", return_value="no"): + with pytest.raises(AnsibleError, match="host connection rejected by user"): + policy.missing_host_key(client, "test.host", key) def test_set_log_channel(connection): - """ Test setting log channel """ - connection._set_log_channel('test_channel') - assert connection._log_channel == 'test_channel' + """Test setting log channel""" + connection._set_log_channel("test_channel") + assert connection._log_channel == "test_channel" def test_parse_proxy_command(connection): - """ Test proxy command parsing """ - connection.set_option('proxy_command', 'ssh -W %h:%p proxy.example.com') - connection.set_option('remote_addr', 'target.example.com') - connection.set_option('remote_user', 'testuser') + """Test proxy command parsing""" + connection.set_option("proxy_command", "ssh -W %h:%p proxy.example.com") + connection.set_option("remote_addr", "target.example.com") + connection.set_option("remote_user", "testuser") result = connection._parse_proxy_command(port=2222) - assert 'sock' in result - assert isinstance(result['sock'], paramiko.ProxyCommand) + assert "sock" in result + assert isinstance(result["sock"], paramiko.ProxyCommand) -@patch('paramiko.SSHClient') +@patch("paramiko.SSHClient") def test_connect_with_rsa_sha2_disabled(mock_ssh, connection): - """ Test connection with RSA SHA2 algorithms disabled """ - connection.set_option('use_rsa_sha2_algorithms', False) + """Test connection with RSA SHA2 algorithms disabled""" + connection.set_option("use_rsa_sha2_algorithms", False) mock_client = MagicMock() mock_ssh.return_value = mock_client connection._connect() call_kwargs = mock_client.connect.call_args[1] - assert 'disabled_algorithms' in call_kwargs - assert 'pubkeys' in call_kwargs['disabled_algorithms'] + assert "disabled_algorithms" in call_kwargs + assert "pubkeys" in call_kwargs["disabled_algorithms"] -@patch('paramiko.SSHClient') +@patch("paramiko.SSHClient") def test_connect_with_bad_host_key(mock_ssh, connection): - """ Test connection with bad host key """ + """Test connection with bad host key""" mock_client = MagicMock() mock_ssh.return_value = mock_client - mock_client.connect.side_effect = 
paramiko.ssh_exception.BadHostKeyException( - 'hostname', MagicMock(), MagicMock()) + mock_client.connect.side_effect = paramiko.ssh_exception.BadHostKeyException("hostname", MagicMock(), MagicMock()) - with pytest.raises(AnsibleConnectionFailure, match='host key mismatch'): + with pytest.raises(AnsibleConnectionFailure, match="host key mismatch"): connection._connect() -@patch('paramiko.SSHClient') +@patch("paramiko.SSHClient") def test_connect_with_invalid_host_key(mock_ssh, connection): - """ Test connection with bad host key """ - connection.set_option('host_key_checking', True) + """Test connection with bad host key""" + connection.set_option("host_key_checking", True) mock_client = MagicMock() mock_ssh.return_value = mock_client mock_client.load_system_host_keys.side_effect = paramiko.hostkeys.InvalidHostKey( - "Bad Line!", Exception('Something crashed!')) + "Bad Line!", Exception("Something crashed!") + ) with pytest.raises(AnsibleConnectionFailure, match="Invalid host key: Bad Line!"): connection._connect() -@patch('paramiko.SSHClient') +@patch("paramiko.SSHClient") def test_connect_success(mock_ssh, connection): - """ Test successful SSH connection establishment """ + """Test successful SSH connection establishment""" mock_client = MagicMock() mock_ssh.return_value = mock_client @@ -147,81 +147,72 @@ def test_connect_success(mock_ssh, connection): assert connection._connected -@patch('paramiko.SSHClient') +@patch("paramiko.SSHClient") def test_connect_authentication_failure(mock_ssh, connection): - """ Test SSH connection with authentication failure """ + """Test SSH connection with authentication failure""" mock_client = MagicMock() mock_ssh.return_value = mock_client - mock_client.connect.side_effect = paramiko.ssh_exception.AuthenticationException('Auth failed') + mock_client.connect.side_effect = paramiko.ssh_exception.AuthenticationException("Auth failed") with pytest.raises(AnsibleAuthenticationFailure): connection._connect() def test_any_keys_added(connection): - """ Test checking for added host keys """ + """Test checking for added host keys""" connection.ssh = MagicMock() connection.ssh._host_keys = { - 'host1': { - 'ssh-rsa': MagicMock(_added_by_ansible_this_time=True), - 'ssh-ed25519': MagicMock(_added_by_ansible_this_time=False) + "host1": { + "ssh-rsa": MagicMock(_added_by_ansible_this_time=True), + "ssh-ed25519": MagicMock(_added_by_ansible_this_time=False), } } assert connection._any_keys_added() is True - connection.ssh._host_keys = { - 'host1': { - 'ssh-rsa': MagicMock(_added_by_ansible_this_time=False) - } - } + connection.ssh._host_keys = {"host1": {"ssh-rsa": MagicMock(_added_by_ansible_this_time=False)}} assert connection._any_keys_added() is False -@patch('os.path.exists') -@patch('os.stat') -@patch('tempfile.NamedTemporaryFile') +@patch("os.path.exists") +@patch("os.stat") +@patch("tempfile.NamedTemporaryFile") def test_save_ssh_host_keys(mock_tempfile, mock_stat, mock_exists, connection): - """ Test saving SSH host keys """ + """Test saving SSH host keys""" mock_exists.return_value = True mock_stat.return_value = MagicMock(st_mode=0o644, st_uid=1000, st_gid=1000) - mock_tempfile.return_value.__enter__.return_value.name = '/tmp/test_keys' + mock_tempfile.return_value.__enter__.return_value.name = "/tmp/test_keys" connection.ssh = MagicMock() connection.ssh._host_keys = { - 'host1': { - 'ssh-rsa': MagicMock( - get_base64=lambda: 'KEY1', - _added_by_ansible_this_time=True - ) - } + "host1": {"ssh-rsa": MagicMock(get_base64=lambda: "KEY1", 
_added_by_ansible_this_time=True)} } mock_open_obj = mock_open() - with patch('builtins.open', mock_open_obj): - connection._save_ssh_host_keys('/tmp/test_keys') + with patch("builtins.open", mock_open_obj): + connection._save_ssh_host_keys("/tmp/test_keys") - mock_open_obj().write.assert_called_with('host1 ssh-rsa KEY1\n') + mock_open_obj().write.assert_called_with("host1 ssh-rsa KEY1\n") def test_build_wsl_command(connection): - """ Test wsl command building with different users """ + """Test wsl command building with different users""" cmd = connection._build_wsl_command('/bin/sh -c "ls -la"') assert cmd == 'wsl.exe --distribution test -- /bin/sh -c "ls -la"' - connection.set_option('wsl_user', 'test-user') + connection.set_option("wsl_user", "test-user") cmd = connection._build_wsl_command('/bin/sh -c "ls -la"') assert cmd == 'wsl.exe --distribution test --user test-user -- /bin/sh -c "ls -la"' - connection.set_option('become', True) - connection.set_option('become_user', 'test-become-user') + connection.set_option("become", True) + connection.set_option("become_user", "test-become-user") cmd = connection._build_wsl_command('/bin/sh -c "ls -la"') assert cmd == 'wsl.exe --distribution test --user test-become-user -- /bin/sh -c "ls -la"' -@patch('paramiko.SSHClient') +@patch("paramiko.SSHClient") def test_exec_command_success(mock_ssh, connection): - """ Test successful command execution """ + """Test successful command execution""" mock_client = MagicMock() mock_ssh.return_value = mock_client mock_channel = MagicMock() @@ -230,21 +221,21 @@ def test_exec_command_success(mock_ssh, connection): mock_client.get_transport.return_value = mock_transport mock_transport.open_session.return_value = mock_channel mock_channel.recv_exit_status.return_value = 0 - mock_channel.makefile.return_value = [to_bytes('stdout')] + mock_channel.makefile.return_value = [to_bytes("stdout")] mock_channel.makefile_stderr.return_value = [to_bytes("")] connection._connected = True connection.ssh = mock_client - returncode, stdout, stderr = connection.exec_command('ls -la') + returncode, stdout, stderr = connection.exec_command("ls -la") mock_transport.open_session.assert_called_once() mock_transport.set_keepalive.assert_called_once_with(5) -@patch('paramiko.SSHClient') +@patch("paramiko.SSHClient") def test_exec_command_wsl_not_found(mock_ssh, connection): - """ Test command execution when wsl.exe is not found """ + """Test command execution when wsl.exe is not found""" mock_client = MagicMock() mock_ssh.return_value = mock_client mock_channel = MagicMock() @@ -259,28 +250,28 @@ def test_exec_command_wsl_not_found(mock_ssh, connection): connection._connected = True connection.ssh = mock_client - with pytest.raises(AnsibleError, match='wsl.exe not found in path of host'): - connection.exec_command('ls -la') + with pytest.raises(AnsibleError, match="wsl.exe not found in path of host"): + connection.exec_command("ls -la") -@patch('paramiko.SSHClient') +@patch("paramiko.SSHClient") def test_exec_command_session_open_failure(mock_ssh, connection): - """ Test exec_command when session opening fails """ + """Test exec_command when session opening fails""" mock_client = MagicMock() mock_transport = MagicMock() - mock_transport.open_session.side_effect = Exception('Failed to open session') + mock_transport.open_session.side_effect = Exception("Failed to open session") mock_client.get_transport.return_value = mock_transport connection._connected = True connection.ssh = mock_client - with 
pytest.raises(AnsibleConnectionFailure, match='Failed to open session'): - connection.exec_command('test command') + with pytest.raises(AnsibleConnectionFailure, match="Failed to open session"): + connection.exec_command("test command") -@patch('paramiko.SSHClient') +@patch("paramiko.SSHClient") def test_exec_command_with_privilege_escalation(mock_ssh, connection): - """ Test exec_command with privilege escalation """ + """Test exec_command with privilege escalation""" mock_client = MagicMock() mock_channel = MagicMock() mock_transport = MagicMock() @@ -294,33 +285,35 @@ def test_exec_command_with_privilege_escalation(mock_ssh, connection): connection.become.expect_prompt.return_value = True connection.become.check_success.return_value = False connection.become.check_password_prompt.return_value = True - connection.become.get_option.return_value = 'sudo_password' + connection.become.get_option.return_value = "sudo_password" - mock_channel.recv.return_value = b'[sudo] password:' + mock_channel.recv.return_value = b"[sudo] password:" mock_channel.recv_exit_status.return_value = 0 mock_channel.makefile.return_value = [b""] mock_channel.makefile_stderr.return_value = [b""] - returncode, stdout, stderr = connection.exec_command('sudo test command') + returncode, stdout, stderr = connection.exec_command("sudo test command") - mock_channel.sendall.assert_called_once_with(b'sudo_password\n') + mock_channel.sendall.assert_called_once_with(b"sudo_password\n") def test_put_file(connection): - """ Test putting a file to the remote system """ + """Test putting a file to the remote system""" connection.exec_command = MagicMock() connection.exec_command.return_value = (0, b"", b"") - with patch('builtins.open', create=True) as mock_open: - mock_open.return_value.__enter__.return_value.read.return_value = b'test content' - connection.put_file('/local/path', '/remote/path') + with patch("builtins.open", create=True) as mock_open: + mock_open.return_value.__enter__.return_value.read.return_value = b"test content" + connection.put_file("/local/path", "/remote/path") - connection.exec_command.assert_called_once_with("/bin/sh -c 'cat > /remote/path'", in_data=b'test content', sudoable=False) + connection.exec_command.assert_called_once_with( + "/bin/sh -c 'cat > /remote/path'", in_data=b"test content", sudoable=False + ) -@patch('paramiko.SSHClient') +@patch("paramiko.SSHClient") def test_put_file_general_error(mock_ssh, connection): - """ Test put_file with general error """ + """Test put_file with general error""" mock_client = MagicMock() mock_ssh.return_value = mock_client mock_channel = MagicMock() @@ -330,18 +323,18 @@ def test_put_file_general_error(mock_ssh, connection): mock_transport.open_session.return_value = mock_channel mock_channel.recv_exit_status.return_value = 1 mock_channel.makefile.return_value = [to_bytes("")] - mock_channel.makefile_stderr.return_value = [to_bytes('Some error')] + mock_channel.makefile_stderr.return_value = [to_bytes("Some error")] connection._connected = True connection.ssh = mock_client - with pytest.raises(AnsibleError, match='error occurred while putting file from /remote/path to /local/path'): - connection.put_file('/remote/path', '/local/path') + with pytest.raises(AnsibleError, match="error occurred while putting file from /remote/path to /local/path"): + connection.put_file("/remote/path", "/local/path") -@patch('paramiko.SSHClient') +@patch("paramiko.SSHClient") def test_put_file_cat_not_found(mock_ssh, connection): - """ Test command execution when cat is not 
found """ + """Test command execution when cat is not found""" mock_client = MagicMock() mock_ssh.return_value = mock_client mock_channel = MagicMock() @@ -351,30 +344,30 @@ def test_put_file_cat_not_found(mock_ssh, connection): mock_transport.open_session.return_value = mock_channel mock_channel.recv_exit_status.return_value = 1 mock_channel.makefile.return_value = [to_bytes("")] - mock_channel.makefile_stderr.return_value = [to_bytes('cat: not found')] + mock_channel.makefile_stderr.return_value = [to_bytes("cat: not found")] connection._connected = True connection.ssh = mock_client - with pytest.raises(AnsibleError, match='cat not found in path of WSL distribution'): - connection.fetch_file('/remote/path', '/local/path') + with pytest.raises(AnsibleError, match="cat not found in path of WSL distribution"): + connection.fetch_file("/remote/path", "/local/path") def test_fetch_file(connection): - """ Test fetching a file from the remote system """ + """Test fetching a file from the remote system""" connection.exec_command = MagicMock() - connection.exec_command.return_value = (0, b'test content', b"") + connection.exec_command.return_value = (0, b"test content", b"") - with patch('builtins.open', create=True) as mock_open: - connection.fetch_file('/remote/path', '/local/path') + with patch("builtins.open", create=True) as mock_open: + connection.fetch_file("/remote/path", "/local/path") connection.exec_command.assert_called_once_with("/bin/sh -c 'cat /remote/path'", sudoable=False) - mock_open.assert_called_with('/local/path', 'wb') + mock_open.assert_called_with("/local/path", "wb") -@patch('paramiko.SSHClient') +@patch("paramiko.SSHClient") def test_fetch_file_general_error(mock_ssh, connection): - """ Test fetch_file with general error """ + """Test fetch_file with general error""" mock_client = MagicMock() mock_ssh.return_value = mock_client mock_channel = MagicMock() @@ -384,18 +377,18 @@ def test_fetch_file_general_error(mock_ssh, connection): mock_transport.open_session.return_value = mock_channel mock_channel.recv_exit_status.return_value = 1 mock_channel.makefile.return_value = [to_bytes("")] - mock_channel.makefile_stderr.return_value = [to_bytes('Some error')] + mock_channel.makefile_stderr.return_value = [to_bytes("Some error")] connection._connected = True connection.ssh = mock_client - with pytest.raises(AnsibleError, match='error occurred while fetching file from /remote/path to /local/path'): - connection.fetch_file('/remote/path', '/local/path') + with pytest.raises(AnsibleError, match="error occurred while fetching file from /remote/path to /local/path"): + connection.fetch_file("/remote/path", "/local/path") -@patch('paramiko.SSHClient') +@patch("paramiko.SSHClient") def test_fetch_file_cat_not_found(mock_ssh, connection): - """ Test command execution when cat is not found """ + """Test command execution when cat is not found""" mock_client = MagicMock() mock_ssh.return_value = mock_client mock_channel = MagicMock() @@ -405,89 +398,95 @@ def test_fetch_file_cat_not_found(mock_ssh, connection): mock_transport.open_session.return_value = mock_channel mock_channel.recv_exit_status.return_value = 1 mock_channel.makefile.return_value = [to_bytes("")] - mock_channel.makefile_stderr.return_value = [to_bytes('cat: not found')] + mock_channel.makefile_stderr.return_value = [to_bytes("cat: not found")] connection._connected = True connection.ssh = mock_client - with pytest.raises(AnsibleError, match='cat not found in path of WSL distribution'): - 
connection.fetch_file('/remote/path', '/local/path') + with pytest.raises(AnsibleError, match="cat not found in path of WSL distribution"): + connection.fetch_file("/remote/path", "/local/path") def test_close(connection): - """ Test connection close """ + """Test connection close""" mock_ssh = MagicMock() connection.ssh = mock_ssh connection._connected = True connection.close() - assert mock_ssh.close.called, 'ssh.close was not called' - assert not connection._connected, 'self._connected is still True' + assert mock_ssh.close.called, "ssh.close was not called" + assert not connection._connected, "self._connected is still True" def test_close_with_lock_file(connection): - """ Test close method with lock file creation """ + """Test close method with lock file creation""" connection._any_keys_added = MagicMock(return_value=True) connection._connected = True - connection.keyfile = '/tmp/wsl-known_hosts-test' - connection.set_option('host_key_checking', True) - connection.set_option('lock_file_timeout', 5) - connection.set_option('record_host_keys', True) + connection.keyfile = "/tmp/wsl-known_hosts-test" + connection.set_option("host_key_checking", True) + connection.set_option("lock_file_timeout", 5) + connection.set_option("record_host_keys", True) connection.ssh = MagicMock() - lock_file_path = os.path.join(os.path.dirname(connection.keyfile), - f'ansible-{os.path.basename(connection.keyfile)}.lock') + lock_file_path = os.path.join( + os.path.dirname(connection.keyfile), f"ansible-{os.path.basename(connection.keyfile)}.lock" + ) try: connection.close() - assert os.path.exists(lock_file_path), 'Lock file was not created' + assert os.path.exists(lock_file_path), "Lock file was not created" lock_stat = os.stat(lock_file_path) - assert lock_stat.st_mode & 0o777 == 0o600, 'Incorrect lock file permissions' + assert lock_stat.st_mode & 0o777 == 0o600, "Incorrect lock file permissions" finally: Path(lock_file_path).unlink(missing_ok=True) -@patch('pathlib.Path.unlink') -@patch('os.path.exists') +@patch("pathlib.Path.unlink") +@patch("os.path.exists") def test_close_lock_file_time_out_error_handling(mock_exists, mock_unlink, connection): - """ Test close method with lock file timeout error """ + """Test close method with lock file timeout error""" connection._any_keys_added = MagicMock(return_value=True) connection._connected = True connection._save_ssh_host_keys = MagicMock() - connection.keyfile = '/tmp/wsl-known_hosts-test' - connection.set_option('host_key_checking', True) - connection.set_option('lock_file_timeout', 5) - connection.set_option('record_host_keys', True) + connection.keyfile = "/tmp/wsl-known_hosts-test" + connection.set_option("host_key_checking", True) + connection.set_option("lock_file_timeout", 5) + connection.set_option("record_host_keys", True) connection.ssh = MagicMock() mock_exists.return_value = False - matcher = f'writing lock file for {connection.keyfile} ran in to the timeout of {connection.get_option("lock_file_timeout")}s' + matcher = f"writing lock file for {connection.keyfile} ran in to the timeout of {connection.get_option('lock_file_timeout')}s" with pytest.raises(AnsibleError, match=matcher): - with patch('os.getuid', return_value=1000), \ - patch('os.getgid', return_value=1000), \ - patch('os.chmod'), patch('os.chown'), \ - patch('os.rename'), \ - patch.object(FileLock, 'lock_file', side_effect=LockTimeout()): + with ( + patch("os.getuid", return_value=1000), + patch("os.getgid", return_value=1000), + patch("os.chmod"), + patch("os.chown"), + 
patch("os.rename"), + patch.object(FileLock, "lock_file", side_effect=LockTimeout()), + ): connection.close() -@patch('ansible_collections.community.general.plugins.module_utils._filelock.FileLock.lock_file') -@patch('tempfile.NamedTemporaryFile') -@patch('os.chmod') -@patch('os.chown') -@patch('os.rename') -@patch('os.path.exists') -def test_tempfile_creation_and_move(mock_exists, mock_rename, mock_chown, mock_chmod, mock_tempfile, mock_lock_file, connection): - """ Test tempfile creation and move during close """ +@patch("ansible_collections.community.general.plugins.module_utils._filelock.FileLock.lock_file") +@patch("tempfile.NamedTemporaryFile") +@patch("os.chmod") +@patch("os.chown") +@patch("os.rename") +@patch("os.path.exists") +def test_tempfile_creation_and_move( + mock_exists, mock_rename, mock_chown, mock_chmod, mock_tempfile, mock_lock_file, connection +): + """Test tempfile creation and move during close""" connection._any_keys_added = MagicMock(return_value=True) connection._connected = True connection._save_ssh_host_keys = MagicMock() - connection.keyfile = '/tmp/wsl-known_hosts-test' - connection.set_option('host_key_checking', True) - connection.set_option('lock_file_timeout', 5) - connection.set_option('record_host_keys', True) + connection.keyfile = "/tmp/wsl-known_hosts-test" + connection.set_option("host_key_checking", True) + connection.set_option("lock_file_timeout", 5) + connection.set_option("record_host_keys", True) connection.ssh = MagicMock() mock_exists.return_value = False @@ -497,7 +496,7 @@ def test_tempfile_creation_and_move(mock_exists, mock_rename, mock_chown, mock_c mock_lock_file_instance.__enter__.return_value = None mock_tempfile_instance = MagicMock() - mock_tempfile_instance.name = '/tmp/mock_tempfile' + mock_tempfile_instance.name = "/tmp/mock_tempfile" mock_tempfile.return_value.__enter__.return_value = mock_tempfile_instance mode = 0o644 @@ -505,29 +504,29 @@ def test_tempfile_creation_and_move(mock_exists, mock_rename, mock_chown, mock_c gid = 1000 key_dir = os.path.dirname(connection.keyfile) - with patch('os.getuid', return_value=uid), patch('os.getgid', return_value=gid): + with patch("os.getuid", return_value=uid), patch("os.getgid", return_value=gid): connection.close() - connection._save_ssh_host_keys.assert_called_once_with('/tmp/mock_tempfile') - mock_chmod.assert_called_once_with('/tmp/mock_tempfile', mode) - mock_chown.assert_called_once_with('/tmp/mock_tempfile', uid, gid) - mock_rename.assert_called_once_with('/tmp/mock_tempfile', connection.keyfile) + connection._save_ssh_host_keys.assert_called_once_with("/tmp/mock_tempfile") + mock_chmod.assert_called_once_with("/tmp/mock_tempfile", mode) + mock_chown.assert_called_once_with("/tmp/mock_tempfile", uid, gid) + mock_rename.assert_called_once_with("/tmp/mock_tempfile", connection.keyfile) mock_tempfile.assert_called_once_with(dir=key_dir, delete=False) -@patch('pathlib.Path.unlink') -@patch('tempfile.NamedTemporaryFile') -@patch('ansible_collections.community.general.plugins.module_utils._filelock.FileLock.lock_file') -@patch('os.path.exists') +@patch("pathlib.Path.unlink") +@patch("tempfile.NamedTemporaryFile") +@patch("ansible_collections.community.general.plugins.module_utils._filelock.FileLock.lock_file") +@patch("os.path.exists") def test_close_tempfile_error_handling(mock_exists, mock_lock_file, mock_tempfile, mock_unlink, connection): - """ Test tempfile creation error """ + """Test tempfile creation error""" connection._any_keys_added = MagicMock(return_value=True) 
connection._connected = True connection._save_ssh_host_keys = MagicMock() - connection.keyfile = '/tmp/wsl-known_hosts-test' - connection.set_option('host_key_checking', True) - connection.set_option('lock_file_timeout', 5) - connection.set_option('record_host_keys', True) + connection.keyfile = "/tmp/wsl-known_hosts-test" + connection.set_option("host_key_checking", True) + connection.set_option("lock_file_timeout", 5) + connection.set_option("record_host_keys", True) connection.ssh = MagicMock() mock_exists.return_value = False @@ -537,29 +536,30 @@ def test_close_tempfile_error_handling(mock_exists, mock_lock_file, mock_tempfil mock_lock_file_instance.__enter__.return_value = None mock_tempfile_instance = MagicMock() - mock_tempfile_instance.name = '/tmp/mock_tempfile' + mock_tempfile_instance.name = "/tmp/mock_tempfile" mock_tempfile.return_value.__enter__.return_value = mock_tempfile_instance - with pytest.raises(AnsibleError, match='error occurred while writing SSH host keys!'): - with patch.object(os, 'chmod', side_effect=Exception()): + with pytest.raises(AnsibleError, match="error occurred while writing SSH host keys!"): + with patch.object(os, "chmod", side_effect=Exception()): connection.close() mock_unlink.assert_called_with(missing_ok=True) -@patch('ansible_collections.community.general.plugins.module_utils._filelock.FileLock.lock_file') -@patch('os.path.exists') +@patch("ansible_collections.community.general.plugins.module_utils._filelock.FileLock.lock_file") +@patch("os.path.exists") def test_close_with_invalid_host_key(mock_exists, mock_lock_file, connection): - """ Test load_system_host_keys on close with InvalidHostKey error """ + """Test load_system_host_keys on close with InvalidHostKey error""" connection._any_keys_added = MagicMock(return_value=True) connection._connected = True connection._save_ssh_host_keys = MagicMock() - connection.keyfile = '/tmp/wsl-known_hosts-test' - connection.set_option('host_key_checking', True) - connection.set_option('lock_file_timeout', 5) - connection.set_option('record_host_keys', True) + connection.keyfile = "/tmp/wsl-known_hosts-test" + connection.set_option("host_key_checking", True) + connection.set_option("lock_file_timeout", 5) + connection.set_option("record_host_keys", True) connection.ssh = MagicMock() connection.ssh.load_system_host_keys.side_effect = paramiko.hostkeys.InvalidHostKey( - "Bad Line!", Exception('Something crashed!')) + "Bad Line!", Exception("Something crashed!") + ) mock_exists.return_value = False @@ -572,7 +572,7 @@ def test_close_with_invalid_host_key(mock_exists, mock_lock_file, connection): def test_reset(connection): - """ Test connection reset """ + """Test connection reset""" connection._connected = True connection.close = MagicMock() connection._connect = MagicMock() diff --git a/tests/unit/plugins/filter/test_crc32.py b/tests/unit/plugins/filter/test_crc32.py index 542be68c96a..8c6ed9b0c31 100644 --- a/tests/unit/plugins/filter/test_crc32.py +++ b/tests/unit/plugins/filter/test_crc32.py @@ -10,6 +10,5 @@ class TestFilterCrc32(unittest.TestCase): - def test_checksum(self): - self.assertEqual(crc32s('test'), 'd87f7e0c') + self.assertEqual(crc32s("test"), "d87f7e0c") diff --git a/tests/unit/plugins/filter/test_json_patch.py b/tests/unit/plugins/filter/test_json_patch.py index 2fa026975c4..7a96a195b36 100644 --- a/tests/unit/plugins/filter/test_json_patch.py +++ b/tests/unit/plugins/filter/test_json_patch.py @@ -48,21 +48,15 @@ def test_patch_remove(self): self.assertEqual(result, {"a": 1, "d": 3}) def 
test_patch_replace(self): - result = self.json_patch( - {"a": 1, "b": {"c": 2}, "d": 3}, "replace", "/b", {"x": 99} - ) + result = self.json_patch({"a": 1, "b": {"c": 2}, "d": 3}, "replace", "/b", {"x": 99}) self.assertEqual(result, {"a": 1, "b": {"x": 99}, "d": 3}) def test_patch_copy(self): - result = self.json_patch( - {"a": 1, "b": {"c": 2}, "d": 3}, "copy", "/d", **{"from": "/b"} - ) + result = self.json_patch({"a": 1, "b": {"c": 2}, "d": 3}, "copy", "/d", **{"from": "/b"}) self.assertEqual(result, {"a": 1, "b": {"c": 2}, "d": {"c": 2}}) def test_patch_move(self): - result = self.json_patch( - {"a": 1, "b": {"c": 2}, "d": 3}, "move", "/d", **{"from": "/b"} - ) + result = self.json_patch({"a": 1, "b": {"c": 2}, "d": 3}, "move", "/d", **{"from": "/b"}) self.assertEqual(result, {"a": 1, "d": {"c": 2}}) def test_patch_test_pass(self): @@ -75,9 +69,7 @@ def test_patch_test_fail_none(self): def test_patch_test_fail_fail(self): with self.assertRaises(AnsibleFilterError) as context: - self.json_patch( - {"a": 1, "b": {"c": 2}, "d": 3}, "test", "/b/c", 99, fail_test=True - ) + self.json_patch({"a": 1, "b": {"c": 2}, "d": 3}, "test", "/b/c", 99, fail_test=True) self.assertTrue("json_patch: test operation failed" in str(context.exception)) def test_patch_remove_nonexisting(self): @@ -188,9 +180,7 @@ def test_patch_recipe_process(self): {"op": "test", "path": "/baz/1", "value": 20}, ], ) - self.assertEqual( - result, {"bar": [2], "bax": 1, "bay": 1, "baz": [10, 20, 30], "foo": 1} - ) + self.assertEqual(result, {"bar": [2], "bax": 1, "bay": 1, "baz": [10, 20, 30], "foo": 1}) def test_patch_recipe_test_fail(self): result = self.json_patch_recipe( @@ -246,9 +236,7 @@ def test_patch_recipe_test_fail_fail_pos(self): [{"op": "test", "path": "/b/c", "value": 99}], True, ) - self.assertTrue( - "json_patch_recipe: test operation failed" in str(context.exception) - ) + self.assertTrue("json_patch_recipe: test operation failed" in str(context.exception)) def test_patch_recipe_test_fail_fail_kw(self): with self.assertRaises(AnsibleFilterError) as context: @@ -257,9 +245,7 @@ def test_patch_recipe_test_fail_fail_kw(self): [{"op": "test", "path": "/b/c", "value": 99}], fail_test=True, ) - self.assertTrue( - "json_patch_recipe: test operation failed" in str(context.exception) - ) + self.assertTrue("json_patch_recipe: test operation failed" in str(context.exception)) # json_diff @@ -300,9 +286,7 @@ def test_diff_missing_lib(self): def test_diff_arg_checking(self): with self.assertRaises(AnsibleFilterError) as context: self.json_diff(1, {}) - self.assertEqual( - str(context.exception), "json_diff: input is not dictionary, list or string" - ) + self.assertEqual(str(context.exception), "json_diff: input is not dictionary, list or string") with self.assertRaises(AnsibleFilterError) as context: self.json_diff({}, 1) self.assertEqual( diff --git a/tests/unit/plugins/inventory/test_cobbler.py b/tests/unit/plugins/inventory/test_cobbler.py index fd544bae12a..fb54ceac159 100644 --- a/tests/unit/plugins/inventory/test_cobbler.py +++ b/tests/unit/plugins/inventory/test_cobbler.py @@ -21,4 +21,4 @@ def test_verify_file(tmp_path, inventory): def test_verify_file_bad_config(inventory): - assert inventory.verify_file('foobar.cobbler.yml') is False + assert inventory.verify_file("foobar.cobbler.yml") is False diff --git a/tests/unit/plugins/inventory/test_icinga2.py b/tests/unit/plugins/inventory/test_icinga2.py index 59067aa2c06..2597490ccf6 100644 --- a/tests/unit/plugins/inventory/test_icinga2.py +++ 
b/tests/unit/plugins/inventory/test_icinga2.py @@ -20,7 +20,7 @@ def inventory(): def test_verify_file_bad_config(inventory): - assert inventory.verify_file('foobar.icinga2.yml') is False + assert inventory.verify_file("foobar.icinga2.yml") is False def check_api(): @@ -33,58 +33,58 @@ def query_hosts(hosts=None, attrs=None, joins=None, host_filter=None): # _get_hosts - list of dicts json_host_data = [ { - 'attrs': { - 'address': 'test-host1.home.local', - 'groups': ['home_servers', 'servers_dell'], - 'display_name': 'Test Host 1', - 'state': 0.0, - 'state_type': 1.0 + "attrs": { + "address": "test-host1.home.local", + "groups": ["home_servers", "servers_dell"], + "display_name": "Test Host 1", + "state": 0.0, + "state_type": 1.0, }, - 'joins': {}, - 'meta': {}, - 'name': 'test-host1', - 'type': 'Host' + "joins": {}, + "meta": {}, + "name": "test-host1", + "type": "Host", }, { - 'attrs': { - 'address': 'test-host2.home.local', - 'display_name': 'Test Host 2', - 'groups': ['home_servers', 'servers_hp'], - 'state': 1.0, - 'state_type': 1.0 + "attrs": { + "address": "test-host2.home.local", + "display_name": "Test Host 2", + "groups": ["home_servers", "servers_hp"], + "state": 1.0, + "state_type": 1.0, }, - 'joins': {}, - 'meta': {}, - 'name': 'test-host2', - 'type': 'Host' + "joins": {}, + "meta": {}, + "name": "test-host2", + "type": "Host", }, { - 'attrs': { - 'address': '', - 'display_name': 'Test Host 3', - 'groups': ['not_home_servers', 'servers_hp'], - 'state': 1.0, - 'state_type': 1.0 + "attrs": { + "address": "", + "display_name": "Test Host 3", + "groups": ["not_home_servers", "servers_hp"], + "state": 1.0, + "state_type": 1.0, }, - 'joins': {}, - 'meta': {}, - 'name': 'test-host3.example.com', - 'type': 'Host' - } + "joins": {}, + "meta": {}, + "name": "test-host3.example.com", + "type": "Host", + }, ] return json_host_data def get_option(option): - if option == 'groups': + if option == "groups": return {} - elif option == 'keyed_groups': + elif option == "keyed_groups": return [] - elif option == 'compose': + elif option == "compose": return {} - elif option == 'strict': + elif option == "strict": return False - elif option == 'group_by_hostgroups': + elif option == "group_by_hostgroups": return True else: return None @@ -92,8 +92,8 @@ def get_option(option): def test_populate(inventory, mocker): # module settings - inventory.icinga2_user = 'ansible' - inventory.icinga2_password = 'password' + inventory.icinga2_user = "ansible" + inventory.icinga2_password = "password" inventory.icinga2_url = "https://localhost:5665/v1" inventory.inventory_attr = "address" inventory.group_by_hostgroups = True @@ -105,46 +105,46 @@ def test_populate(inventory, mocker): inventory._populate() # get different hosts - host1_info = inventory.inventory.get_host('test-host1.home.local') + host1_info = inventory.inventory.get_host("test-host1.home.local") print(host1_info) - host2_info = inventory.inventory.get_host('test-host2.home.local') + host2_info = inventory.inventory.get_host("test-host2.home.local") print(host2_info) - host3_info = inventory.inventory.get_host('test-host3.example.com') - assert inventory.inventory.get_host('test-host3.example.com') is not None + host3_info = inventory.inventory.get_host("test-host3.example.com") + assert inventory.inventory.get_host("test-host3.example.com") is not None print(host3_info) # check if host in the home_servers group - assert 'home_servers' in inventory.inventory.groups - group1_data = inventory.inventory.groups['home_servers'] + assert "home_servers" 
in inventory.inventory.groups + group1_data = inventory.inventory.groups["home_servers"] group1_test_data = [host1_info, host2_info] print(group1_data.hosts) print(group1_test_data) assert group1_data.hosts == group1_test_data # Test servers_hp group - group2_data = inventory.inventory.groups['servers_hp'] + group2_data = inventory.inventory.groups["servers_hp"] group2_test_data = [host2_info, host3_info] print(group2_data.hosts) print(group2_test_data) assert group2_data.hosts == group2_test_data # check if host state rules apply properly - assert host1_info.get_vars()['state'] == 'on' - assert host1_info.get_vars()['display_name'] == "Test Host 1" - assert host2_info.get_vars()['state'] == 'off' - assert host3_info.get_vars().get('ansible_host') is None + assert host1_info.get_vars()["state"] == "on" + assert host1_info.get_vars()["display_name"] == "Test Host 1" + assert host2_info.get_vars()["state"] == "off" + assert host3_info.get_vars().get("ansible_host") is None # Confirm attribute options switcher inventory.inventory_attr = "name" inventory._populate() - assert inventory.inventory.get_host('test-host3.example.com') is not None - host2_info = inventory.inventory.get_host('test-host2') + assert inventory.inventory.get_host("test-host3.example.com") is not None + host2_info = inventory.inventory.get_host("test-host2") assert host2_info is not None - assert host2_info.get_vars().get('ansible_host') == 'test-host2.home.local' + assert host2_info.get_vars().get("ansible_host") == "test-host2.home.local" # Confirm attribute options switcher inventory.inventory_attr = "display_name" inventory._populate() - assert inventory.inventory.get_host('Test Host 3') is not None - host2_info = inventory.inventory.get_host('Test Host 2') + assert inventory.inventory.get_host("Test Host 3") is not None + host2_info = inventory.inventory.get_host("Test Host 2") assert host2_info is not None - assert host2_info.get_vars().get('ansible_host') == 'test-host2.home.local' + assert host2_info.get_vars().get("ansible_host") == "test-host2.home.local" diff --git a/tests/unit/plugins/inventory/test_iocage.py b/tests/unit/plugins/inventory/test_iocage.py index 8efd2c7f7b4..aed960b3e22 100644 --- a/tests/unit/plugins/inventory/test_iocage.py +++ b/tests/unit/plugins/inventory/test_iocage.py @@ -1,4 +1,3 @@ - # Copyright (c) 2024 Vladimir Botka # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -19,51 +18,51 @@ def inventory(): inv = InventoryModule() inv.inventory = InventoryData() inv.templar = Templar(None) - inv.jails = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.txt') - inv.js_ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.yml') - inv.jails_dhcp = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.txt') - inv.js_dhcp_ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.yml') - inv.jails_dhcp_nr = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.txt') - inv.js_dhcp_nr_ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.yml') - prpts_101 = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_101.txt') - prpts_102 = load_txt_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_102.txt') - prpts_103 = 
load_txt_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_103.txt') - inv.prpts = {'test_101': prpts_101, 'test_102': prpts_102, 'test_103': prpts_103} - inv.ps_ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_properties.yml') - inv.ok = load_yml_data('tests/unit/plugins/inventory/fixtures/iocage/iocage_inventory.yml') + inv.jails = load_txt_data("tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.txt") + inv.js_ok = load_yml_data("tests/unit/plugins/inventory/fixtures/iocage/iocage_jails.yml") + inv.jails_dhcp = load_txt_data("tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.txt") + inv.js_dhcp_ok = load_yml_data("tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp.yml") + inv.jails_dhcp_nr = load_txt_data("tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.txt") + inv.js_dhcp_nr_ok = load_yml_data("tests/unit/plugins/inventory/fixtures/iocage/iocage_jails_dhcp_not_running.yml") + prpts_101 = load_txt_data("tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_101.txt") + prpts_102 = load_txt_data("tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_102.txt") + prpts_103 = load_txt_data("tests/unit/plugins/inventory/fixtures/iocage/iocage_properties_test_103.txt") + inv.prpts = {"test_101": prpts_101, "test_102": prpts_102, "test_103": prpts_103} + inv.ps_ok = load_yml_data("tests/unit/plugins/inventory/fixtures/iocage/iocage_properties.yml") + inv.ok = load_yml_data("tests/unit/plugins/inventory/fixtures/iocage/iocage_inventory.yml") return inv def load_txt_data(path): - with open(path, 'r') as f: + with open(path, "r") as f: s = f.read() return s def load_yml_data(path): - with open(path, 'r') as f: + with open(path, "r") as f: d = yaml.safe_load(f) return d def get_option(option): groups = {} - groups['test'] = make_trusted("inventory_hostname.startswith('test')") + groups["test"] = make_trusted("inventory_hostname.startswith('test')") - if option == 'groups': + if option == "groups": return groups - elif option == 'keyed_groups': + elif option == "keyed_groups": return [] - elif option == 'compose': + elif option == "compose": return {} - elif option == 'strict': + elif option == "strict": return False else: return None def test_verify_file_bad_config(inventory): - assert inventory.verify_file('foobar.iocage.yml') is False + assert inventory.verify_file("foobar.iocage.yml") is False def test_verify_file(tmp_path, inventory): @@ -73,53 +72,61 @@ def test_verify_file(tmp_path, inventory): def test_get_jails(inventory): - # jails - results = {'_meta': {'hostvars': {}}} + results = {"_meta": {"hostvars": {}}} inventory.get_jails(inventory.jails, results) assert results == inventory.js_ok # jails_dhcp - results = {'_meta': {'hostvars': {}}} + results = {"_meta": {"hostvars": {}}} inventory.get_jails(inventory.jails_dhcp, results) assert results == inventory.js_dhcp_ok # jails_dhcp_not_running - results = {'_meta': {'hostvars': {}}} + results = {"_meta": {"hostvars": {}}} inventory.get_jails(inventory.jails_dhcp_nr, results) assert results == inventory.js_dhcp_nr_ok def test_get_properties(inventory): - results = {'_meta': {'hostvars': {}}} + results = {"_meta": {"hostvars": {}}} inventory.get_jails(inventory.jails, results) - for hostname, host_vars in results['_meta']['hostvars'].items(): + for hostname, host_vars in results["_meta"]["hostvars"].items(): inventory.get_properties(inventory.prpts[hostname], results, hostname) assert results == inventory.ps_ok 
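# --- sketch, not part of the upstream patch --------------------------------
# The inventory plugin tests in this file (and the icinga2, opennebula and
# xen_orchestra tests elsewhere in this patch) all stub the plugin's
# get_option() with mocker.MagicMock(side_effect=...), so the plugin sees
# deterministic settings without parsing a real inventory source. Below is a
# minimal, self-contained illustration of that pattern; make_get_option and
# plugin_options are illustrative names, not identifiers from the collection.
from unittest.mock import MagicMock

def make_get_option(settings):
    def get_option(option):
        # Known options return their configured value; anything else falls
        # back to None, mirroring the switcher functions used in these tests.
        return settings.get(option)
    return get_option

plugin_options = {"groups": {}, "keyed_groups": [], "compose": {}, "strict": False}
get_option_stub = MagicMock(side_effect=make_get_option(plugin_options))

assert get_option_stub("strict") is False   # configured option
assert get_option_stub("cache") is None     # unconfigured option -> None
# ----------------------------------------------------------------------------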
def test_populate(inventory, mocker): - results = {'_meta': {'hostvars': {}}} + results = {"_meta": {"hostvars": {}}} inventory.get_jails(inventory.jails, results) - for hostname, host_vars in results['_meta']['hostvars'].items(): + for hostname, host_vars in results["_meta"]["hostvars"].items(): inventory.get_properties(inventory.prpts[hostname], results, hostname) inventory.get_option = mocker.MagicMock(side_effect=get_option) inventory.populate(results) # test - hosts = ('test_101', 'test_102', 'test_103') - vars = ('iocage_basejail', 'iocage_boot', 'iocage_ip4', 'iocage_ip6', 'iocage_properties', - 'iocage_release', 'iocage_state', 'iocage_template', 'iocage_type') + hosts = ("test_101", "test_102", "test_103") + vars = ( + "iocage_basejail", + "iocage_boot", + "iocage_ip4", + "iocage_ip6", + "iocage_properties", + "iocage_release", + "iocage_state", + "iocage_template", + "iocage_type", + ) # test host_vars for host in hosts: h = inventory.inventory.get_host(host) for var in vars: - assert inventory.ok['all']['children']['test']['hosts'][host][var] == h.get_vars()[var] + assert inventory.ok["all"]["children"]["test"]["hosts"][host][var] == h.get_vars()[var] # test groups - test_101_info = inventory.inventory.get_host('test_101') - test_102_info = inventory.inventory.get_host('test_102') - test_103_info = inventory.inventory.get_host('test_103') - g = inventory.inventory.groups['test'] + test_101_info = inventory.inventory.get_host("test_101") + test_102_info = inventory.inventory.get_host("test_102") + test_103_info = inventory.inventory.get_host("test_103") + g = inventory.inventory.groups["test"] assert g.hosts == [test_101_info, test_102_info, test_103_info] diff --git a/tests/unit/plugins/inventory/test_linode.py b/tests/unit/plugins/inventory/test_linode.py index c4d9e8be9c9..7add0fd5b2c 100644 --- a/tests/unit/plugins/inventory/test_linode.py +++ b/tests/unit/plugins/inventory/test_linode.py @@ -6,7 +6,7 @@ import pytest -linode_apiv4 = pytest.importorskip('linode_api4') +linode_apiv4 = pytest.importorskip("linode_api4") from ansible.errors import AnsibleError from ansible.parsing.dataloader import DataLoader @@ -23,10 +23,10 @@ def inventory(): def test_missing_access_token_lookup(inventory): loader = DataLoader() - inventory._options = {'access_token': None} + inventory._options = {"access_token": None} with pytest.raises(AnsibleError) as error_message: inventory._build_client(loader) - assert 'Could not retrieve Linode access token' in error_message + assert "Could not retrieve Linode access token" in error_message def test_verify_file_yml(tmp_path, inventory): diff --git a/tests/unit/plugins/inventory/test_lxd.py b/tests/unit/plugins/inventory/test_lxd.py index ea625b5ba1c..5bf937622be 100644 --- a/tests/unit/plugins/inventory/test_lxd.py +++ b/tests/unit/plugins/inventory/test_lxd.py @@ -12,24 +12,43 @@ HOST_COMPARATIVE_DATA = { - 'ansible_connection': 'ssh', 'ansible_host': '10.98.143.199', 'ansible_lxd_os': 'ubuntu', 'ansible_lxd_release': 'focal', - 'ansible_lxd_profile': ['default'], 'ansible_lxd_state': 'running', 'ansible_lxd_location': 'Berlin', - 'ansible_lxd_vlan_ids': {'my-macvlan': 666}, 'inventory_hostname': 'vlantest', 'inventory_hostname_short': 'vlantest'} + "ansible_connection": "ssh", + "ansible_host": "10.98.143.199", + "ansible_lxd_os": "ubuntu", + "ansible_lxd_release": "focal", + "ansible_lxd_profile": ["default"], + "ansible_lxd_state": "running", + "ansible_lxd_location": "Berlin", + "ansible_lxd_vlan_ids": {"my-macvlan": 666}, + "inventory_hostname": 
"vlantest", + "inventory_hostname_short": "vlantest", +} GROUP_COMPARATIVE_DATA = { - 'all': [], 'ungrouped': [], 'testpattern': ['vlantest'], 'vlan666': ['vlantest'], 'locationBerlin': ['vlantest'], - 'osUbuntu': ['vlantest'], 'releaseFocal': ['vlantest'], 'releaseBionic': [], 'profileDefault': ['vlantest'], - 'profileX11': [], 'netRangeIPv4': ['vlantest'], 'netRangeIPv6': ['vlantest']} + "all": [], + "ungrouped": [], + "testpattern": ["vlantest"], + "vlan666": ["vlantest"], + "locationBerlin": ["vlantest"], + "osUbuntu": ["vlantest"], + "releaseFocal": ["vlantest"], + "releaseBionic": [], + "profileDefault": ["vlantest"], + "profileX11": [], + "netRangeIPv4": ["vlantest"], + "netRangeIPv6": ["vlantest"], +} GROUP_Config = { - 'testpattern': {'type': 'pattern', 'attribute': 'test'}, - 'vlan666': {'type': 'vlanid', 'attribute': 666}, - 'locationBerlin': {'type': 'location', 'attribute': 'Berlin'}, - 'osUbuntu': {'type': 'os', 'attribute': 'ubuntu'}, - 'releaseFocal': {'type': 'release', 'attribute': 'focal'}, - 'releaseBionic': {'type': 'release', 'attribute': 'bionic'}, - 'profileDefault': {'type': 'profile', 'attribute': 'default'}, - 'profileX11': {'type': 'profile', 'attribute': 'x11'}, - 'netRangeIPv4': {'type': 'network_range', 'attribute': '10.98.143.0/24'}, - 'netRangeIPv6': {'type': 'network_range', 'attribute': 'fd42:bd00:7b11:2167:216:3eff::/96'}} + "testpattern": {"type": "pattern", "attribute": "test"}, + "vlan666": {"type": "vlanid", "attribute": 666}, + "locationBerlin": {"type": "location", "attribute": "Berlin"}, + "osUbuntu": {"type": "os", "attribute": "ubuntu"}, + "releaseFocal": {"type": "release", "attribute": "focal"}, + "releaseBionic": {"type": "release", "attribute": "bionic"}, + "profileDefault": {"type": "profile", "attribute": "default"}, + "profileX11": {"type": "profile", "attribute": "x11"}, + "netRangeIPv4": {"type": "network_range", "attribute": "10.98.143.0/24"}, + "netRangeIPv6": {"type": "network_range", "attribute": "fd42:bd00:7b11:2167:216:3eff::/96"}, +} @pytest.fixture @@ -38,13 +57,13 @@ def inventory(): inv.inventory = InventoryData() # Test Values - inv.data = inv.load_json_data('tests/unit/plugins/inventory/fixtures/lxd_inventory.atd') # Load Test Data + inv.data = inv.load_json_data("tests/unit/plugins/inventory/fixtures/lxd_inventory.atd") # Load Test Data inv.groupby = GROUP_Config - inv.prefered_instance_network_interface = 'eth' - inv.prefered_instance_network_family = 'inet' - inv.filter = 'running' + inv.prefered_instance_network_interface = "eth" + inv.prefered_instance_network_family = "inet" + inv.filter = "running" inv.dump_data = False - inv.type_filter = 'both' + inv.type_filter = "both" return inv @@ -56,7 +75,7 @@ def test_verify_file(tmp_path, inventory): def test_verify_file_bad_config(inventory): - assert inventory.verify_file('foobar.lxd.yml') is False + assert inventory.verify_file("foobar.lxd.yml") is False def test_build_inventory_hosts(inventory): @@ -64,7 +83,7 @@ def test_build_inventory_hosts(inventory): After the inventory plugin has run with the test data, the result of the host is checked.""" inventory._populate() - generated_data = inventory.inventory.get_host('vlantest').get_vars() + generated_data = inventory.inventory.get_host("vlantest").get_vars() eq = True for key, value in HOST_COMPARATIVE_DATA.items(): @@ -94,7 +113,7 @@ def test_build_inventory_groups_with_no_groupselection(inventory): inventory.groupby = None inventory._populate() generated_data = inventory.inventory.get_groups_dict() - 
group_comparative_data = {'all': [], 'ungrouped': []} + group_comparative_data = {"all": [], "ungrouped": []} eq = True print(f"data: {generated_data}") diff --git a/tests/unit/plugins/inventory/test_opennebula.py b/tests/unit/plugins/inventory/test_opennebula.py index 5c8d5aed97c..953002490c1 100644 --- a/tests/unit/plugins/inventory/test_opennebula.py +++ b/tests/unit/plugins/inventory/test_opennebula.py @@ -47,14 +47,14 @@ def access(f, m, *args, **kwargs): class HistoryEntry: def __init__(self): - self.SEQ = '384' - self.HOSTNAME = 'sam-691-sam' - self.HID = '10' - self.CID = '0' - self.DS_ID = '100' - self.VM_MAD = 'kvm' - self.TM_MAD = '3par' - self.ACTION = '0' + self.SEQ = "384" + self.HOSTNAME = "sam-691-sam" + self.HID = "10" + self.CID = "0" + self.DS_ID = "100" + self.VM_MAD = "kvm" + self.TM_MAD = "3par" + self.ACTION = "0" class HistoryRecords: @@ -76,191 +76,223 @@ def test_verify_file(tmp_path, inventory): def test_verify_file_bad_config(inventory): - assert inventory.verify_file('foobar.opennebula.yml') is False + assert inventory.verify_file("foobar.opennebula.yml") is False def get_vm_pool_json(): - with open('tests/unit/plugins/inventory/fixtures/opennebula_inventory.json', 'r') as json_file: + with open("tests/unit/plugins/inventory/fixtures/opennebula_inventory.json", "r") as json_file: jsondata = json.load(json_file) - data = type('pyone.bindings.VM_POOLSub', (object,), {'VM': []})() + data = type("pyone.bindings.VM_POOLSub", (object,), {"VM": []})() for fake_server in jsondata: - data.VM.append(type('pyone.bindings.VMType90Sub', (object,), fake_server)()) + data.VM.append(type("pyone.bindings.VMType90Sub", (object,), fake_server)()) return data def get_vm_pool(): - data = type('pyone.bindings.VM_POOLSub', (object,), {'VM': []})() - - vm = type('pyone.bindings.VMType90Sub', (object,), { - 'DEPLOY_ID': 'one-7157', - 'ETIME': 0, - 'GID': 132, - 'GNAME': 'CSApparelVDC', - 'HISTORY_RECORDS': HistoryRecords(), - 'ID': 7157, - 'LAST_POLL': 1632762935, - 'LCM_STATE': 3, - 'MONITORING': {}, - 'NAME': 'sam-691-sam', - 'RESCHED': 0, - 'SNAPSHOTS': [], - 'STATE': 3, - 'STIME': 1632755245, - 'TEMPLATE': OrderedDict({ - 'NIC': OrderedDict({ - 'AR_ID': '0', - 'BRIDGE': 'onebr80', - 'BRIDGE_TYPE': 'linux', - 'CLUSTER_ID': '0', - 'IP': '172.22.4.187', - 'MAC': '02:00:ac:16:04:bb', - 'MTU': '8192', - 'NAME': 'NIC0', - 'NETWORK': 'Private Net CSApparel', - 'NETWORK_ID': '80', - 'NETWORK_UNAME': 'CSApparelVDC-admin', - 'NIC_ID': '0', - 'PHYDEV': 'team0', - 'SECURITY_GROUPS': '0', - 'TARGET': 'one-7157-0', - 'VLAN_ID': '480', - 'VN_MAD': '802.1Q' - }) - }), - 'USER_TEMPLATE': OrderedDict({ - 'HYPERVISOR': 'kvm', - 'INPUTS_ORDER': '', - 'LOGO': 'images/logos/centos.png', - 'MEMORY_UNIT_COST': 'MB', - 'SCHED_REQUIREMENTS': 'CLUSTER_ID="0"' - }) - })() + data = type("pyone.bindings.VM_POOLSub", (object,), {"VM": []})() + + vm = type( + "pyone.bindings.VMType90Sub", + (object,), + { + "DEPLOY_ID": "one-7157", + "ETIME": 0, + "GID": 132, + "GNAME": "CSApparelVDC", + "HISTORY_RECORDS": HistoryRecords(), + "ID": 7157, + "LAST_POLL": 1632762935, + "LCM_STATE": 3, + "MONITORING": {}, + "NAME": "sam-691-sam", + "RESCHED": 0, + "SNAPSHOTS": [], + "STATE": 3, + "STIME": 1632755245, + "TEMPLATE": OrderedDict( + { + "NIC": OrderedDict( + { + "AR_ID": "0", + "BRIDGE": "onebr80", + "BRIDGE_TYPE": "linux", + "CLUSTER_ID": "0", + "IP": "172.22.4.187", + "MAC": "02:00:ac:16:04:bb", + "MTU": "8192", + "NAME": "NIC0", + "NETWORK": "Private Net CSApparel", + "NETWORK_ID": "80", + "NETWORK_UNAME": 
"CSApparelVDC-admin", + "NIC_ID": "0", + "PHYDEV": "team0", + "SECURITY_GROUPS": "0", + "TARGET": "one-7157-0", + "VLAN_ID": "480", + "VN_MAD": "802.1Q", + } + ) + } + ), + "USER_TEMPLATE": OrderedDict( + { + "HYPERVISOR": "kvm", + "INPUTS_ORDER": "", + "LOGO": "images/logos/centos.png", + "MEMORY_UNIT_COST": "MB", + "SCHED_REQUIREMENTS": 'CLUSTER_ID="0"', + } + ), + }, + )() data.VM.append(vm) - vm = type('pyone.bindings.VMType90Sub', (object,), { - 'DEPLOY_ID': 'one-327', - 'ETIME': 0, - 'GID': 0, - 'GNAME': 'oneadmin', - 'HISTORY_RECORDS': [], - 'ID': 327, - 'LAST_POLL': 1632763543, - 'LCM_STATE': 3, - 'MONITORING': {}, - 'NAME': 'zabbix-327', - 'RESCHED': 0, - 'SNAPSHOTS': [], - 'STATE': 3, - 'STIME': 1575410106, - 'TEMPLATE': OrderedDict({ - 'NIC': [ - OrderedDict({ - 'AR_ID': '0', - 'BRIDGE': 'onerb.103', - 'BRIDGE_TYPE': 'linux', - 'IP': '185.165.1.1', - 'IP6_GLOBAL': '2000:a001::b9ff:feae:aa0d', - 'IP6_LINK': 'fe80::b9ff:feae:aa0d', - 'MAC': '02:00:b9:ae:aa:0d', - 'NAME': 'NIC0', - 'NETWORK': 'Public', - 'NETWORK_ID': '7', - 'NIC_ID': '0', - 'PHYDEV': 'team0', - 'SECURITY_GROUPS': '0', - 'TARGET': 'one-327-0', - 'VLAN_ID': '100', - 'VN_MAD': '802.1Q' - }), - OrderedDict({ - 'AR_ID': '0', - 'BRIDGE': 'br0', - 'BRIDGE_TYPE': 'linux', - 'CLUSTER_ID': '0', - 'IP': '192.168.1.1', - 'MAC': '02:00:c0:a8:3b:01', - 'NAME': 'NIC1', - 'NETWORK': 'Management', - 'NETWORK_ID': '11', - 'NIC_ID': '1', - 'SECURITY_GROUPS': '0', - 'TARGET': 'one-327-1', - 'VN_MAD': 'bridge' - }) - ] - }), - 'USER_TEMPLATE': OrderedDict({ - 'HYPERVISOR': 'kvm', - 'INPUTS_ORDER': '', - 'LABELS': 'Oracle Linux', - 'LOGO': 'images/logos/centos.png', - 'MEMORY_UNIT_COST': 'MB', - 'SAVED_TEMPLATE_ID': '29' - }) - })() + vm = type( + "pyone.bindings.VMType90Sub", + (object,), + { + "DEPLOY_ID": "one-327", + "ETIME": 0, + "GID": 0, + "GNAME": "oneadmin", + "HISTORY_RECORDS": [], + "ID": 327, + "LAST_POLL": 1632763543, + "LCM_STATE": 3, + "MONITORING": {}, + "NAME": "zabbix-327", + "RESCHED": 0, + "SNAPSHOTS": [], + "STATE": 3, + "STIME": 1575410106, + "TEMPLATE": OrderedDict( + { + "NIC": [ + OrderedDict( + { + "AR_ID": "0", + "BRIDGE": "onerb.103", + "BRIDGE_TYPE": "linux", + "IP": "185.165.1.1", + "IP6_GLOBAL": "2000:a001::b9ff:feae:aa0d", + "IP6_LINK": "fe80::b9ff:feae:aa0d", + "MAC": "02:00:b9:ae:aa:0d", + "NAME": "NIC0", + "NETWORK": "Public", + "NETWORK_ID": "7", + "NIC_ID": "0", + "PHYDEV": "team0", + "SECURITY_GROUPS": "0", + "TARGET": "one-327-0", + "VLAN_ID": "100", + "VN_MAD": "802.1Q", + } + ), + OrderedDict( + { + "AR_ID": "0", + "BRIDGE": "br0", + "BRIDGE_TYPE": "linux", + "CLUSTER_ID": "0", + "IP": "192.168.1.1", + "MAC": "02:00:c0:a8:3b:01", + "NAME": "NIC1", + "NETWORK": "Management", + "NETWORK_ID": "11", + "NIC_ID": "1", + "SECURITY_GROUPS": "0", + "TARGET": "one-327-1", + "VN_MAD": "bridge", + } + ), + ] + } + ), + "USER_TEMPLATE": OrderedDict( + { + "HYPERVISOR": "kvm", + "INPUTS_ORDER": "", + "LABELS": "Oracle Linux", + "LOGO": "images/logos/centos.png", + "MEMORY_UNIT_COST": "MB", + "SAVED_TEMPLATE_ID": "29", + } + ), + }, + )() data.VM.append(vm) - vm = type('pyone.bindings.VMType90Sub', (object,), { - 'DEPLOY_ID': 'one-107', - 'ETIME': 0, - 'GID': 0, - 'GNAME': 'oneadmin', - 'HISTORY_RECORDS': [], - 'ID': 107, - 'LAST_POLL': 1632764186, - 'LCM_STATE': 3, - 'MONITORING': {}, - 'NAME': 'gitlab-107', - 'RESCHED': 0, - 'SNAPSHOTS': [], - 'STATE': 3, - 'STIME': 1572485522, - 'TEMPLATE': OrderedDict({ - 'NIC': OrderedDict({ - 'AR_ID': '0', - 'BRIDGE': 'onerb.103', - 'BRIDGE_TYPE': 'linux', - 'IP': 
'185.165.1.3', - 'IP6_GLOBAL': '2000:a001::b9ff:feae:aa03', - 'IP6_LINK': 'fe80::b9ff:feae:aa03', - 'MAC': '02:00:b9:ae:aa:03', - 'NAME': 'NIC0', - 'NETWORK': 'Public', - 'NETWORK_ID': '7', - 'NIC_ID': '0', - 'PHYDEV': 'team0', - 'SECURITY_GROUPS': '0', - 'TARGET': 'one-107-0', - 'VLAN_ID': '100', - 'VN_MAD': '802.1Q' - }) - }), - 'USER_TEMPLATE': OrderedDict({ - 'HYPERVISOR': 'kvm', - 'INPUTS_ORDER': '', - 'LABELS': 'Gitlab,Centos', - 'LOGO': 'images/logos/centos.png', - 'MEMORY_UNIT_COST': 'MB', - 'SCHED_REQUIREMENTS': 'ID="0" | ID="1" | ID="2"', - 'SSH_PORT': '8822' - }) - })() + vm = type( + "pyone.bindings.VMType90Sub", + (object,), + { + "DEPLOY_ID": "one-107", + "ETIME": 0, + "GID": 0, + "GNAME": "oneadmin", + "HISTORY_RECORDS": [], + "ID": 107, + "LAST_POLL": 1632764186, + "LCM_STATE": 3, + "MONITORING": {}, + "NAME": "gitlab-107", + "RESCHED": 0, + "SNAPSHOTS": [], + "STATE": 3, + "STIME": 1572485522, + "TEMPLATE": OrderedDict( + { + "NIC": OrderedDict( + { + "AR_ID": "0", + "BRIDGE": "onerb.103", + "BRIDGE_TYPE": "linux", + "IP": "185.165.1.3", + "IP6_GLOBAL": "2000:a001::b9ff:feae:aa03", + "IP6_LINK": "fe80::b9ff:feae:aa03", + "MAC": "02:00:b9:ae:aa:03", + "NAME": "NIC0", + "NETWORK": "Public", + "NETWORK_ID": "7", + "NIC_ID": "0", + "PHYDEV": "team0", + "SECURITY_GROUPS": "0", + "TARGET": "one-107-0", + "VLAN_ID": "100", + "VN_MAD": "802.1Q", + } + ) + } + ), + "USER_TEMPLATE": OrderedDict( + { + "HYPERVISOR": "kvm", + "INPUTS_ORDER": "", + "LABELS": "Gitlab,Centos", + "LOGO": "images/logos/centos.png", + "MEMORY_UNIT_COST": "MB", + "SCHED_REQUIREMENTS": 'ID="0" | ID="1" | ID="2"', + "SSH_PORT": "8822", + } + ), + }, + )() data.VM.append(vm) return data options_base_test = { - 'api_url': 'https://opennebula:2633/RPC2', - 'api_username': 'username', - 'api_password': 'password', - 'api_authfile': '~/.one/one_auth', - 'hostname': 'v4_first_ip', - 'group_by_labels': True, - 'filter_by_label': None, + "api_url": "https://opennebula:2633/RPC2", + "api_username": "username", + "api_password": "password", + "api_authfile": "~/.one/one_auth", + "hostname": "v4_first_ip", + "group_by_labels": True, + "filter_by_label": None, } @@ -268,6 +300,7 @@ def get_vm_pool(): def mk_get_options(opts_dict): def inner(opt): return opts_dict.get(opt, False) + return inner @@ -275,22 +308,23 @@ def test_get_connection_info(inventory, mocker): inventory.get_option = mocker.MagicMock(side_effect=mk_get_options(options_base_test)) auth = inventory._get_connection_info() - assert (auth.username and auth.password) + assert auth.username and auth.password def test_populate_constructable_templating(mocker): - inventory_filename = '/fake/opennebula.yml' + inventory_filename = "/fake/opennebula.yml" - mocker.patch.object(InventoryModule, '_get_vm_pool', side_effect=get_vm_pool_json) - mocker.patch('ansible_collections.community.general.plugins.inventory.opennebula.HAS_PYONE', True) - mocker.patch('ansible.inventory.manager.unfrackpath', mock_unfrackpath_noop) - mocker.patch('os.path.exists', exists_mock(inventory_filename)) - mocker.patch('os.access', access_mock(inventory_filename)) + mocker.patch.object(InventoryModule, "_get_vm_pool", side_effect=get_vm_pool_json) + mocker.patch("ansible_collections.community.general.plugins.inventory.opennebula.HAS_PYONE", True) + mocker.patch("ansible.inventory.manager.unfrackpath", mock_unfrackpath_noop) + mocker.patch("os.path.exists", exists_mock(inventory_filename)) + mocker.patch("os.access", access_mock(inventory_filename)) # the templating engine is needed for the 
constructable groups/vars # so give that some fake data and instantiate it. - C.INVENTORY_ENABLED = ['community.general.opennebula'] - inventory_file = {inventory_filename: r''' + C.INVENTORY_ENABLED = ["community.general.opennebula"] + inventory_file = { + inventory_filename: r""" --- plugin: community.general.opennebula api_url: https://opennebula:2633/RPC2 @@ -308,43 +342,46 @@ def test_populate_constructable_templating(mocker): keyed_groups: - key: TGROUP prefix: tgroup -'''} +""" + } im = InventoryManager(loader=DictDataLoader(inventory_file), sources=inventory_filename) # note the vm_pool (and json data file) has four hosts, # but the options above asks ansible to filter one out assert len(get_vm_pool_json().VM) == 4 - assert set(vm.NAME for vm in get_vm_pool_json().VM) == set([ - 'terraform_demo_00', - 'terraform_demo_01', - 'terraform_demo_srv_00', - 'bs-windows', - ]) - assert set(im._inventory.hosts) == set(['terraform_demo_00', 'terraform_demo_01', 'terraform_demo_srv_00']) - - host_demo00 = im._inventory.get_host('terraform_demo_00') - host_demo01 = im._inventory.get_host('terraform_demo_01') - host_demosrv = im._inventory.get_host('terraform_demo_srv_00') - - assert 'benchmark_clients' in im._inventory.groups - assert 'lin' in im._inventory.groups - assert im._inventory.groups['benchmark_clients'].hosts == [host_demo00, host_demo01] - assert im._inventory.groups['lin'].hosts == [host_demo00, host_demo01, host_demosrv] + assert set(vm.NAME for vm in get_vm_pool_json().VM) == set( + [ + "terraform_demo_00", + "terraform_demo_01", + "terraform_demo_srv_00", + "bs-windows", + ] + ) + assert set(im._inventory.hosts) == set(["terraform_demo_00", "terraform_demo_01", "terraform_demo_srv_00"]) + + host_demo00 = im._inventory.get_host("terraform_demo_00") + host_demo01 = im._inventory.get_host("terraform_demo_01") + host_demosrv = im._inventory.get_host("terraform_demo_srv_00") + + assert "benchmark_clients" in im._inventory.groups + assert "lin" in im._inventory.groups + assert im._inventory.groups["benchmark_clients"].hosts == [host_demo00, host_demo01] + assert im._inventory.groups["lin"].hosts == [host_demo00, host_demo01, host_demosrv] # test group by label: - assert 'bench' in im._inventory.groups - assert 'foo' in im._inventory.groups - assert im._inventory.groups['bench'].hosts == [host_demo00, host_demo01, host_demosrv] - assert im._inventory.groups['serv'].hosts == [host_demosrv] - assert im._inventory.groups['foo'].hosts == [host_demo00, host_demo01] + assert "bench" in im._inventory.groups + assert "foo" in im._inventory.groups + assert im._inventory.groups["bench"].hosts == [host_demo00, host_demo01, host_demosrv] + assert im._inventory.groups["serv"].hosts == [host_demosrv] + assert im._inventory.groups["foo"].hosts == [host_demo00, host_demo01] # test `compose` transforms GUEST_OS=Linux to is_linux == True - assert host_demo00.get_vars()['GUEST_OS'] == 'linux' - assert host_demo00.get_vars()['is_linux'] is True + assert host_demo00.get_vars()["GUEST_OS"] == "linux" + assert host_demo00.get_vars()["is_linux"] is True # test `keyed_groups` - assert im._inventory.groups['tgroup_bench_clients'].hosts == [host_demo00, host_demo01] - assert im._inventory.groups['tgroup_bench_server'].hosts == [host_demosrv] + assert im._inventory.groups["tgroup_bench_clients"].hosts == [host_demo00, host_demo01] + assert im._inventory.groups["tgroup_bench_server"].hosts == [host_demosrv] def test_populate(inventory, mocker): @@ -354,35 +391,35 @@ def test_populate(inventory, mocker): 
inventory._populate() # get different hosts - host_sam = inventory.inventory.get_host('sam-691-sam') - host_zabbix = inventory.inventory.get_host('zabbix-327') - host_gitlab = inventory.inventory.get_host('gitlab-107') + host_sam = inventory.inventory.get_host("sam-691-sam") + host_zabbix = inventory.inventory.get_host("zabbix-327") + host_gitlab = inventory.inventory.get_host("gitlab-107") # test if groups exists - assert 'Gitlab' in inventory.inventory.groups - assert 'Centos' in inventory.inventory.groups - assert 'Oracle_Linux' in inventory.inventory.groups + assert "Gitlab" in inventory.inventory.groups + assert "Centos" in inventory.inventory.groups + assert "Oracle_Linux" in inventory.inventory.groups # check if host_zabbix is in Oracle_Linux group - group_oracle_linux = inventory.inventory.groups['Oracle_Linux'] + group_oracle_linux = inventory.inventory.groups["Oracle_Linux"] assert group_oracle_linux.hosts == [host_zabbix] # check if host_gitlab is in Gitlab and Centos group - group_gitlab = inventory.inventory.groups['Gitlab'] - group_centos = inventory.inventory.groups['Centos'] + group_gitlab = inventory.inventory.groups["Gitlab"] + group_centos = inventory.inventory.groups["Centos"] assert group_gitlab.hosts == [host_gitlab] assert group_centos.hosts == [host_gitlab] # check IPv4 address - assert '172.22.4.187' == host_sam.get_vars()['v4_first_ip'] + assert "172.22.4.187" == host_sam.get_vars()["v4_first_ip"] # check IPv6 address - assert '2000:a001::b9ff:feae:aa0d' == host_zabbix.get_vars()['v6_first_ip'] + assert "2000:a001::b9ff:feae:aa0d" == host_zabbix.get_vars()["v6_first_ip"] # check ansible_hosts - assert '172.22.4.187' == host_sam.get_vars()['ansible_host'] - assert '185.165.1.1' == host_zabbix.get_vars()['ansible_host'] - assert '185.165.1.3' == host_gitlab.get_vars()['ansible_host'] + assert "172.22.4.187" == host_sam.get_vars()["ansible_host"] + assert "185.165.1.1" == host_zabbix.get_vars()["ansible_host"] + assert "185.165.1.3" == host_gitlab.get_vars()["ansible_host"] # check for custom ssh port - assert '8822' == host_gitlab.get_vars()['ansible_port'] + assert "8822" == host_gitlab.get_vars()["ansible_port"] diff --git a/tests/unit/plugins/inventory/test_xen_orchestra.py b/tests/unit/plugins/inventory/test_xen_orchestra.py index 7ba6fe88891..199d454ea6b 100644 --- a/tests/unit/plugins/inventory/test_xen_orchestra.py +++ b/tests/unit/plugins/inventory/test_xen_orchestra.py @@ -12,129 +12,127 @@ from ansible_collections.community.general.plugins.inventory.xen_orchestra import InventoryModule objects = { - 'vms': { - '0e64588-2bea-2d82-e922-881654b0a48f': - { - 'type': 'VM', - 'addresses': {}, - 'CPUs': {'max': 4, 'number': 4}, - 'memory': {'dynamic': [1073741824, 2147483648], 'static': [536870912, 4294967296], 'size': 2147483648}, - 'name_description': '', - 'name_label': 'XCP-NG lab 2', - 'os_version': {}, - 'parent': 'd3af89b2-d846-0874-6acb-031ccf11c560', - 'power_state': 'Running', - 'tags': [], - 'id': '0e645898-2bea-2d82-e922-881654b0a48f', - 'uuid': '0e645898-2bea-2d82-e922-881654b0a48f', - '$pool': '3d315997-73bd-5a74-8ca7-289206cb03ab', - '$poolId': '3d315997-73bd-5a74-8ca7-289206cb03ab', - '$container': '222d8594-9426-468a-ad69-7a6f02330fa3' - }, - 'b0d25e70-019d-6182-2f7c-b0f5d8ef9331': - { - 'type': 'VM', - 'addresses': {'0/ipv4/0': '192.168.1.55', '1/ipv4/0': '10.0.90.1'}, - 'CPUs': {'max': 4, 'number': 4}, - 'mainIpAddress': '192.168.1.55', - 'memory': {'dynamic': [2147483648, 2147483648], 'static': [134217728, 2147483648], 'size': 2147483648}, - 
'name_description': '', - 'name_label': 'XCP-NG lab 3', - 'os_version': {'name': 'FreeBSD 11.3-STABLE', 'uname': '11.3-STABLE', 'distro': 'FreeBSD'}, - 'power_state': 'Halted', - 'tags': [], - 'id': 'b0d25e70-019d-6182-2f7c-b0f5d8ef9331', - 'uuid': 'b0d25e70-019d-6182-2f7c-b0f5d8ef9331', - '$pool': '3d315997-73bd-5a74-8ca7-289206cb03ab', - '$poolId': '3d315997-73bd-5a74-8ca7-289206cb03ab', - '$container': 'c96ec4dd-28ac-4df4-b73c-4371bd202728', - } + "vms": { + "0e64588-2bea-2d82-e922-881654b0a48f": { + "type": "VM", + "addresses": {}, + "CPUs": {"max": 4, "number": 4}, + "memory": {"dynamic": [1073741824, 2147483648], "static": [536870912, 4294967296], "size": 2147483648}, + "name_description": "", + "name_label": "XCP-NG lab 2", + "os_version": {}, + "parent": "d3af89b2-d846-0874-6acb-031ccf11c560", + "power_state": "Running", + "tags": [], + "id": "0e645898-2bea-2d82-e922-881654b0a48f", + "uuid": "0e645898-2bea-2d82-e922-881654b0a48f", + "$pool": "3d315997-73bd-5a74-8ca7-289206cb03ab", + "$poolId": "3d315997-73bd-5a74-8ca7-289206cb03ab", + "$container": "222d8594-9426-468a-ad69-7a6f02330fa3", + }, + "b0d25e70-019d-6182-2f7c-b0f5d8ef9331": { + "type": "VM", + "addresses": {"0/ipv4/0": "192.168.1.55", "1/ipv4/0": "10.0.90.1"}, + "CPUs": {"max": 4, "number": 4}, + "mainIpAddress": "192.168.1.55", + "memory": {"dynamic": [2147483648, 2147483648], "static": [134217728, 2147483648], "size": 2147483648}, + "name_description": "", + "name_label": "XCP-NG lab 3", + "os_version": {"name": "FreeBSD 11.3-STABLE", "uname": "11.3-STABLE", "distro": "FreeBSD"}, + "power_state": "Halted", + "tags": [], + "id": "b0d25e70-019d-6182-2f7c-b0f5d8ef9331", + "uuid": "b0d25e70-019d-6182-2f7c-b0f5d8ef9331", + "$pool": "3d315997-73bd-5a74-8ca7-289206cb03ab", + "$poolId": "3d315997-73bd-5a74-8ca7-289206cb03ab", + "$container": "c96ec4dd-28ac-4df4-b73c-4371bd202728", + }, }, - 'pools': { - '3d315997-73bd-5a74-8ca7-289206cb03ab': { - 'master': '222d8594-9426-468a-ad69-7a6f02330fa3', - 'tags': [], - 'name_description': '', - 'name_label': 'Storage Lab', - 'cpus': {'cores': 120, 'sockets': 6}, - 'id': '3d315997-73bd-5a74-8ca7-289206cb03ab', - 'type': 'pool', - 'uuid': '3d315997-73bd-5a74-8ca7-289206cb03ab', - '$pool': '3d315997-73bd-5a74-8ca7-289206cb03ab', - '$poolId': '3d315997-73bd-5a74-8ca7-289206cb03ab' + "pools": { + "3d315997-73bd-5a74-8ca7-289206cb03ab": { + "master": "222d8594-9426-468a-ad69-7a6f02330fa3", + "tags": [], + "name_description": "", + "name_label": "Storage Lab", + "cpus": {"cores": 120, "sockets": 6}, + "id": "3d315997-73bd-5a74-8ca7-289206cb03ab", + "type": "pool", + "uuid": "3d315997-73bd-5a74-8ca7-289206cb03ab", + "$pool": "3d315997-73bd-5a74-8ca7-289206cb03ab", + "$poolId": "3d315997-73bd-5a74-8ca7-289206cb03ab", } }, - 'hosts': { - 'c96ec4dd-28ac-4df4-b73c-4371bd202728': { - 'type': 'host', - 'uuid': 'c96ec4dd-28ac-4df4-b73c-4371bd202728', - 'enabled': True, - 'CPUs': { - 'cpu_count': '40', - 'socket_count': '2', - 'vendor': 'GenuineIntel', - 'speed': '1699.998', - 'modelname': 'Intel(R) Xeon(R) CPU E5-2650L v2 @ 1.70GHz', - 'family': '6', - 'model': '62', - 'stepping': '4' + "hosts": { + "c96ec4dd-28ac-4df4-b73c-4371bd202728": { + "type": "host", + "uuid": "c96ec4dd-28ac-4df4-b73c-4371bd202728", + "enabled": True, + "CPUs": { + "cpu_count": "40", + "socket_count": "2", + "vendor": "GenuineIntel", + "speed": "1699.998", + "modelname": "Intel(R) Xeon(R) CPU E5-2650L v2 @ 1.70GHz", + "family": "6", + "model": "62", + "stepping": "4", }, - 'address': '172.16.210.14', - 'build': 
'release/stockholm/master/7', - 'cpus': {'cores': 40, 'sockets': 2}, - 'hostname': 'r620-s1', - 'name_description': 'Default install', - 'name_label': 'R620-S1', - 'memory': {'usage': 45283590144, 'size': 137391292416}, - 'power_state': 'Running', - 'tags': [], - 'version': '8.2.0', - 'productBrand': 'XCP-ng', - 'id': 'c96ec4dd-28ac-4df4-b73c-4371bd202728', - '$pool': '3d315997-73bd-5a74-8ca7-289206cb03ab', - '$poolId': '3d315997-73bd-5a74-8ca7-289206cb03ab' + "address": "172.16.210.14", + "build": "release/stockholm/master/7", + "cpus": {"cores": 40, "sockets": 2}, + "hostname": "r620-s1", + "name_description": "Default install", + "name_label": "R620-S1", + "memory": {"usage": 45283590144, "size": 137391292416}, + "power_state": "Running", + "tags": [], + "version": "8.2.0", + "productBrand": "XCP-ng", + "id": "c96ec4dd-28ac-4df4-b73c-4371bd202728", + "$pool": "3d315997-73bd-5a74-8ca7-289206cb03ab", + "$poolId": "3d315997-73bd-5a74-8ca7-289206cb03ab", }, - '222d8594-9426-468a-ad69-7a6f02330fa3': { - 'type': 'host', - 'uuid': '222d8594-9426-468a-ad69-7a6f02330fa3', - 'enabled': True, - 'CPUs': { - 'cpu_count': '40', - 'socket_count': '2', - 'vendor': 'GenuineIntel', - 'speed': '1700.007', - 'modelname': 'Intel(R) Xeon(R) CPU E5-2650L v2 @ 1.70GHz', - 'family': '6', - 'model': '62', - 'stepping': '4' + "222d8594-9426-468a-ad69-7a6f02330fa3": { + "type": "host", + "uuid": "222d8594-9426-468a-ad69-7a6f02330fa3", + "enabled": True, + "CPUs": { + "cpu_count": "40", + "socket_count": "2", + "vendor": "GenuineIntel", + "speed": "1700.007", + "modelname": "Intel(R) Xeon(R) CPU E5-2650L v2 @ 1.70GHz", + "family": "6", + "model": "62", + "stepping": "4", }, - 'address': '172.16.210.16', - 'build': 'release/stockholm/master/7', - 'cpus': {'cores': 40, 'sockets': 2}, - 'hostname': 'r620-s2', - 'name_description': 'Default install', - 'name_label': 'R620-S2', - 'memory': {'usage': 10636521472, 'size': 137391292416}, - 'power_state': 'Running', - 'tags': ['foo', 'bar', 'baz'], - 'version': '8.2.0', - 'productBrand': 'XCP-ng', - 'id': '222d8594-9426-468a-ad69-7a6f02330fa3', - '$pool': '3d315997-73bd-5a74-8ca7-289206cb03ab', - '$poolId': '3d315997-73bd-5a74-8ca7-289206cb03ab' - } - } + "address": "172.16.210.16", + "build": "release/stockholm/master/7", + "cpus": {"cores": 40, "sockets": 2}, + "hostname": "r620-s2", + "name_description": "Default install", + "name_label": "R620-S2", + "memory": {"usage": 10636521472, "size": 137391292416}, + "power_state": "Running", + "tags": ["foo", "bar", "baz"], + "version": "8.2.0", + "productBrand": "XCP-ng", + "id": "222d8594-9426-468a-ad69-7a6f02330fa3", + "$pool": "3d315997-73bd-5a74-8ca7-289206cb03ab", + "$poolId": "3d315997-73bd-5a74-8ca7-289206cb03ab", + }, + }, } def get_option(option): - if option == 'groups': + if option == "groups": return {} - elif option == 'keyed_groups': + elif option == "keyed_groups": return [] - elif option == 'compose': + elif option == "compose": return {} - elif option == 'strict': + elif option == "strict": return False else: return None @@ -152,55 +150,57 @@ def inventory(): def test_verify_file_bad_config(inventory): - assert inventory.verify_file('foobar.xen_orchestra.yml') is False + assert inventory.verify_file("foobar.xen_orchestra.yml") is False def test_populate(inventory, mocker): - inventory.host_entry_name_type = 'uuid' - inventory.vm_entry_name_type = 'uuid' + inventory.host_entry_name_type = "uuid" + inventory.vm_entry_name_type = "uuid" inventory.get_option = mocker.MagicMock(side_effect=get_option) 
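# Aside (illustrative sketch, not part of the patch above): the
# MagicMock(side_effect=...) seen here routes every get_option() call through
# one plain callable instead of stacking many return_value mocks. A minimal,
# self-contained example of the same pattern; the option names are
# illustrative only.
from unittest.mock import MagicMock

def fake_get_option(option):
    defaults = {"groups": {}, "keyed_groups": [], "compose": {}, "strict": False}
    return defaults.get(option)

plugin = MagicMock()
plugin.get_option = MagicMock(side_effect=fake_get_option)
assert plugin.get_option("strict") is False
assert plugin.get_option("unknown") is None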
inventory._populate(objects) actual = sorted(inventory.inventory.hosts.keys()) - expected = sorted(['c96ec4dd-28ac-4df4-b73c-4371bd202728', '222d8594-9426-468a-ad69-7a6f02330fa3', - '0e64588-2bea-2d82-e922-881654b0a48f', 'b0d25e70-019d-6182-2f7c-b0f5d8ef9331']) + expected = sorted( + [ + "c96ec4dd-28ac-4df4-b73c-4371bd202728", + "222d8594-9426-468a-ad69-7a6f02330fa3", + "0e64588-2bea-2d82-e922-881654b0a48f", + "b0d25e70-019d-6182-2f7c-b0f5d8ef9331", + ] + ) assert actual == expected # Host with ip assertions - host_with_ip = inventory.inventory.get_host( - 'b0d25e70-019d-6182-2f7c-b0f5d8ef9331') + host_with_ip = inventory.inventory.get_host("b0d25e70-019d-6182-2f7c-b0f5d8ef9331") host_with_ip_vars = host_with_ip.vars - assert host_with_ip_vars['ansible_host'] == '192.168.1.55' - assert host_with_ip_vars['power_state'] == 'halted' - assert host_with_ip_vars['type'] == 'VM' + assert host_with_ip_vars["ansible_host"] == "192.168.1.55" + assert host_with_ip_vars["power_state"] == "halted" + assert host_with_ip_vars["type"] == "VM" - assert host_with_ip in inventory.inventory.groups['with_ip'].hosts + assert host_with_ip in inventory.inventory.groups["with_ip"].hosts # Host without ip - host_without_ip = inventory.inventory.get_host( - '0e64588-2bea-2d82-e922-881654b0a48f') + host_without_ip = inventory.inventory.get_host("0e64588-2bea-2d82-e922-881654b0a48f") host_without_ip_vars = host_without_ip.vars - assert host_without_ip_vars['ansible_host'] is None - assert host_without_ip_vars['power_state'] == 'running' + assert host_without_ip_vars["ansible_host"] is None + assert host_without_ip_vars["power_state"] == "running" - assert host_without_ip in inventory.inventory.groups['without_ip'].hosts + assert host_without_ip in inventory.inventory.groups["without_ip"].hosts - assert host_with_ip in inventory.inventory.groups['xo_host_r620_s1'].hosts - assert host_without_ip in inventory.inventory.groups['xo_host_r620_s2'].hosts + assert host_with_ip in inventory.inventory.groups["xo_host_r620_s1"].hosts + assert host_without_ip in inventory.inventory.groups["xo_host_r620_s2"].hosts - r620_s1 = inventory.inventory.get_host( - 'c96ec4dd-28ac-4df4-b73c-4371bd202728') - r620_s2 = inventory.inventory.get_host( - '222d8594-9426-468a-ad69-7a6f02330fa3') + r620_s1 = inventory.inventory.get_host("c96ec4dd-28ac-4df4-b73c-4371bd202728") + r620_s2 = inventory.inventory.get_host("222d8594-9426-468a-ad69-7a6f02330fa3") - assert r620_s1.vars['address'] == '172.16.210.14' - assert r620_s1.vars['tags'] == [] - assert r620_s2.vars['address'] == '172.16.210.16' - assert r620_s2.vars['tags'] == ['foo', 'bar', 'baz'] + assert r620_s1.vars["address"] == "172.16.210.14" + assert r620_s1.vars["tags"] == [] + assert r620_s2.vars["address"] == "172.16.210.16" + assert r620_s2.vars["tags"] == ["foo", "bar", "baz"] - storage_lab = inventory.inventory.groups['xo_pool_storage_lab'] + storage_lab = inventory.inventory.groups["xo_pool_storage_lab"] # Check that hosts are in their corresponding pool assert r620_s1 in storage_lab.hosts diff --git a/tests/unit/plugins/lookup/conftest.py b/tests/unit/plugins/lookup/conftest.py index fa4a5fb2a62..0c5192df4d0 100644 --- a/tests/unit/plugins/lookup/conftest.py +++ b/tests/unit/plugins/lookup/conftest.py @@ -12,7 +12,10 @@ @pytest.fixture def fake_op(mocker): def _fake_op(version): - mocker.patch("ansible_collections.community.general.plugins.lookup.onepassword.OnePassCLIBase.get_current_version", return_value=version) + mocker.patch( + 
"ansible_collections.community.general.plugins.lookup.onepassword.OnePassCLIBase.get_current_version", + return_value=version, + ) op = OnePass() op._config._config_file_path = "/home/jin/.op/config" mocker.patch.object(op._cli, "_run") diff --git a/tests/unit/plugins/lookup/onepassword_common.py b/tests/unit/plugins/lookup/onepassword_common.py index e9f76777d97..bb83d82b4d0 100644 --- a/tests/unit/plugins/lookup/onepassword_common.py +++ b/tests/unit/plugins/lookup/onepassword_common.py @@ -22,31 +22,22 @@ def load_file(file): MOCK_ENTRIES = { OnePassCLIv1: [ { - 'vault_name': 'Acme "Quot\'d" Servers', - 'queries': [ - '0123456789', - 'Mock "Quot\'d" Server' - ], - 'expected': ['t0pS3cret', 't0pS3cret'], - 'output': load_file("v1_out_01.json"), + "vault_name": 'Acme "Quot\'d" Servers', + "queries": ["0123456789", 'Mock "Quot\'d" Server'], + "expected": ["t0pS3cret", "t0pS3cret"], + "output": load_file("v1_out_01.json"), }, { - 'vault_name': 'Acme Logins', - 'queries': [ - '9876543210', - 'Mock Website', - 'acme.com' - ], - 'expected': ['t0pS3cret', 't0pS3cret', 't0pS3cret'], - 'output': load_file("v1_out_02.json"), + "vault_name": "Acme Logins", + "queries": ["9876543210", "Mock Website", "acme.com"], + "expected": ["t0pS3cret", "t0pS3cret", "t0pS3cret"], + "output": load_file("v1_out_02.json"), }, { - 'vault_name': 'Acme Logins', - 'queries': [ - '864201357' - ], - 'expected': ['vauxhall'], - 'output': load_file("v1_out_03.json"), + "vault_name": "Acme Logins", + "queries": ["864201357"], + "expected": ["vauxhall"], + "output": load_file("v1_out_03.json"), }, ], OnePassCLIv2: [ @@ -67,7 +58,7 @@ def load_file(file): "field": "password1", }, "expected": ["data in custom field"], - "output": load_file("v2_out_02.json") + "output": load_file("v2_out_02.json"), }, { # Request data from a custom section @@ -78,7 +69,7 @@ def load_file(file): "section": "Section 2", }, "expected": ["first value"], - "output": load_file("v2_out_03.json") + "output": load_file("v2_out_03.json"), }, { # Request data from an omitted value (label lookup, no section) @@ -88,7 +79,7 @@ def load_file(file): "field": "label-without-value", }, "expected": [""], - "output": load_file("v2_out_04.json") + "output": load_file("v2_out_04.json"), }, { # Request data from an omitted value (id lookup, no section) @@ -98,18 +89,15 @@ def load_file(file): "field": "67890q7mspf4x6zrlw3qejn7m", }, "expected": [""], - "output": load_file("v2_out_04.json") + "output": load_file("v2_out_04.json"), }, { # Request data from an omitted value (label lookup, with section) "vault_name": "Test Vault", "queries": ["Omitted values"], - "kwargs": { - "field": "section-label-without-value", - "section": "Section-Without-Values" - }, + "kwargs": {"field": "section-label-without-value", "section": "Section-Without-Values"}, "expected": [""], - "output": load_file("v2_out_04.json") + "output": load_file("v2_out_04.json"), }, { # Request data from an omitted value (id lookup, with section) @@ -120,7 +108,7 @@ def load_file(file): "section": "section-without-values", }, "expected": [""], - "output": load_file("v2_out_04.json") + "output": load_file("v2_out_04.json"), }, { # Query item without section by lowercase id (case matching) @@ -130,7 +118,7 @@ def load_file(file): "field": "lowercaseid", }, "expected": ["lowercaseid"], - "output": load_file("v2_out_05.json") + "output": load_file("v2_out_05.json"), }, { # Query item without section by lowercase id (case not matching) @@ -140,7 +128,7 @@ def load_file(file): "field": "LOWERCASEID", }, 
"expected": ["lowercaseid"], - "output": load_file("v2_out_05.json") + "output": load_file("v2_out_05.json"), }, { # Query item without section by lowercase label (case matching) @@ -150,7 +138,7 @@ def load_file(file): "field": "lowercaselabel", }, "expected": ["lowercaselabel"], - "output": load_file("v2_out_05.json") + "output": load_file("v2_out_05.json"), }, { # Query item without section by lowercase label (case not matching) @@ -160,7 +148,7 @@ def load_file(file): "field": "LOWERCASELABEL", }, "expected": ["lowercaselabel"], - "output": load_file("v2_out_05.json") + "output": load_file("v2_out_05.json"), }, { # Query item without section by mixed case id (case matching) @@ -170,7 +158,7 @@ def load_file(file): "field": "MiXeDcAsEiD", }, "expected": ["mixedcaseid"], - "output": load_file("v2_out_05.json") + "output": load_file("v2_out_05.json"), }, { # Query item without section by mixed case id (case not matching) @@ -180,7 +168,7 @@ def load_file(file): "field": "mixedcaseid", }, "expected": ["mixedcaseid"], - "output": load_file("v2_out_05.json") + "output": load_file("v2_out_05.json"), }, { # Query item without section by mixed case label (case matching) @@ -190,7 +178,7 @@ def load_file(file): "field": "MiXeDcAsElAbEl", }, "expected": ["mixedcaselabel"], - "output": load_file("v2_out_05.json") + "output": load_file("v2_out_05.json"), }, { # Query item without section by mixed case label (case not matching) @@ -200,7 +188,7 @@ def load_file(file): "field": "mixedcaselabel", }, "expected": ["mixedcaselabel"], - "output": load_file("v2_out_05.json") + "output": load_file("v2_out_05.json"), }, { # Query item with section by lowercase id (case matching) @@ -211,7 +199,7 @@ def load_file(file): "section": "section-with-values", }, "expected": ["sectionlowercaseid"], - "output": load_file("v2_out_05.json") + "output": load_file("v2_out_05.json"), }, { # Query item with section by lowercase id (case not matching) @@ -222,7 +210,7 @@ def load_file(file): "section": "section-with-values", }, "expected": ["sectionlowercaseid"], - "output": load_file("v2_out_05.json") + "output": load_file("v2_out_05.json"), }, { # Query item with section by lowercase label (case matching) @@ -233,7 +221,7 @@ def load_file(file): "section": "section-with-values", }, "expected": ["sectionlowercaselabel"], - "output": load_file("v2_out_05.json") + "output": load_file("v2_out_05.json"), }, { # Query item with section by lowercase label (case not matching) @@ -244,7 +232,7 @@ def load_file(file): "section": "section-with-values", }, "expected": ["sectionlowercaselabel"], - "output": load_file("v2_out_05.json") + "output": load_file("v2_out_05.json"), }, { # Query item with section by lowercase id (case matching) @@ -255,7 +243,7 @@ def load_file(file): "section": "section-with-values", }, "expected": ["sectionmixedcaseid"], - "output": load_file("v2_out_05.json") + "output": load_file("v2_out_05.json"), }, { # Query item with section by lowercase id (case not matching) @@ -266,7 +254,7 @@ def load_file(file): "section": "section-with-values", }, "expected": ["sectionmixedcaseid"], - "output": load_file("v2_out_05.json") + "output": load_file("v2_out_05.json"), }, { # Query item with section by lowercase label (case matching) @@ -277,7 +265,7 @@ def load_file(file): "section": "section-with-values", }, "expected": ["sectionmixedcaselabel"], - "output": load_file("v2_out_05.json") + "output": load_file("v2_out_05.json"), }, { # Query item with section by lowercase label (case not matching) @@ -288,7 +276,7 @@ 
def load_file(file):
                 "section": "section-with-values",
             },
             "expected": ["sectionmixedcaselabel"],
-            "output": load_file("v2_out_05.json")
+            "output": load_file("v2_out_05.json"),
         },
     ],
 }
@@ -298,9 +286,7 @@ def load_file(file):
     {
         "vault_name": "Personal",
         "queries": ["ssh key"],
-        "expected": [
-            "-----BEGIN PRIVATE KEY-----\n..........=\n-----END PRIVATE KEY-----\n"
-        ],
+        "expected": ["-----BEGIN PRIVATE KEY-----\n..........=\n-----END PRIVATE KEY-----\n"],
         "output": load_file("ssh_key_output.json"),
     },
     # loads private key in PKCS#8 format because ssh_format=false
@@ -310,9 +296,7 @@ def load_file(file):
         "kwargs": {
             "ssh_format": False,
         },
-        "expected": [
-            "-----BEGIN PRIVATE KEY-----\n..........=\n-----END PRIVATE KEY-----\n"
-        ],
+        "expected": ["-----BEGIN PRIVATE KEY-----\n..........=\n-----END PRIVATE KEY-----\n"],
         "output": load_file("ssh_key_output.json"),
     },
     # loads private key in ssh format
@@ -322,9 +306,7 @@ def load_file(file):
         "kwargs": {
             "ssh_format": True,
         },
-        "expected": [
-            "-----BEGIN OPENSSH PRIVATE KEY-----\r\n.....\r\n-----END OPENSSH PRIVATE KEY-----\r\n"
-        ],
+        "expected": ["-----BEGIN OPENSSH PRIVATE KEY-----\r\n.....\r\n-----END OPENSSH PRIVATE KEY-----\r\n"],
         "output": load_file("ssh_key_output.json"),
     },
 ]
diff --git a/tests/unit/plugins/lookup/test_bitwarden.py b/tests/unit/plugins/lookup/test_bitwarden.py
index a038405418a..18e0e14c8a8 100644
--- a/tests/unit/plugins/lookup/test_bitwarden.py
+++ b/tests/unit/plugins/lookup/test_bitwarden.py
@@ -18,24 +18,12 @@
 MOCK_RECORDS = [
     {
-        "collectionIds": [
-            MOCK_COLLECTION_ID
-        ],
+        "collectionIds": [MOCK_COLLECTION_ID],
         "deletedDate": None,
         "favorite": False,
         "fields": [
-            {
-                "linkedId": None,
-                "name": "a_new_secret",
-                "type": 1,
-                "value": "this is a new secret"
-            },
-            {
-                "linkedId": None,
-                "name": "not so secret",
-                "type": 0,
-                "value": "not secret"
-            }
+            {"linkedId": None, "name": "a_new_secret", "type": 1, "value": "this is a new secret"},
+            {"linkedId": None, "name": "not so secret", "type": 0, "value": "not secret"},
         ],
         "folderId": "3b12a9da-7c49-40b8-ad33-aede017a7ead",
         "id": "90992f63-ddb6-4e76-8bfc-aede016ca5eb",
@@ -43,29 +31,20 @@
             "password": "passwordA3",
             "passwordRevisionDate": "2022-07-26T23:03:23.399Z",
             "totp": None,
-            "username": "userA"
+            "username": "userA",
         },
         "name": "a_test",
         "notes": None,
         "object": "item",
         "organizationId": MOCK_ORGANIZATION_ID,
         "passwordHistory": [
-            {
-                "lastUsedDate": "2022-07-26T23:03:23.405Z",
-                "password": "a_new_secret: this is secret"
-            },
-            {
-                "lastUsedDate": "2022-07-26T23:03:23.399Z",
-                "password": "passwordA2"
-            },
-            {
-                "lastUsedDate": "2022-07-26T22:59:52.885Z",
-                "password": "passwordA"
-            }
+            {"lastUsedDate": "2022-07-26T23:03:23.405Z", "password": "a_new_secret: this is secret"},
+            {"lastUsedDate": "2022-07-26T23:03:23.399Z", "password": "passwordA2"},
+            {"lastUsedDate": "2022-07-26T22:59:52.885Z", "password": "passwordA"},
         ],
         "reprompt": 0,
         "revisionDate": "2022-07-26T23:03:23.743Z",
-        "type": 1
+        "type": 1,
     },
     {
         "collectionIds": [],
@@ -73,41 +52,29 @@
         "favorite": False,
         "folderId": None,
         "id": "5ebd4d31-104c-49fc-a09c-aedf003d28ad",
-        "login": {
-            "password": "b",
-            "passwordRevisionDate": None,
-            "totp": None,
-            "username": "a"
-        },
+        "login": {"password": "b", "passwordRevisionDate": None, "totp": None, "username": "a"},
         "name": "dupe_name",
         "notes": None,
         "object": "item",
         "organizationId": None,
         "reprompt": 0,
         "revisionDate": "2022-07-27T03:42:40.353Z",
-        "type": 1
+        "type": 1,
     },
     {
-        "collectionIds": [
-            MOCK_COLLECTION_ID
-        ],
+
"collectionIds": [MOCK_COLLECTION_ID], "deletedDate": None, "favorite": False, "folderId": None, "id": "90657653-6695-496d-9431-aedf003d3015", - "login": { - "password": "d", - "passwordRevisionDate": None, - "totp": None, - "username": "c" - }, + "login": {"password": "d", "passwordRevisionDate": None, "totp": None, "username": "c"}, "name": "dupe_name", "notes": None, "object": "item", "organizationId": MOCK_ORGANIZATION_ID, "reprompt": 0, "revisionDate": "2022-07-27T03:42:46.673Z", - "type": 1 + "type": 1, }, { "collectionIds": [], @@ -115,153 +82,142 @@ "favorite": False, "folderId": None, "id": "2bf517be-fb13-11ee-be89-a345aa369a94", - "login": { - "password": "e", - "passwordRevisionDate": None, - "totp": None, - "username": "f" - }, + "login": {"password": "e", "passwordRevisionDate": None, "totp": None, "username": "f"}, "name": "non_collection_org_record", "notes": None, "object": "item", "organizationId": MOCK_ORGANIZATION_ID, "reprompt": 0, "revisionDate": "2024-14-15T11:30:00.000Z", - "type": 1 + "type": 1, }, { "object": "collection", "id": MOCK_COLLECTION_ID, "organizationId": MOCK_ORGANIZATION_ID, "name": "MOCK_COLLECTION", - "externalId": None + "externalId": None, }, { "object": "collection", "id": "3b12a9da-7c49-40b8-ad33-aede017a8ead", "organizationId": "3b12a9da-7c49-40b8-ad33-aede017a9ead", "name": "some/other/collection", - "externalId": None + "externalId": None, }, ] class MockBitwarden(Bitwarden): - unlocked = True def _run(self, args, stdin=None, expected_rc=0): - if args[0] == 'get': - if args[1] == 'item': + if args[0] == "get": + if args[1] == "item": for item in MOCK_RECORDS: - if item.get('id') == args[2]: - return AnsibleJSONEncoder().encode(item), '' - if args[0] == 'list': - if args[1] == 'items': + if item.get("id") == args[2]: + return AnsibleJSONEncoder().encode(item), "" + if args[0] == "list": + if args[1] == "items": try: - search_value = args[args.index('--search') + 1] + search_value = args[args.index("--search") + 1] except ValueError: search_value = None try: - collection_to_filter = args[args.index('--collectionid') + 1] + collection_to_filter = args[args.index("--collectionid") + 1] except ValueError: collection_to_filter = None try: - organization_to_filter = args[args.index('--organizationid') + 1] + organization_to_filter = args[args.index("--organizationid") + 1] except ValueError: organization_to_filter = None items = [] for item in MOCK_RECORDS: - if item.get('object') != 'item': + if item.get("object") != "item": continue - if search_value and not re.search(search_value, item.get('name')): + if search_value and not re.search(search_value, item.get("name")): continue - if collection_to_filter and collection_to_filter not in item.get('collectionIds', []): + if collection_to_filter and collection_to_filter not in item.get("collectionIds", []): continue - if organization_to_filter and item.get('organizationId') != organization_to_filter: + if organization_to_filter and item.get("organizationId") != organization_to_filter: continue items.append(item) - return AnsibleJSONEncoder().encode(items), '' - elif args[1] == 'collections': + return AnsibleJSONEncoder().encode(items), "" + elif args[1] == "collections": try: - search_value = args[args.index('--search') + 1] + search_value = args[args.index("--search") + 1] except ValueError: search_value = None try: - collection_to_filter = args[args.index('--collectionid') + 1] + collection_to_filter = args[args.index("--collectionid") + 1] except ValueError: collection_to_filter = None try: - 
organization_to_filter = args[args.index('--organizationid') + 1] + organization_to_filter = args[args.index("--organizationid") + 1] except ValueError: organization_to_filter = None collections = [] for item in MOCK_RECORDS: - if item.get('object') != 'collection': + if item.get("object") != "collection": continue - if search_value and not re.search(search_value, item.get('name')): + if search_value and not re.search(search_value, item.get("name")): continue - if collection_to_filter and collection_to_filter not in item.get('collectionIds', []): + if collection_to_filter and collection_to_filter not in item.get("collectionIds", []): continue - if organization_to_filter and item.get('organizationId') != organization_to_filter: + if organization_to_filter and item.get("organizationId") != organization_to_filter: continue collections.append(item) - return AnsibleJSONEncoder().encode(collections), '' + return AnsibleJSONEncoder().encode(collections), "" - return '[]', '' + return "[]", "" class LoggedOutMockBitwarden(MockBitwarden): - unlocked = False class TestLookupModule(unittest.TestCase): - def setUp(self): - self.lookup = lookup_loader.get('community.general.bitwarden') + self.lookup = lookup_loader.get("community.general.bitwarden") - @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden()) + @patch("ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden", new=MockBitwarden()) def test_bitwarden_plugin_no_match(self): # Entry 0, "a_test" of the test input should have no duplicates. - self.assertEqual([], self.lookup.run(['not_here'], field='password')[0]) + self.assertEqual([], self.lookup.run(["not_here"], field="password")[0]) - @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden()) + @patch("ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden", new=MockBitwarden()) def test_bitwarden_plugin_fields(self): # Entry 0, "a_test" of the test input should have no duplicates. record = MOCK_RECORDS[0] - record_name = record['name'] - for k, v in record['login'].items(): - self.assertEqual([v], - self.lookup.run([record_name], field=k)[0]) + record_name = record["name"] + for k, v in record["login"].items(): + self.assertEqual([v], self.lookup.run([record_name], field=k)[0]) - @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden()) + @patch("ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden", new=MockBitwarden()) def test_bitwarden_plugin_duplicates(self): # There are two records with name dupe_name; we need to be order-insensitive with # checking what was retrieved. - self.assertEqual(set(['b', 'd']), - set(self.lookup.run(['dupe_name'], field='password')[0])) + self.assertEqual(set(["b", "d"]), set(self.lookup.run(["dupe_name"], field="password")[0])) - @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden()) + @patch("ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden", new=MockBitwarden()) def test_bitwarden_plugin_full_item(self): # Try to retrieve the full record of the first entry where the name is "a_name". 
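# Aside (illustrative sketch, not part of the patch above): the repeated
# try/except ValueError blocks in MockBitwarden._run implement "find the value
# following an optional flag in argv". The same idea as one standard-library
# helper; the function name is made up for illustration.
def flag_value(args, flag):
    """Return the token following *flag* in args, or None when absent."""
    try:
        return args[args.index(flag) + 1]
    except ValueError:
        return None

assert flag_value(["list", "items", "--search", "dupe"], "--search") == "dupe"
assert flag_value(["list", "items"], "--collectionid") is None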
- self.assertEqual([MOCK_RECORDS[0]], - self.lookup.run(['a_test'])[0]) + self.assertEqual([MOCK_RECORDS[0]], self.lookup.run(["a_test"])[0]) - @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', LoggedOutMockBitwarden()) + @patch("ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden", LoggedOutMockBitwarden()) def test_bitwarden_plugin_unlocked(self): record = MOCK_RECORDS[0] - record_name = record['name'] + record_name = record["name"] with self.assertRaises(AnsibleError) as raised_error: - self.lookup.run([record_name], field='password') + self.lookup.run([record_name], field="password") self.assertEqual("Bitwarden Vault locked. Run 'bw unlock'.", str(raised_error.exception)) @@ -269,8 +225,8 @@ def test_bitwarden_plugin_without_session_option(self): mock_bitwarden = MockBitwarden() with patch("ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden", mock_bitwarden): record = MOCK_RECORDS[0] - record_name = record['name'] - session = 'session' + record_name = record["name"] + session = "session" self.lookup.run([record_name], field=None) self.assertIsNone(mock_bitwarden.session) @@ -279,37 +235,41 @@ def test_bitwarden_plugin_session_option(self): mock_bitwarden = MockBitwarden() with patch("ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden", mock_bitwarden): record = MOCK_RECORDS[0] - record_name = record['name'] - session = 'session' + record_name = record["name"] + session = "session" self.lookup.run([record_name], field=None, bw_session=session) self.assertEqual(mock_bitwarden.session, session) - @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden()) + @patch("ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden", new=MockBitwarden()) def test_bitwarden_plugin_full_collection(self): # Try to retrieve the full records of the given collection. 
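# Aside (illustrative sketch, not part of the patch above): these tests swap
# the module-level _bitwarden singleton with @patch(..., new=MockBitwarden()),
# so the lookup plugin under test talks to the mock for exactly one test.
# A minimal standalone demonstration of that technique on a throwaway module:
import types
from unittest.mock import patch

fake_module = types.ModuleType("clientmod")  # hypothetical stand-in module
fake_module.client = "real"

with patch.object(fake_module, "client", new="mock"):
    assert fake_module.client == "mock"
assert fake_module.client == "real"  # patch restores the original on exit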
self.assertEqual([MOCK_RECORDS[0], MOCK_RECORDS[2]], self.lookup.run(None, collection_id=MOCK_COLLECTION_ID)[0]) - @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden()) + @patch("ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden", new=MockBitwarden()) def test_bitwarden_plugin_full_organization(self): - self.assertEqual([MOCK_RECORDS[0], MOCK_RECORDS[2], MOCK_RECORDS[3]], - self.lookup.run(None, organization_id=MOCK_ORGANIZATION_ID)[0]) + self.assertEqual( + [MOCK_RECORDS[0], MOCK_RECORDS[2], MOCK_RECORDS[3]], + self.lookup.run(None, organization_id=MOCK_ORGANIZATION_ID)[0], + ) - @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden()) + @patch("ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden", new=MockBitwarden()) def test_bitwarden_plugin_filter_organization(self): - self.assertEqual([MOCK_RECORDS[2]], - self.lookup.run(['dupe_name'], organization_id=MOCK_ORGANIZATION_ID)[0]) + self.assertEqual([MOCK_RECORDS[2]], self.lookup.run(["dupe_name"], organization_id=MOCK_ORGANIZATION_ID)[0]) - @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden()) + @patch("ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden", new=MockBitwarden()) def test_bitwarden_plugin_full_collection_organization(self): - self.assertEqual([MOCK_RECORDS[0], MOCK_RECORDS[2]], self.lookup.run(None, - collection_id=MOCK_COLLECTION_ID, organization_id=MOCK_ORGANIZATION_ID)[0]) + self.assertEqual( + [MOCK_RECORDS[0], MOCK_RECORDS[2]], + self.lookup.run(None, collection_id=MOCK_COLLECTION_ID, organization_id=MOCK_ORGANIZATION_ID)[0], + ) - @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden()) + @patch("ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden", new=MockBitwarden()) def test_bitwarden_plugin_collection_name_filter(self): # all passwords from MOCK_COLLECTION - self.assertEqual([MOCK_RECORDS[0], MOCK_RECORDS[2]], self.lookup.run(None, - collection_name="MOCK_COLLECTION")[0]) + self.assertEqual( + [MOCK_RECORDS[0], MOCK_RECORDS[2]], self.lookup.run(None, collection_name="MOCK_COLLECTION")[0] + ) # Existing collection, no results self.assertEqual([], self.lookup.run(None, collection_name="some/other/collection")[0]) @@ -317,12 +277,13 @@ def test_bitwarden_plugin_collection_name_filter(self): with self.assertRaises(BitwardenException): self.lookup.run(None, collection_name="nonexistent") - @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden()) + @patch("ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden", new=MockBitwarden()) def test_bitwarden_plugin_result_count_check(self): self.lookup.run(None, collection_id=MOCK_COLLECTION_ID, organization_id=MOCK_ORGANIZATION_ID, result_count=2) with self.assertRaises(BitwardenException): - self.lookup.run(None, collection_id=MOCK_COLLECTION_ID, organization_id=MOCK_ORGANIZATION_ID, - result_count=1) + self.lookup.run( + None, collection_id=MOCK_COLLECTION_ID, organization_id=MOCK_ORGANIZATION_ID, result_count=1 + ) self.lookup.run(None, organization_id=MOCK_ORGANIZATION_ID, result_count=3) with self.assertRaises(BitwardenException): diff --git a/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py b/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py index 064b6527a84..a46b63d4a84 100644 --- 
a/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py +++ b/tests/unit/plugins/lookup/test_bitwarden_secrets_manager.py @@ -23,7 +23,7 @@ "value": "1234supersecret5678", "note": "A test secret to use when developing the ansible bitwarden_secrets_manager lookup plugin", "creationDate": "2023-04-23T13:13:37.7507017Z", - "revisionDate": "2023-04-23T13:13:37.7507017Z" + "revisionDate": "2023-04-23T13:13:37.7507017Z", }, { "object": "secret", @@ -34,13 +34,12 @@ "value": "abcd_such_secret_very_important_efgh", "note": "notes go here", "creationDate": "2023-04-23T13:26:44.0392906Z", - "revisionDate": "2023-04-23T13:26:44.0392906Z" - } + "revisionDate": "2023-04-23T13:26:44.0392906Z", + }, ] class MockBitwardenSecretsManager(BitwardenSecretsManager): - def _run(self, args, stdin=None): # mock the --version call if args[0] == "--version": @@ -68,17 +67,24 @@ def _run(self, args, stdin=None): class TestLookupModule(unittest.TestCase): - def setUp(self): - self.lookup = lookup_loader.get('community.general.bitwarden_secrets_manager') + self.lookup = lookup_loader.get("community.general.bitwarden_secrets_manager") - @patch('ansible_collections.community.general.plugins.lookup.bitwarden_secrets_manager._bitwarden_secrets_manager', new=MockBitwardenSecretsManager()) + @patch( + "ansible_collections.community.general.plugins.lookup.bitwarden_secrets_manager._bitwarden_secrets_manager", + new=MockBitwardenSecretsManager(), + ) def test_bitwarden_secrets_manager(self): # Getting a secret by its id should return the full secret info - self.assertEqual([MOCK_SECRETS[0]], self.lookup.run(['ababc4a8-c242-4e54-bceb-77d17cdf2e07'], bws_access_token='123')) - - @patch('ansible_collections.community.general.plugins.lookup.bitwarden_secrets_manager._bitwarden_secrets_manager', new=MockBitwardenSecretsManager()) + self.assertEqual( + [MOCK_SECRETS[0]], self.lookup.run(["ababc4a8-c242-4e54-bceb-77d17cdf2e07"], bws_access_token="123") + ) + + @patch( + "ansible_collections.community.general.plugins.lookup.bitwarden_secrets_manager._bitwarden_secrets_manager", + new=MockBitwardenSecretsManager(), + ) def test_bitwarden_secrets_manager_no_match(self): # Getting a nonexistent secret id throws exception with self.assertRaises(AnsibleLookupError): - self.lookup.run(['nonexistant_id'], bws_access_token='123') + self.lookup.run(["nonexistant_id"], bws_access_token="123") diff --git a/tests/unit/plugins/lookup/test_dependent.py b/tests/unit/plugins/lookup/test_dependent.py index 0934feb2c6e..cbd045d2dfb 100644 --- a/tests/unit/plugins/lookup/test_dependent.py +++ b/tests/unit/plugins/lookup/test_dependent.py @@ -25,16 +25,16 @@ def test_simple(self): self.assertListEqual( self.lookup.run( [ - {'a': make_trusted('[1, 2]')}, - {'b': make_trusted('[item.a + 3, item.a + 6]')}, - {'c': make_trusted('[item.a + item.b * 10]')}, + {"a": make_trusted("[1, 2]")}, + {"b": make_trusted("[item.a + 3, item.a + 6]")}, + {"c": make_trusted("[item.a + item.b * 10]")}, ], {}, ), [ - {'a': 1, 'b': 4, 'c': 41}, - {'a': 1, 'b': 7, 'c': 71}, - {'a': 2, 'b': 5, 'c': 52}, - {'a': 2, 'b': 8, 'c': 82}, + {"a": 1, "b": 4, "c": 41}, + {"a": 1, "b": 7, "c": 71}, + {"a": 2, "b": 5, "c": 52}, + {"a": 2, "b": 8, "c": 82}, ], ) diff --git a/tests/unit/plugins/lookup/test_dsv.py b/tests/unit/plugins/lookup/test_dsv.py index 965f8307202..af69bda4892 100644 --- a/tests/unit/plugins/lookup/test_dsv.py +++ b/tests/unit/plugins/lookup/test_dsv.py @@ -37,6 +37,10 @@ def test_get_secret_json(self): self.lookup.run( ["/dummy"], [], - **{"tenant": "dummy", 
"client_id": "dummy", "client_secret": "dummy", } + **{ + "tenant": "dummy", + "client_id": "dummy", + "client_secret": "dummy", + }, ), ) diff --git a/tests/unit/plugins/lookup/test_etcd3.py b/tests/unit/plugins/lookup/test_etcd3.py index d7f5a66b977..e237e56d7d5 100644 --- a/tests/unit/plugins/lookup/test_etcd3.py +++ b/tests/unit/plugins/lookup/test_etcd3.py @@ -13,42 +13,39 @@ class FakeKVMetadata: - def __init__(self, keyvalue, header): self.key = keyvalue - self.create_revision = '' - self.mod_revision = '' - self.version = '' - self.lease_id = '' + self.create_revision = "" + self.mod_revision = "" + self.version = "" + self.lease_id = "" self.response_header = header class FakeEtcd3Client(MagicMock): - def get_prefix(self, key): for i in range(1, 4): - yield self.get(f'{key}_{i}') + yield self.get(f"{key}_{i}") def get(self, key): return (f"{key} value", FakeKVMetadata(key, None)) class TestLookupModule(unittest.TestCase): - def setUp(self): etcd3.HAS_ETCD = True - self.lookup = lookup_loader.get('community.general.etcd3') + self.lookup = lookup_loader.get("community.general.etcd3") - @patch('ansible_collections.community.general.plugins.lookup.etcd3.etcd3_client', FakeEtcd3Client()) + @patch("ansible_collections.community.general.plugins.lookup.etcd3.etcd3_client", FakeEtcd3Client()) def test_key(self): - expected_result = [{'key': 'a_key', 'value': 'a_key value'}] - self.assertListEqual(expected_result, self.lookup.run(['a_key'], [])) + expected_result = [{"key": "a_key", "value": "a_key value"}] + self.assertListEqual(expected_result, self.lookup.run(["a_key"], [])) - @patch('ansible_collections.community.general.plugins.lookup.etcd3.etcd3_client', FakeEtcd3Client()) + @patch("ansible_collections.community.general.plugins.lookup.etcd3.etcd3_client", FakeEtcd3Client()) def test_key_prefix(self): expected_result = [ - {'key': 'a_key_1', 'value': 'a_key_1 value'}, - {'key': 'a_key_2', 'value': 'a_key_2 value'}, - {'key': 'a_key_3', 'value': 'a_key_3 value'}, + {"key": "a_key_1", "value": "a_key_1 value"}, + {"key": "a_key_2", "value": "a_key_2 value"}, + {"key": "a_key_3", "value": "a_key_3 value"}, ] - self.assertListEqual(expected_result, self.lookup.run(['a_key'], [], **{'prefix': True})) + self.assertListEqual(expected_result, self.lookup.run(["a_key"], [], **{"prefix": True})) diff --git a/tests/unit/plugins/lookup/test_github_app_access_token.py b/tests/unit/plugins/lookup/test_github_app_access_token.py index 8516c305554..cfbbaac23c5 100644 --- a/tests/unit/plugins/lookup/test_github_app_access_token.py +++ b/tests/unit/plugins/lookup/test_github_app_access_token.py @@ -16,8 +16,8 @@ from ansible.plugins.loader import lookup_loader -ENCODE_RESULT = 'Foobar' -PRIVATE_KEY = 'private_key' +ENCODE_RESULT = "Foobar" +PRIVATE_KEY = "private_key" class MockJWT(MagicMock): @@ -31,101 +31,97 @@ def load_pem_private_key(self, key_bytes, password): class MockResponse(MagicMock): - response_token = 'Bar' + response_token = "Bar" def read(self): - return json.dumps({ - "token": self.response_token, - }).encode('utf-8') + return json.dumps( + { + "token": self.response_token, + } + ).encode("utf-8") class TestLookupModule(unittest.TestCase): def test_get_token_with_file_with_pyjwt(self): pyjwt = types.ModuleType("jwt") pyjwt.encode = MagicMock(return_value=ENCODE_RESULT) - with patch.dict(sys.modules, {'jwt': pyjwt}), \ - patch.multiple("ansible_collections.community.general.plugins.lookup.github_app_access_token", - open=mock_open(read_data="foo_bar"), - 
open_url=MagicMock(return_value=MockResponse()), - HAS_JWT=True, - HAS_CRYPTOGRAPHY=True, - serialization=serialization()): - - lookup = lookup_loader.get('community.general.github_app_access_token') + with ( + patch.dict(sys.modules, {"jwt": pyjwt}), + patch.multiple( + "ansible_collections.community.general.plugins.lookup.github_app_access_token", + open=mock_open(read_data="foo_bar"), + open_url=MagicMock(return_value=MockResponse()), + HAS_JWT=True, + HAS_CRYPTOGRAPHY=True, + serialization=serialization(), + ), + ): + lookup = lookup_loader.get("community.general.github_app_access_token") self.assertListEqual( [MockResponse.response_token], - lookup.run( - [], - key_path="key", - app_id="app_id", - installation_id="installation_id", - token_expiry=600 - ) + lookup.run([], key_path="key", app_id="app_id", installation_id="installation_id", token_expiry=600), ) def test_get_token_with_fact_with_pyjwt(self): pyjwt = types.ModuleType("jwt") pyjwt.encode = MagicMock(return_value=ENCODE_RESULT) - with patch.dict(sys.modules, {'jwt': pyjwt}), \ - patch.multiple("ansible_collections.community.general.plugins.lookup.github_app_access_token", - open=mock_open(read_data="foo_bar"), - open_url=MagicMock(return_value=MockResponse()), - HAS_JWT=True, - HAS_CRYPTOGRAPHY=True, - serialization=serialization()): - - lookup = lookup_loader.get('community.general.github_app_access_token') + with ( + patch.dict(sys.modules, {"jwt": pyjwt}), + patch.multiple( + "ansible_collections.community.general.plugins.lookup.github_app_access_token", + open=mock_open(read_data="foo_bar"), + open_url=MagicMock(return_value=MockResponse()), + HAS_JWT=True, + HAS_CRYPTOGRAPHY=True, + serialization=serialization(), + ), + ): + lookup = lookup_loader.get("community.general.github_app_access_token") self.assertListEqual( [MockResponse.response_token], lookup.run( - [], - app_id="app_id", - installation_id="installation_id", - private_key="foo_bar", - token_expiry=600 - ) + [], app_id="app_id", installation_id="installation_id", private_key="foo_bar", token_expiry=600 + ), ) def test_get_token_with_python_jwt(self): python_jwt = types.ModuleType("jwt") python_jwt.JWT = MagicMock() - python_jwt.jwk_from_pem = MagicMock(return_value='private_key') + python_jwt.jwk_from_pem = MagicMock(return_value="private_key") python_jwt.jwt_instance = MockJWT() - with patch.dict(sys.modules, {'jwt': python_jwt}), \ - patch.multiple("ansible_collections.community.general.plugins.lookup.github_app_access_token", - open=mock_open(read_data="foo_bar"), - open_url=MagicMock(return_value=MockResponse()), - HAS_JWT=True): - lookup = lookup_loader.get('community.general.github_app_access_token') + with ( + patch.dict(sys.modules, {"jwt": python_jwt}), + patch.multiple( + "ansible_collections.community.general.plugins.lookup.github_app_access_token", + open=mock_open(read_data="foo_bar"), + open_url=MagicMock(return_value=MockResponse()), + HAS_JWT=True, + ), + ): + lookup = lookup_loader.get("community.general.github_app_access_token") self.assertListEqual( [MockResponse.response_token], - lookup.run( - [], - key_path="key", - app_id="app_id", - installation_id="installation_id", - token_expiry=600 - ) + lookup.run([], key_path="key", app_id="app_id", installation_id="installation_id", token_expiry=600), ) def test_get_token_with_fact_with_python_jwt(self): python_jwt = types.ModuleType("jwt") python_jwt.JWT = MagicMock() - python_jwt.jwk_from_pem = MagicMock(return_value='private_key') + python_jwt.jwk_from_pem = 
MagicMock(return_value="private_key") python_jwt.jwt_instance = MockJWT() - with patch.dict(sys.modules, {'jwt': python_jwt}), \ - patch.multiple("ansible_collections.community.general.plugins.lookup.github_app_access_token", - open=mock_open(read_data="foo_bar"), - open_url=MagicMock(return_value=MockResponse()), - HAS_JWT=True): - lookup = lookup_loader.get('community.general.github_app_access_token') + with ( + patch.dict(sys.modules, {"jwt": python_jwt}), + patch.multiple( + "ansible_collections.community.general.plugins.lookup.github_app_access_token", + open=mock_open(read_data="foo_bar"), + open_url=MagicMock(return_value=MockResponse()), + HAS_JWT=True, + ), + ): + lookup = lookup_loader.get("community.general.github_app_access_token") self.assertListEqual( [MockResponse.response_token], lookup.run( - [], - app_id="app_id", - installation_id="installation_id", - private_key="foo_bar", - token_expiry=600 - ) + [], app_id="app_id", installation_id="installation_id", private_key="foo_bar", token_expiry=600 + ), ) diff --git a/tests/unit/plugins/lookup/test_lastpass.py b/tests/unit/plugins/lookup/test_lastpass.py index ed27f44b43e..e86b8e4026f 100644 --- a/tests/unit/plugins/lookup/test_lastpass.py +++ b/tests/unit/plugins/lookup/test_lastpass.py @@ -14,112 +14,114 @@ from ansible_collections.community.general.plugins.lookup.lastpass import LPass, LPassException -MOCK_ENTRIES = [{'username': 'user', - 'name': 'Mock Entry', - 'password': 't0pS3cret passphrase entry!', - 'url': 'https://localhost/login', - 'notes': 'Test\nnote with multiple lines.\n', - 'id': '0123456789'}] +MOCK_ENTRIES = [ + { + "username": "user", + "name": "Mock Entry", + "password": "t0pS3cret passphrase entry!", + "url": "https://localhost/login", + "notes": "Test\nnote with multiple lines.\n", + "id": "0123456789", + } +] class MockLPass(LPass): - _mock_logged_out = False _mock_disconnected = False def _lookup_mock_entry(self, key): for entry in MOCK_ENTRIES: - if key == entry['id'] or key == entry['name']: + if key == entry["id"] or key == entry["name"]: return entry def _run(self, args, stdin=None, expected_rc=0): # Mock behavior of lpass executable base_options = ArgumentParser(add_help=False) - base_options.add_argument('--color', default="auto", choices=['auto', 'always', 'never']) + base_options.add_argument("--color", default="auto", choices=["auto", "always", "never"]) p = ArgumentParser() - sp = p.add_subparsers(help='command', dest='subparser_name') + sp = p.add_subparsers(help="command", dest="subparser_name") - logout_p = sp.add_parser('logout', parents=[base_options], help='logout') - show_p = sp.add_parser('show', parents=[base_options], help='show entry details') + logout_p = sp.add_parser("logout", parents=[base_options], help="logout") + show_p = sp.add_parser("show", parents=[base_options], help="show entry details") field_group = show_p.add_mutually_exclusive_group(required=True) for field in MOCK_ENTRIES[0].keys(): - field_group.add_argument(f"--{field}", default=False, action='store_true') - field_group.add_argument('--field', default=None) - show_p.add_argument('selector', help='Unique Name or ID') + field_group.add_argument(f"--{field}", default=False, action="store_true") + field_group.add_argument("--field", default=None) + show_p.add_argument("selector", help="Unique Name or ID") args = p.parse_args(args) - def mock_exit(output='', error='', rc=0): + def mock_exit(output="", error="", rc=0): if rc != expected_rc: raise LPassException(error) return output, error - if args.color != 'never': 
- return mock_exit(error='Error: Mock only supports --color=never', rc=1) + if args.color != "never": + return mock_exit(error="Error: Mock only supports --color=never", rc=1) - if args.subparser_name == 'logout': + if args.subparser_name == "logout": if self._mock_logged_out: - return mock_exit(error='Error: Not currently logged in', rc=1) + return mock_exit(error="Error: Not currently logged in", rc=1) - logged_in_error = 'Are you sure you would like to log out? [Y/n]' - if stdin and stdin.lower() == 'n\n': - return mock_exit(output='Log out: aborted.', error=logged_in_error, rc=1) - elif stdin and stdin.lower() == 'y\n': - return mock_exit(output='Log out: complete.', error=logged_in_error, rc=0) + logged_in_error = "Are you sure you would like to log out? [Y/n]" + if stdin and stdin.lower() == "n\n": + return mock_exit(output="Log out: aborted.", error=logged_in_error, rc=1) + elif stdin and stdin.lower() == "y\n": + return mock_exit(output="Log out: complete.", error=logged_in_error, rc=0) else: - return mock_exit(error='Error: aborted response', rc=1) + return mock_exit(error="Error: aborted response", rc=1) - if args.subparser_name == 'show': + if args.subparser_name == "show": if self._mock_logged_out: - return mock_exit(error="Error: Could not find decryption key. Perhaps you need to login with `lpass login`.", rc=1) + return mock_exit( + error="Error: Could not find decryption key. Perhaps you need to login with `lpass login`.", rc=1 + ) if self._mock_disconnected: - return mock_exit(error='Error: Couldn\'t resolve host name.', rc=1) + return mock_exit(error="Error: Couldn't resolve host name.", rc=1) mock_entry = self._lookup_mock_entry(args.selector) if args.field: - return mock_exit(output=mock_entry.get(args.field, '')) + return mock_exit(output=mock_entry.get(args.field, "")) elif args.password: - return mock_exit(output=mock_entry.get('password', '')) + return mock_exit(output=mock_entry.get("password", "")) elif args.username: - return mock_exit(output=mock_entry.get('username', '')) + return mock_exit(output=mock_entry.get("username", "")) elif args.url: - return mock_exit(output=mock_entry.get('url', '')) + return mock_exit(output=mock_entry.get("url", "")) elif args.name: - return mock_exit(output=mock_entry.get('name', '')) + return mock_exit(output=mock_entry.get("name", "")) elif args.id: - return mock_exit(output=mock_entry.get('id', '')) + return mock_exit(output=mock_entry.get("id", "")) elif args.notes: - return mock_exit(output=mock_entry.get('notes', '')) + return mock_exit(output=mock_entry.get("notes", "")) - raise LPassException('We should never get here') + raise LPassException("We should never get here") class DisconnectedMockLPass(MockLPass): - _mock_disconnected = True class LoggedOutMockLPass(MockLPass): - _mock_logged_out = True class TestLPass(unittest.TestCase): - def setUp(self): - self.lookup = lookup_loader.get('community.general.lastpass') + self.lookup = lookup_loader.get("community.general.lastpass") def test_lastpass_cli_path(self): - lp = MockLPass(path='/dev/null') - self.assertEqual('/dev/null', lp.cli_path) + lp = MockLPass(path="/dev/null") + self.assertEqual("/dev/null", lp.cli_path) def test_lastpass_build_args_logout(self): lp = MockLPass() - self.assertEqual(['logout', '--color=never'], lp._build_args("logout")) + self.assertEqual(["logout", "--color=never"], lp._build_args("logout")) def test_lastpass_logged_in_true(self): lp = MockLPass() @@ -133,39 +135,37 @@ def test_lastpass_show_disconnected(self): lp = DisconnectedMockLPass() 
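# Aside (illustrative sketch, not part of the patch above): MockLPass._run
# emulates the lpass executable by feeding the argv list into a real
# ArgumentParser, which gives the mock genuine flag validation for free.
# A trimmed sketch of the idea with only a --color flag and a selector;
# all names here are illustrative.
from argparse import ArgumentParser

def mock_cli(argv):
    parser = ArgumentParser()
    sub = parser.add_subparsers(dest="command")
    show = sub.add_parser("show")
    show.add_argument("--color", default="auto", choices=["auto", "always", "never"])
    show.add_argument("selector")
    args = parser.parse_args(argv)
    return (args.command, args.selector, args.color)

assert mock_cli(["show", "--color=never", "0123456789"]) == ("show", "0123456789", "never")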
with self.assertRaises(LPassException): - lp.get_field('0123456789', 'username') + lp.get_field("0123456789", "username") def test_lastpass_show(self): lp = MockLPass() for entry in MOCK_ENTRIES: - entry_id = entry.get('id') + entry_id = entry.get("id") for k, v in entry.items(): self.assertEqual(v.strip(), lp.get_field(entry_id, k)) class TestLastpassPlugin(unittest.TestCase): - def setUp(self): - self.lookup = lookup_loader.get('community.general.lastpass') + self.lookup = lookup_loader.get("community.general.lastpass") - @patch('ansible_collections.community.general.plugins.lookup.lastpass.LPass', new=MockLPass) + @patch("ansible_collections.community.general.plugins.lookup.lastpass.LPass", new=MockLPass) def test_lastpass_plugin_normal(self): for entry in MOCK_ENTRIES: - entry_id = entry.get('id') + entry_id = entry.get("id") for k, v in entry.items(): - self.assertEqual(v.strip(), - self.lookup.run([entry_id], field=k)[0]) + self.assertEqual(v.strip(), self.lookup.run([entry_id], field=k)[0]) - @patch('ansible_collections.community.general.plugins.lookup.lastpass.LPass', LoggedOutMockLPass) + @patch("ansible_collections.community.general.plugins.lookup.lastpass.LPass", LoggedOutMockLPass) def test_lastpass_plugin_logged_out(self): entry = MOCK_ENTRIES[0] - entry_id = entry.get('id') + entry_id = entry.get("id") with self.assertRaises(AnsibleError): - self.lookup.run([entry_id], field='password') + self.lookup.run([entry_id], field="password") - @patch('ansible_collections.community.general.plugins.lookup.lastpass.LPass', DisconnectedMockLPass) + @patch("ansible_collections.community.general.plugins.lookup.lastpass.LPass", DisconnectedMockLPass) def test_lastpass_plugin_disconnected(self): entry = MOCK_ENTRIES[0] - entry_id = entry.get('id') + entry_id = entry.get("id") with self.assertRaises(AnsibleError): - self.lookup.run([entry_id], field='password') + self.lookup.run([entry_id], field="password") diff --git a/tests/unit/plugins/lookup/test_merge_variables.py b/tests/unit/plugins/lookup/test_merge_variables.py index ffa587642e2..c2d0b188e10 100644 --- a/tests/unit/plugins/lookup/test_merge_variables.py +++ b/tests/unit/plugins/lookup/test_merge_variables.py @@ -18,7 +18,6 @@ class TestMergeVariablesLookup(unittest.TestCase): class HostVarsMock(dict): - def __getattr__(self, item): return super().__getitem__(item) @@ -33,269 +32,241 @@ def setUp(self): self.templar = Templar(loader=self.loader, variables={}) self.merge_vars_lookup = merge_variables.LookupModule(loader=self.loader, templar=self.templar) - @patch.object(AnsiblePlugin, 'set_options') - @patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'ignore', 'suffix', None]) - @patch.object(Templar, 'template', side_effect=[['item1'], ['item3']]) + @patch.object(AnsiblePlugin, "set_options") + @patch.object(AnsiblePlugin, "get_option", side_effect=[None, "ignore", "suffix", None]) + @patch.object(Templar, "template", side_effect=[["item1"], ["item3"]]) def test_merge_list(self, mock_set_options, mock_get_option, mock_template): - results = self.merge_vars_lookup.run(['__merge_list'], { - 'testlist1__merge_list': ['item1'], - 'testlist2': ['item2'], - 'testlist3__merge_list': ['item3'] - }) + results = self.merge_vars_lookup.run( + ["__merge_list"], + {"testlist1__merge_list": ["item1"], "testlist2": ["item2"], "testlist3__merge_list": ["item3"]}, + ) - self.assertEqual(results, [['item1', 'item3']]) + self.assertEqual(results, [["item1", "item3"]]) - @patch.object(AnsiblePlugin, 'set_options') - 
@patch.object(AnsiblePlugin, 'get_option', side_effect=[['initial_item'], 'ignore', 'suffix', None]) - @patch.object(Templar, 'template', side_effect=[['item1'], ['item3']]) + @patch.object(AnsiblePlugin, "set_options") + @patch.object(AnsiblePlugin, "get_option", side_effect=[["initial_item"], "ignore", "suffix", None]) + @patch.object(Templar, "template", side_effect=[["item1"], ["item3"]]) def test_merge_list_with_initial_value(self, mock_set_options, mock_get_option, mock_template): - results = self.merge_vars_lookup.run(['__merge_list'], { - 'testlist1__merge_list': ['item1'], - 'testlist2': ['item2'], - 'testlist3__merge_list': ['item3'] - }) + results = self.merge_vars_lookup.run( + ["__merge_list"], + {"testlist1__merge_list": ["item1"], "testlist2": ["item2"], "testlist3__merge_list": ["item3"]}, + ) - self.assertEqual(results, [['initial_item', 'item1', 'item3']]) + self.assertEqual(results, [["initial_item", "item1", "item3"]]) - @patch.object(AnsiblePlugin, 'set_options') - @patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'ignore', 'suffix', None]) - @patch.object(Templar, 'template', side_effect=[{'item1': 'test', 'list_item': ['test1']}, - {'item2': 'test', 'list_item': ['test2']}]) + @patch.object(AnsiblePlugin, "set_options") + @patch.object(AnsiblePlugin, "get_option", side_effect=[None, "ignore", "suffix", None]) + @patch.object( + Templar, + "template", + side_effect=[{"item1": "test", "list_item": ["test1"]}, {"item2": "test", "list_item": ["test2"]}], + ) def test_merge_dict(self, mock_set_options, mock_get_option, mock_template): - results = self.merge_vars_lookup.run(['__merge_dict'], { - 'testdict1__merge_dict': { - 'item1': 'test', - 'list_item': ['test1'] + results = self.merge_vars_lookup.run( + ["__merge_dict"], + { + "testdict1__merge_dict": {"item1": "test", "list_item": ["test1"]}, + "testdict2__merge_dict": {"item2": "test", "list_item": ["test2"]}, }, - 'testdict2__merge_dict': { - 'item2': 'test', - 'list_item': ['test2'] - } - }) + ) - self.assertEqual(results, [ - { - 'item1': 'test', - 'item2': 'test', - 'list_item': ['test1', 'test2'] - } - ]) + self.assertEqual(results, [{"item1": "test", "item2": "test", "list_item": ["test1", "test2"]}]) - @patch.object(AnsiblePlugin, 'set_options') - @patch.object(AnsiblePlugin, 'get_option', side_effect=[{'initial_item': 'random value', 'list_item': ['test0']}, - 'ignore', 'suffix', None]) - @patch.object(Templar, 'template', side_effect=[{'item1': 'test', 'list_item': ['test1']}, - {'item2': 'test', 'list_item': ['test2']}]) + @patch.object(AnsiblePlugin, "set_options") + @patch.object( + AnsiblePlugin, + "get_option", + side_effect=[{"initial_item": "random value", "list_item": ["test0"]}, "ignore", "suffix", None], + ) + @patch.object( + Templar, + "template", + side_effect=[{"item1": "test", "list_item": ["test1"]}, {"item2": "test", "list_item": ["test2"]}], + ) def test_merge_dict_with_initial_value(self, mock_set_options, mock_get_option, mock_template): - results = self.merge_vars_lookup.run(['__merge_dict'], { - 'testdict1__merge_dict': { - 'item1': 'test', - 'list_item': ['test1'] + results = self.merge_vars_lookup.run( + ["__merge_dict"], + { + "testdict1__merge_dict": {"item1": "test", "list_item": ["test1"]}, + "testdict2__merge_dict": {"item2": "test", "list_item": ["test2"]}, }, - 'testdict2__merge_dict': { - 'item2': 'test', - 'list_item': ['test2'] - } - }) + ) - self.assertEqual(results, [ - { - 'initial_item': 'random value', - 'item1': 'test', - 'item2': 'test', - 'list_item': 
['test0', 'test1', 'test2'] - } - ]) + self.assertEqual( + results, + [ + { + "initial_item": "random value", + "item1": "test", + "item2": "test", + "list_item": ["test0", "test1", "test2"], + } + ], + ) - @patch.object(AnsiblePlugin, 'set_options') - @patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'warn', 'suffix', None]) - @patch.object(Templar, 'template', side_effect=[{'item': 'value1'}, {'item': 'value2'}]) - @patch.object(Display, 'warning') + @patch.object(AnsiblePlugin, "set_options") + @patch.object(AnsiblePlugin, "get_option", side_effect=[None, "warn", "suffix", None]) + @patch.object(Templar, "template", side_effect=[{"item": "value1"}, {"item": "value2"}]) + @patch.object(Display, "warning") def test_merge_dict_non_unique_warning(self, mock_set_options, mock_get_option, mock_template, mock_display): - results = self.merge_vars_lookup.run(['__merge_non_unique'], { - 'testdict1__merge_non_unique': {'item': 'value1'}, - 'testdict2__merge_non_unique': {'item': 'value2'} - }) + results = self.merge_vars_lookup.run( + ["__merge_non_unique"], + {"testdict1__merge_non_unique": {"item": "value1"}, "testdict2__merge_non_unique": {"item": "value2"}}, + ) self.assertTrue(mock_display.called) - self.assertEqual(results, [{'item': 'value2'}]) + self.assertEqual(results, [{"item": "value2"}]) - @patch.object(AnsiblePlugin, 'set_options') - @patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'error', 'suffix', None]) - @patch.object(Templar, 'template', side_effect=[{'item': 'value1'}, {'item': 'value2'}]) + @patch.object(AnsiblePlugin, "set_options") + @patch.object(AnsiblePlugin, "get_option", side_effect=[None, "error", "suffix", None]) + @patch.object(Templar, "template", side_effect=[{"item": "value1"}, {"item": "value2"}]) def test_merge_dict_non_unique_error(self, mock_set_options, mock_get_option, mock_template): with self.assertRaises(AnsibleError): - self.merge_vars_lookup.run(['__merge_non_unique'], { - 'testdict1__merge_non_unique': {'item': 'value1'}, - 'testdict2__merge_non_unique': {'item': 'value2'} - }) + self.merge_vars_lookup.run( + ["__merge_non_unique"], + {"testdict1__merge_non_unique": {"item": "value1"}, "testdict2__merge_non_unique": {"item": "value2"}}, + ) - @patch.object(AnsiblePlugin, 'set_options') - @patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'ignore', 'suffix', None]) - @patch.object(Templar, 'template', side_effect=[{'item1': 'test', 'list_item': ['test1']}, - ['item2', 'item3']]) + @patch.object(AnsiblePlugin, "set_options") + @patch.object(AnsiblePlugin, "get_option", side_effect=[None, "ignore", "suffix", None]) + @patch.object(Templar, "template", side_effect=[{"item1": "test", "list_item": ["test1"]}, ["item2", "item3"]]) def test_merge_list_and_dict(self, mock_set_options, mock_get_option, mock_template): with self.assertRaises(AnsibleError): - self.merge_vars_lookup.run(['__merge_var'], { - 'testlist__merge_var': { - 'item1': 'test', - 'list_item': ['test1'] + self.merge_vars_lookup.run( + ["__merge_var"], + { + "testlist__merge_var": {"item1": "test", "list_item": ["test1"]}, + "testdict__merge_var": ["item2", "item3"], }, - 'testdict__merge_var': ['item2', 'item3'] - }) + ) - @patch.object(AnsiblePlugin, 'set_options') - @patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'ignore', 'suffix', ['all']]) - @patch.object(Templar, 'template', side_effect=[ - {'var': [{'item1': 'value1', 'item2': 'value2'}]}, - {'var': [{'item5': 'value5', 'item6': 'value6'}]}, - ]) + @patch.object(AnsiblePlugin, 
"set_options") + @patch.object(AnsiblePlugin, "get_option", side_effect=[None, "ignore", "suffix", ["all"]]) + @patch.object( + Templar, + "template", + side_effect=[ + {"var": [{"item1": "value1", "item2": "value2"}]}, + {"var": [{"item5": "value5", "item6": "value6"}]}, + ], + ) def test_merge_dict_group_all(self, mock_set_options, mock_get_option, mock_template): - hostvars = self.HostVarsMock({ - 'host1': { - 'group_names': ['dummy1'], - 'inventory_hostname': 'host1', - '1testlist__merge_var': { - 'var': [{'item1': 'value1', 'item2': 'value2'}] - } - }, - 'host2': { - 'group_names': ['dummy1'], - 'inventory_hostname': 'host2', - '2otherlist__merge_var': { - 'var': [{'item5': 'value5', 'item6': 'value6'}] - } + hostvars = self.HostVarsMock( + { + "host1": { + "group_names": ["dummy1"], + "inventory_hostname": "host1", + "1testlist__merge_var": {"var": [{"item1": "value1", "item2": "value2"}]}, + }, + "host2": { + "group_names": ["dummy1"], + "inventory_hostname": "host2", + "2otherlist__merge_var": {"var": [{"item5": "value5", "item6": "value6"}]}, + }, } - }) - variables = { - 'inventory_hostname': 'host1', - 'hostvars': hostvars - } + ) + variables = {"inventory_hostname": "host1", "hostvars": hostvars} - results = self.merge_vars_lookup.run(['__merge_var'], variables) + results = self.merge_vars_lookup.run(["__merge_var"], variables) - self.assertEqual(results, [ - {'var': [ - {'item1': 'value1', 'item2': 'value2'}, - {'item5': 'value5', 'item6': 'value6'} - ]} - ]) + self.assertEqual( + results, [{"var": [{"item1": "value1", "item2": "value2"}, {"item5": "value5", "item6": "value6"}]}] + ) - @patch.object(AnsiblePlugin, 'set_options') - @patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'ignore', 'suffix', ['dummy1']]) - @patch.object(Templar, 'template', side_effect=[ - {'var': [{'item1': 'value1', 'item2': 'value2'}]}, - {'var': [{'item5': 'value5', 'item6': 'value6'}]}, - ]) + @patch.object(AnsiblePlugin, "set_options") + @patch.object(AnsiblePlugin, "get_option", side_effect=[None, "ignore", "suffix", ["dummy1"]]) + @patch.object( + Templar, + "template", + side_effect=[ + {"var": [{"item1": "value1", "item2": "value2"}]}, + {"var": [{"item5": "value5", "item6": "value6"}]}, + ], + ) def test_merge_dict_group_single(self, mock_set_options, mock_get_option, mock_template): - hostvars = self.HostVarsMock({ - 'host1': { - 'group_names': ['dummy1'], - 'inventory_hostname': 'host1', - '1testlist__merge_var': { - 'var': [{'item1': 'value1', 'item2': 'value2'}] - } - }, - 'host2': { - 'group_names': ['dummy1'], - 'inventory_hostname': 'host2', - '2otherlist__merge_var': { - 'var': [{'item5': 'value5', 'item6': 'value6'}] - } - }, - 'host3': { - 'group_names': ['dummy2'], - 'inventory_hostname': 'host3', - '3otherlist__merge_var': { - 'var': [{'item3': 'value3', 'item4': 'value4'}] - } + hostvars = self.HostVarsMock( + { + "host1": { + "group_names": ["dummy1"], + "inventory_hostname": "host1", + "1testlist__merge_var": {"var": [{"item1": "value1", "item2": "value2"}]}, + }, + "host2": { + "group_names": ["dummy1"], + "inventory_hostname": "host2", + "2otherlist__merge_var": {"var": [{"item5": "value5", "item6": "value6"}]}, + }, + "host3": { + "group_names": ["dummy2"], + "inventory_hostname": "host3", + "3otherlist__merge_var": {"var": [{"item3": "value3", "item4": "value4"}]}, + }, } - }) - variables = { - 'inventory_hostname': 'host1', - 'hostvars': hostvars - } + ) + variables = {"inventory_hostname": "host1", "hostvars": hostvars} - results = 
self.merge_vars_lookup.run(['__merge_var'], variables) + results = self.merge_vars_lookup.run(["__merge_var"], variables) - self.assertEqual(results, [ - {'var': [ - {'item1': 'value1', 'item2': 'value2'}, - {'item5': 'value5', 'item6': 'value6'} - ]} - ]) + self.assertEqual( + results, [{"var": [{"item1": "value1", "item2": "value2"}, {"item5": "value5", "item6": "value6"}]}] + ) - @patch.object(AnsiblePlugin, 'set_options') - @patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'ignore', 'suffix', ['dummy1', 'dummy2']]) - @patch.object(Templar, 'template', side_effect=[ - {'var': [{'item1': 'value1', 'item2': 'value2'}]}, - {'var': [{'item5': 'value5', 'item6': 'value6'}]}, - ]) + @patch.object(AnsiblePlugin, "set_options") + @patch.object(AnsiblePlugin, "get_option", side_effect=[None, "ignore", "suffix", ["dummy1", "dummy2"]]) + @patch.object( + Templar, + "template", + side_effect=[ + {"var": [{"item1": "value1", "item2": "value2"}]}, + {"var": [{"item5": "value5", "item6": "value6"}]}, + ], + ) def test_merge_dict_group_multiple(self, mock_set_options, mock_get_option, mock_template): - hostvars = self.HostVarsMock({ - 'host1': { - 'group_names': ['dummy1'], - 'inventory_hostname': 'host1', - '1testlist__merge_var': { - 'var': [{'item1': 'value1', 'item2': 'value2'}] - } - }, - 'host2': { - 'group_names': ['dummy2'], - 'inventory_hostname': 'host2', - '2otherlist__merge_var': { - 'var': [{'item5': 'value5', 'item6': 'value6'}] - } - }, - 'host3': { - 'group_names': ['dummy3'], - 'inventory_hostname': 'host3', - '3otherlist__merge_var': { - 'var': [{'item3': 'value3', 'item4': 'value4'}] - } + hostvars = self.HostVarsMock( + { + "host1": { + "group_names": ["dummy1"], + "inventory_hostname": "host1", + "1testlist__merge_var": {"var": [{"item1": "value1", "item2": "value2"}]}, + }, + "host2": { + "group_names": ["dummy2"], + "inventory_hostname": "host2", + "2otherlist__merge_var": {"var": [{"item5": "value5", "item6": "value6"}]}, + }, + "host3": { + "group_names": ["dummy3"], + "inventory_hostname": "host3", + "3otherlist__merge_var": {"var": [{"item3": "value3", "item4": "value4"}]}, + }, } - }) - variables = { - 'inventory_hostname': 'host1', - 'hostvars': hostvars - } - results = self.merge_vars_lookup.run(['__merge_var'], variables) + ) + variables = {"inventory_hostname": "host1", "hostvars": hostvars} + results = self.merge_vars_lookup.run(["__merge_var"], variables) - self.assertEqual(results, [ - {'var': [ - {'item1': 'value1', 'item2': 'value2'}, - {'item5': 'value5', 'item6': 'value6'} - ]} - ]) + self.assertEqual( + results, [{"var": [{"item1": "value1", "item2": "value2"}, {"item5": "value5", "item6": "value6"}]}] + ) - @patch.object(AnsiblePlugin, 'set_options') - @patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'ignore', 'suffix', ['dummy1', 'dummy2']]) - @patch.object(Templar, 'template', side_effect=[ - ['item1'], - ['item5'], - ]) + @patch.object(AnsiblePlugin, "set_options") + @patch.object(AnsiblePlugin, "get_option", side_effect=[None, "ignore", "suffix", ["dummy1", "dummy2"]]) + @patch.object( + Templar, + "template", + side_effect=[ + ["item1"], + ["item5"], + ], + ) def test_merge_list_group_multiple(self, mock_set_options, mock_get_option, mock_template): - hostvars = self.HostVarsMock({ - 'host1': { - 'group_names': ['dummy1'], - 'inventory_hostname': 'host1', - '1testlist__merge_var': ['item1'] - }, - 'host2': { - 'group_names': ['dummy2'], - 'inventory_hostname': 'host2', - '2otherlist__merge_var': ['item5'] - }, - 'host3': { - 
'group_names': ['dummy3'], - 'inventory_hostname': 'host3', - '3otherlist__merge_var': ['item3'] + hostvars = self.HostVarsMock( + { + "host1": {"group_names": ["dummy1"], "inventory_hostname": "host1", "1testlist__merge_var": ["item1"]}, + "host2": {"group_names": ["dummy2"], "inventory_hostname": "host2", "2otherlist__merge_var": ["item5"]}, + "host3": {"group_names": ["dummy3"], "inventory_hostname": "host3", "3otherlist__merge_var": ["item3"]}, } - }) - variables = { - 'inventory_hostname': 'host1', - 'hostvars': hostvars - } - results = self.merge_vars_lookup.run(['__merge_var'], variables) + ) + variables = {"inventory_hostname": "host1", "hostvars": hostvars} + results = self.merge_vars_lookup.run(["__merge_var"], variables) - self.assertEqual(results, [['item1', 'item5']]) + self.assertEqual(results, [["item1", "item5"]]) diff --git a/tests/unit/plugins/lookup/test_onepassword.py b/tests/unit/plugins/lookup/test_onepassword.py index f9b26167b40..ecb15ad455d 100644 --- a/tests/unit/plugins/lookup/test_onepassword.py +++ b/tests/unit/plugins/lookup/test_onepassword.py @@ -19,19 +19,34 @@ ) -OP_VERSION_FIXTURES = [ - "opv1", - "opv2" -] +OP_VERSION_FIXTURES = ["opv1", "opv2"] @pytest.mark.parametrize( ("args", "rc", "expected_call_args", "expected_call_kwargs", "expected"), ( - ([], 0, ["get", "account"], {"ignore_errors": True}, True,), - ([], 1, ["get", "account"], {"ignore_errors": True}, False,), - (["acme"], 1, ["get", "account", "--account", "acme.1password.com"], {"ignore_errors": True}, False,), - ) + ( + [], + 0, + ["get", "account"], + {"ignore_errors": True}, + True, + ), + ( + [], + 1, + ["get", "account"], + {"ignore_errors": True}, + False, + ), + ( + ["acme"], + 1, + ["get", "account", "--account", "acme.1password.com"], + {"ignore_errors": True}, + False, + ), + ), ) def test_assert_logged_in_v1(mocker, args, rc, expected_call_args, expected_call_kwargs, expected): mocker.patch.object(OnePassCLIv1, "_run", return_value=[rc, "", ""]) @@ -54,23 +69,44 @@ def test_full_signin_v1(mocker): ) result = op_cli.full_signin() - op_cli._run.assert_called_with([ - "signin", - "acme.1password.com", - b"bob@acme.com", - b"SECRET", - "--raw", - ], command_input=b"ONEKEYTORULETHEMALL") + op_cli._run.assert_called_with( + [ + "signin", + "acme.1password.com", + b"bob@acme.com", + b"SECRET", + "--raw", + ], + command_input=b"ONEKEYTORULETHEMALL", + ) assert result == [0, "", ""] @pytest.mark.parametrize( ("args", "out", "expected_call_args", "expected_call_kwargs", "expected"), ( - ([], "list of accounts", ["account", "get"], {"ignore_errors": True}, True,), - (["acme"], "list of accounts", ["account", "get", "--account", "acme.1password.com"], {"ignore_errors": True}, True,), - ([], "", ["account", "list"], {}, False,), - ) + ( + [], + "list of accounts", + ["account", "get"], + {"ignore_errors": True}, + True, + ), + ( + ["acme"], + "list of accounts", + ["account", "get", "--account", "acme.1password.com"], + {"ignore_errors": True}, + True, + ), + ( + [], + "", + ["account", "list"], + {}, + False, + ), + ), ) def test_assert_logged_in_v2(mocker, args, out, expected_call_args, expected_call_kwargs, expected): mocker.patch.object(OnePassCLIv2, "_run", return_value=[0, out, ""]) @@ -100,13 +136,17 @@ def test_full_signin_v2(mocker): op_cli._run.assert_called_with( [ - "account", "add", "--raw", - "--address", "acme.1password.com", - "--email", b"bob@acme.com", + "account", + "add", + "--raw", + "--address", + "acme.1password.com", + "--email", + b"bob@acme.com", "--signin", ], 
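The merge_vars cases above all exercise one behaviour: collect every variable whose name ends in the lookup term's suffix and fold the values together, seeded by an optional initial value. A minimal standalone sketch of that idea — merge_suffixed_vars is a hypothetical helper for illustration, not the plugin's actual implementation, which also recurses into nested lists and applies its override setting:

def merge_suffixed_vars(all_vars, suffix, initial=None):
    """Merge every value whose variable name ends in ``suffix``, in name order (sketch)."""
    merged = initial
    for name in sorted(all_vars):
        if not name.endswith(suffix):
            continue
        value = all_vars[name]
        if merged is None:
            merged = type(value)()
        if isinstance(value, list):
            merged = merged + value
        elif isinstance(value, dict):
            merged = {**merged, **value}  # simplified: no nested-list merging here
        else:
            raise TypeError("only lists and dicts can be merged")
    return merged

# Mirrors test_merge_list_with_initial_value above:
assert merge_suffixed_vars(
    {"testlist1__merge_list": ["item1"], "testlist2": ["item2"], "testlist3__merge_list": ["item3"]},
    "__merge_list",
    initial=["initial_item"],
) == ["initial_item", "item1", "item3"]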
command_input=b"ONEKEYTORULETHEMALL", - environment_update={'OP_SECRET_KEY': 'SECRET'}, + environment_update={"OP_SECRET_KEY": "SECRET"}, ) assert result == [0, "", ""] @@ -116,7 +156,7 @@ def test_full_signin_v2(mocker): ( ("1.17.2", OnePassCLIv1), ("2.27.4", OnePassCLIv2), - ) + ), ) def test_op_correct_cli_class(fake_op, version, version_class): op = fake_op(version) @@ -146,12 +186,11 @@ def test_op_set_token_with_config(op_fixture, mocker, request): [ (op, value) for op in OP_VERSION_FIXTURES - for value in - ( + for value in ( "Missing required parameters", "The operation is unauthorized", ) - ] + ], ) def test_op_set_token_with_config_missing_args(op_fixture, message, request, mocker): op = request.getfixturevalue(op_fixture) @@ -169,7 +208,9 @@ def test_op_set_token_with_config_missing_args(op_fixture, message, request, moc def test_op_set_token_with_config_full_signin(op_fixture, request, mocker): op = request.getfixturevalue(op_fixture) mocker.patch("os.path.isfile", return_value=True) - mocker.patch.object(op._cli, "signin", return_value=(99, "", ""), side_effect=AnsibleLookupError("Raised intentionally")) + mocker.patch.object( + op._cli, "signin", return_value=(99, "", ""), side_effect=AnsibleLookupError("Raised intentionally") + ) mocker.patch.object(op._cli, "full_signin", return_value=(0, "", "")) op.set_token() @@ -192,8 +233,7 @@ def test_op_set_token_without_config(op_fixture, request, mocker): @pytest.mark.parametrize( - ("op_fixture", "login_status"), - [(op, value) for op in OP_VERSION_FIXTURES for value in [False, True]] + ("op_fixture", "login_status"), [(op, value) for op in OP_VERSION_FIXTURES for value in [False, True]] ) def test_op_assert_logged_in(mocker, login_status, op_fixture, request): op = request.getfixturevalue(op_fixture) @@ -230,7 +270,7 @@ def test_op_get_raw_v1(mocker, op_fixture, request): (None, ""), ("", ""), ] - ) + ), ) def test_op_get_field(mocker, op_fixture, output, expected, request): op = request.getfixturevalue(op_fixture) @@ -251,12 +291,17 @@ def test_op_get_field(mocker, op_fixture, output, expected, request): (_cli_class, item["vault_name"], item["queries"], item.get("kwargs", {}), item["output"], item["expected"]) for _cli_class in sorted(MOCK_ENTRIES, key=operator.attrgetter("__name__")) for item in MOCK_ENTRIES[_cli_class] - ) + ), ) def test_op_lookup(mocker, cli_class, vault, queries, kwargs, output, expected): mocker.patch("ansible_collections.community.general.plugins.lookup.onepassword.OnePass._get_cli_class", cli_class) - mocker.patch("ansible_collections.community.general.plugins.lookup.onepassword.OnePass.assert_logged_in", return_value=True) - mocker.patch("ansible_collections.community.general.plugins.lookup.onepassword.OnePassCLIBase._run", return_value=(0, json.dumps(output), "")) + mocker.patch( + "ansible_collections.community.general.plugins.lookup.onepassword.OnePass.assert_logged_in", return_value=True + ) + mocker.patch( + "ansible_collections.community.general.plugins.lookup.onepassword.OnePassCLIBase._run", + return_value=(0, json.dumps(output), ""), + ) op_lookup = lookup_loader.get("community.general.onepassword") result = op_lookup.run(queries, vault=vault, **kwargs) @@ -269,14 +314,19 @@ def test_signin(op_fixture, request): op = request.getfixturevalue(op_fixture) op._cli.master_password = "master_pass" op._cli.signin() - op._cli._run.assert_called_once_with(['signin', '--raw'], command_input=b"master_pass") + op._cli._run.assert_called_once_with(["signin", "--raw"], command_input=b"master_pass") def 
test_op_doc(mocker): document_contents = "Document Contents\n" - mocker.patch("ansible_collections.community.general.plugins.lookup.onepassword.OnePass.assert_logged_in", return_value=True) - mocker.patch("ansible_collections.community.general.plugins.lookup.onepassword.OnePassCLIBase._run", return_value=(0, document_contents, "")) + mocker.patch( + "ansible_collections.community.general.plugins.lookup.onepassword.OnePass.assert_logged_in", return_value=True + ) + mocker.patch( + "ansible_collections.community.general.plugins.lookup.onepassword.OnePassCLIBase._run", + return_value=(0, document_contents, ""), + ) op_lookup = lookup_loader.get("community.general.onepassword_doc") result = op_lookup.run(["Private key doc"]) @@ -289,17 +339,18 @@ def test_op_doc(mocker): [ (plugin, connect_host, connect_token) for plugin in ("community.general.onepassword", "community.general.onepassword_raw") - for (connect_host, connect_token) in - ( + for (connect_host, connect_token) in ( ("http://localhost", None), (None, "foobar"), ) - ] + ], ) def test_op_connect_partial_args(plugin, connect_host, connect_token, mocker): op_lookup = lookup_loader.get(plugin) - mocker.patch("ansible_collections.community.general.plugins.lookup.onepassword.OnePass._get_cli_class", OnePassCLIv2) + mocker.patch( + "ansible_collections.community.general.plugins.lookup.onepassword.OnePass._get_cli_class", OnePassCLIv2 + ) with pytest.raises(AnsibleOptionsError): op_lookup.run("login", vault_name="test vault", connect_host=connect_host, connect_token=connect_token) @@ -310,7 +361,7 @@ def test_op_connect_partial_args(plugin, connect_host, connect_token, mocker): ( {"connect_host": "http://localhost", "connect_token": "foobar"}, {"service_account_token": "foobar"}, - ) + ), ) def test_opv1_unsupported_features(kwargs): op_cli = OnePassCLIv1(**kwargs) diff --git a/tests/unit/plugins/lookup/test_onepassword_ssh_key.py b/tests/unit/plugins/lookup/test_onepassword_ssh_key.py index 700598890b4..42ac99f2721 100644 --- a/tests/unit/plugins/lookup/test_onepassword_ssh_key.py +++ b/tests/unit/plugins/lookup/test_onepassword_ssh_key.py @@ -17,11 +17,16 @@ ( (item["vault_name"], item["queries"], item.get("kwargs", {}), item["output"], item["expected"]) for item in SSH_KEY_MOCK_ENTRIES - ) + ), ) def test_ssh_key(mocker, vault, queries, kwargs, output, expected): - mocker.patch("ansible_collections.community.general.plugins.lookup.onepassword.OnePass.assert_logged_in", return_value=True) - mocker.patch("ansible_collections.community.general.plugins.lookup.onepassword.OnePassCLIBase._run", return_value=(0, json.dumps(output), "")) + mocker.patch( + "ansible_collections.community.general.plugins.lookup.onepassword.OnePass.assert_logged_in", return_value=True + ) + mocker.patch( + "ansible_collections.community.general.plugins.lookup.onepassword.OnePassCLIBase._run", + return_value=(0, json.dumps(output), ""), + ) op_lookup = lookup_loader.get("community.general.onepassword_ssh_key") result = op_lookup.run(queries, vault=vault, **kwargs) diff --git a/tests/unit/plugins/lookup/test_revbitspss.py b/tests/unit/plugins/lookup/test_revbitspss.py index 1d88ad147cc..910f38dded6 100644 --- a/tests/unit/plugins/lookup/test_revbitspss.py +++ b/tests/unit/plugins/lookup/test_revbitspss.py @@ -14,7 +14,7 @@ class MockPamSecrets(MagicMock): - RESPONSE = 'dummy value' + RESPONSE = "dummy value" def get_pam_secret(self, path): return self.RESPONSE @@ -30,13 +30,7 @@ def setUp(self): MockPamSecrets(), ) def test_get_pam_secret(self): - terms = ['dummy 
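The OP_VERSION_FIXTURES tests above lean on a pytest pattern worth spelling out: parametrize over fixture *names*, then resolve each name at run time with request.getfixturevalue(), so one test body runs once per CLI version. A self-contained sketch of the mechanism — the fixture bodies here are stand-ins, not the real op fixtures:

import pytest

@pytest.fixture
def opv1():
    return "op CLI v1"  # stand-in for the real OnePassCLIv1-backed fixture

@pytest.fixture
def opv2():
    return "op CLI v2"  # stand-in for the real OnePassCLIv2-backed fixture

@pytest.mark.parametrize("op_fixture", ["opv1", "opv2"])
def test_runs_once_per_cli_version(op_fixture, request):
    op = request.getfixturevalue(op_fixture)  # look the fixture up by name
    assert op.startswith("op CLI v")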
secret'] + terms = ["dummy secret"] variables = [] - kwargs = { - "base_url": 'https://dummy.url', - "api_key": 'dummy' - } - self.assertListEqual( - [{'dummy secret': 'dummy value'}], - self.lookup.run(terms, variables, **kwargs) - ) + kwargs = {"base_url": "https://dummy.url", "api_key": "dummy"} + self.assertListEqual([{"dummy secret": "dummy value"}], self.lookup.run(terms, variables, **kwargs)) diff --git a/tests/unit/plugins/lookup/test_tss.py b/tests/unit/plugins/lookup/test_tss.py index 36951ee8258..70e3e40099c 100644 --- a/tests/unit/plugins/lookup/test_tss.py +++ b/tests/unit/plugins/lookup/test_tss.py @@ -16,7 +16,7 @@ from ansible.plugins.loader import lookup_loader -TSS_IMPORT_PATH = 'ansible_collections.community.general.plugins.lookup.tss' +TSS_IMPORT_PATH = "ansible_collections.community.general.plugins.lookup.tss" def make_absolute(name): @@ -25,7 +25,7 @@ def make_absolute(name): class SecretServerError(Exception): def __init__(self): - self.message = '' + self.message = "" class MockSecretServer(MagicMock): @@ -40,41 +40,39 @@ def get_secret_json(self, path): raise SecretServerError -@patch(make_absolute('SecretServer'), MockSecretServer()) +@patch(make_absolute("SecretServer"), MockSecretServer()) class TestTSSClient(TestCase): def setUp(self): self.server_params = { - 'base_url': '', - 'username': '', - 'domain': '', - 'password': '', - 'api_path_uri': '', - 'token_path_uri': '', + "base_url": "", + "username": "", + "domain": "", + "password": "", + "api_path_uri": "", + "token_path_uri": "", } def test_from_params(self): - with patch(make_absolute('HAS_TSS_AUTHORIZER'), False): - self.assert_client_version('v0') + with patch(make_absolute("HAS_TSS_AUTHORIZER"), False): + self.assert_client_version("v0") - with patch.dict(self.server_params, {'domain': 'foo'}): + with patch.dict(self.server_params, {"domain": "foo"}): with self.assertRaises(tss.AnsibleError): self._get_client() - with patch.multiple(TSS_IMPORT_PATH, - HAS_TSS_AUTHORIZER=True, - PasswordGrantAuthorizer=DEFAULT, - DomainPasswordGrantAuthorizer=DEFAULT): + with patch.multiple( + TSS_IMPORT_PATH, + HAS_TSS_AUTHORIZER=True, + PasswordGrantAuthorizer=DEFAULT, + DomainPasswordGrantAuthorizer=DEFAULT, + ): + self.assert_client_version("v1") - self.assert_client_version('v1') - - with patch.dict(self.server_params, {'domain': 'foo'}): - self.assert_client_version('v1') + with patch.dict(self.server_params, {"domain": "foo"}): + self.assert_client_version("v1") def assert_client_version(self, version): - version_to_class = { - 'v0': tss.TSSClientV0, - 'v1': tss.TSSClientV1 - } + version_to_class = {"v0": tss.TSSClientV0, "v1": tss.TSSClientV1} client = self._get_client() self.assertIsInstance(client, version_to_class[version]) @@ -85,29 +83,25 @@ def _get_client(self): class TestLookupModule(TestCase): VALID_TERMS = [1] - INVALID_TERMS = ['foo'] + INVALID_TERMS = ["foo"] def setUp(self): self.lookup = lookup_loader.get("community.general.tss") - @patch.multiple(TSS_IMPORT_PATH, - HAS_TSS_SDK=False, - SecretServer=MockSecretServer) + @patch.multiple(TSS_IMPORT_PATH, HAS_TSS_SDK=False, SecretServer=MockSecretServer) def test_missing_sdk(self): with self.assertRaises(tss.AnsibleError): self._run_lookup(self.VALID_TERMS) - @patch.multiple(TSS_IMPORT_PATH, - HAS_TSS_SDK=True, - SecretServerError=SecretServerError) + @patch.multiple(TSS_IMPORT_PATH, HAS_TSS_SDK=True, SecretServerError=SecretServerError) def test_get_secret_json(self): - with patch(make_absolute('SecretServer'), MockSecretServer): + with 
patch(make_absolute("SecretServer"), MockSecretServer): self.assertListEqual([MockSecretServer.RESPONSE], self._run_lookup(self.VALID_TERMS)) with self.assertRaises(tss.AnsibleOptionsError): self._run_lookup(self.INVALID_TERMS) - with patch(make_absolute('SecretServer'), MockFaultySecretServer): + with patch(make_absolute("SecretServer"), MockFaultySecretServer): with self.assertRaises(tss.AnsibleError): self._run_lookup(self.VALID_TERMS) diff --git a/tests/unit/plugins/module_utils/cloud/test_backoff.py b/tests/unit/plugins/module_utils/cloud/test_backoff.py index 5871071c39e..5e5f41aa9fa 100644 --- a/tests/unit/plugins/module_utils/cloud/test_backoff.py +++ b/tests/unit/plugins/module_utils/cloud/test_backoff.py @@ -7,15 +7,14 @@ import random import unittest -from ansible_collections.community.general.plugins.module_utils.cloud import _exponential_backoff, \ - _full_jitter_backoff +from ansible_collections.community.general.plugins.module_utils.cloud import _exponential_backoff, _full_jitter_backoff class ExponentialBackoffStrategyTestCase(unittest.TestCase): def test_no_retries(self): strategy = _exponential_backoff(retries=0) result = list(strategy()) - self.assertEqual(result, [], 'list should be empty') + self.assertEqual(result, [], "list should be empty") def test_exponential_backoff(self): strategy = _exponential_backoff(retries=5, delay=1, backoff=2) @@ -37,7 +36,7 @@ class FullJitterBackoffStrategyTestCase(unittest.TestCase): def test_no_retries(self): strategy = _full_jitter_backoff(retries=0) result = list(strategy()) - self.assertEqual(result, [], 'list should be empty') + self.assertEqual(result, [], "list should be empty") def test_full_jitter(self): retries = 5 @@ -46,8 +45,7 @@ def test_full_jitter(self): r = random.Random(seed) expected = [r.randint(0, 2**i) for i in range(0, retries)] - strategy = _full_jitter_backoff( - retries=retries, delay=1, _random=random.Random(seed)) + strategy = _full_jitter_backoff(retries=retries, delay=1, _random=random.Random(seed)) result = list(strategy()) self.assertEqual(result, expected) diff --git a/tests/unit/plugins/module_utils/cloud/test_scaleway.py b/tests/unit/plugins/module_utils/cloud/test_scaleway.py index 93e68fc3b0e..f8f1103a8c5 100644 --- a/tests/unit/plugins/module_utils/cloud/test_scaleway.py +++ b/tests/unit/plugins/module_utils/cloud/test_scaleway.py @@ -11,48 +11,48 @@ class SecretVariablesTestCase(unittest.TestCase): def test_dict_to_list(self): - source = dict( - attribute1="value1", - attribute2="value2" - ) - expect = [ - dict(key="attribute1", value="value1"), - dict(key="attribute2", value="value2") - ] + source = dict(attribute1="value1", attribute2="value2") + expect = [dict(key="attribute1", value="value1"), dict(key="attribute2", value="value2")] result = SecretVariables.dict_to_list(source) - result = sorted(result, key=lambda el: el['key']) + result = sorted(result, key=lambda el: el["key"]) self.assertEqual(result, expect) def test_list_to_dict(self): source = [ - dict(key="secret1", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc"), - dict(key="secret2", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI") + dict( + key="secret1", + hashed_value="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc", + ), + dict( + key="secret2", + 
hashed_value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI", + ), ] expect = dict( secret1="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc", - secret2="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI" + secret2="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI", ) self.assertEqual(SecretVariables.list_to_dict(source, hashed=True), expect) def test_list_to_dict_2(self): - source = [ - dict(key="secret1", value="value1"), - dict(key="secret2", value="value2") - ] - expect = dict( - secret1="value1", - secret2="value2" - ) + source = [dict(key="secret1", value="value1"), dict(key="secret2", value="value2")] + expect = dict(secret1="value1", secret2="value2") self.assertEqual(SecretVariables.list_to_dict(source, hashed=False), expect) @unittest.skipIf(argon2 is None, "Missing required 'argon2' library") def test_decode_full(self): source_secret = [ - dict(key="secret1", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc"), - dict(key="secret2", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI"), + dict( + key="secret1", + hashed_value="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc", + ), + dict( + key="secret2", + hashed_value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI", + ), ] source_value = [ dict(key="secret1", value="value1"), @@ -65,14 +65,20 @@ def test_decode_full(self): ] result = SecretVariables.decode(source_secret, source_value) - result = sorted(result, key=lambda el: el['key']) + result = sorted(result, key=lambda el: el["key"]) self.assertEqual(result, expect) @unittest.skipIf(argon2 is None, "Missing required 'argon2' library") def test_decode_dict_divergent_values(self): source_secret = [ - dict(key="secret1", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc"), - dict(key="secret2", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI"), + dict( + key="secret1", + hashed_value="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc", + ), + dict( + key="secret2", + hashed_value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI", + ), ] source_value = [ dict(key="secret1", value="value1"), @@ -81,17 +87,23 @@ def test_decode_dict_divergent_values(self): expect = [ dict(key="secret1", value="value1"), - dict(key="secret2", value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI"), + dict( + key="secret2", + value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI", + ), ] result = SecretVariables.decode(source_secret, source_value) - result = sorted(result, key=lambda el: el['key']) + result = sorted(result, key=lambda el: el["key"]) self.assertEqual(result, expect) @unittest.skipIf(argon2 is None, "Missing required 'argon2' library") def test_decode_dict_missing_values_left(self): source_secret = [ - dict(key="secret1", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc"), + dict( + 
key="secret1", + hashed_value="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc", + ), ] source_value = [ dict(key="secret1", value="value1"), @@ -103,14 +115,20 @@ def test_decode_dict_missing_values_left(self): ] result = SecretVariables.decode(source_secret, source_value) - result = sorted(result, key=lambda el: el['key']) + result = sorted(result, key=lambda el: el["key"]) self.assertEqual(result, expect) @unittest.skipIf(argon2 is None, "Missing required 'argon2' library") def test_decode_dict_missing_values_right(self): source_secret = [ - dict(key="secret1", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc"), - dict(key="secret2", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI"), + dict( + key="secret1", + hashed_value="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc", + ), + dict( + key="secret2", + hashed_value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI", + ), ] source_value = [ dict(key="secret1", value="value1"), @@ -118,9 +136,12 @@ def test_decode_dict_missing_values_right(self): expect = [ dict(key="secret1", value="value1"), - dict(key="secret2", value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI"), + dict( + key="secret2", + value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI", + ), ] result = SecretVariables.decode(source_secret, source_value) - result = sorted(result, key=lambda el: el['key']) + result = sorted(result, key=lambda el: el["key"]) self.assertEqual(result, expect) diff --git a/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py b/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py index 784bcca29da..34b649d1170 100644 --- a/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py +++ b/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py @@ -14,102 +14,50 @@ class HwcDictComparisonTestCase(unittest.TestCase): def test_simple_no_difference(self): - value1 = { - 'foo': 'bar', - 'test': 'original' - } + value1 = {"foo": "bar", "test": "original"} self.assertFalse(are_different_dicts(value1, value1)) def test_simple_different(self): - value1 = { - 'foo': 'bar', - 'test': 'original' - } - value2 = { - 'foo': 'bar', - 'test': 'different' - } - value3 = { - 'test': 'original' - } + value1 = {"foo": "bar", "test": "original"} + value2 = {"foo": "bar", "test": "different"} + value3 = {"test": "original"} self.assertTrue(are_different_dicts(value1, value2)) self.assertTrue(are_different_dicts(value1, value3)) self.assertTrue(are_different_dicts(value2, value3)) def test_nested_dictionaries_no_difference(self): - value1 = { - 'foo': { - 'quiet': { - 'tree': 'test' - }, - 'bar': 'baz' - }, - 'test': 'original' - } + value1 = {"foo": {"quiet": {"tree": "test"}, "bar": "baz"}, "test": "original"} self.assertFalse(are_different_dicts(value1, value1)) def test_nested_dictionaries_with_difference(self): - value1 = { - 'foo': { - 'quiet': { - 'tree': 'test' - }, - 'bar': 'baz' - }, - 'test': 'original' - } - value2 = { - 'foo': { - 'quiet': { - 'tree': 'baz' - }, - 'bar': 'hello' - }, - 'test': 'original' - } - value3 = { - 'foo': { - 'quiet': { - 'tree': 'test' - }, - 'bar': 'baz' - } - } + value1 = {"foo": {"quiet": {"tree": "test"}, "bar": "baz"}, "test": 
"original"} + value2 = {"foo": {"quiet": {"tree": "baz"}, "bar": "hello"}, "test": "original"} + value3 = {"foo": {"quiet": {"tree": "test"}, "bar": "baz"}} self.assertTrue(are_different_dicts(value1, value2)) self.assertTrue(are_different_dicts(value1, value3)) self.assertTrue(are_different_dicts(value2, value3)) def test_arrays_strings_no_difference(self): - value1 = { - 'foo': [ - 'baz', - 'bar' - ] - } + value1 = {"foo": ["baz", "bar"]} self.assertFalse(are_different_dicts(value1, value1)) def test_arrays_strings_with_difference(self): value1 = { - 'foo': [ - 'baz', - 'bar', + "foo": [ + "baz", + "bar", ] } - value2 = { - 'foo': [ - 'baz', - 'hello' - ] - } + value2 = {"foo": ["baz", "hello"]} value3 = { - 'foo': [ - 'bar', + "foo": [ + "bar", ] } @@ -118,48 +66,18 @@ def test_arrays_strings_with_difference(self): self.assertTrue(are_different_dicts(value2, value3)) def test_arrays_dicts_with_no_difference(self): - value1 = { - 'foo': [ - { - 'test': 'value', - 'foo': 'bar' - }, - { - 'different': 'dict' - } - ] - } + value1 = {"foo": [{"test": "value", "foo": "bar"}, {"different": "dict"}]} self.assertFalse(are_different_dicts(value1, value1)) def test_arrays_dicts_with_difference(self): - value1 = { - 'foo': [ - { - 'test': 'value', - 'foo': 'bar' - }, - { - 'different': 'dict' - } - ] - } + value1 = {"foo": [{"test": "value", "foo": "bar"}, {"different": "dict"}]} value2 = { - 'foo': [ - { - 'test': 'value2', - 'foo': 'bar2' - }, - ] - } - value3 = { - 'foo': [ - { - 'test': 'value', - 'foo': 'bar' - } + "foo": [ + {"test": "value2", "foo": "bar2"}, ] } + value3 = {"foo": [{"test": "value", "foo": "bar"}]} self.assertTrue(are_different_dicts(value1, value2)) self.assertTrue(are_different_dicts(value1, value3)) diff --git a/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py b/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py index b38549b4a3a..596277c2b2a 100644 --- a/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py +++ b/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py @@ -7,7 +7,7 @@ import sys import unittest -from ansible_collections.community.general.plugins.module_utils.hwc_utils import (HwcModuleException, navigate_value) +from ansible_collections.community.general.plugins.module_utils.hwc_utils import HwcModuleException, navigate_value class HwcUtilsTestCase(unittest.TestCase): @@ -20,28 +20,24 @@ def setUp(self): def test_navigate_value(self): value = { - 'foo': { - 'quiet': { - 'tree': 'test', - "trees": [0, 1] - }, + "foo": { + "quiet": {"tree": "test", "trees": [0, 1]}, } } - self.assertEqual(navigate_value(value, ["foo", "quiet", "tree"]), - "test") + self.assertEqual(navigate_value(value, ["foo", "quiet", "tree"]), "test") - self.assertEqual( - navigate_value(value, ["foo", "quiet", "trees"], - {"foo.quiet.trees": 1}), - 1) + self.assertEqual(navigate_value(value, ["foo", "quiet", "trees"], {"foo.quiet.trees": 1}), 1) - self.assertRaisesRegex(HwcModuleException, - r".* key\(q\) is not exist in dict", - navigate_value, value, ["foo", "q", "tree"]) + self.assertRaisesRegex( + HwcModuleException, r".* key\(q\) is not exist in dict", navigate_value, value, ["foo", "q", "tree"] + ) - self.assertRaisesRegex(HwcModuleException, - r".* the index is out of list", - navigate_value, value, - ["foo", "quiet", "trees"], - {"foo.quiet.trees": 2}) + self.assertRaisesRegex( + HwcModuleException, + r".* the index is out of list", + navigate_value, + value, + ["foo", "quiet", "trees"], + {"foo.quiet.trees": 2}, + ) diff --git 
a/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py b/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py index efe71d4fac1..8b954bee5a5 100644 --- a/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py +++ b/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py @@ -15,22 +15,23 @@ ) module_params_creds = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'validate_certs': True, - 'auth_realm': 'master', - 'client_id': 'admin-cli', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'client_secret': None, + "auth_keycloak_url": "http://keycloak.url/auth", + "validate_certs": True, + "auth_realm": "master", + "client_id": "admin-cli", + "auth_username": "admin", + "auth_password": "admin", + "client_secret": None, } def build_mocked_request(get_id_user_count, response_dict): def _mocked_requests(*args, **kwargs): url = args[0] - method = kwargs['method'] + method = kwargs["method"] future_response = response_dict.get(url, None) return get_response(future_response, method, get_id_user_count) + return _mocked_requests @@ -38,16 +39,14 @@ def get_response(object_with_future_response, method, get_id_call_count): if callable(object_with_future_response): return object_with_future_response() if isinstance(object_with_future_response, dict): - return get_response( - object_with_future_response[method], method, get_id_call_count) + return get_response(object_with_future_response[method], method, get_id_call_count) if isinstance(object_with_future_response, list): try: call_number = get_id_call_count.__next__() except AttributeError: # manage python 2 versions. call_number = get_id_call_count.next() - return get_response( - object_with_future_response[call_number], method, get_id_call_count) + return get_response(object_with_future_response[call_number], method, get_id_call_count) return object_with_future_response @@ -55,52 +54,52 @@ def create_wrapper(text_as_string): """Allow to mock many times a call to one address. Without this function, the StringIO is empty for the second call. 
""" + def _create_wrapper(): return StringIO(text_as_string) + return _create_wrapper @pytest.fixture() def mock_good_connection(mocker): token_response = { - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper( + '{"access_token": "alongtoken"}' + ), + } return mocker.patch( - 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", side_effect=build_mocked_request(count(), token_response), - autospec=True + autospec=True, ) def test_connect_to_keycloak_with_creds(mock_good_connection): keycloak_header = get_token(module_params_creds) - assert keycloak_header == { - 'Authorization': 'Bearer alongtoken', - 'Content-Type': 'application/json' - } + assert keycloak_header == {"Authorization": "Bearer alongtoken", "Content-Type": "application/json"} def test_connect_to_keycloak_with_token(mock_good_connection): module_params_token = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'validate_certs': True, - 'client_id': 'admin-cli', - 'token': "alongtoken" + "auth_keycloak_url": "http://keycloak.url/auth", + "validate_certs": True, + "client_id": "admin-cli", + "token": "alongtoken", } keycloak_header = get_token(module_params_token) - assert keycloak_header == { - 'Authorization': 'Bearer alongtoken', - 'Content-Type': 'application/json' - } + assert keycloak_header == {"Authorization": "Bearer alongtoken", "Content-Type": "application/json"} @pytest.fixture() def mock_bad_json_returned(mocker): token_response = { - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token":'), } + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper('{"access_token":'), + } return mocker.patch( - 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", side_effect=build_mocked_request(count(), token_response), - autospec=True + autospec=True, ) @@ -110,27 +109,29 @@ def test_bad_json_returned(mock_bad_json_returned): # cannot check all the message, different errors message for the value # error in python 2.6, 2.7 and 3.*. 
assert ( - 'API returned invalid JSON when trying to obtain access token from ' - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token: ' + "API returned invalid JSON when trying to obtain access token from " + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token: " ) in str(raised_error.value) def raise_401(url): def _raise_401(): - raise HTTPError(url=url, code=401, msg='Unauthorized', hdrs='', fp=StringIO('')) + raise HTTPError(url=url, code=401, msg="Unauthorized", hdrs="", fp=StringIO("")) + return _raise_401 @pytest.fixture() def mock_401_returned(mocker): token_response = { - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': raise_401( - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token'), + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": raise_401( + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token" + ), } return mocker.patch( - 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", side_effect=build_mocked_request(count(), token_response), - autospec=True + autospec=True, ) @@ -138,20 +139,23 @@ def test_error_returned(mock_401_returned): with pytest.raises(KeycloakError) as raised_error: get_token(module_params_creds) assert str(raised_error.value) == ( - 'Could not obtain access token from http://keycloak.url' - '/auth/realms/master/protocol/openid-connect/token: ' - 'HTTP Error 401: Unauthorized' + "Could not obtain access token from http://keycloak.url" + "/auth/realms/master/protocol/openid-connect/token: " + "HTTP Error 401: Unauthorized" ) @pytest.fixture() def mock_json_without_token_returned(mocker): token_response = { - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"not_token": "It is not a token"}'), } + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper( + '{"not_token": "It is not a token"}' + ), + } return mocker.patch( - 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", side_effect=build_mocked_request(count(), token_response), - autospec=True + autospec=True, ) @@ -159,6 +163,6 @@ def test_json_without_token_returned(mock_json_without_token_returned): with pytest.raises(KeycloakError) as raised_error: get_token(module_params_creds) assert str(raised_error.value) == ( - 'API did not include access_token field in response from ' - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token' + "API did not include access_token field in response from " + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token" ) diff --git a/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_module_utils.py b/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_module_utils.py index 73928193035..ceaf719b4a7 100644 --- a/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_module_utils.py +++ b/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_module_utils.py @@ -11,74 +11,44 @@ class KeycloakIsStructIncludedTestCase(unittest.TestCase): dict1 = dict( - test1='test1', - test2=dict( - test1='test1', - test2='test2' - ), - test3=['test1', dict(test='test1', test2='test2')] + test1="test1", test2=dict(test1="test1", test2="test2"), 
test3=["test1", dict(test="test1", test2="test2")] ) dict2 = dict( - test1='test1', - test2=dict( - test1='test1', - test2='test2', - test3='test3' - ), - test3=['test1', dict(test='test1', test2='test2'), 'test3'], - test4='test4' + test1="test1", + test2=dict(test1="test1", test2="test2", test3="test3"), + test3=["test1", dict(test="test1", test2="test2"), "test3"], + test4="test4", ) dict3 = dict( - test1='test1', - test2=dict( - test1='test1', - test2='test23', - test3='test3' - ), - test3=['test1', dict(test='test1', test2='test23'), 'test3'], - test4='test4' + test1="test1", + test2=dict(test1="test1", test2="test23", test3="test3"), + test3=["test1", dict(test="test1", test2="test23"), "test3"], + test4="test4", ) dict5 = dict( - test1='test1', - test2=dict( - test1=True, - test2='test23', - test3='test3' - ), - test3=['test1', dict(test='test1', test2='test23'), 'test3'], - test4='test4' + test1="test1", + test2=dict(test1=True, test2="test23", test3="test3"), + test3=["test1", dict(test="test1", test2="test23"), "test3"], + test4="test4", ) dict6 = dict( - test1='test1', - test2=dict( - test1='true', - test2='test23', - test3='test3' - ), - test3=['test1', dict(test='test1', test2='test23'), 'test3'], - test4='test4' + test1="test1", + test2=dict(test1="true", test2="test23", test3="test3"), + test3=["test1", dict(test="test1", test2="test23"), "test3"], + test4="test4", ) dict7 = [ { - 'roles': ['view-clients', 'view-identity-providers', 'view-users', 'query-realms', 'manage-users'], - 'clientid': 'master-realm' + "roles": ["view-clients", "view-identity-providers", "view-users", "query-realms", "manage-users"], + "clientid": "master-realm", }, - { - 'roles': ['manage-account', 'view-profile', 'manage-account-links'], - 'clientid': 'account' - } + {"roles": ["manage-account", "view-profile", "manage-account-links"], "clientid": "account"}, ] dict8 = [ - { - 'roles': ['view-clients', 'query-realms', 'view-users'], - 'clientid': 'master-realm' - }, - { - 'roles': ['manage-account-links', 'view-profile', 'manage-account'], - 'clientid': 'account' - } + {"roles": ["view-clients", "query-realms", "view-users"], "clientid": "master-realm"}, + {"roles": ["manage-account-links", "view-profile", "manage-account"], "clientid": "account"}, ] def test_trivial(self): diff --git a/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py b/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py index 2bdd254cd25..75a2abf40ac 100644 --- a/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py +++ b/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py @@ -459,9 +459,7 @@ def test_list_filtered_pritunl_organization( ): api._get_pritunl_organizations = get_pritunl_organization_mock() - response = api.list_pritunl_organizations( - **dict_merge(pritunl_settings, {"filters": org_filters}) - ) + response = api.list_pritunl_organizations(**dict_merge(pritunl_settings, {"filters": org_filters})) assert len(response) == 1 assert response[0]["name"] == org_expected @@ -470,14 +468,10 @@ def test_list_filtered_pritunl_organization( "org_id,org_user_count", [("58070daee63f3b2e6e472c36", 3)], ) - def test_list_all_pritunl_user( - self, pritunl_settings, get_pritunl_user_mock, org_id, org_user_count - ): + def test_list_all_pritunl_user(self, pritunl_settings, get_pritunl_user_mock, org_id, org_user_count): api._get_pritunl_users = get_pritunl_user_mock() - response = api.list_pritunl_users( - **dict_merge(pritunl_settings, {"organization_id": org_id}) - ) + response = 
api.list_pritunl_users(**dict_merge(pritunl_settings, {"organization_id": org_id})) assert len(response) == org_user_count @@ -499,9 +493,7 @@ def test_list_filtered_pritunl_user( api._get_pritunl_users = get_pritunl_user_mock() response = api.list_pritunl_users( - **dict_merge( - pritunl_settings, {"organization_id": org_id, "filters": user_filters} - ) + **dict_merge(pritunl_settings, {"organization_id": org_id, "filters": user_filters}) ) assert len(response) > 0 @@ -586,9 +578,7 @@ def test_add_and_update_pritunl_user( # Test for DELETE operation on Pritunl API @pytest.mark.parametrize("org_id", [("58070daee63f3b2e6e472c36")]) - def test_delete_pritunl_organization( - self, pritunl_settings, org_id, delete_pritunl_organization_mock - ): + def test_delete_pritunl_organization(self, pritunl_settings, org_id, delete_pritunl_organization_mock): api._delete_pritunl_organization = delete_pritunl_organization_mock() response = api.delete_pritunl_organization( @@ -602,12 +592,8 @@ def test_delete_pritunl_organization( assert response == {} - @pytest.mark.parametrize( - "org_id,user_id", [("58070daee63f3b2e6e472c36", "590add71e63f3b72d8bb951a")] - ) - def test_delete_pritunl_user( - self, pritunl_settings, org_id, user_id, delete_pritunl_user_mock - ): + @pytest.mark.parametrize("org_id,user_id", [("58070daee63f3b2e6e472c36", "590add71e63f3b72d8bb951a")]) + def test_delete_pritunl_user(self, pritunl_settings, org_id, user_id, delete_pritunl_user_mock): api._delete_pritunl_user = delete_pritunl_user_mock() response = api.delete_pritunl_user( diff --git a/tests/unit/plugins/module_utils/test_cmd_runner.py b/tests/unit/plugins/module_utils/test_cmd_runner.py index c5d6b957927..b06eeffa3d6 100644 --- a/tests/unit/plugins/module_utils/test_cmd_runner.py +++ b/tests/unit/plugins/module_utils/test_cmd_runner.py @@ -16,9 +16,24 @@ simple_boolean__true=(partial(cmd_runner_fmt.as_bool, "--superflag"), True, ["--superflag"], None), simple_boolean__false=(partial(cmd_runner_fmt.as_bool, "--superflag"), False, [], None), simple_boolean__none=(partial(cmd_runner_fmt.as_bool, "--superflag"), None, [], None), - simple_boolean_both__true=(partial(cmd_runner_fmt.as_bool, "--superflag", "--falseflag"), True, ["--superflag"], None), - simple_boolean_both__false=(partial(cmd_runner_fmt.as_bool, "--superflag", "--falseflag"), False, ["--falseflag"], None), - simple_boolean_both__none=(partial(cmd_runner_fmt.as_bool, "--superflag", "--falseflag"), None, ["--falseflag"], None), + simple_boolean_both__true=( + partial(cmd_runner_fmt.as_bool, "--superflag", "--falseflag"), + True, + ["--superflag"], + None, + ), + simple_boolean_both__false=( + partial(cmd_runner_fmt.as_bool, "--superflag", "--falseflag"), + False, + ["--falseflag"], + None, + ), + simple_boolean_both__none=( + partial(cmd_runner_fmt.as_bool, "--superflag", "--falseflag"), + None, + ["--falseflag"], + None, + ), simple_boolean_both__none_ig=(partial(cmd_runner_fmt.as_bool, "--superflag", "--falseflag", True), None, [], None), simple_boolean_not__true=(partial(cmd_runner_fmt.as_bool_not, "--superflag"), True, [], None), simple_boolean_not__false=(partial(cmd_runner_fmt.as_bool_not, "--superflag"), False, ["--superflag"], None), @@ -36,21 +51,56 @@ simple_list_min_len_fail=(partial(cmd_runner_fmt.as_list, min_len=10), 42, None, ValueError), simple_list_max_len_ok=(partial(cmd_runner_fmt.as_list, max_len=1), 42, ["42"], None), simple_list_max_len_fail=(partial(cmd_runner_fmt.as_list, max_len=2), [42, 42, 42], None, ValueError), - 
simple_map=(partial(cmd_runner_fmt.as_map, {'a': 1, 'b': 2, 'c': 3}), 'b', ["2"], None), - simple_fixed_true=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), True, ["--always-here", "--forever"], None), - simple_fixed_false=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), False, ["--always-here", "--forever"], None), - simple_fixed_none=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), None, ["--always-here", "--forever"], None), - simple_fixed_str=(partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), "something", ["--always-here", "--forever"], None), - stack_optval__str=(partial(cmd_runner_fmt.stack(cmd_runner_fmt.as_optval), "-t"), ["potatoes", "bananas"], ["-tpotatoes", "-tbananas"], None), - stack_opt_val__str=(partial(cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val), "-t"), ["potatoes", "bananas"], ["-t", "potatoes", "-t", "bananas"], None), - stack_opt_eq_val__int=(partial(cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_eq_val), "--answer"), [42, 17], ["--answer=42", "--answer=17"], None), + simple_map=(partial(cmd_runner_fmt.as_map, {"a": 1, "b": 2, "c": 3}), "b", ["2"], None), + simple_fixed_true=( + partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), + True, + ["--always-here", "--forever"], + None, + ), + simple_fixed_false=( + partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), + False, + ["--always-here", "--forever"], + None, + ), + simple_fixed_none=( + partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), + None, + ["--always-here", "--forever"], + None, + ), + simple_fixed_str=( + partial(cmd_runner_fmt.as_fixed, ["--always-here", "--forever"]), + "something", + ["--always-here", "--forever"], + None, + ), + stack_optval__str=( + partial(cmd_runner_fmt.stack(cmd_runner_fmt.as_optval), "-t"), + ["potatoes", "bananas"], + ["-tpotatoes", "-tbananas"], + None, + ), + stack_opt_val__str=( + partial(cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val), "-t"), + ["potatoes", "bananas"], + ["-t", "potatoes", "-t", "bananas"], + None, + ), + stack_opt_eq_val__int=( + partial(cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_eq_val), "--answer"), + [42, 17], + ["--answer=42", "--answer=17"], + None, + ), ) TC_FORMATS_IDS = sorted(TC_FORMATS.keys()) -@pytest.mark.parametrize('func, value, expected, exception', - (TC_FORMATS[tc] for tc in TC_FORMATS_IDS), - ids=TC_FORMATS_IDS) +@pytest.mark.parametrize( + "func, value, expected, exception", (TC_FORMATS[tc] for tc in TC_FORMATS_IDS), ids=TC_FORMATS_IDS +) def test_arg_format(func, value, expected, exception): fmt_func = func() try: @@ -120,14 +170,14 @@ def test_arg_format(func, value, expected, exception): bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"), ), runner_init_args=dict(), - runner_ctx_args=dict(args_order=['aa', 'bb']), + runner_ctx_args=dict(args_order=["aa", "bb"]), ), dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""), dict( run_info=dict( - cmd=['/mock/bin/testing', '--answer=11', '--bb-here'], - environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}, - args_order=('aa', 'bb'), + cmd=["/mock/bin/testing", "--answer=11", "--bb-here"], + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + args_order=("aa", "bb"), ), ), ), @@ -137,15 +187,15 @@ def test_arg_format(func, value, expected, exception): aa=dict(type="int", value=11, fmt_func=cmd_runner_fmt.as_opt_eq_val, fmt_arg="--answer"), bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"), ), - runner_init_args=dict(default_args_order=['bb', 'aa']), + 
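Every TC_FORMATS case above follows one shape: build a formatter, feed it a value, and compare the argv fragment it returns. A tiny analogue of the as_bool / as_opt_eq_val formatters — an illustration of the shape only, not cmd_runner_fmt's real code:

def as_bool(true_flag, false_flag=None):
    """Sketch: map truthiness to a flag; None/False fall back to the false flag, if any."""
    def fmt(value):
        if value:
            return [true_flag]
        return [] if false_flag is None else [false_flag]
    return fmt

def as_opt_eq_val(option):
    """Sketch: render a value as a single --option=value argument."""
    def fmt(value):
        return [f"{option}={value}"]
    return fmt

# Matches the simple_boolean* and opt_eq_val expectations above:
assert as_bool("--superflag")(True) == ["--superflag"]
assert as_bool("--superflag")(None) == []
assert as_bool("--superflag", "--falseflag")(None) == ["--falseflag"]
assert as_opt_eq_val("--answer")(42) == ["--answer=42"]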
runner_init_args=dict(default_args_order=["bb", "aa"]), runner_ctx_args=dict(), ), dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""), dict( run_info=dict( - cmd=['/mock/bin/testing', '--bb-here', '--answer=11'], - environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}, - args_order=('bb', 'aa'), + cmd=["/mock/bin/testing", "--bb-here", "--answer=11"], + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + args_order=("bb", "aa"), ), ), ), @@ -155,15 +205,15 @@ def test_arg_format(func, value, expected, exception): aa=dict(type="int", value=11, fmt_func=cmd_runner_fmt.as_opt_eq_val, fmt_arg="--answer"), bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"), ), - runner_init_args=dict(default_args_order=['bb', 'aa']), - runner_ctx_args=dict(args_order=['aa', 'bb']), + runner_init_args=dict(default_args_order=["bb", "aa"]), + runner_ctx_args=dict(args_order=["aa", "bb"]), ), dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""), dict( run_info=dict( - cmd=['/mock/bin/testing', '--answer=11', '--bb-here'], - environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}, - args_order=('aa', 'bb'), + cmd=["/mock/bin/testing", "--answer=11", "--bb-here"], + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + args_order=("aa", "bb"), ), ), ), @@ -174,12 +224,12 @@ def test_arg_format(func, value, expected, exception): bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"), ), runner_init_args=dict(), - runner_ctx_args=dict(args_order=['aa', 'bb', 'aa']), + runner_ctx_args=dict(args_order=["aa", "bb", "aa"]), ), dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""), dict( run_info=dict( - cmd=['/mock/bin/testing', '--answer=11', '--bb-here', '--answer=11'], + cmd=["/mock/bin/testing", "--answer=11", "--bb-here", "--answer=11"], ), ), ), @@ -189,18 +239,17 @@ def test_arg_format(func, value, expected, exception): aa=dict(type="int", value=11, fmt_func=cmd_runner_fmt.as_opt_eq_val, fmt_arg="--answer"), bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"), ), - runner_init_args=dict(default_args_order=['bb', 'aa']), + runner_init_args=dict(default_args_order=["bb", "aa"]), runner_ctx_args=dict( - args_order=['aa', 'bb'], - output_process=lambda rc, out, err: f"{rc!s}-/-{out}-/-{err}" + args_order=["aa", "bb"], output_process=lambda rc, out, err: f"{rc!s}-/-{out}-/-{err}" ), ), dict(runner_ctx_run_args=dict(bb=True), rc=0, out="ni", err="nu"), dict( run_info=dict( - cmd=['/mock/bin/testing', '--answer=11', '--bb-here'], + cmd=["/mock/bin/testing", "--answer=11", "--bb-here"], ), - results="0-/-ni-/-nu" + results="0-/-ni-/-nu", ), ), aa_bb_with_none=( @@ -209,15 +258,15 @@ def test_arg_format(func, value, expected, exception): aa=dict(type="int", value=49, fmt_func=cmd_runner_fmt.as_opt_eq_val, fmt_arg="--answer"), bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"), ), - runner_init_args=dict(default_args_order=['bb', 'aa']), + runner_init_args=dict(default_args_order=["bb", "aa"]), runner_ctx_args=dict( - args_order=['aa', 'bb'], + args_order=["aa", "bb"], ), ), dict(runner_ctx_run_args=dict(bb=None), rc=0, out="ni", err="nu"), dict( run_info=dict( - cmd=['/mock/bin/testing', '--answer=49'], + cmd=["/mock/bin/testing", "--answer=49"], ), ), ), @@ -228,14 +277,14 @@ def test_arg_format(func, value, expected, exception): bb=dict(fmt_func=cmd_runner_fmt.as_fixed, fmt_arg=["fixed", "args"]), ), runner_init_args=dict(), - runner_ctx_args=dict(args_order=['aa', 'bb']), + runner_ctx_args=dict(args_order=["aa", "bb"]), ), dict(runner_ctx_run_args=dict(), rc=0, out="", 
err=""), dict( run_info=dict( - cmd=['/mock/bin/testing', '--answer=11', 'fixed', 'args'], - environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}, - args_order=('aa', 'bb'), + cmd=["/mock/bin/testing", "--answer=11", "fixed", "args"], + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + args_order=("aa", "bb"), ), ), ), @@ -246,14 +295,14 @@ def test_arg_format(func, value, expected, exception): bb=dict(fmt_func=cmd_runner_fmt.as_map, fmt_arg={"v1": 111, "v2": 222}), ), runner_init_args=dict(), - runner_ctx_args=dict(args_order=['aa', 'bb']), + runner_ctx_args=dict(args_order=["aa", "bb"]), ), dict(runner_ctx_run_args=dict(bb="v2"), rc=0, out="", err=""), dict( run_info=dict( - cmd=['/mock/bin/testing', '--answer=11', '222'], - environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}, - args_order=('aa', 'bb'), + cmd=["/mock/bin/testing", "--answer=11", "222"], + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + args_order=("aa", "bb"), ), ), ), @@ -264,14 +313,14 @@ def test_arg_format(func, value, expected, exception): bb=dict(fmt_func=cmd_runner_fmt.as_map, fmt_arg={"v1": 111, "v2": 222}), ), runner_init_args=dict(), - runner_ctx_args=dict(args_order=['aa', 'bb']), + runner_ctx_args=dict(args_order=["aa", "bb"]), ), dict(runner_ctx_run_args=dict(bb="v123456789"), rc=0, out="", err=""), dict( run_info=dict( - cmd=['/mock/bin/testing', '--answer=11'], - environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}, - args_order=('aa', 'bb'), + cmd=["/mock/bin/testing", "--answer=11"], + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + args_order=("aa", "bb"), ), ), ), @@ -279,69 +328,64 @@ def test_arg_format(func, value, expected, exception): TC_RUNNER_IDS = sorted(TC_RUNNER.keys()) -@pytest.mark.parametrize('runner_input, cmd_execution, expected', - (TC_RUNNER[tc] for tc in TC_RUNNER_IDS), - ids=TC_RUNNER_IDS) +@pytest.mark.parametrize( + "runner_input, cmd_execution, expected", (TC_RUNNER[tc] for tc in TC_RUNNER_IDS), ids=TC_RUNNER_IDS +) def test_runner_context(runner_input, cmd_execution, expected): arg_spec = {} params = {} arg_formats = {} - for k, v in runner_input['args_bundle'].items(): + for k, v in runner_input["args_bundle"].items(): try: - arg_spec[k] = {'type': v['type']} + arg_spec[k] = {"type": v["type"]} except KeyError: pass try: - params[k] = v['value'] + params[k] = v["value"] except KeyError: pass try: - arg_formats[k] = v['fmt_func'](v['fmt_arg']) + arg_formats[k] = v["fmt_func"](v["fmt_arg"]) except KeyError: pass - orig_results = tuple(cmd_execution[x] for x in ('rc', 'out', 'err')) + orig_results = tuple(cmd_execution[x] for x in ("rc", "out", "err")) print(f"arg_spec={arg_spec}\nparams={params}\narg_formats={arg_formats}\n") module = MagicMock() type(module).argument_spec = PropertyMock(return_value=arg_spec) type(module).params = PropertyMock(return_value=params) - module.get_bin_path.return_value = '/mock/bin/testing' + module.get_bin_path.return_value = "/mock/bin/testing" module.run_command.return_value = orig_results - runner = CmdRunner( - module=module, - command="testing", - arg_formats=arg_formats, - **runner_input['runner_init_args'] - ) + runner = CmdRunner(module=module, command="testing", arg_formats=arg_formats, **runner_input["runner_init_args"]) def _assert_run_info(actual, expected): reduced = {k: actual[k] for k in expected.keys()} assert reduced == expected, f"{reduced}" def _assert_run(runner_input, cmd_execution, expected, ctx, results): - _assert_run_info(ctx.run_info, expected['run_info']) - assert results == expected.get('results', orig_results) + 
_assert_run_info(ctx.run_info, expected["run_info"]) + assert results == expected.get("results", orig_results) exc = expected.get("exc") if exc: with pytest.raises(exc): - with runner.context(**runner_input['runner_ctx_args']) as ctx: - results = ctx.run(**cmd_execution['runner_ctx_run_args']) + with runner.context(**runner_input["runner_ctx_args"]) as ctx: + results = ctx.run(**cmd_execution["runner_ctx_run_args"]) _assert_run(runner_input, cmd_execution, expected, ctx, results) with pytest.raises(exc): - with runner(**runner_input['runner_ctx_args']) as ctx2: - results2 = ctx2.run(**cmd_execution['runner_ctx_run_args']) + with runner(**runner_input["runner_ctx_args"]) as ctx2: + results2 = ctx2.run(**cmd_execution["runner_ctx_run_args"]) _assert_run(runner_input, cmd_execution, expected, ctx2, results2) else: - with runner.context(**runner_input['runner_ctx_args']) as ctx: - results = ctx.run(**cmd_execution['runner_ctx_run_args']) + with runner.context(**runner_input["runner_ctx_args"]) as ctx: + results = ctx.run(**cmd_execution["runner_ctx_run_args"]) _assert_run(runner_input, cmd_execution, expected, ctx, results) - with runner(**runner_input['runner_ctx_args']) as ctx2: - results2 = ctx2.run(**cmd_execution['runner_ctx_run_args']) + with runner(**runner_input["runner_ctx_args"]) as ctx2: + results2 = ctx2.run(**cmd_execution["runner_ctx_run_args"]) _assert_run(runner_input, cmd_execution, expected, ctx2, results2) diff --git a/tests/unit/plugins/module_utils/test_csv.py b/tests/unit/plugins/module_utils/test_csv.py index 918413f77d4..bbef9ee4a5d 100644 --- a/tests/unit/plugins/module_utils/test_csv.py +++ b/tests/unit/plugins/module_utils/test_csv.py @@ -1,4 +1,3 @@ - # Copyright (c) Ansible project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -14,7 +13,7 @@ VALID_CSV = [ ( - 'excel', + "excel", {}, None, "id,name,role\n1,foo,bar\n2,bar,baz", @@ -29,10 +28,10 @@ "name": "bar", "role": "baz", }, - ] + ], ), ( - 'excel', + "excel", {"skipinitialspace": True}, None, "id,name,role\n1, foo, bar\n2, bar, baz", @@ -47,11 +46,11 @@ "name": "bar", "role": "baz", }, - ] + ], ), ( - 'excel', - {"delimiter": '|'}, + "excel", + {"delimiter": "|"}, None, "id|name|role\n1|foo|bar\n2|bar|baz", [ @@ -65,10 +64,10 @@ "name": "bar", "role": "baz", }, - ] + ], ), ( - 'unix', + "unix", {}, None, "id,name,role\n1,foo,bar\n2,bar,baz", @@ -83,12 +82,12 @@ "name": "bar", "role": "baz", }, - ] + ], ), ( - 'excel', + "excel", {}, - ['id', 'name', 'role'], + ["id", "name", "role"], "1,foo,bar\n2,bar,baz", [ { @@ -101,14 +100,14 @@ "name": "bar", "role": "baz", }, - ] + ], ), ] INVALID_CSV = [ ( - 'excel', - {'strict': True}, + "excel", + {"strict": True}, None, 'id,name,role\n1,"f"oo",bar\n2,bar,baz', ), @@ -116,7 +115,7 @@ INVALID_DIALECT: list[tuple[str, t.Any, t.Any, str]] = [ ( - 'invalid', + "invalid", {}, None, "id,name,role\n1,foo,bar\n2,bar,baz", diff --git a/tests/unit/plugins/module_utils/test_database.py b/tests/unit/plugins/module_utils/test_database.py index 0f7f493b3ae..0adaf282804 100644 --- a/tests/unit/plugins/module_utils/test_database.py +++ b/tests/unit/plugins/module_utils/test_database.py @@ -19,15 +19,13 @@ '"public.table"': '"public.table"', '"public"."table"': '"public"."table"', '"schema test"."table test"': '"schema test"."table test"', - # We quote part - 'public.table': '"public"."table"', + "public.table": '"public"."table"', '"public".table': '"public"."table"', 
'public."table"': '"public"."table"', - 'schema test.table test': '"schema test"."table test"', + "schema test.table test": '"schema test"."table test"', '"schema test".table test': '"schema test"."table test"', 'schema test."table test"': '"schema test"."table test"', - # Embedded double quotes 'table "test"': '"table ""test"""', 'public."table ""test"""': '"public"."table ""test"""', @@ -40,7 +38,6 @@ 'schema."table': '"schema"."""table"', '"schema.table': '"""schema"."table"', 'schema."table.something': '"schema"."""table"."something"', - # Embedded dots '"schema.test"."table.test"': '"schema.test"."table.test"', '"schema.".table': '"schema."."table"', @@ -50,61 +47,61 @@ '"schema.".".table"': '"schema.".".table"', # These are valid but maybe not what the user intended '."table"': '".""table"""', - 'table.': '"table."', + "table.": '"table."', } INVALID = { - ('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots', - ('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots', - ('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots', - ('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots", - ('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots", - ('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots", - ('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots", - ('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes', - ('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes', - ('"schema."table"', 'table'): 'User escaped identifiers must escape extra quotes', - ('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot', + ("test.too.many.dots", "table"): "PostgreSQL does not support table with more than 3 dots", + ('"test.too".many.dots', "database"): "PostgreSQL does not support database with more than 1 dots", + ('test.too."many.dots"', "database"): "PostgreSQL does not support database with more than 1 dots", + ('"test"."too"."many"."dots"', "database"): "PostgreSQL does not support database with more than 1 dots", + ('"test"."too"."many"."dots"', "schema"): "PostgreSQL does not support schema with more than 2 dots", + ('"test"."too"."many"."dots"', "table"): "PostgreSQL does not support table with more than 3 dots", + ('"test"."too"."many"."dots"."for"."column"', "column"): "PostgreSQL does not support column with more than 4 dots", + ('"table "invalid" double quote"', "table"): "User escaped identifiers must escape extra quotes", + ('"schema "invalid"""."table "invalid"', "table"): "User escaped identifiers must escape extra quotes", + ('"schema."table"', "table"): "User escaped identifiers must escape extra quotes", + ('"schema".', "table"): "Identifier name unspecified or unquoted trailing dot", } HOW_MANY_DOTS = ( - ('role', 'role', '"role"', - 'PostgreSQL does not support role with more than 1 dots'), - ('db', 'database', '"db"', - 'PostgreSQL does not support database with more than 1 dots'), - ('db.schema', 'schema', '"db"."schema"', - 'PostgreSQL does not support schema with more than 2 dots'), - ('db.schema.table', 'table', '"db"."schema"."table"', - 'PostgreSQL does not support table with more than 3 dots'), - ('db.schema.table.column', 'column', 
'"db"."schema"."table"."column"', - 'PostgreSQL does not support column with more than 4 dots'), + ("role", "role", '"role"', "PostgreSQL does not support role with more than 1 dots"), + ("db", "database", '"db"', "PostgreSQL does not support database with more than 1 dots"), + ("db.schema", "schema", '"db"."schema"', "PostgreSQL does not support schema with more than 2 dots"), + ("db.schema.table", "table", '"db"."schema"."table"', "PostgreSQL does not support table with more than 3 dots"), + ( + "db.schema.table.column", + "column", + '"db"."schema"."table"."column"', + "PostgreSQL does not support column with more than 4 dots", + ), ) VALID_QUOTES = ((test, VALID[test]) for test in sorted(VALID)) INVALID_QUOTES = ((test[0], test[1], INVALID[test]) for test in sorted(INVALID)) IS_STRINGS_DANGEROUS = ( - ('', False), - (' ', False), - ('alternative database', False), - ('backup of TRUNCATED table', False), - ('bob.dropper', False), - ('d\'artagnan', False), - ('user_with_select_update_truncate_right', False), - (';DROP DATABASE fluffy_pets_photos', True), - (';drop DATABASE fluffy_pets_photos', True), - ('; TRUNCATE TABLE his_valuable_table', True), - ('; truncate TABLE his_valuable_table', True), - ('\'--', True), + ("", False), + (" ", False), + ("alternative database", False), + ("backup of TRUNCATED table", False), + ("bob.dropper", False), + ("d'artagnan", False), + ("user_with_select_update_truncate_right", False), + (";DROP DATABASE fluffy_pets_photos", True), + (";drop DATABASE fluffy_pets_photos", True), + ("; TRUNCATE TABLE his_valuable_table", True), + ("; truncate TABLE his_valuable_table", True), + ("'--", True), ('"--', True), - ('\' union select username, password from admin_credentials', True), - ('\' UNION SELECT username, password from admin_credentials', True), - ('\' intersect select', True), - ('\' INTERSECT select', True), - ('\' except select', True), - ('\' EXCEPT select', True), - (';ALTER TABLE prices', True), - (';alter table prices', True), + ("' union select username, password from admin_credentials", True), + ("' UNION SELECT username, password from admin_credentials", True), + ("' intersect select", True), + ("' INTERSECT select", True), + ("' except select", True), + ("' EXCEPT select", True), + (";ALTER TABLE prices", True), + (";alter table prices", True), ("; UPDATE products SET price = '0'", True), (";update products SET price = '0'", True), ("; DELETE FROM products", True), @@ -116,7 +113,7 @@ @pytest.mark.parametrize("identifier, quoted_identifier", VALID_QUOTES) def test_valid_quotes(identifier, quoted_identifier): - assert pg_quote_identifier(identifier, 'table') == quoted_identifier + assert pg_quote_identifier(identifier, "table") == quoted_identifier @pytest.mark.parametrize("identifier, id_type, msg", INVALID_QUOTES) @@ -132,7 +129,7 @@ def test_how_many_dots(identifier, id_type, quoted_identifier, msg): assert pg_quote_identifier(identifier, id_type) == quoted_identifier with pytest.raises(SQLParseError) as ex: - pg_quote_identifier(f'{identifier}.more', id_type) + pg_quote_identifier(f"{identifier}.more", id_type) ex.match(msg) diff --git a/tests/unit/plugins/module_utils/test_known_hosts.py b/tests/unit/plugins/module_utils/test_known_hosts.py index 6327e793ffa..b4c73c475a5 100644 --- a/tests/unit/plugins/module_utils/test_known_hosts.py +++ b/tests/unit/plugins/module_utils/test_known_hosts.py @@ -11,89 +11,94 @@ URLS = { - 'ssh://one.example.org/example.git': { - 'is_ssh_url': True, - 'get_fqdn': 'one.example.org', - 'add_host_key_cmd': " 
-t rsa one.example.org", - 'port': None, + "ssh://one.example.org/example.git": { + "is_ssh_url": True, + "get_fqdn": "one.example.org", + "add_host_key_cmd": " -t rsa one.example.org", + "port": None, }, - 'ssh+git://two.example.org/example.git': { - 'is_ssh_url': True, - 'get_fqdn': 'two.example.org', - 'add_host_key_cmd': " -t rsa two.example.org", - 'port': None, + "ssh+git://two.example.org/example.git": { + "is_ssh_url": True, + "get_fqdn": "two.example.org", + "add_host_key_cmd": " -t rsa two.example.org", + "port": None, }, - 'rsync://three.example.org/user/example.git': { - 'is_ssh_url': False, - 'get_fqdn': 'three.example.org', - 'add_host_key_cmd': None, # not called for non-ssh urls - 'port': None, + "rsync://three.example.org/user/example.git": { + "is_ssh_url": False, + "get_fqdn": "three.example.org", + "add_host_key_cmd": None, # not called for non-ssh urls + "port": None, }, - 'git@four.example.org:user/example.git': { - 'is_ssh_url': True, - 'get_fqdn': 'four.example.org', - 'add_host_key_cmd': " -t rsa four.example.org", - 'port': None, + "git@four.example.org:user/example.git": { + "is_ssh_url": True, + "get_fqdn": "four.example.org", + "add_host_key_cmd": " -t rsa four.example.org", + "port": None, }, - 'git+ssh://five.example.org/example.git': { - 'is_ssh_url': True, - 'get_fqdn': 'five.example.org', - 'add_host_key_cmd': " -t rsa five.example.org", - 'port': None, + "git+ssh://five.example.org/example.git": { + "is_ssh_url": True, + "get_fqdn": "five.example.org", + "add_host_key_cmd": " -t rsa five.example.org", + "port": None, }, - 'ssh://six.example.org:21/example.org': { + "ssh://six.example.org:21/example.org": { # ssh on FTP Port? - 'is_ssh_url': True, - 'get_fqdn': 'six.example.org', - 'add_host_key_cmd': " -t rsa -p 21 six.example.org", - 'port': '21', + "is_ssh_url": True, + "get_fqdn": "six.example.org", + "add_host_key_cmd": " -t rsa -p 21 six.example.org", + "port": "21", }, - 'ssh://[2001:DB8::abcd:abcd]/example.git': { - 'is_ssh_url': True, - 'get_fqdn': '[2001:DB8::abcd:abcd]', - 'add_host_key_cmd': " -t rsa [2001:DB8::abcd:abcd]", - 'port': None, + "ssh://[2001:DB8::abcd:abcd]/example.git": { + "is_ssh_url": True, + "get_fqdn": "[2001:DB8::abcd:abcd]", + "add_host_key_cmd": " -t rsa [2001:DB8::abcd:abcd]", + "port": None, }, - 'ssh://[2001:DB8::abcd:abcd]:22/example.git': { - 'is_ssh_url': True, - 'get_fqdn': '[2001:DB8::abcd:abcd]', - 'add_host_key_cmd': " -t rsa -p 22 [2001:DB8::abcd:abcd]", - 'port': '22', + "ssh://[2001:DB8::abcd:abcd]:22/example.git": { + "is_ssh_url": True, + "get_fqdn": "[2001:DB8::abcd:abcd]", + "add_host_key_cmd": " -t rsa -p 22 [2001:DB8::abcd:abcd]", + "port": "22", }, - 'username@[2001:DB8::abcd:abcd]/example.git': { - 'is_ssh_url': True, - 'get_fqdn': '[2001:DB8::abcd:abcd]', - 'add_host_key_cmd': " -t rsa [2001:DB8::abcd:abcd]", - 'port': None, + "username@[2001:DB8::abcd:abcd]/example.git": { + "is_ssh_url": True, + "get_fqdn": "[2001:DB8::abcd:abcd]", + "add_host_key_cmd": " -t rsa [2001:DB8::abcd:abcd]", + "port": None, }, - 'username@[2001:DB8::abcd:abcd]:path/example.git': { - 'is_ssh_url': True, - 'get_fqdn': '[2001:DB8::abcd:abcd]', - 'add_host_key_cmd': " -t rsa [2001:DB8::abcd:abcd]", - 'port': None, + "username@[2001:DB8::abcd:abcd]:path/example.git": { + "is_ssh_url": True, + "get_fqdn": "[2001:DB8::abcd:abcd]", + "add_host_key_cmd": " -t rsa [2001:DB8::abcd:abcd]", + "port": None, }, - 'ssh://internal.git.server:7999/repos/repo.git': { - 'is_ssh_url': True, - 'get_fqdn': 'internal.git.server', - 
'add_host_key_cmd': " -t rsa -p 7999 internal.git.server", - 'port': '7999', + "ssh://internal.git.server:7999/repos/repo.git": { + "is_ssh_url": True, + "get_fqdn": "internal.git.server", + "add_host_key_cmd": " -t rsa -p 7999 internal.git.server", + "port": "7999", }, } -@pytest.mark.parametrize('url, is_ssh_url', ((k, URLS[k]['is_ssh_url']) for k in sorted(URLS))) +@pytest.mark.parametrize("url, is_ssh_url", ((k, URLS[k]["is_ssh_url"]) for k in sorted(URLS))) def test_is_ssh_url(url, is_ssh_url): assert known_hosts.is_ssh_url(url) == is_ssh_url -@pytest.mark.parametrize('url, fqdn, port', ((k, URLS[k]['get_fqdn'], URLS[k]['port']) for k in sorted(URLS))) +@pytest.mark.parametrize("url, fqdn, port", ((k, URLS[k]["get_fqdn"], URLS[k]["port"]) for k in sorted(URLS))) def test_get_fqdn_and_port(url, fqdn, port): assert known_hosts.get_fqdn_and_port(url) == (fqdn, port) -@pytest.mark.parametrize('fqdn, port, add_host_key_cmd', - ((URLS[k]['get_fqdn'], URLS[k]['port'], URLS[k]['add_host_key_cmd']) - for k in sorted(URLS) if URLS[k]['is_ssh_url'])) +@pytest.mark.parametrize( + "fqdn, port, add_host_key_cmd", + ( + (URLS[k]["get_fqdn"], URLS[k]["port"], URLS[k]["add_host_key_cmd"]) + for k in sorted(URLS) + if URLS[k]["is_ssh_url"] + ), +) def test_add_host_key(mocker, fqdn, port, add_host_key_cmd): am = mocker.MagicMock() @@ -109,8 +114,8 @@ def test_add_host_key(mocker, fqdn, port, add_host_key_cmd): append_to_file.return_value = (None,) am.append_to_file = append_to_file - mocker.patch('os.path.isdir', return_value=True) - mocker.patch('os.path.exists', return_value=True) + mocker.patch("os.path.isdir", return_value=True) + mocker.patch("os.path.exists", return_value=True) known_hosts.add_host_key(am, fqdn, port=port) run_command.assert_called_with(keyscan_cmd + add_host_key_cmd) diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py index acf17f45615..5719de17fe3 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -7,9 +7,7 @@ import pytest -from ansible_collections.community.general.plugins.module_utils.module_helper import ( - cause_changes -) +from ansible_collections.community.general.plugins.module_utils.module_helper import cause_changes # @@ -17,7 +15,7 @@ # Parameters on_success and on_failure are deprecated and will be removed in community.general 12.0.0 # Remove testcases with those params when releasing 12.0.0 # -CAUSE_CHG_DECO_PARAMS = ['deco_args', 'expect_exception', 'expect_changed'] +CAUSE_CHG_DECO_PARAMS = ["deco_args", "expect_exception", "expect_changed"] CAUSE_CHG_DECO = dict( none_succ=dict(deco_args={}, expect_exception=False, expect_changed=None), none_fail=dict(deco_args={}, expect_exception=True, expect_changed=None), @@ -31,13 +29,12 @@ CAUSE_CHG_DECO_IDS = sorted(CAUSE_CHG_DECO.keys()) -@pytest.mark.parametrize(CAUSE_CHG_DECO_PARAMS, - [[CAUSE_CHG_DECO[tc][param] - for param in CAUSE_CHG_DECO_PARAMS] - for tc in CAUSE_CHG_DECO_IDS], - ids=CAUSE_CHG_DECO_IDS) +@pytest.mark.parametrize( + CAUSE_CHG_DECO_PARAMS, + [[CAUSE_CHG_DECO[tc][param] for param in CAUSE_CHG_DECO_PARAMS] for tc in CAUSE_CHG_DECO_IDS], + ids=CAUSE_CHG_DECO_IDS, +) def test_cause_changes_deco(deco_args, expect_exception, expect_changed): - class MockMH: changed = None diff --git a/tests/unit/plugins/module_utils/test_ocapi_utils.py b/tests/unit/plugins/module_utils/test_ocapi_utils.py index 2d8d11c0711..7fb0a491faf 100644 --- 
a/tests/unit/plugins/module_utils/test_ocapi_utils.py +++ b/tests/unit/plugins/module_utils/test_ocapi_utils.py @@ -16,11 +16,13 @@ class TestOcapiUtils(unittest.TestCase): def setUp(self): self.tempdir = tempfile.mkdtemp() - self.utils = OcapiUtils(creds={"user": "a_user", "pswd": "a_password"}, - base_uri="fakeUri", - proxy_slot_number=None, - timeout=30, - module=None) + self.utils = OcapiUtils( + creds={"user": "a_user", "pswd": "a_password"}, + base_uri="fakeUri", + proxy_slot_number=None, + timeout=30, + module=None, + ) def tearDown(self): shutil.rmtree(self.tempdir) @@ -29,8 +31,8 @@ def test_prepare_multipart_firmware_upload(self): # Generate a binary file and save it filename = "fake_firmware.bin" filepath = os.path.join(self.tempdir, filename) - file_contents = b'\x00\x01\x02\x03\x04' - with open(filepath, 'wb+') as f: + file_contents = b"\x00\x01\x02\x03\x04" + with open(filepath, "wb+") as f: f.write(file_contents) # Call prepare_mutipart_firmware_upload @@ -43,10 +45,10 @@ def test_prepare_multipart_firmware_upload(self): # Check the returned binary data boundary = m.group(1) - expected_content_text = f'--{boundary}\r\n' + expected_content_text = f"--{boundary}\r\n" expected_content_text += f'Content-Disposition: form-data; name="FirmwareFile"; filename="{filename}"\r\n' - expected_content_text += 'Content-Type: application/octet-stream\r\n\r\n' - expected_content_bytes = bytearray(expected_content_text, 'utf-8') + expected_content_text += "Content-Type: application/octet-stream\r\n\r\n" + expected_content_bytes = bytearray(expected_content_text, "utf-8") expected_content_bytes += file_contents - expected_content_bytes += bytearray(f'\r\n--{boundary}--', 'utf-8') + expected_content_bytes += bytearray(f"\r\n--{boundary}--", "utf-8") self.assertEqual(expected_content_bytes, b_form_data) diff --git a/tests/unit/plugins/module_utils/test_opennebula.py b/tests/unit/plugins/module_utils/test_opennebula.py index 7720a4c3e5d..4a30394e2e3 100644 --- a/tests/unit/plugins/module_utils/test_opennebula.py +++ b/tests/unit/plugins/module_utils/test_opennebula.py @@ -12,36 +12,12 @@ FLATTEN_VALID = [ - ( - [[[1]], [2], 3], - False, - [1, 2, 3] - ), - ( - [[[1]], [2], 3], - True, - [1, 2, 3] - ), - ( - [[1]], - False, - [1] - ), - ( - [[1]], - True, - 1 - ), - ( - 1, - False, - [1] - ), - ( - 1, - True, - 1 - ), + ([[[1]], [2], 3], False, [1, 2, 3]), + ([[[1]], [2], 3], True, [1, 2, 3]), + ([[1]], False, [1]), + ([[1]], True, 1), + (1, False, [1]), + (1, True, 1), ] RENDER_VALID = [ @@ -51,11 +27,11 @@ "CPU": 1, "MEMORY": 1024, }, - textwrap.dedent(''' + textwrap.dedent(""" CPU="1" MEMORY="1024" NIC=[NAME="NIC0",NETWORK_ID="0"] - ''').strip() + """).strip(), ), ( { @@ -66,35 +42,35 @@ "CPU": 1, "MEMORY": 1024, }, - textwrap.dedent(''' + textwrap.dedent(""" CPU="1" MEMORY="1024" NIC=[NAME="NIC0",NETWORK_ID="0"] NIC=[NAME="NIC1",NETWORK_ID="1"] - ''').strip() + """).strip(), ), ( { - 'EMPTY_VALUE': None, - 'SCHED_REQUIREMENTS': 'CLUSTER_ID="100"', - 'BACKSLASH_ESCAPED': "this is escaped: \\n; this isn't: \"\nend", + "EMPTY_VALUE": None, + "SCHED_REQUIREMENTS": 'CLUSTER_ID="100"', + "BACKSLASH_ESCAPED": "this is escaped: \\n; this isn't: \"\nend", }, - textwrap.dedent(''' + textwrap.dedent(""" BACKSLASH_ESCAPED="this is escaped: \\\\n; this isn't: \\" end" SCHED_REQUIREMENTS="CLUSTER_ID=\\"100\\"" - ''').strip() + """).strip(), ), ] -@pytest.mark.parametrize('to_flatten,extract,expected_result', FLATTEN_VALID) +@pytest.mark.parametrize("to_flatten,extract,expected_result", FLATTEN_VALID) def 
test_flatten(to_flatten, extract, expected_result): result = flatten(to_flatten, extract) assert result == expected_result, repr(result) -@pytest.mark.parametrize('to_render,expected_result', RENDER_VALID) +@pytest.mark.parametrize("to_render,expected_result", RENDER_VALID) def test_render(to_render, expected_result): result = render(to_render) assert result == expected_result, repr(result) diff --git a/tests/unit/plugins/module_utils/test_python_runner.py b/tests/unit/plugins/module_utils/test_python_runner.py index 4c0bc109c4a..bc18c983a91 100644 --- a/tests/unit/plugins/module_utils/test_python_runner.py +++ b/tests/unit/plugins/module_utils/test_python_runner.py @@ -70,14 +70,14 @@ bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"), ), runner_init_args=dict(command="testing"), - runner_ctx_args=dict(args_order=['aa', 'bb']), + runner_ctx_args=dict(args_order=["aa", "bb"]), ), dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""), dict( run_info=dict( - cmd=['/mock/bin/python', 'testing', '--answer=11', '--bb-here'], - environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}, - args_order=('aa', 'bb'), + cmd=["/mock/bin/python", "testing", "--answer=11", "--bb-here"], + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + args_order=("aa", "bb"), ), ), ), @@ -88,14 +88,14 @@ bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"), ), runner_init_args=dict(command="toasting", python="python3"), - runner_ctx_args=dict(args_order=['aa', 'bb']), + runner_ctx_args=dict(args_order=["aa", "bb"]), ), dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""), dict( run_info=dict( - cmd=['/mock/bin/python3', 'toasting', '--answer=11', '--bb-here'], - environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}, - args_order=('aa', 'bb'), + cmd=["/mock/bin/python3", "toasting", "--answer=11", "--bb-here"], + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + args_order=("aa", "bb"), ), ), ), @@ -106,14 +106,14 @@ bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"), ), runner_init_args=dict(command="toasting", python="/crazy/local/bin/python3"), - runner_ctx_args=dict(args_order=['aa', 'bb']), + runner_ctx_args=dict(args_order=["aa", "bb"]), ), dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""), dict( run_info=dict( - cmd=['/crazy/local/bin/python3', 'toasting', '--answer=11', '--bb-here'], - environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}, - args_order=('aa', 'bb'), + cmd=["/crazy/local/bin/python3", "toasting", "--answer=11", "--bb-here"], + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + args_order=("aa", "bb"), ), ), ), @@ -124,14 +124,14 @@ bb=dict(fmt_func=cmd_runner_fmt.as_bool, fmt_arg="--bb-here"), ), runner_init_args=dict(command="toasting", venv="/venv"), - runner_ctx_args=dict(args_order=['aa', 'bb']), + runner_ctx_args=dict(args_order=["aa", "bb"]), ), dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""), dict( run_info=dict( - cmd=['/venv/bin/python', 'toasting', '--answer=11', '--bb-here'], - environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C', 'VIRTUAL_ENV': '/venv', 'PATH': '/venv/bin'}, - args_order=('aa', 'bb'), + cmd=["/venv/bin/python", "toasting", "--answer=11", "--bb-here"], + environ_update={"LANGUAGE": "C", "LC_ALL": "C", "VIRTUAL_ENV": "/venv", "PATH": "/venv/bin"}, + args_order=("aa", "bb"), ), ), ), @@ -139,28 +139,28 @@ TC_RUNNER_IDS = sorted(TC_RUNNER.keys()) -@pytest.mark.parametrize('runner_input, cmd_execution, expected', - (TC_RUNNER[tc] for tc in TC_RUNNER_IDS), - ids=TC_RUNNER_IDS) +@pytest.mark.parametrize( + "runner_input, 
cmd_execution, expected", (TC_RUNNER[tc] for tc in TC_RUNNER_IDS), ids=TC_RUNNER_IDS +) def test_runner_context(runner_input, cmd_execution, expected): arg_spec = {} params = {} arg_formats = {} - for k, v in runner_input['args_bundle'].items(): + for k, v in runner_input["args_bundle"].items(): try: - arg_spec[k] = {'type': v['type']} + arg_spec[k] = {"type": v["type"]} except KeyError: pass try: - params[k] = v['value'] + params[k] = v["value"] except KeyError: pass try: - arg_formats[k] = v['fmt_func'](v['fmt_arg']) + arg_formats[k] = v["fmt_func"](v["fmt_arg"]) except KeyError: pass - orig_results = tuple(cmd_execution[x] for x in ('rc', 'out', 'err')) + orig_results = tuple(cmd_execution[x] for x in ("rc", "out", "err")) print(f"arg_spec={arg_spec}\nparams={params}\narg_formats={arg_formats}\n") @@ -170,24 +170,16 @@ def test_runner_context(runner_input, cmd_execution, expected): module.get_bin_path.return_value = os.path.join( runner_input["runner_init_args"].get("venv", "/mock"), "bin", - runner_input["runner_init_args"].get("python", "python") + runner_input["runner_init_args"].get("python", "python"), ) module.run_command.return_value = orig_results - runner = PythonRunner( - module=module, - arg_formats=arg_formats, - **runner_input['runner_init_args'] - ) + runner = PythonRunner(module=module, arg_formats=arg_formats, **runner_input["runner_init_args"]) def _extract_path(run_info): path = run_info.get("environ_update", {}).get("PATH") if path is not None: - run_info["environ_update"] = { - k: v - for k, v in run_info["environ_update"].items() - if k != "PATH" - } + run_info["environ_update"] = {k: v for k, v in run_info["environ_update"].items() if k != "PATH"} return run_info, path def _assert_run_info_env_path(actual, expected): @@ -203,17 +195,17 @@ def _assert_run_info(actual, expected): assert reduced == expected, f"{reduced}" def _assert_run(expected, ctx, results): - _assert_run_info(ctx.run_info, expected['run_info']) - assert results == expected.get('results', orig_results) + _assert_run_info(ctx.run_info, expected["run_info"]) + assert results == expected.get("results", orig_results) exc = expected.get("exc") if exc: with pytest.raises(exc): - with runner.context(**runner_input['runner_ctx_args']) as ctx: - results = ctx.run(**cmd_execution['runner_ctx_run_args']) + with runner.context(**runner_input["runner_ctx_args"]) as ctx: + results = ctx.run(**cmd_execution["runner_ctx_run_args"]) _assert_run(expected, ctx, results) else: - with runner.context(**runner_input['runner_ctx_args']) as ctx: - results = ctx.run(**cmd_execution['runner_ctx_run_args']) + with runner.context(**runner_input["runner_ctx_args"]) as ctx: + results = ctx.run(**cmd_execution["runner_ctx_run_args"]) _assert_run(expected, ctx, results) diff --git a/tests/unit/plugins/module_utils/test_saslprep.py b/tests/unit/plugins/module_utils/test_saslprep.py index 8feb209e23e..cc6229b8e42 100644 --- a/tests/unit/plugins/module_utils/test_saslprep.py +++ b/tests/unit/plugins/module_utils/test_saslprep.py @@ -11,45 +11,45 @@ VALID = [ - ('', ''), - ('\u00A0', ' '), - ('a', 'a'), - ('й', 'й'), - ('\u30DE\u30C8\u30EA\u30C3\u30AF\u30B9', '\u30DE\u30C8\u30EA\u30C3\u30AF\u30B9'), - ('The\u00ADM\u00AAtr\u2168', 'TheMatrIX'), - ('I\u00ADX', 'IX'), - ('user', 'user'), - ('USER', 'USER'), - ('\u00AA', 'a'), - ('\u2168', 'IX'), - ('\u05BE\u00A0\u05BE', '\u05BE\u0020\u05BE'), + ("", ""), + ("\u00a0", " "), + ("a", "a"), + ("й", "й"), + ("\u30de\u30c8\u30ea\u30c3\u30af\u30b9", "\u30de\u30c8\u30ea\u30c3\u30af\u30b9"), + 
("The\u00adM\u00aatr\u2168", "TheMatrIX"), + ("I\u00adX", "IX"), + ("user", "user"), + ("USER", "USER"), + ("\u00aa", "a"), + ("\u2168", "IX"), + ("\u05be\u00a0\u05be", "\u05be\u0020\u05be"), ] INVALID = [ (None, TypeError), - (b'', TypeError), - ('\u0221', ValueError), - ('\u0007', ValueError), - ('\u0627\u0031', ValueError), - ('\uE0001', ValueError), - ('\uE0020', ValueError), - ('\uFFF9', ValueError), - ('\uFDD0', ValueError), - ('\u0000', ValueError), - ('\u06DD', ValueError), - ('\uFFFFD', ValueError), - ('\uD800', ValueError), - ('\u200E', ValueError), - ('\u05BE\u00AA\u05BE', ValueError), + (b"", TypeError), + ("\u0221", ValueError), + ("\u0007", ValueError), + ("\u0627\u0031", ValueError), + ("\ue0001", ValueError), + ("\ue0020", ValueError), + ("\ufff9", ValueError), + ("\ufdd0", ValueError), + ("\u0000", ValueError), + ("\u06dd", ValueError), + ("\uffffD", ValueError), + ("\ud800", ValueError), + ("\u200e", ValueError), + ("\u05be\u00aa\u05be", ValueError), ] -@pytest.mark.parametrize('source,target', VALID) +@pytest.mark.parametrize("source,target", VALID) def test_saslprep_conversions(source, target): assert saslprep(source) == target -@pytest.mark.parametrize('source,exception', INVALID) +@pytest.mark.parametrize("source,exception", INVALID) def test_saslprep_exceptions(source, exception): with pytest.raises(exception) as ex: saslprep(source) diff --git a/tests/unit/plugins/module_utils/test_utm_utils.py b/tests/unit/plugins/module_utils/test_utm_utils.py index 8fc8292ffe0..56d7c697646 100644 --- a/tests/unit/plugins/module_utils/test_utm_utils.py +++ b/tests/unit/plugins/module_utils/test_utm_utils.py @@ -22,8 +22,15 @@ def __init__(self, params): def test_combine_headers_returns_only_default(): expected = {"Accept": "application/json", "Content-type": "application/json"} module = FakeModule( - params={'utm_protocol': 'utm_protocol', 'utm_host': 'utm_host', 'utm_port': 1234, 'utm_token': 'utm_token', - 'name': 'FakeName', 'headers': {}}) + params={ + "utm_protocol": "utm_protocol", + "utm_host": "utm_host", + "utm_port": 1234, + "utm_token": "utm_token", + "name": "FakeName", + "headers": {}, + } + ) result = UTM(module, "endpoint", [])._combine_headers() assert result == expected @@ -31,17 +38,29 @@ def test_combine_headers_returns_only_default(): def test_combine_headers_returns_only_default2(): expected = {"Accept": "application/json", "Content-type": "application/json"} module = FakeModule( - params={'utm_protocol': 'utm_protocol', 'utm_host': 'utm_host', 'utm_port': 1234, 'utm_token': 'utm_token', - 'name': 'FakeName'}) + params={ + "utm_protocol": "utm_protocol", + "utm_host": "utm_host", + "utm_port": 1234, + "utm_token": "utm_token", + "name": "FakeName", + } + ) result = UTM(module, "endpoint", [])._combine_headers() assert result == expected def test_combine_headers_returns_combined(): - expected = {"Accept": "application/json", "Content-type": "application/json", - "extraHeader": "extraHeaderValue"} - module = FakeModule(params={'utm_protocol': 'utm_protocol', 'utm_host': 'utm_host', 'utm_port': 1234, - 'utm_token': 'utm_token', 'name': 'FakeName', - "headers": {"extraHeader": "extraHeaderValue"}}) + expected = {"Accept": "application/json", "Content-type": "application/json", "extraHeader": "extraHeaderValue"} + module = FakeModule( + params={ + "utm_protocol": "utm_protocol", + "utm_host": "utm_host", + "utm_port": 1234, + "utm_token": "utm_token", + "name": "FakeName", + "headers": {"extraHeader": "extraHeaderValue"}, + } + ) result = UTM(module, "endpoint", 
[])._combine_headers() assert result == expected diff --git a/tests/unit/plugins/module_utils/test_vardict.py b/tests/unit/plugins/module_utils/test_vardict.py index 4d2ed2bb5f9..8fa22d6646b 100644 --- a/tests/unit/plugins/module_utils/test_vardict.py +++ b/tests/unit/plugins/module_utils/test_vardict.py @@ -80,7 +80,9 @@ def test_var_diff_dict(): vd.set("aa", 123, diff=True) vd.aa = 456 - assert vd.diff() == {"before": {"aa": 123, "dd": val_before}, "after": {"aa": 456, "dd": val_after}}, f"actual={vd.diff()}" + assert vd.diff() == {"before": {"aa": 123, "dd": val_before}, "after": {"aa": 456, "dd": val_after}}, ( + f"actual={vd.diff()}" + ) def test_vardict_set_meta(): diff --git a/tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py b/tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py index 84645acf598..ac22c3502b1 100644 --- a/tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py +++ b/tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py @@ -17,9 +17,7 @@ def __str__(self): class Session: - def __init__(self, uri, transport=None, encoding=None, verbose=0, - allow_none=1, ignore_ssl=False): - + def __init__(self, uri, transport=None, encoding=None, verbose=0, allow_none=1, ignore_ssl=False): self.transport = transport self._session = None self.last_login_method = None @@ -42,10 +40,10 @@ def _logout(self): self.API_version = FAKE_API_VERSION def xenapi_request(self, methodname, params): - if methodname.startswith('login'): + if methodname.startswith("login"): self._login(methodname, params) return None - elif methodname == 'logout' or methodname == 'session.logout': + elif methodname == "logout" or methodname == "session.logout": self._logout() return None else: @@ -53,14 +51,14 @@ def xenapi_request(self, methodname, params): return None def __getattr__(self, name): - if name == 'handle': + if name == "handle": return self._session - elif name == 'xenapi': + elif name == "xenapi": # Should be patched with mocker.patch(). return None - elif name.startswith('login') or name.startswith('slave_local'): + elif name.startswith("login") or name.startswith("slave_local"): return lambda *params: self._login(name, params) - elif name == 'logout': + elif name == "logout": return self._logout diff --git a/tests/unit/plugins/module_utils/xenserver/common.py b/tests/unit/plugins/module_utils/xenserver/common.py index 1e9f8e51dd3..a930c9c41da 100644 --- a/tests/unit/plugins/module_utils/xenserver/common.py +++ b/tests/unit/plugins/module_utils/xenserver/common.py @@ -13,12 +13,12 @@ def fake_xenapi_ref(xenapi_class): testcase_bad_xenapi_refs = { "params": [ None, - '', - 'OpaqueRef:NULL', + "", + "OpaqueRef:NULL", ], "ids": [ - 'none', - 'empty', - 'ref-null', + "none", + "empty", + "ref-null", ], } diff --git a/tests/unit/plugins/module_utils/xenserver/conftest.py b/tests/unit/plugins/module_utils/xenserver/conftest.py index 77645179cca..f2c62eb6726 100644 --- a/tests/unit/plugins/module_utils/xenserver/conftest.py +++ b/tests/unit/plugins/module_utils/xenserver/conftest.py @@ -20,7 +20,7 @@ @pytest.fixture def fake_ansible_module(request): """Returns fake AnsibleModule with fake module params.""" - if hasattr(request, 'param'): + if hasattr(request, "param"): return FakeAnsibleModule(request.param) else: params = { @@ -42,12 +42,14 @@ def XenAPI(): # First we use importlib.import_module() to import the module and assign # it to a local symbol. 
- fake_xenapi = importlib.import_module('ansible_collections.community.general.tests.unit.plugins.module_utils.xenserver.FakeXenAPI') + fake_xenapi = importlib.import_module( + "ansible_collections.community.general.tests.unit.plugins.module_utils.xenserver.FakeXenAPI" + ) # Now we populate Python module cache with imported fake module using the # original module name (XenAPI). That way, any 'import XenAPI' statement # will just load already imported fake module from the cache. - sys.modules['XenAPI'] = fake_xenapi + sys.modules["XenAPI"] = fake_xenapi return fake_xenapi @@ -83,7 +85,7 @@ def _get_child_mock(self, **kw): child_mock.side_effect = self.side_effect return child_mock - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', new=MagicMockSideEffect(), create=True) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", new=MagicMockSideEffect(), create=True) mocked_xenapi.side_effect = XenAPI.Failure(fake_error_msg) return mocked_xenapi, fake_error_msg @@ -92,10 +94,10 @@ def _get_child_mock(self, **kw): @pytest.fixture def fixture_data_from_file(request): """Loads fixture data from files.""" - if not hasattr(request, 'param'): + if not hasattr(request, "param"): return {} - fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') + fixture_path = os.path.join(os.path.dirname(__file__), "fixtures") fixture_data = {} if isinstance(request.param, str): diff --git a/tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py b/tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py index cbe5249f483..5029cbd5da1 100644 --- a/tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py +++ b/tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py @@ -25,7 +25,7 @@ } -@pytest.mark.parametrize('vm_ref', testcase_bad_xenapi_refs['params'], ids=testcase_bad_xenapi_refs['ids']) # type: ignore +@pytest.mark.parametrize("vm_ref", testcase_bad_xenapi_refs["params"], ids=testcase_bad_xenapi_refs["ids"]) # type: ignore def test_gather_vm_params_bad_vm_ref(fake_ansible_module, xenserver, vm_ref): """Tests return of empty dict on bad vm_ref.""" assert xenserver.gather_vm_params(fake_ansible_module, vm_ref) == {} @@ -37,13 +37,15 @@ def test_gather_vm_facts_no_vm_params(fake_ansible_module, xenserver): assert xenserver.gather_vm_facts(fake_ansible_module, {}) == {} -@pytest.mark.parametrize('fixture_data_from_file', - testcase_gather_vm_params_and_facts['params'], # type: ignore - ids=testcase_gather_vm_params_and_facts['ids'], # type: ignore - indirect=True) +@pytest.mark.parametrize( + "fixture_data_from_file", + testcase_gather_vm_params_and_facts["params"], # type: ignore + ids=testcase_gather_vm_params_and_facts["ids"], # type: ignore + indirect=True, +) def test_gather_vm_params_and_facts(mocker, fake_ansible_module, XenAPI, xenserver, fixture_data_from_file): """Tests proper parsing of VM parameters and facts.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True) if "params" in list(fixture_data_from_file.keys())[0]: params_file = list(fixture_data_from_file.keys())[0] @@ -53,21 +55,29 @@ def test_gather_vm_params_and_facts(mocker, fake_ansible_module, XenAPI, xenserv facts_file = list(fixture_data_from_file.keys())[0] mocked_returns = { - "VM.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VM'][obj_ref], - "VM_metrics.get_record.side_effect": lambda obj_ref: 
fixture_data_from_file[params_file]['VM_metrics'][obj_ref], - "VM_guest_metrics.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VM_guest_metrics'][obj_ref], - "VBD.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VBD'][obj_ref], - "VDI.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VDI'][obj_ref], - "SR.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['SR'][obj_ref], - "VIF.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VIF'][obj_ref], - "network.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['network'][obj_ref], - "host.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['host'][obj_ref], + "VM.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]["VM"][obj_ref], + "VM_metrics.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]["VM_metrics"][obj_ref], + "VM_guest_metrics.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file][ + "VM_guest_metrics" + ][obj_ref], + "VBD.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]["VBD"][obj_ref], + "VDI.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]["VDI"][obj_ref], + "SR.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]["SR"][obj_ref], + "VIF.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]["VIF"][obj_ref], + "network.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]["network"][obj_ref], + "host.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]["host"][obj_ref], } mocked_xenapi.configure_mock(**mocked_returns) - mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0]) + mocker.patch( + "ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version", + return_value=[7, 2, 0], + ) - vm_ref = list(fixture_data_from_file[params_file]['VM'].keys())[0] + vm_ref = list(fixture_data_from_file[params_file]["VM"].keys())[0] - assert xenserver.gather_vm_facts(fake_ansible_module, xenserver.gather_vm_params(fake_ansible_module, vm_ref)) == fixture_data_from_file[facts_file] + assert ( + xenserver.gather_vm_facts(fake_ansible_module, xenserver.gather_vm_params(fake_ansible_module, vm_ref)) + == fixture_data_from_file[facts_file] + ) diff --git a/tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py b/tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py index 80c9346b443..bcd8d3c3ceb 100644 --- a/tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py +++ b/tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py @@ -14,59 +14,67 @@ def test_get_object_ref_xenapi_failure(mocker, fake_ansible_module, XenAPI, xenserver): """Tests catching of XenAPI failures.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi_request', side_effect=XenAPI.Failure('Fake XAPI method call error!')) + mocked_xenapi = mocker.patch.object( + XenAPI.Session, "xenapi_request", side_effect=XenAPI.Failure("Fake XAPI method call error!") + ) with pytest.raises(FailJsonException) as exc_info: xenserver.get_object_ref(fake_ansible_module, "name") - assert exc_info.value.kwargs['msg'] == "XAPI ERROR: Fake XAPI method call error!" + assert exc_info.value.kwargs["msg"] == "XAPI ERROR: Fake XAPI method call error!" 
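
The assertions in these xenserver tests read exc_info.value.kwargs["msg"] rather than str(exc_info.value); that only works because the unit-test harness swaps fail_json() for a helper that raises and keeps its keyword arguments. Below is a minimal, self-contained sketch of that pattern, assuming nothing beyond pytest; FailJsonException and FakeAnsibleModule mirror the names of this collection's test helpers, but the bodies are illustrative reimplementations, not the real ones.

import pytest


class FailJsonException(Exception):
    """Raised in place of sys.exit() so a test can inspect fail_json() kwargs."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args)
        self.kwargs = kwargs


class FakeAnsibleModule:
    def fail_json(self, **kwargs):
        # The real AnsibleModule.fail_json() emits JSON and exits the process;
        # raising instead keeps the failure observable inside the test.
        raise FailJsonException(**kwargs)


def do_work(module):
    # Hypothetical code under test that reports a failure through the module.
    module.fail_json(msg="XAPI ERROR: Fake XAPI method call error!")


def test_fail_json_message():
    with pytest.raises(FailJsonException) as exc_info:
        do_work(FakeAnsibleModule())
    assert exc_info.value.kwargs["msg"].startswith("XAPI ERROR:")

Carrying the kwargs on the exception is what lets the tests above assert on the exact msg a module would have returned, rather than on a stringified traceback.
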
def test_get_object_ref_bad_uuid_and_name(mocker, fake_ansible_module, XenAPI, xenserver): """Tests failure on bad object uuid and/or name.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi_request') + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi_request") with pytest.raises(FailJsonException) as exc_info: xenserver.get_object_ref(fake_ansible_module, None, msg_prefix="Test: ") mocked_xenapi.xenapi_request.assert_not_called() - assert exc_info.value.kwargs['msg'] == "Test: no valid name or UUID supplied for VM!" + assert exc_info.value.kwargs["msg"] == "Test: no valid name or UUID supplied for VM!" def test_get_object_ref_uuid_not_found(mocker, fake_ansible_module, XenAPI, xenserver): """Tests when object is not found by uuid.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi_request', side_effect=XenAPI.Failure('Fake XAPI not found error!')) + mocked_xenapi = mocker.patch.object( + XenAPI.Session, "xenapi_request", side_effect=XenAPI.Failure("Fake XAPI not found error!") + ) with pytest.raises(FailJsonException) as exc_info: xenserver.get_object_ref(fake_ansible_module, "name", uuid="fake-uuid", msg_prefix="Test: ") - assert exc_info.value.kwargs['msg'] == "Test: VM with UUID 'fake-uuid' not found!" - assert xenserver.get_object_ref(fake_ansible_module, "name", uuid="fake-uuid", fail=False, msg_prefix="Test: ") is None + assert exc_info.value.kwargs["msg"] == "Test: VM with UUID 'fake-uuid' not found!" + assert ( + xenserver.get_object_ref(fake_ansible_module, "name", uuid="fake-uuid", fail=False, msg_prefix="Test: ") is None + ) def test_get_object_ref_name_not_found(mocker, fake_ansible_module, XenAPI, xenserver): """Tests when object is not found by name.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi_request', return_value=[]) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi_request", return_value=[]) with pytest.raises(FailJsonException) as exc_info: xenserver.get_object_ref(fake_ansible_module, "name", msg_prefix="Test: ") - assert exc_info.value.kwargs['msg'] == "Test: VM with name 'name' not found!" + assert exc_info.value.kwargs["msg"] == "Test: VM with name 'name' not found!" assert xenserver.get_object_ref(fake_ansible_module, "name", fail=False, msg_prefix="Test: ") is None def test_get_object_ref_name_multiple_found(mocker, fake_ansible_module, XenAPI, xenserver): """Tests when multiple objects are found by name.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi_request', return_value=[fake_xenapi_ref('VM'), fake_xenapi_ref('VM')]) + mocked_xenapi = mocker.patch.object( + XenAPI.Session, "xenapi_request", return_value=[fake_xenapi_ref("VM"), fake_xenapi_ref("VM")] + ) error_msg = "Test: multiple VMs with name 'name' found! Please use UUID." 
with pytest.raises(FailJsonException) as exc_info: xenserver.get_object_ref(fake_ansible_module, "name", msg_prefix="Test: ") - assert exc_info.value.kwargs['msg'] == error_msg + assert exc_info.value.kwargs["msg"] == error_msg with pytest.raises(FailJsonException) as exc_info: xenserver.get_object_ref(fake_ansible_module, "name", fail=False, msg_prefix="Test: ") - assert exc_info.value.kwargs['msg'] == error_msg + assert exc_info.value.kwargs["msg"] == error_msg diff --git a/tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py b/tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py index e070f77fee3..076c2e26471 100644 --- a/tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py +++ b/tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py @@ -11,171 +11,179 @@ from ansible.module_utils.common.network import is_mac testcase_is_valid_mac_addr = [ - ('A4-23-8D-F8-C9-E5', True), - ('35:71:F4:11:0B:D8', True), - ('b3-bd-20-59-0c-cf', True), - ('32:61:ca:65:f1:f4', True), - ('asdf', False), - ('A4-23-8D-G8-C9-E5', False), - ('A4-3-8D-F8-C9-E5', False), - ('A4-23-88D-F8-C9-E5', False), - ('A4-23-8D-F8-C9_E5', False), - ('A4-23--8D-F8-C9-E5', False), + ("A4-23-8D-F8-C9-E5", True), + ("35:71:F4:11:0B:D8", True), + ("b3-bd-20-59-0c-cf", True), + ("32:61:ca:65:f1:f4", True), + ("asdf", False), + ("A4-23-8D-G8-C9-E5", False), + ("A4-3-8D-F8-C9-E5", False), + ("A4-23-88D-F8-C9-E5", False), + ("A4-23-8D-F8-C9_E5", False), + ("A4-23--8D-F8-C9-E5", False), ] testcase_is_valid_ip_addr = [ - ('0.0.0.0', True), - ('10.0.0.1', True), - ('192.168.0.1', True), - ('255.255.255.255', True), - ('asdf', False), - ('a.b.c.d', False), - ('345.345.345.345', False), - ('-10.0.0.1', False), + ("0.0.0.0", True), + ("10.0.0.1", True), + ("192.168.0.1", True), + ("255.255.255.255", True), + ("asdf", False), + ("a.b.c.d", False), + ("345.345.345.345", False), + ("-10.0.0.1", False), ] testcase_is_valid_ip_netmask = [ - ('240.0.0.0', True), - ('255.224.0.0', True), - ('255.255.248.0', True), - ('255.255.255.255', True), - ('asdf', False), - ('a.b.c.d', False), - ('192.168.0.1', False), - ('255.0.248.0', False), + ("240.0.0.0", True), + ("255.224.0.0", True), + ("255.255.248.0", True), + ("255.255.255.255", True), + ("asdf", False), + ("a.b.c.d", False), + ("192.168.0.1", False), + ("255.0.248.0", False), ] testcase_is_valid_ip_prefix = [ - ('0', True), - ('16', True), - ('24', True), - ('32', True), - ('asdf', False), - ('-10', False), - ('60', False), - ('60s', False), + ("0", True), + ("16", True), + ("24", True), + ("32", True), + ("asdf", False), + ("-10", False), + ("60", False), + ("60s", False), ] testcase_ip_prefix_to_netmask = { "params": [ - ('0', '0.0.0.0'), - ('8', '255.0.0.0'), - ('11', '255.224.0.0'), - ('16', '255.255.0.0'), - ('21', '255.255.248.0'), - ('24', '255.255.255.0'), - ('26', '255.255.255.192'), - ('32', '255.255.255.255'), - ('a', ''), - ('60', ''), + ("0", "0.0.0.0"), + ("8", "255.0.0.0"), + ("11", "255.224.0.0"), + ("16", "255.255.0.0"), + ("21", "255.255.248.0"), + ("24", "255.255.255.0"), + ("26", "255.255.255.192"), + ("32", "255.255.255.255"), + ("a", ""), + ("60", ""), ], "ids": [ - '0', - '8', - '11', - '16', - '21', - '24', - '26', - '32', - 'a', - '60', + "0", + "8", + "11", + "16", + "21", + "24", + "26", + "32", + "a", + "60", ], } testcase_ip_netmask_to_prefix = { "params": [ - ('0.0.0.0', '0'), - ('255.0.0.0', '8'), - ('255.224.0.0', '11'), - ('255.255.0.0', '16'), - ('255.255.248.0', '21'), - ('255.255.255.0', '24'), - 
('255.255.255.192', '26'), - ('255.255.255.255', '32'), - ('a', ''), - ('60', ''), + ("0.0.0.0", "0"), + ("255.0.0.0", "8"), + ("255.224.0.0", "11"), + ("255.255.0.0", "16"), + ("255.255.248.0", "21"), + ("255.255.255.0", "24"), + ("255.255.255.192", "26"), + ("255.255.255.255", "32"), + ("a", ""), + ("60", ""), ], "ids": [ - '0.0.0.0', - '255.0.0.0', - '255.224.0.0', - '255.255.0.0', - '255.255.248.0', - '255.255.255.0', - '255.255.255.192', - '255.255.255.255', - 'a', - '60', + "0.0.0.0", + "255.0.0.0", + "255.224.0.0", + "255.255.0.0", + "255.255.248.0", + "255.255.255.0", + "255.255.255.192", + "255.255.255.255", + "a", + "60", ], } testcase_is_valid_ip6_addr = [ - ('::1', True), - ('2001:DB8:0:0:8:800:200C:417A', True), - ('2001:DB8::8:800:200C:417A', True), - ('FF01::101', True), - ('asdf', False), - ('2001:DB8:0:0:8:800:200C:417A:221', False), - ('FF01::101::2', False), - ('2001:db8:85a3::8a2e:370k:7334', False), + ("::1", True), + ("2001:DB8:0:0:8:800:200C:417A", True), + ("2001:DB8::8:800:200C:417A", True), + ("FF01::101", True), + ("asdf", False), + ("2001:DB8:0:0:8:800:200C:417A:221", False), + ("FF01::101::2", False), + ("2001:db8:85a3::8a2e:370k:7334", False), ] testcase_is_valid_ip6_prefix = [ - ('0', True), - ('56', True), - ('78', True), - ('128', True), - ('asdf', False), - ('-10', False), - ('345', False), - ('60s', False), + ("0", True), + ("56", True), + ("78", True), + ("128", True), + ("asdf", False), + ("-10", False), + ("345", False), + ("60s", False), ] -@pytest.mark.parametrize('mac_addr, result', testcase_is_valid_mac_addr) +@pytest.mark.parametrize("mac_addr, result", testcase_is_valid_mac_addr) def test_is_valid_mac_addr(xenserver, mac_addr, result): """Tests against examples of valid and invalid mac addresses.""" assert is_mac(mac_addr) is result -@pytest.mark.parametrize('ip_addr, result', testcase_is_valid_ip_addr) +@pytest.mark.parametrize("ip_addr, result", testcase_is_valid_ip_addr) def test_is_valid_ip_addr(xenserver, ip_addr, result): """Tests against examples of valid and invalid ip addresses.""" assert xenserver.is_valid_ip_addr(ip_addr) is result -@pytest.mark.parametrize('ip_netmask, result', testcase_is_valid_ip_netmask) +@pytest.mark.parametrize("ip_netmask, result", testcase_is_valid_ip_netmask) def test_is_valid_ip_netmask(xenserver, ip_netmask, result): """Tests against examples of valid and invalid ip netmasks.""" assert xenserver.is_valid_ip_netmask(ip_netmask) is result -@pytest.mark.parametrize('ip_prefix, result', testcase_is_valid_ip_prefix) +@pytest.mark.parametrize("ip_prefix, result", testcase_is_valid_ip_prefix) def test_is_valid_ip_prefix(xenserver, ip_prefix, result): """Tests against examples of valid and invalid ip prefixes.""" assert xenserver.is_valid_ip_prefix(ip_prefix) is result -@pytest.mark.parametrize('ip_prefix, ip_netmask', testcase_ip_prefix_to_netmask['params'], ids=testcase_ip_prefix_to_netmask['ids']) # type: ignore +@pytest.mark.parametrize( + "ip_prefix, ip_netmask", + testcase_ip_prefix_to_netmask["params"], # type: ignore + ids=testcase_ip_prefix_to_netmask["ids"], # type: ignore +) def test_ip_prefix_to_netmask(xenserver, ip_prefix, ip_netmask): """Tests ip prefix to netmask conversion.""" assert xenserver.ip_prefix_to_netmask(ip_prefix) == ip_netmask -@pytest.mark.parametrize('ip_netmask, ip_prefix', testcase_ip_netmask_to_prefix['params'], ids=testcase_ip_netmask_to_prefix['ids']) # type: ignore +@pytest.mark.parametrize( + "ip_netmask, ip_prefix", + testcase_ip_netmask_to_prefix["params"], # type: ignore + 
ids=testcase_ip_netmask_to_prefix["ids"], # type: ignore +) def test_ip_netmask_to_prefix(xenserver, ip_netmask, ip_prefix): """Tests ip netmask to prefix conversion.""" assert xenserver.ip_netmask_to_prefix(ip_netmask) == ip_prefix -@pytest.mark.parametrize('ip6_addr, result', testcase_is_valid_ip6_addr) +@pytest.mark.parametrize("ip6_addr, result", testcase_is_valid_ip6_addr) def test_is_valid_ip6_addr(xenserver, ip6_addr, result): """Tests against examples of valid and invalid ip6 addresses.""" assert xenserver.is_valid_ip6_addr(ip6_addr) is result -@pytest.mark.parametrize('ip6_prefix, result', testcase_is_valid_ip6_prefix) +@pytest.mark.parametrize("ip6_prefix, result", testcase_is_valid_ip6_prefix) def test_is_valid_ip6_prefix(xenserver, ip6_prefix, result): """Tests against examples of valid and invalid ip6 prefixes.""" assert xenserver.is_valid_ip6_prefix(ip6_prefix) is result diff --git a/tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py b/tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py index a0f0b0ca651..ec62a74fcc6 100644 --- a/tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py +++ b/tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py @@ -14,16 +14,16 @@ testcase_set_vm_power_state_bad_transitions = { "params": [ - ('restarted', 'Halted', "Cannot restart VM in state 'poweredoff'!"), - ('restarted', 'Suspended', "Cannot restart VM in state 'suspended'!"), - ('suspended', 'Halted', "Cannot suspend VM in state 'poweredoff'!"), - ('suspended', 'Paused', "Cannot suspend VM in state 'paused'!"), - ('shutdownguest', 'Halted', "Cannot shutdown guest when VM is in state 'poweredoff'!"), - ('shutdownguest', 'Suspended', "Cannot shutdown guest when VM is in state 'suspended'!"), - ('shutdownguest', 'Paused', "Cannot shutdown guest when VM is in state 'paused'!"), - ('rebootguest', 'Halted', "Cannot reboot guest when VM is in state 'poweredoff'!"), - ('rebootguest', 'Suspended', "Cannot reboot guest when VM is in state 'suspended'!"), - ('rebootguest', 'Paused', "Cannot reboot guest when VM is in state 'paused'!"), + ("restarted", "Halted", "Cannot restart VM in state 'poweredoff'!"), + ("restarted", "Suspended", "Cannot restart VM in state 'suspended'!"), + ("suspended", "Halted", "Cannot suspend VM in state 'poweredoff'!"), + ("suspended", "Paused", "Cannot suspend VM in state 'paused'!"), + ("shutdownguest", "Halted", "Cannot shutdown guest when VM is in state 'poweredoff'!"), + ("shutdownguest", "Suspended", "Cannot shutdown guest when VM is in state 'suspended'!"), + ("shutdownguest", "Paused", "Cannot shutdown guest when VM is in state 'paused'!"), + ("rebootguest", "Halted", "Cannot reboot guest when VM is in state 'poweredoff'!"), + ("rebootguest", "Suspended", "Cannot reboot guest when VM is in state 'suspended'!"), + ("rebootguest", "Paused", "Cannot reboot guest when VM is in state 'paused'!"), ], "ids": [ "poweredoff->restarted", @@ -41,8 +41,8 @@ testcase_set_vm_power_state_task_timeout = { "params": [ - ('shutdownguest', "Guest shutdown task failed: 'timeout'!"), - ('rebootguest', "Guest reboot task failed: 'timeout'!"), + ("shutdownguest", "Guest shutdown task failed: 'timeout'!"), + ("rebootguest", "Guest reboot task failed: 'timeout'!"), ], "ids": [ "shutdownguest-timeout", @@ -52,16 +52,16 @@ testcase_set_vm_power_state_no_transitions = { "params": [ - ('poweredon', "Running"), - ('Poweredon', "Running"), - ('powered-on', "Running"), - ('Powered_on', "Running"), - ('poweredoff', "Halted"), - 
('Poweredoff', "Halted"), - ('powered-off', "Halted"), - ('powered_off', "Halted"), - ('suspended', "Suspended"), - ('Suspended', "Suspended"), + ("poweredon", "Running"), + ("Poweredon", "Running"), + ("powered-on", "Running"), + ("Powered_on", "Running"), + ("poweredoff", "Halted"), + ("Poweredoff", "Halted"), + ("powered-off", "Halted"), + ("powered_off", "Halted"), + ("suspended", "Suspended"), + ("Suspended", "Suspended"), ], "ids": [ "poweredon", @@ -79,44 +79,44 @@ testcase_set_vm_power_state_transitions = { "params": [ - ('poweredon', 'Halted', 'running', 'VM.start'), - ('Poweredon', 'Halted', 'running', 'VM.start'), - ('powered-on', 'Halted', 'running', 'VM.start'), - ('Powered_on', 'Halted', 'running', 'VM.start'), - ('poweredon', 'Suspended', 'running', 'VM.resume'), - ('Poweredon', 'Suspended', 'running', 'VM.resume'), - ('powered-on', 'Suspended', 'running', 'VM.resume'), - ('Powered_on', 'Suspended', 'running', 'VM.resume'), - ('poweredon', 'Paused', 'running', 'VM.unpause'), - ('Poweredon', 'Paused', 'running', 'VM.unpause'), - ('powered-on', 'Paused', 'running', 'VM.unpause'), - ('Powered_on', 'Paused', 'running', 'VM.unpause'), - ('poweredoff', 'Running', 'halted', 'VM.hard_shutdown'), - ('Poweredoff', 'Running', 'halted', 'VM.hard_shutdown'), - ('powered-off', 'Running', 'halted', 'VM.hard_shutdown'), - ('powered_off', 'Running', 'halted', 'VM.hard_shutdown'), - ('poweredoff', 'Suspended', 'halted', 'VM.hard_shutdown'), - ('Poweredoff', 'Suspended', 'halted', 'VM.hard_shutdown'), - ('powered-off', 'Suspended', 'halted', 'VM.hard_shutdown'), - ('powered_off', 'Suspended', 'halted', 'VM.hard_shutdown'), - ('poweredoff', 'Paused', 'halted', 'VM.hard_shutdown'), - ('Poweredoff', 'Paused', 'halted', 'VM.hard_shutdown'), - ('powered-off', 'Paused', 'halted', 'VM.hard_shutdown'), - ('powered_off', 'Paused', 'halted', 'VM.hard_shutdown'), - ('restarted', 'Running', 'running', 'VM.hard_reboot'), - ('Restarted', 'Running', 'running', 'VM.hard_reboot'), - ('restarted', 'Paused', 'running', 'VM.hard_reboot'), - ('Restarted', 'Paused', 'running', 'VM.hard_reboot'), - ('suspended', 'Running', 'suspended', 'VM.suspend'), - ('Suspended', 'Running', 'suspended', 'VM.suspend'), - ('shutdownguest', 'Running', 'halted', 'VM.clean_shutdown'), - ('Shutdownguest', 'Running', 'halted', 'VM.clean_shutdown'), - ('shutdown-guest', 'Running', 'halted', 'VM.clean_shutdown'), - ('shutdown_guest', 'Running', 'halted', 'VM.clean_shutdown'), - ('rebootguest', 'Running', 'running', 'VM.clean_reboot'), - ('rebootguest', 'Running', 'running', 'VM.clean_reboot'), - ('reboot-guest', 'Running', 'running', 'VM.clean_reboot'), - ('reboot_guest', 'Running', 'running', 'VM.clean_reboot'), + ("poweredon", "Halted", "running", "VM.start"), + ("Poweredon", "Halted", "running", "VM.start"), + ("powered-on", "Halted", "running", "VM.start"), + ("Powered_on", "Halted", "running", "VM.start"), + ("poweredon", "Suspended", "running", "VM.resume"), + ("Poweredon", "Suspended", "running", "VM.resume"), + ("powered-on", "Suspended", "running", "VM.resume"), + ("Powered_on", "Suspended", "running", "VM.resume"), + ("poweredon", "Paused", "running", "VM.unpause"), + ("Poweredon", "Paused", "running", "VM.unpause"), + ("powered-on", "Paused", "running", "VM.unpause"), + ("Powered_on", "Paused", "running", "VM.unpause"), + ("poweredoff", "Running", "halted", "VM.hard_shutdown"), + ("Poweredoff", "Running", "halted", "VM.hard_shutdown"), + ("powered-off", "Running", "halted", "VM.hard_shutdown"), + ("powered_off", "Running", 
"halted", "VM.hard_shutdown"), + ("poweredoff", "Suspended", "halted", "VM.hard_shutdown"), + ("Poweredoff", "Suspended", "halted", "VM.hard_shutdown"), + ("powered-off", "Suspended", "halted", "VM.hard_shutdown"), + ("powered_off", "Suspended", "halted", "VM.hard_shutdown"), + ("poweredoff", "Paused", "halted", "VM.hard_shutdown"), + ("Poweredoff", "Paused", "halted", "VM.hard_shutdown"), + ("powered-off", "Paused", "halted", "VM.hard_shutdown"), + ("powered_off", "Paused", "halted", "VM.hard_shutdown"), + ("restarted", "Running", "running", "VM.hard_reboot"), + ("Restarted", "Running", "running", "VM.hard_reboot"), + ("restarted", "Paused", "running", "VM.hard_reboot"), + ("Restarted", "Paused", "running", "VM.hard_reboot"), + ("suspended", "Running", "suspended", "VM.suspend"), + ("Suspended", "Running", "suspended", "VM.suspend"), + ("shutdownguest", "Running", "halted", "VM.clean_shutdown"), + ("Shutdownguest", "Running", "halted", "VM.clean_shutdown"), + ("shutdown-guest", "Running", "halted", "VM.clean_shutdown"), + ("shutdown_guest", "Running", "halted", "VM.clean_shutdown"), + ("rebootguest", "Running", "running", "VM.clean_reboot"), + ("rebootguest", "Running", "running", "VM.clean_reboot"), + ("reboot-guest", "Running", "running", "VM.clean_reboot"), + ("reboot_guest", "Running", "running", "VM.clean_reboot"), ], "ids": [ "poweredoff->poweredon", @@ -162,14 +162,14 @@ testcase_set_vm_power_state_transitions_async = { "params": [ - ('shutdownguest', 'Running', 'halted', 'Async.VM.clean_shutdown'), - ('Shutdownguest', 'Running', 'halted', 'Async.VM.clean_shutdown'), - ('shutdown-guest', 'Running', 'halted', 'Async.VM.clean_shutdown'), - ('shutdown_guest', 'Running', 'halted', 'Async.VM.clean_shutdown'), - ('rebootguest', 'Running', 'running', 'Async.VM.clean_reboot'), - ('rebootguest', 'Running', 'running', 'Async.VM.clean_reboot'), - ('reboot-guest', 'Running', 'running', 'Async.VM.clean_reboot'), - ('reboot_guest', 'Running', 'running', 'Async.VM.clean_reboot'), + ("shutdownguest", "Running", "halted", "Async.VM.clean_shutdown"), + ("Shutdownguest", "Running", "halted", "Async.VM.clean_shutdown"), + ("shutdown-guest", "Running", "halted", "Async.VM.clean_shutdown"), + ("shutdown_guest", "Running", "halted", "Async.VM.clean_shutdown"), + ("rebootguest", "Running", "running", "Async.VM.clean_reboot"), + ("rebootguest", "Running", "running", "Async.VM.clean_reboot"), + ("reboot-guest", "Running", "running", "Async.VM.clean_reboot"), + ("reboot_guest", "Running", "running", "Async.VM.clean_reboot"), ], "ids": [ "poweredon->shutdownguest", @@ -184,26 +184,26 @@ } -@pytest.mark.parametrize('vm_ref', testcase_bad_xenapi_refs['params'], ids=testcase_bad_xenapi_refs['ids']) # type: ignore +@pytest.mark.parametrize("vm_ref", testcase_bad_xenapi_refs["params"], ids=testcase_bad_xenapi_refs["ids"]) # type: ignore def test_set_vm_power_state_bad_vm_ref(fake_ansible_module, xenserver, vm_ref): """Tests failure on bad vm_ref.""" with pytest.raises(FailJsonException) as exc_info: xenserver.set_vm_power_state(fake_ansible_module, vm_ref, None) - assert exc_info.value.kwargs['msg'] == "Cannot set VM power state. Invalid VM reference supplied!" + assert exc_info.value.kwargs["msg"] == "Cannot set VM power state. Invalid VM reference supplied!" 
def test_set_vm_power_state_xenapi_failure(mock_xenapi_failure, fake_ansible_module, xenserver): """Tests catching of XenAPI failures.""" with pytest.raises(FailJsonException) as exc_info: - xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), "poweredon") + xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref("VM"), "poweredon") - assert exc_info.value.kwargs['msg'] == f"XAPI ERROR: {mock_xenapi_failure[1]}" + assert exc_info.value.kwargs["msg"] == f"XAPI ERROR: {mock_xenapi_failure[1]}" def test_set_vm_power_state_bad_power_state(mocker, fake_ansible_module, XenAPI, xenserver): """Tests failure on unsupported power state.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True) mocked_returns = { "VM.get_power_state.return_value": "Running", @@ -212,21 +212,25 @@ def test_set_vm_power_state_bad_power_state(mocker, fake_ansible_module, XenAPI, mocked_xenapi.configure_mock(**mocked_returns) with pytest.raises(FailJsonException) as exc_info: - xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), "bad") + xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref("VM"), "bad") # Beside VM.get_power_state() no other method should have been # called additionally. assert len(mocked_xenapi.method_calls) == 1 - assert exc_info.value.kwargs['msg'] == "Requested VM power state 'bad' is unsupported!" + assert exc_info.value.kwargs["msg"] == "Requested VM power state 'bad' is unsupported!" -@pytest.mark.parametrize('power_state_desired, power_state_current, error_msg', - testcase_set_vm_power_state_bad_transitions['params'], # type: ignore - ids=testcase_set_vm_power_state_bad_transitions['ids']) # type: ignore -def test_set_vm_power_state_bad_transition(mocker, fake_ansible_module, XenAPI, xenserver, power_state_desired, power_state_current, error_msg): +@pytest.mark.parametrize( + "power_state_desired, power_state_current, error_msg", + testcase_set_vm_power_state_bad_transitions["params"], # type: ignore + ids=testcase_set_vm_power_state_bad_transitions["ids"], # type: ignore +) +def test_set_vm_power_state_bad_transition( + mocker, fake_ansible_module, XenAPI, xenserver, power_state_desired, power_state_current, error_msg +): """Tests failure on bad power state transition.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True) mocked_returns = { "VM.get_power_state.return_value": power_state_current, @@ -235,48 +239,56 @@ def test_set_vm_power_state_bad_transition(mocker, fake_ansible_module, XenAPI, mocked_xenapi.configure_mock(**mocked_returns) with pytest.raises(FailJsonException) as exc_info: - xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired) + xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref("VM"), power_state_desired) # Beside VM.get_power_state() no other method should have been # called additionally. 
assert len(mocked_xenapi.method_calls) == 1 - assert exc_info.value.kwargs['msg'] == error_msg + assert exc_info.value.kwargs["msg"] == error_msg -@pytest.mark.parametrize('power_state, error_msg', - testcase_set_vm_power_state_task_timeout['params'], # type: ignore - ids=testcase_set_vm_power_state_task_timeout['ids']) # type: ignore +@pytest.mark.parametrize( + "power_state, error_msg", + testcase_set_vm_power_state_task_timeout["params"], # type: ignore + ids=testcase_set_vm_power_state_task_timeout["ids"], # type: ignore +) def test_set_vm_power_state_task_timeout(mocker, fake_ansible_module, XenAPI, xenserver, power_state, error_msg): """Tests failure on async task timeout.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True) mocked_returns = { "VM.get_power_state.return_value": "Running", - "Async.VM.clean_shutdown.return_value": fake_xenapi_ref('task'), - "Async.VM.clean_reboot.return_value": fake_xenapi_ref('task'), + "Async.VM.clean_shutdown.return_value": fake_xenapi_ref("task"), + "Async.VM.clean_reboot.return_value": fake_xenapi_ref("task"), } mocked_xenapi.configure_mock(**mocked_returns) - mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.wait_for_task', return_value="timeout") + mocker.patch( + "ansible_collections.community.general.plugins.module_utils.xenserver.wait_for_task", return_value="timeout" + ) with pytest.raises(FailJsonException) as exc_info: - xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state, timeout=1) + xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref("VM"), power_state, timeout=1) # Beside VM.get_power_state() only one of Async.VM.clean_shutdown or # Async.VM.clean_reboot should have been called additionally. assert len(mocked_xenapi.method_calls) == 2 - assert exc_info.value.kwargs['msg'] == error_msg + assert exc_info.value.kwargs["msg"] == error_msg -@pytest.mark.parametrize('power_state_desired, power_state_current', - testcase_set_vm_power_state_no_transitions['params'], # type: ignore - ids=testcase_set_vm_power_state_no_transitions['ids']) # type: ignore -def test_set_vm_power_state_no_transition(mocker, fake_ansible_module, XenAPI, xenserver, power_state_desired, power_state_current): +@pytest.mark.parametrize( + "power_state_desired, power_state_current", + testcase_set_vm_power_state_no_transitions["params"], # type: ignore + ids=testcase_set_vm_power_state_no_transitions["ids"], # type: ignore +) +def test_set_vm_power_state_no_transition( + mocker, fake_ansible_module, XenAPI, xenserver, power_state_desired, power_state_current +): """Tests regular invocation without power state transition.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True) mocked_returns = { "VM.get_power_state.return_value": power_state_current, @@ -284,7 +296,7 @@ def test_set_vm_power_state_no_transition(mocker, fake_ansible_module, XenAPI, x mocked_xenapi.configure_mock(**mocked_returns) - result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired) + result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref("VM"), power_state_desired) # Beside VM.get_power_state() no other method should have been # called additionally. 
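The repeated assert len(mocked_xenapi.method_calls) == 1 checks lean on MagicMock recording every call made through its child attributes. A minimal sketch, independent of this patch:

from unittest.mock import MagicMock

mocked_xenapi = MagicMock()
mocked_xenapi.VM.get_power_state("OpaqueRef:fake")

# method_calls records calls made through child mocks, so its length
# shows that no XAPI method beyond the expected one was invoked.
assert len(mocked_xenapi.method_calls) == 1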
@@ -294,19 +306,23 @@ def test_set_vm_power_state_no_transition(mocker, fake_ansible_module, XenAPI, x assert result[1] == power_state_current.lower() -@pytest.mark.parametrize('power_state_desired, power_state_current, power_state_resulting, activated_xenapi_method', - testcase_set_vm_power_state_transitions['params'], # type: ignore - ids=testcase_set_vm_power_state_transitions['ids']) # type: ignore -def test_set_vm_power_state_transition(mocker, - fake_ansible_module, - XenAPI, - xenserver, - power_state_desired, - power_state_current, - power_state_resulting, - activated_xenapi_method): +@pytest.mark.parametrize( + "power_state_desired, power_state_current, power_state_resulting, activated_xenapi_method", + testcase_set_vm_power_state_transitions["params"], # type: ignore + ids=testcase_set_vm_power_state_transitions["ids"], # type: ignore +) +def test_set_vm_power_state_transition( + mocker, + fake_ansible_module, + XenAPI, + xenserver, + power_state_desired, + power_state_current, + power_state_resulting, + activated_xenapi_method, +): """Tests regular invocation with power state transition.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True) mocked_returns = { "VM.get_power_state.return_value": power_state_current, @@ -314,11 +330,11 @@ def test_set_vm_power_state_transition(mocker, mocked_xenapi.configure_mock(**mocked_returns) - result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired, timeout=0) + result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref("VM"), power_state_desired, timeout=0) mocked_xenapi_method = mocked_xenapi - for activated_xenapi_class in activated_xenapi_method.split('.'): + for activated_xenapi_class in activated_xenapi_method.split("."): mocked_xenapi_method = getattr(mocked_xenapi_method, activated_xenapi_class) mocked_xenapi_method.assert_called_once() @@ -331,37 +347,41 @@ def test_set_vm_power_state_transition(mocker, assert result[1] == power_state_resulting -@pytest.mark.parametrize('power_state_desired, power_state_current, power_state_resulting, activated_xenapi_method', - testcase_set_vm_power_state_transitions_async['params'], # type: ignore - ids=testcase_set_vm_power_state_transitions_async['ids']) # type: ignore -def test_set_vm_power_state_transition_async(mocker, - fake_ansible_module, - XenAPI, - xenserver, - power_state_desired, - power_state_current, - power_state_resulting, - activated_xenapi_method): +@pytest.mark.parametrize( + "power_state_desired, power_state_current, power_state_resulting, activated_xenapi_method", + testcase_set_vm_power_state_transitions_async["params"], # type: ignore + ids=testcase_set_vm_power_state_transitions_async["ids"], # type: ignore +) +def test_set_vm_power_state_transition_async( + mocker, + fake_ansible_module, + XenAPI, + xenserver, + power_state_desired, + power_state_current, + power_state_resulting, + activated_xenapi_method, +): """ Tests regular invocation with async power state transition (shutdownguest and rebootguest only). 
""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True) mocked_returns = { "VM.get_power_state.return_value": power_state_current, - f"{activated_xenapi_method}.return_value": fake_xenapi_ref('task'), + f"{activated_xenapi_method}.return_value": fake_xenapi_ref("task"), } mocked_xenapi.configure_mock(**mocked_returns) - mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.wait_for_task', return_value="") + mocker.patch("ansible_collections.community.general.plugins.module_utils.xenserver.wait_for_task", return_value="") - result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired, timeout=1) + result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref("VM"), power_state_desired, timeout=1) mocked_xenapi_method = mocked_xenapi - for activated_xenapi_class in activated_xenapi_method.split('.'): + for activated_xenapi_class in activated_xenapi_method.split("."): mocked_xenapi_method = getattr(mocked_xenapi_method, activated_xenapi_class) mocked_xenapi_method.assert_called_once() @@ -374,19 +394,23 @@ def test_set_vm_power_state_transition_async(mocker, assert result[1] == power_state_resulting -@pytest.mark.parametrize('power_state_desired, power_state_current, power_state_resulting, activated_xenapi_method', - testcase_set_vm_power_state_transitions['params'], # type: ignore - ids=testcase_set_vm_power_state_transitions['ids']) # type: ignore -def test_set_vm_power_state_transition_check_mode(mocker, - fake_ansible_module, - XenAPI, - xenserver, - power_state_desired, - power_state_current, - power_state_resulting, - activated_xenapi_method): +@pytest.mark.parametrize( + "power_state_desired, power_state_current, power_state_resulting, activated_xenapi_method", + testcase_set_vm_power_state_transitions["params"], # type: ignore + ids=testcase_set_vm_power_state_transitions["ids"], # type: ignore +) +def test_set_vm_power_state_transition_check_mode( + mocker, + fake_ansible_module, + XenAPI, + xenserver, + power_state_desired, + power_state_current, + power_state_resulting, + activated_xenapi_method, +): """Tests regular invocation with power state transition in check mode.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True) mocked_returns = { "VM.get_power_state.return_value": power_state_current, @@ -395,11 +419,11 @@ def test_set_vm_power_state_transition_check_mode(mocker, mocked_xenapi.configure_mock(**mocked_returns) fake_ansible_module.check_mode = True - result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired, timeout=0) + result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref("VM"), power_state_desired, timeout=0) mocked_xenapi_method = mocked_xenapi - for activated_xenapi_class in activated_xenapi_method.split('.'): + for activated_xenapi_class in activated_xenapi_method.split("."): mocked_xenapi_method = getattr(mocked_xenapi_method, activated_xenapi_class) mocked_xenapi_method.assert_not_called() diff --git a/tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py b/tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py index 7c934e81263..8028396dc79 100644 --- a/tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py +++ 
b/tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py @@ -14,71 +14,73 @@ testcase_wait_for_vm_ip_address_bad_power_states = { "params": [ - 'Halted', - 'Paused', - 'Suspended', - 'Other', + "Halted", + "Paused", + "Suspended", + "Other", ], "ids": [ - 'state-halted', - 'state-paused', - 'state-suspended', - 'state-other', - ] + "state-halted", + "state-paused", + "state-suspended", + "state-other", + ], } testcase_wait_for_vm_ip_address_bad_guest_metrics = { "params": [ - ('OpaqueRef:NULL', {"networks": {}}), - (fake_xenapi_ref('VM_guest_metrics'), {"networks": {}}), + ("OpaqueRef:NULL", {"networks": {}}), + (fake_xenapi_ref("VM_guest_metrics"), {"networks": {}}), ], "ids": [ - 'vm_guest_metrics_ref-null, no-ip', - 'vm_guest_metrics_ref-ok, no-ip', + "vm_guest_metrics_ref-null, no-ip", + "vm_guest_metrics_ref-ok, no-ip", ], } testcase_wait_for_task_all_statuses = { "params": [ - ('Success', ''), - ('Failure', 'failure'), - ('Cancelling', 'cancelling'), - ('Cancelled', 'cancelled'), - ('Other', 'other'), + ("Success", ""), + ("Failure", "failure"), + ("Cancelling", "cancelling"), + ("Cancelled", "cancelled"), + ("Other", "other"), ], "ids": [ - 'task-success', - 'task-failure', - 'task-cancelling', - 'task-cancelled', - 'task-other', - ] + "task-success", + "task-failure", + "task-cancelling", + "task-cancelled", + "task-other", + ], } -@pytest.mark.parametrize('vm_ref', testcase_bad_xenapi_refs['params'], ids=testcase_bad_xenapi_refs['ids']) # type: ignore +@pytest.mark.parametrize("vm_ref", testcase_bad_xenapi_refs["params"], ids=testcase_bad_xenapi_refs["ids"]) # type: ignore def test_wait_for_vm_ip_address_bad_vm_ref(fake_ansible_module, xenserver, vm_ref): """Tests failure on bad vm_ref.""" with pytest.raises(FailJsonException) as exc_info: xenserver.wait_for_vm_ip_address(fake_ansible_module, vm_ref) - assert exc_info.value.kwargs['msg'] == "Cannot wait for VM IP address. Invalid VM reference supplied!" + assert exc_info.value.kwargs["msg"] == "Cannot wait for VM IP address. Invalid VM reference supplied!" 
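Most tests in this file build their fake XAPI session with configure_mock() and dotted keys, where each dot descends into a child mock. A hedged, self-contained sketch (the values are illustrative):

from unittest.mock import MagicMock

mocked_xenapi = MagicMock()
mocked_xenapi.configure_mock(
    **{
        # The dotted key sets the return value of the nested attribute
        # mocked_xenapi.VM.get_power_state(...).
        "VM.get_power_state.return_value": "Running",
    }
)

assert mocked_xenapi.VM.get_power_state("OpaqueRef:fake") == "Running"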
def test_wait_for_vm_ip_address_xenapi_failure(mock_xenapi_failure, xenserver, fake_ansible_module): """Tests catching of XenAPI failures.""" with pytest.raises(FailJsonException) as exc_info: - xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref('VM')) + xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref("VM")) - assert exc_info.value.kwargs['msg'] == f"XAPI ERROR: {mock_xenapi_failure[1]}" + assert exc_info.value.kwargs["msg"] == f"XAPI ERROR: {mock_xenapi_failure[1]}" -@pytest.mark.parametrize('bad_power_state', - testcase_wait_for_vm_ip_address_bad_power_states['params'], - ids=testcase_wait_for_vm_ip_address_bad_power_states['ids']) +@pytest.mark.parametrize( + "bad_power_state", + testcase_wait_for_vm_ip_address_bad_power_states["params"], + ids=testcase_wait_for_vm_ip_address_bad_power_states["ids"], +) def test_wait_for_vm_ip_address_bad_power_state(mocker, fake_ansible_module, XenAPI, xenserver, bad_power_state): """Tests failure on bad power state.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True) mocked_returns = { "VM.get_power_state.return_value": bad_power_state, @@ -87,18 +89,23 @@ def test_wait_for_vm_ip_address_bad_power_state(mocker, fake_ansible_module, Xen mocked_xenapi.configure_mock(**mocked_returns) with pytest.raises(FailJsonException) as exc_info: - xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref('VM')) + xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref("VM")) - assert exc_info.value.kwargs['msg'] == ( - f"Cannot wait for VM IP address when VM is in state '{xenserver.xapi_to_module_vm_power_state(bad_power_state.lower())}'!") + assert exc_info.value.kwargs["msg"] == ( + f"Cannot wait for VM IP address when VM is in state '{xenserver.xapi_to_module_vm_power_state(bad_power_state.lower())}'!" + ) -@pytest.mark.parametrize('bad_guest_metrics_ref, bad_guest_metrics', - testcase_wait_for_vm_ip_address_bad_guest_metrics['params'], # type: ignore - ids=testcase_wait_for_vm_ip_address_bad_guest_metrics['ids']) # type: ignore -def test_wait_for_vm_ip_address_timeout(mocker, fake_ansible_module, XenAPI, xenserver, bad_guest_metrics_ref, bad_guest_metrics): +@pytest.mark.parametrize( + "bad_guest_metrics_ref, bad_guest_metrics", + testcase_wait_for_vm_ip_address_bad_guest_metrics["params"], # type: ignore + ids=testcase_wait_for_vm_ip_address_bad_guest_metrics["ids"], # type: ignore +) +def test_wait_for_vm_ip_address_timeout( + mocker, fake_ansible_module, XenAPI, xenserver, bad_guest_metrics_ref, bad_guest_metrics +): """Tests timeout.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True) mocked_returns = { "VM.get_power_state.return_value": "Running", @@ -108,17 +115,17 @@ def test_wait_for_vm_ip_address_timeout(mocker, fake_ansible_module, XenAPI, xen mocked_xenapi.configure_mock(**mocked_returns) - mocker.patch('time.sleep') + mocker.patch("time.sleep") with pytest.raises(FailJsonException) as exc_info: - xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref('VM'), timeout=1) + xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref("VM"), timeout=1) - assert exc_info.value.kwargs['msg'] == "Timed out waiting for VM IP address!" + assert exc_info.value.kwargs["msg"] == "Timed out waiting for VM IP address!" 
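The regular-invocation test that follows simulates the VM IP acquisition lifecycle by giving mocks a side_effect list; each call consumes the next element in order. An illustrative sketch:

from unittest.mock import MagicMock

get_guest_metrics = MagicMock(side_effect=["OpaqueRef:NULL", "OpaqueRef:1"])

assert get_guest_metrics() == "OpaqueRef:NULL"  # first poll: no metrics yet
assert get_guest_metrics() == "OpaqueRef:1"  # second poll: metrics appear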
def test_wait_for_vm_ip_address(mocker, fake_ansible_module, XenAPI, xenserver): """Tests regular invocation.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True) # This mock simulates regular VM IP acquirement lifecycle: # @@ -130,9 +137,9 @@ def test_wait_for_vm_ip_address(mocker, fake_ansible_module, XenAPI, xenserver): mocked_returns = { "VM.get_power_state.return_value": "Running", "VM.get_guest_metrics.side_effect": [ - 'OpaqueRef:NULL', - fake_xenapi_ref('VM_guest_metrics'), - fake_xenapi_ref('VM_guest_metrics'), + "OpaqueRef:NULL", + fake_xenapi_ref("VM_guest_metrics"), + fake_xenapi_ref("VM_guest_metrics"), ], "VM_guest_metrics.get_record.side_effect": [ { @@ -149,33 +156,33 @@ def test_wait_for_vm_ip_address(mocker, fake_ansible_module, XenAPI, xenserver): mocked_xenapi.configure_mock(**mocked_returns) - mocker.patch('time.sleep') + mocker.patch("time.sleep") - fake_guest_metrics = xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref('VM')) + fake_guest_metrics = xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref("VM")) - assert fake_guest_metrics == mocked_returns['VM_guest_metrics.get_record.side_effect'][1] + assert fake_guest_metrics == mocked_returns["VM_guest_metrics.get_record.side_effect"][1] -@pytest.mark.parametrize('task_ref', testcase_bad_xenapi_refs['params'], ids=testcase_bad_xenapi_refs['ids']) # type: ignore +@pytest.mark.parametrize("task_ref", testcase_bad_xenapi_refs["params"], ids=testcase_bad_xenapi_refs["ids"]) # type: ignore def test_wait_for_task_bad_task_ref(fake_ansible_module, xenserver, task_ref): """Tests failure on bad task_ref.""" with pytest.raises(FailJsonException) as exc_info: xenserver.wait_for_task(fake_ansible_module, task_ref) - assert exc_info.value.kwargs['msg'] == "Cannot wait for task. Invalid task reference supplied!" + assert exc_info.value.kwargs["msg"] == "Cannot wait for task. Invalid task reference supplied!" 
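Note that these polling tests patch time.sleep so their retry loops finish instantly; with pytest-mock this is a one-liner scoped to the test. A hedged sketch (the test name is hypothetical):

import time


def test_polling_is_instant(mocker):
    mocker.patch("time.sleep")  # pytest-mock undoes the patch after the test
    time.sleep(60)  # returns immediately while patched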
def test_wait_for_task_xenapi_failure(mock_xenapi_failure, fake_ansible_module, xenserver): """Tests catching of XenAPI failures.""" with pytest.raises(FailJsonException) as exc_info: - xenserver.wait_for_task(fake_ansible_module, fake_xenapi_ref('task')) + xenserver.wait_for_task(fake_ansible_module, fake_xenapi_ref("task")) - assert exc_info.value.kwargs['msg'] == f"XAPI ERROR: {mock_xenapi_failure[1]}" + assert exc_info.value.kwargs["msg"] == f"XAPI ERROR: {mock_xenapi_failure[1]}" def test_wait_for_task_timeout(mocker, fake_ansible_module, XenAPI, xenserver): """Tests timeout.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True) mocked_returns = { "task.get_status.return_value": "Pending", @@ -184,26 +191,28 @@ def test_wait_for_task_timeout(mocker, fake_ansible_module, XenAPI, xenserver): mocked_xenapi.configure_mock(**mocked_returns) - mocker.patch('time.sleep') + mocker.patch("time.sleep") - fake_result = xenserver.wait_for_task(fake_ansible_module, fake_xenapi_ref('task'), timeout=1) + fake_result = xenserver.wait_for_task(fake_ansible_module, fake_xenapi_ref("task"), timeout=1) mocked_xenapi.task.destroy.assert_called_once() assert fake_result == "timeout" -@pytest.mark.parametrize('task_status, result', - testcase_wait_for_task_all_statuses['params'], # type: ignore - ids=testcase_wait_for_task_all_statuses['ids']) # type: ignore +@pytest.mark.parametrize( + "task_status, result", + testcase_wait_for_task_all_statuses["params"], # type: ignore + ids=testcase_wait_for_task_all_statuses["ids"], # type: ignore +) def test_wait_for_task(mocker, fake_ansible_module, XenAPI, xenserver, task_status, result): """Tests regular invocation.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True) # Mock will first return Pending status and on second invocation it will # return one of possible final statuses. 
mocked_returns = { "task.get_status.side_effect": [ - 'Pending', + "Pending", task_status, ], "task.destroy.return_value": None, @@ -211,9 +220,9 @@ def test_wait_for_task(mocker, fake_ansible_module, XenAPI, xenserver, task_stat mocked_xenapi.configure_mock(**mocked_returns) - mocker.patch('time.sleep') + mocker.patch("time.sleep") - fake_result = xenserver.wait_for_task(fake_ansible_module, fake_xenapi_ref('task')) + fake_result = xenserver.wait_for_task(fake_ansible_module, fake_xenapi_ref("task")) mocked_xenapi.task.destroy.assert_called_once() assert fake_result == result diff --git a/tests/unit/plugins/module_utils/xenserver/test_xapi.py b/tests/unit/plugins/module_utils/xenserver/test_xapi.py index 0dd4def027c..868589545f2 100644 --- a/tests/unit/plugins/module_utils/xenserver/test_xapi.py +++ b/tests/unit/plugins/module_utils/xenserver/test_xapi.py @@ -63,24 +63,34 @@ } -@pytest.mark.parametrize('fake_ansible_module', testcase_module_local_conn['params'], ids=testcase_module_local_conn['ids'], indirect=True) # type: ignore +@pytest.mark.parametrize( + "fake_ansible_module", + testcase_module_local_conn["params"], # type: ignore + ids=testcase_module_local_conn["ids"], # type: ignore + indirect=True, +) def test_xapi_connect_local_session(mocker, fake_ansible_module, XenAPI, xenserver): """Tests that connection to localhost uses XenAPI.xapi_local() function.""" - mocker.patch('XenAPI.xapi_local') + mocker.patch("XenAPI.xapi_local") xapi_session = xenserver.XAPI.connect(fake_ansible_module) XenAPI.xapi_local.assert_called_once() -@pytest.mark.parametrize('fake_ansible_module', testcase_module_local_conn['params'], ids=testcase_module_local_conn['ids'], indirect=True) # type: ignore +@pytest.mark.parametrize( + "fake_ansible_module", + testcase_module_local_conn["params"], # type: ignore + ids=testcase_module_local_conn["ids"], # type: ignore + indirect=True, +) def test_xapi_connect_local_login(mocker, fake_ansible_module, XenAPI, xenserver): """Tests that connection to localhost uses empty username and password.""" - mocker.patch.object(XenAPI.Session, 'login_with_password', create=True) + mocker.patch.object(XenAPI.Session, "login_with_password", create=True) xapi_session = xenserver.XAPI.connect(fake_ansible_module) - XenAPI.Session.login_with_password.assert_called_once_with('', '', ANSIBLE_VERSION, 'Ansible') + XenAPI.Session.login_with_password.assert_called_once_with("", "", ANSIBLE_VERSION, "Ansible") def test_xapi_connect_login(mocker, fake_ansible_module, XenAPI, xenserver): @@ -88,80 +98,88 @@ def test_xapi_connect_login(mocker, fake_ansible_module, XenAPI, xenserver): Tests that username and password are properly propagated to XenAPI.Session.login_with_password() function. 
""" - mocker.patch.object(XenAPI.Session, 'login_with_password', create=True) + mocker.patch.object(XenAPI.Session, "login_with_password", create=True) xapi_session = xenserver.XAPI.connect(fake_ansible_module) - username = fake_ansible_module.params['username'] - password = fake_ansible_module.params['password'] + username = fake_ansible_module.params["username"] + password = fake_ansible_module.params["password"] - XenAPI.Session.login_with_password.assert_called_once_with(username, password, ANSIBLE_VERSION, 'Ansible') + XenAPI.Session.login_with_password.assert_called_once_with(username, password, ANSIBLE_VERSION, "Ansible") def test_xapi_connect_login_failure(mocker, fake_ansible_module, XenAPI, xenserver): """Tests that login failure is properly handled.""" fake_error_msg = "Fake XAPI login error!" - mocked_login = mocker.patch.object(XenAPI.Session, 'login_with_password', create=True) + mocked_login = mocker.patch.object(XenAPI.Session, "login_with_password", create=True) mocked_login.side_effect = XenAPI.Failure(fake_error_msg) - hostname = fake_ansible_module.params['hostname'] - username = fake_ansible_module.params['username'] + hostname = fake_ansible_module.params["hostname"] + username = fake_ansible_module.params["username"] with pytest.raises(FailJsonException) as exc_info: xapi_session = xenserver.XAPI.connect(fake_ansible_module) - assert exc_info.value.kwargs['msg'] == f"Unable to log on to XenServer at http://{hostname} as {username}: {fake_error_msg}" + assert ( + exc_info.value.kwargs["msg"] + == f"Unable to log on to XenServer at http://{hostname} as {username}: {fake_error_msg}" + ) @pytest.mark.parametrize( - 'fake_ansible_module', - testcase_module_remote_conn_scheme['params'], # type: ignore - ids=testcase_module_remote_conn_scheme['ids'], # type: ignore + "fake_ansible_module", + testcase_module_remote_conn_scheme["params"], # type: ignore + ids=testcase_module_remote_conn_scheme["ids"], # type: ignore indirect=True, ) def test_xapi_connect_remote_scheme(mocker, fake_ansible_module, XenAPI, xenserver): """Tests that explicit scheme in hostname param is preserved.""" - mocker.patch('XenAPI.Session') + mocker.patch("XenAPI.Session") xapi_session = xenserver.XAPI.connect(fake_ansible_module) - hostname = fake_ansible_module.params['hostname'] - ignore_ssl = not fake_ansible_module.params['validate_certs'] + hostname = fake_ansible_module.params["hostname"] + ignore_ssl = not fake_ansible_module.params["validate_certs"] XenAPI.Session.assert_called_once_with(hostname, ignore_ssl=ignore_ssl) -@pytest.mark.parametrize('fake_ansible_module', testcase_module_remote_conn['params'], ids=testcase_module_remote_conn['ids'], indirect=True) # type: ignore +@pytest.mark.parametrize( + "fake_ansible_module", + testcase_module_remote_conn["params"], # type: ignore + ids=testcase_module_remote_conn["ids"], # type: ignore + indirect=True, +) def test_xapi_connect_remote_no_scheme(mocker, fake_ansible_module, XenAPI, xenserver): """Tests that proper scheme is prepended to hostname without scheme.""" - mocker.patch('XenAPI.Session') + mocker.patch("XenAPI.Session") xapi_session = xenserver.XAPI.connect(fake_ansible_module) - hostname = fake_ansible_module.params['hostname'] - ignore_ssl = not fake_ansible_module.params['validate_certs'] + hostname = fake_ansible_module.params["hostname"] + ignore_ssl = not fake_ansible_module.params["validate_certs"] XenAPI.Session.assert_called_once_with(f"http://{hostname}", ignore_ssl=ignore_ssl) def test_xapi_connect_support_ignore_ssl(mocker, 
fake_ansible_module, XenAPI, xenserver): """Tests proper handling of ignore_ssl support.""" - mocked_session = mocker.patch('XenAPI.Session') + mocked_session = mocker.patch("XenAPI.Session") mocked_session.side_effect = TypeError() with pytest.raises(TypeError) as exc_info: xapi_session = xenserver.XAPI.connect(fake_ansible_module) - hostname = fake_ansible_module.params['hostname'] - ignore_ssl = not fake_ansible_module.params['validate_certs'] + hostname = fake_ansible_module.params["hostname"] + ignore_ssl = not fake_ansible_module.params["validate_certs"] XenAPI.Session.assert_called_with(f"http://{hostname}") def test_xapi_connect_no_disconnect_atexit(mocker, fake_ansible_module, XenAPI, xenserver): """Tests skipping registration of atexit disconnect handler.""" - mocker.patch('atexit.register') + mocker.patch("atexit.register") xapi_session = xenserver.XAPI.connect(fake_ansible_module, disconnect_atexit=False) @@ -170,7 +188,7 @@ def test_xapi_connect_no_disconnect_atexit(mocker, fake_ansible_module, XenAPI, def test_xapi_connect_singleton(mocker, fake_ansible_module, XenAPI, xenserver): """Tests if XAPI.connect() returns singleton.""" - mocker.patch('XenAPI.Session') + mocker.patch("XenAPI.Session") xapi_session1 = xenserver.XAPI.connect(fake_ansible_module) xapi_session2 = xenserver.XAPI.connect(fake_ansible_module) diff --git a/tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py b/tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py index a99dc591acf..1d286ece49c 100644 --- a/tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py +++ b/tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py @@ -14,12 +14,12 @@ def test_xenserverobject_xenapi_lib_detection(mocker, fake_ansible_module, xenserver): """Tests XenAPI lib detection code.""" - mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.HAS_XENAPI', new=False) + mocker.patch("ansible_collections.community.general.plugins.module_utils.xenserver.HAS_XENAPI", new=False) with pytest.raises(FailJsonException) as exc_info: xenserver.XenServerObject(fake_ansible_module) - assert 'Failed to import the required Python library (XenAPI) on' in exc_info.value.kwargs['msg'] + assert "Failed to import the required Python library (XenAPI) on" in exc_info.value.kwargs["msg"] def test_xenserverobject_xenapi_failure(mock_xenapi_failure, fake_ansible_module, xenserver): @@ -27,17 +27,17 @@ def test_xenserverobject_xenapi_failure(mock_xenapi_failure, fake_ansible_module with pytest.raises(FailJsonException) as exc_info: xenserver.XenServerObject(fake_ansible_module) - assert exc_info.value.kwargs['msg'] == f"XAPI ERROR: {mock_xenapi_failure[1]}" + assert exc_info.value.kwargs["msg"] == f"XAPI ERROR: {mock_xenapi_failure[1]}" def test_xenserverobject(mocker, fake_ansible_module, XenAPI, xenserver): """Tests successful creation of XenServerObject.""" - mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True) + mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True) mocked_returns = { - "pool.get_all.return_value": [fake_xenapi_ref('pool')], - "pool.get_default_SR.return_value": fake_xenapi_ref('SR'), - "session.get_this_host.return_value": fake_xenapi_ref('host'), + "pool.get_all.return_value": [fake_xenapi_ref("pool")], + "pool.get_default_SR.return_value": fake_xenapi_ref("SR"), + "session.get_this_host.return_value": fake_xenapi_ref("host"), "host.get_software_version.return_value": {"product_version": "7.2.0"}, } @@ -45,5 +45,5 @@ 
def test_xenserverobject(mocker, fake_ansible_module, XenAPI, xenserver): xso = xenserver.XenServerObject(fake_ansible_module) - assert xso.pool_ref == fake_xenapi_ref('pool') + assert xso.pool_ref == fake_xenapi_ref("pool") assert xso.xenserver_version == [7, 2, 0] diff --git a/tests/unit/plugins/modules/FakeXenAPI.py b/tests/unit/plugins/modules/FakeXenAPI.py index 84645acf598..ac22c3502b1 100644 --- a/tests/unit/plugins/modules/FakeXenAPI.py +++ b/tests/unit/plugins/modules/FakeXenAPI.py @@ -17,9 +17,7 @@ def __str__(self): class Session: - def __init__(self, uri, transport=None, encoding=None, verbose=0, - allow_none=1, ignore_ssl=False): - + def __init__(self, uri, transport=None, encoding=None, verbose=0, allow_none=1, ignore_ssl=False): self.transport = transport self._session = None self.last_login_method = None @@ -42,10 +40,10 @@ def _logout(self): self.API_version = FAKE_API_VERSION def xenapi_request(self, methodname, params): - if methodname.startswith('login'): + if methodname.startswith("login"): self._login(methodname, params) return None - elif methodname == 'logout' or methodname == 'session.logout': + elif methodname == "logout" or methodname == "session.logout": self._logout() return None else: @@ -53,14 +51,14 @@ def xenapi_request(self, methodname, params): return None def __getattr__(self, name): - if name == 'handle': + if name == "handle": return self._session - elif name == 'xenapi': + elif name == "xenapi": # Should be patched with mocker.patch(). return None - elif name.startswith('login') or name.startswith('slave_local'): + elif name.startswith("login") or name.startswith("slave_local"): return lambda *params: self._login(name, params) - elif name == 'logout': + elif name == "logout": return self._logout diff --git a/tests/unit/plugins/modules/conftest.py b/tests/unit/plugins/modules/conftest.py index f087c3f5a90..4a41ebc1cf8 100644 --- a/tests/unit/plugins/modules/conftest.py +++ b/tests/unit/plugins/modules/conftest.py @@ -11,7 +11,9 @@ import pytest from ansible_collections.community.general.plugins.module_utils import deps -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args as _set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + set_module_args as _set_module_args, +) def _fix_ansible_args(args): @@ -22,7 +24,7 @@ def _fix_ansible_args(args): if isinstance(args, MutableMapping): return args - raise Exception('Malformed data to the patch_ansible_module pytest fixture') + raise Exception("Malformed data to the patch_ansible_module pytest fixture") @pytest.fixture @@ -39,6 +41,7 @@ def _patch(args): args = _fix_ansible_args(args) with _set_module_args(args): yield + return _patch diff --git a/tests/unit/plugins/modules/gitlab.py b/tests/unit/plugins/modules/gitlab.py index 5738d2190ac..8e29664f11c 100644 --- a/tests/unit/plugins/modules/gitlab.py +++ b/tests/unit/plugins/modules/gitlab.py @@ -1,4 +1,3 @@ - # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -55,235 +54,257 @@ def python_gitlab_version_match_requirement(): # Skip unittest test case if python version don't match requirement def unitest_python_version_check_requirement(unittest_testcase): if not python_version_match_requirement(): - unittest_testcase.skipTest(f"Python {'.'.join(map(str, 
GITLAB_MINIMUM_PYTHON_VERSION))}+ is needed for python-gitlab") + unittest_testcase.skipTest( + f"Python {'.'.join(map(str, GITLAB_MINIMUM_PYTHON_VERSION))}+ is needed for python-gitlab" + ) -''' +""" USER API -''' +""" @urlmatch(scheme="http", netloc="localhost", path="/api/v4/users", method="get") def resp_find_user(url, request): - headers = {'content-type': 'application/json'} - content = ('[{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",' - '"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",' - '"web_url": "http://localhost:3000/john_smith"}, {"id": 2,' - '"username": "jack_smith", "name": "Jack Smith", "state": "blocked",' - '"avatar_url": "http://gravatar.com/../e32131cd8.jpeg",' - '"web_url": "http://localhost:3000/jack_smith"}]') + headers = {"content-type": "application/json"} + content = ( + '[{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",' + '"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",' + '"web_url": "http://localhost:3000/john_smith"}, {"id": 2,' + '"username": "jack_smith", "name": "Jack Smith", "state": "blocked",' + '"avatar_url": "http://gravatar.com/../e32131cd8.jpeg",' + '"web_url": "http://localhost:3000/jack_smith"}]' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/users", method="post") def resp_create_user(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",' - '"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",' - '"web_url": "http://localhost:3000/john_smith","created_at": "2012-05-23T08:00:58Z",' - '"bio": null, "location": null, "public_email": "john@example.com", "skype": "",' - '"linkedin": "", "twitter": "", "website_url": "", "organization": ""}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",' + '"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",' + '"web_url": "http://localhost:3000/john_smith","created_at": "2012-05-23T08:00:58Z",' + '"bio": null, "location": null, "public_email": "john@example.com", "skype": "",' + '"linkedin": "", "twitter": "", "website_url": "", "organization": ""}' + ) content = content.encode("utf-8") return response(201, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="get") def resp_get_user(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1, "username": "john_smith", "name": "John Smith",' - '"state": "active",' - '"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",' - '"web_url": "http://localhost:3000/john_smith",' - '"created_at": "2012-05-23T08:00:58Z", "bio": null, "location": null,' - '"public_email": "john@example.com", "skype": "", "linkedin": "",' - '"twitter": "", "website_url": "", "organization": "", "is_admin": false}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1, "username": "john_smith", "name": "John Smith",' + '"state": "active",' + '"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",' + '"web_url": "http://localhost:3000/john_smith",' + '"created_at": "2012-05-23T08:00:58Z", "bio": null, "location": null,' + '"public_email": "john@example.com", "skype": "", "linkedin": "",' + '"twitter": "", "website_url": "", 
"organization": "", "is_admin": false}' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="get") def resp_get_missing_user(url, request): - headers = {'content-type': 'application/json'} - content = ('{}') + headers = {"content-type": "application/json"} + content = "{}" content = content.encode("utf-8") return response(404, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete") def resp_delete_user(url, request): - headers = {'content-type': 'application/json'} - content = ('{}') + headers = {"content-type": "application/json"} + content = "{}" content = content.encode("utf-8") return response(204, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete") def resp_delete_missing_user(url, request): - headers = {'content-type': 'application/json'} - content = ('{}') + headers = {"content-type": "application/json"} + content = "{}" content = content.encode("utf-8") return response(404, content, headers, None, 5, request) -''' +""" USER SSHKEY API -''' +""" @urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1/keys", method="get") def resp_get_user_keys(url, request): - headers = {'content-type': 'application/json'} - content = ('[{"id": 1, "title": "Public key",' - '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596' - 'k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQa' - 'SeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",' - '"created_at": "2014-08-01T14:47:39.080Z"},{"id": 3,' - '"title": "Another Public key",' - '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596' - 'k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaS' - 'eP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",' - '"created_at": "2014-08-01T14:47:39.080Z"}]') + headers = {"content-type": "application/json"} + content = ( + '[{"id": 1, "title": "Public key",' + '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596' + "k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQa" + 'SeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",' + '"created_at": "2014-08-01T14:47:39.080Z"},{"id": 3,' + '"title": "Another Public key",' + '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596' + "k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaS" + 'eP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",' + '"created_at": "2014-08-01T14:47:39.080Z"}]' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1/keys", method="post") def resp_create_user_keys(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1, "title": "Private key",' - '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDA1YotVDm2mAyk2tPt4E7AHm01sS6JZmcUdRuSuA5z' - 'szUJzYPPUSRAX3BCgTqLqYx//UuVncK7YqLVSbbwjKR2Ez5lISgCnVfLVEXzwhv+xawxKWmI7hJ5S0tOv6MJ+Ixy' - 'Ta4xcKwJTwB86z22n9fVOQeJTR2dSOH1WJrf0PvRk+KVNY2jTiGHTi9AIjLnyD/jWRpOgtdfkLRc8EzAWrWlgNmH' - '2WOKBw6za0az6XoG75obUdFVdW3qcD0xc809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9' - 'M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF",' - '"created_at": "2014-08-01T14:47:39.080Z"}') + headers = 
{"content-type": "application/json"} + content = ( + '{"id": 1, "title": "Private key",' + '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDA1YotVDm2mAyk2tPt4E7AHm01sS6JZmcUdRuSuA5z' + "szUJzYPPUSRAX3BCgTqLqYx//UuVncK7YqLVSbbwjKR2Ez5lISgCnVfLVEXzwhv+xawxKWmI7hJ5S0tOv6MJ+Ixy" + "Ta4xcKwJTwB86z22n9fVOQeJTR2dSOH1WJrf0PvRk+KVNY2jTiGHTi9AIjLnyD/jWRpOgtdfkLRc8EzAWrWlgNmH" + "2WOKBw6za0az6XoG75obUdFVdW3qcD0xc809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9" + 'M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF",' + '"created_at": "2014-08-01T14:47:39.080Z"}' + ) content = content.encode("utf-8") return response(201, content, headers, None, 5, request) -''' +""" GROUP API -''' +""" @urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="get") def resp_find_group(url, request): - headers = {'content-type': 'application/json'} - content = ('[{"id": 1, "name": "Foobar Group", "path": "foo-bar",' - '"description": "An interesting group", "visibility": "public",' - '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",' - '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,' - '"full_name": "Foobar Group", "full_path": "foo-bar",' - '"file_template_project_id": 1, "parent_id": null, "projects": []}, {"id": 2, "name": "BarFoo Group", "path": "bar-foor",' - '"description": "An interesting group", "visibility": "public",' - '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",' - '"web_url": "http://localhost:3000/groups/bar-foo", "request_access_enabled": false,' - '"full_name": "BarFoo Group", "full_path": "bar-foo",' - '"file_template_project_id": 1, "parent_id": null, "projects": []}]') + headers = {"content-type": "application/json"} + content = ( + '[{"id": 1, "name": "Foobar Group", "path": "foo-bar",' + '"description": "An interesting group", "visibility": "public",' + '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",' + '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,' + '"full_name": "Foobar Group", "full_path": "foo-bar",' + '"file_template_project_id": 1, "parent_id": null, "projects": []}, {"id": 2, "name": "BarFoo Group", "path": "bar-foor",' + '"description": "An interesting group", "visibility": "public",' + '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",' + '"web_url": "http://localhost:3000/groups/bar-foo", "request_access_enabled": false,' + '"full_name": "BarFoo Group", "full_path": "bar-foo",' + '"file_template_project_id": 1, "parent_id": null, "projects": []}]' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="get") def resp_get_group(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",' - '"description": "An interesting group", "visibility": "public",' - '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",' - '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,' - '"full_name": "Foobar Group", "full_path": "foo-bar",' - '"project_creation_level": "maintainer", "subgroup_creation_level": "maintainer",' - '"require_two_factor_authentication": true,' - '"file_template_project_id": 1, "parent_id": null, "projects": [{"id": 1,"description": null, "default_branch": "master",' - 
'"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' - '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' - '"web_url": "http://example.com/diaspora/diaspora-client",' - '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' - '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' - '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' - '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",' - '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' - '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' - '"star_count": 0}]}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1, "name": "Foobar Group", "path": "foo-bar",' + '"description": "An interesting group", "visibility": "public",' + '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",' + '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,' + '"full_name": "Foobar Group", "full_path": "foo-bar",' + '"project_creation_level": "maintainer", "subgroup_creation_level": "maintainer",' + '"require_two_factor_authentication": true,' + '"file_template_project_id": 1, "parent_id": null, "projects": [{"id": 1,"description": null, "default_branch": "master",' + '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' + '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' + '"web_url": "http://example.com/diaspora/diaspora-client",' + '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' + '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' + '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' + '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",' + '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' + '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' + '"star_count": 0}]}' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/foo-bar", method="get") def resp_get_group_by_name(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",' - '"description": "An interesting group", "visibility": "public",' - '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",' - '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,' - '"full_name": "Foobar Group", "full_path": "foo-bar",' - '"project_creation_level": "maintainer", "subgroup_creation_level": "maintainer",' - '"require_two_factor_authentication": true,' - '"file_template_project_id": 1, "parent_id": null, "projects": [{"id": 1,"description": null, "default_branch": "master",' - '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' - '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' - '"web_url": "http://example.com/diaspora/diaspora-client",' - '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' - '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' - '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' - '"path_with_namespace": "diaspora/diaspora-client","created_at": 
"2013-09-30T13:46:02Z",' - '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' - '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' - '"star_count": 0}]}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1, "name": "Foobar Group", "path": "foo-bar",' + '"description": "An interesting group", "visibility": "public",' + '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",' + '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,' + '"full_name": "Foobar Group", "full_path": "foo-bar",' + '"project_creation_level": "maintainer", "subgroup_creation_level": "maintainer",' + '"require_two_factor_authentication": true,' + '"file_template_project_id": 1, "parent_id": null, "projects": [{"id": 1,"description": null, "default_branch": "master",' + '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' + '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' + '"web_url": "http://example.com/diaspora/diaspora-client",' + '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' + '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' + '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' + '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",' + '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' + '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' + '"star_count": 0}]}' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="get") def resp_get_missing_group(url, request): - headers = {'content-type': 'application/json'} - content = ('{}') + headers = {"content-type": "application/json"} + content = "{}" content = content.encode("utf-8") return response(404, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="post") def resp_create_group(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",' - '"description": "An interesting group", "visibility": "public",' - '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",' - '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,' - '"full_name": "Foobar Group", "full_path": "foo-bar",' - '"file_template_project_id": 1, "parent_id": null,' - '"project_creation_level": "developer", "subgroup_creation_level": "maintainer",' - '"require_two_factor_authentication": true}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1, "name": "Foobar Group", "path": "foo-bar",' + '"description": "An interesting group", "visibility": "public",' + '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",' + '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,' + '"full_name": "Foobar Group", "full_path": "foo-bar",' + '"file_template_project_id": 1, "parent_id": null,' + '"project_creation_level": "developer", "subgroup_creation_level": "maintainer",' + '"require_two_factor_authentication": true}' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", 
method="post") def resp_create_subgroup(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 2, "name": "BarFoo Group", "path": "bar-foor",' - '"description": "An interesting group", "visibility": "public",' - '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",' - '"web_url": "http://localhost:3000/groups/foo-bar/bar-foo", "request_access_enabled": false,' - '"full_name": "BarFoo Group", "full_path": "foo-bar/bar-foo",' - '"file_template_project_id": 1, "parent_id": 1,' - '"project_creation_level": "noone",' - '"require_two_factor_authentication": true}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 2, "name": "BarFoo Group", "path": "bar-foor",' + '"description": "An interesting group", "visibility": "public",' + '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",' + '"web_url": "http://localhost:3000/groups/foo-bar/bar-foo", "request_access_enabled": false,' + '"full_name": "BarFoo Group", "full_path": "foo-bar/bar-foo",' + '"file_template_project_id": 1, "parent_id": 1,' + '"project_creation_level": "noone",' + '"require_two_factor_authentication": true}' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete") def resp_delete_group(url, request): - headers = {'content-type': 'application/json'} - content = ('{}') + headers = {"content-type": "application/json"} + content = "{}" content = content.encode("utf-8") return response(204, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/access_tokens", method="get") def resp_list_group_access_tokens(url, request): - headers = {'content-type': 'application/json'} + headers = {"content-type": "application/json"} content = ( '[{"id":689,"name":"test-token","revoked":true,"created_at":"2025-06-02T09:18:01.484Z",' '"description":null,"scopes":["read_repository","write_repository"],"user_id":1779,' @@ -320,241 +341,269 @@ def resp_list_group_access_tokens(url, request): @urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/access_tokens", method="post") def resp_create_group_access_tokens(url, request): - headers = {'content-type': 'application/json'} - content = ('{"user_id" : 1, "scopes" : ["api"], "name" : "token1", "expires_at" : "2021-01-31",' - '"id" : 1, "active" : false, "created_at" : "2021-01-20T22:11:48.151Z", "revoked" : true,' - '"access_level": 40, "token": "Der423FErcdv35qEEWc"}') + headers = {"content-type": "application/json"} + content = ( + '{"user_id" : 1, "scopes" : ["api"], "name" : "token1", "expires_at" : "2021-01-31",' + '"id" : 1, "active" : false, "created_at" : "2021-01-20T22:11:48.151Z", "revoked" : true,' + '"access_level": 40, "token": "Der423FErcdv35qEEWc"}' + ) content = content.encode("utf-8") return response(201, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/access_tokens/[0-9]+", method="delete") def resp_revoke_group_access_tokens(url, request): - headers = {'content-type': 'application/json'} - content = ('') + headers = {"content-type": "application/json"} + content = "" content = content.encode("utf-8") return response(204, content, headers, None, 5, request) -''' +""" GROUP MEMBER API -''' +""" @urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members/1", method="get") def resp_get_member(url, request): - 
headers = {'content-type': 'application/json'} - content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",' - '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",' - '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",' + '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",' + '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30}' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members", method="get") def resp_find_member(url, request): - headers = {'content-type': 'application/json'} - content = ('[{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",' - '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",' - '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30},{' - '"id": 2, "username": "john_doe", "name": "John Doe","state": "active",' - '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",' - '"web_url": "http://192.168.1.8:3000/root","expires_at": "2012-10-22T14:13:35Z",' - '"access_level": 30}]') + headers = {"content-type": "application/json"} + content = ( + '[{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",' + '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",' + '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30},{' + '"id": 2, "username": "john_doe", "name": "John Doe","state": "active",' + '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",' + '"web_url": "http://192.168.1.8:3000/root","expires_at": "2012-10-22T14:13:35Z",' + '"access_level": 30}]' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members", method="post") def resp_add_member(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",' - '"state": "active",' - '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",' - '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",' - '"access_level": 30}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",' + '"state": "active",' + '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",' + '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",' + '"access_level": 30}' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members/1", method="put") def resp_update_member(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",' - '"state": 
"active",' - '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",' - '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",' - '"access_level": 10}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",' + '"state": "active",' + '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",' + '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",' + '"access_level": 10}' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) -''' +""" DEPLOY KEY API -''' +""" @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys", method="get") def resp_find_project_deploy_key(url, request): - headers = {'content-type': 'application/json'} - content = ('[{"id": 1,"title": "Public key",' - '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc' - 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",' - '"created_at": "2013-10-02T10:12:29Z"},{"id": 3,"title": "Another Public key",' - '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc' - 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",' - '"created_at": "2013-10-02T11:12:29Z"}]') + headers = {"content-type": "application/json"} + content = ( + '[{"id": 1,"title": "Public key",' + '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc' + 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",' + '"created_at": "2013-10-02T10:12:29Z"},{"id": 3,"title": "Another Public key",' + '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc' + 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",' + '"created_at": "2013-10-02T11:12:29Z"}]' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys/1", method="get") def resp_get_project_deploy_key(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1,"title": "Public key",' - '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc' - 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",' - '"created_at": "2013-10-02T10:12:29Z"}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1,"title": "Public key",' + '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc' + 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",' + '"created_at": "2013-10-02T10:12:29Z"}' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys", method="post") def resp_create_project_deploy_key(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1,"title": "Public key",' - '"key": 
"ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc' - 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",' - '"created_at": "2013-10-02T10:12:29Z"}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1,"title": "Public key",' + '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc' + 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",' + '"created_at": "2013-10-02T10:12:29Z"}' + ) content = content.encode("utf-8") return response(201, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys/1", method="delete") def resp_delete_project_deploy_key(url, request): - headers = {'content-type': 'application/json'} - content = ('{}') + headers = {"content-type": "application/json"} + content = "{}" content = content.encode("utf-8") return response(204, content, headers, None, 5, request) -''' +""" PROJECT API -''' +""" @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects", method="get") def resp_find_project(url, request): - headers = {'content-type': 'application/json'} - content = ('[{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",' - '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' - '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' - '"web_url": "http://example.com/diaspora/diaspora-client",' - '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' - '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' - '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' - '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",' - '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' - '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' - '"star_count": 0}]') + headers = {"content-type": "application/json"} + content = ( + '[{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",' + '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' + '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' + '"web_url": "http://example.com/diaspora/diaspora-client",' + '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' + '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' + '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' + '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",' + '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' + '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' + '"star_count": 0}]' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1", method="get") def resp_get_project(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",' - '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' - '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' - '"web_url": 
"http://example.com/diaspora/diaspora-client",' - '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' - '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' - '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' - '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",' - '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' - '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' - '"star_count": 0}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",' + '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' + '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' + '"web_url": "http://example.com/diaspora/diaspora-client",' + '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' + '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' + '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' + '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",' + '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' + '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' + '"star_count": 0}' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/foo-bar%2Fdiaspora-client", method="get") def resp_get_project_by_name(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",' - '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' - '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' - '"web_url": "http://example.com/diaspora/diaspora-client",' - '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' - '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' - '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' - '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",' - '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' - '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' - '"star_count": 0}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",' + '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' + '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' + '"web_url": "http://example.com/diaspora/diaspora-client",' + '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' + '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' + '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' + '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",' + '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' + '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' + '"star_count": 0}' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", 
path="/api/v4/groups/1/projects", method="get") def resp_find_group_project(url, request): - headers = {'content-type': 'application/json'} - content = ('[{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",' - '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' - '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' - '"web_url": "http://example.com/diaspora/diaspora-client",' - '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' - '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' - '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' - '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",' - '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' - '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' - '"star_count": 0}]') + headers = {"content-type": "application/json"} + content = ( + '[{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",' + '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' + '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' + '"web_url": "http://example.com/diaspora/diaspora-client",' + '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' + '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' + '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' + '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",' + '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' + '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' + '"star_count": 0}]' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/projects/1", method="get") def resp_get_group_project(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",' - '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' - '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' - '"web_url": "http://example.com/diaspora/diaspora-client",' - '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' - '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' - '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' - '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",' - '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' - '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' - '"star_count": 0}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",' + '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' + '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' + '"web_url": "http://example.com/diaspora/diaspora-client",' + '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' + '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' + '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' + '"path_with_namespace": 
"diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",' + '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' + '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' + '"star_count": 0}' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects", method="post") def resp_create_project(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",' - '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' - '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' - '"web_url": "http://example.com/diaspora/diaspora-client",' - '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' - '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' - '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' - '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",' - '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' - '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' - '"star_count": 0}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",' + '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' + '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' + '"web_url": "http://example.com/diaspora/diaspora-client",' + '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",' + '"tag_list": ["example","disapora client"],"name": "Diaspora Client",' + '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",' + '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",' + '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,' + '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",' + '"star_count": 0}' + ) content = content.encode("utf-8") return response(201, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1", method="delete") def resp_delete_project(url, request): - headers = {'content-type': 'application/json'} - content = ('{}') + headers = {"content-type": "application/json"} + content = "{}" content = content.encode("utf-8") return response(204, content, headers, None, 5, request) @@ -562,33 +611,35 @@ def resp_delete_project(url, request): @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/protected_branches/master", method="get") def resp_get_protected_branch(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1, "name": "master", "push_access_levels": [{"access_level": 40, "access_level_description": "Maintainers"}],' - '"merge_access_levels": [{"access_level": 40, "access_level_description": "Maintainers"}],' - '"allow_force_push":false, "code_owner_approval_required": false}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1, "name": "master", "push_access_levels": [{"access_level": 40, "access_level_description": "Maintainers"}],' + '"merge_access_levels": [{"access_level": 40, "access_level_description": "Maintainers"}],' + '"allow_force_push":false, "code_owner_approval_required": false}' + ) content = 
content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/protected_branches/master", method="get") def resp_get_protected_branch_not_exist(url, request): - headers = {'content-type': 'application/json'} - content = ('') + headers = {"content-type": "application/json"} + content = "" content = content.encode("utf-8") return response(404, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/protected_branches/master", method="delete") def resp_delete_protected_branch(url, request): - headers = {'content-type': 'application/json'} - content = ('') + headers = {"content-type": "application/json"} + content = "" content = content.encode("utf-8") return response(204, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/access_tokens", method="get") def resp_list_project_access_tokens(url, request): - headers = {'content-type': 'application/json'} + headers = {"content-type": "application/json"} content = ( '[{"id":689,"name":"test-token","revoked":true,"created_at":"2025-06-02T09:18:01.484Z",' '"description":null,"scopes":["read_repository","write_repository"],"user_id":1779,' @@ -625,187 +676,219 @@ def resp_list_project_access_tokens(url, request): @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/access_tokens", method="post") def resp_create_project_access_tokens(url, request): - headers = {'content-type': 'application/json'} - content = ('{"user_id" : 1, "scopes" : ["api"], "name" : "token1", "expires_at" : "2021-01-31",' - '"id" : 1, "active" : false, "created_at" : "2021-01-20T22:11:48.151Z", "revoked" : true,' - '"access_level": 40, "token": "Der423FErcdv35qEEWc"}') + headers = {"content-type": "application/json"} + content = ( + '{"user_id" : 1, "scopes" : ["api"], "name" : "token1", "expires_at" : "2021-01-31",' + '"id" : 1, "active" : false, "created_at" : "2021-01-20T22:11:48.151Z", "revoked" : true,' + '"access_level": 40, "token": "Der423FErcdv35qEEWc"}' + ) content = content.encode("utf-8") return response(201, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/access_tokens/[0-9]+", method="delete") def resp_revoke_project_access_tokens(url, request): - headers = {'content-type': 'application/json'} - content = ('') + headers = {"content-type": "application/json"} + content = "" content = content.encode("utf-8") return response(204, content, headers, None, 5, request) -''' +""" HOOK API -''' +""" @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks", method="get") def resp_find_project_hook(url, request): - headers = {'content-type': 'application/json'} - content = ('[{"id": 1,"url": "http://example.com/hook","project_id": 3,' - '"push_events": true,"push_events_branch_filter": "","issues_events": true,' - '"confidential_issues_events": true,"merge_requests_events": true,' - '"tag_push_events": true,"note_events": true,"job_events": true,' - '"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,' - '"created_at": "2012-10-12T17:04:47Z"}]') + headers = {"content-type": "application/json"} + content = ( + '[{"id": 1,"url": "http://example.com/hook","project_id": 3,' + '"push_events": true,"push_events_branch_filter": "","issues_events": true,' + '"confidential_issues_events": true,"merge_requests_events": true,' + '"tag_push_events": true,"note_events": 
true,"job_events": true,' + '"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,' + '"created_at": "2012-10-12T17:04:47Z"}]' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks/1", method="get") def resp_get_project_hook(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1,"url": "http://example.com/hook","project_id": 3,' - '"push_events": true,"push_events_branch_filter": "","issues_events": true,' - '"confidential_issues_events": true,"merge_requests_events": true,' - '"tag_push_events": true,"note_events": true,"job_events": true,' - '"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,' - '"created_at": "2012-10-12T17:04:47Z"}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1,"url": "http://example.com/hook","project_id": 3,' + '"push_events": true,"push_events_branch_filter": "","issues_events": true,' + '"confidential_issues_events": true,"merge_requests_events": true,' + '"tag_push_events": true,"note_events": true,"job_events": true,' + '"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,' + '"created_at": "2012-10-12T17:04:47Z"}' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks", method="post") def resp_create_project_hook(url, request): - headers = {'content-type': 'application/json'} - content = ('{"id": 1,"url": "http://example.com/hook","project_id": 3,' - '"push_events": true,"push_events_branch_filter": "","issues_events": true,' - '"confidential_issues_events": true,"merge_requests_events": true,' - '"tag_push_events": true,"note_events": true,"job_events": true,' - '"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,' - '"created_at": "2012-10-12T17:04:47Z"}') + headers = {"content-type": "application/json"} + content = ( + '{"id": 1,"url": "http://example.com/hook","project_id": 3,' + '"push_events": true,"push_events_branch_filter": "","issues_events": true,' + '"confidential_issues_events": true,"merge_requests_events": true,' + '"tag_push_events": true,"note_events": true,"job_events": true,' + '"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,' + '"created_at": "2012-10-12T17:04:47Z"}' + ) content = content.encode("utf-8") return response(201, content, headers, None, 5, request) @urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks/1", method="delete") def resp_delete_project_hook(url, request): - headers = {'content-type': 'application/json'} - content = ('{}') + headers = {"content-type": "application/json"} + content = "{}" content = content.encode("utf-8") return response(204, content, headers, None, 5, request) -''' +""" RUNNER API -''' +""" -@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/runners/all$', method="get") +@urlmatch(scheme="http", netloc="localhost", path=r"/api/v4/runners/all$", method="get") def resp_find_runners_all(url, request): - headers = {'content-type': 'application/json', - "X-Page": 1, - "X-Next-Page": 2, - "X-Per-Page": 1, - "X-Total-Pages": 1, - "X-Total": 2} - content = ('[{"active": true,"description": "test-1-20150125","id": 1,' - '"is_shared": false,"ip_address": "127.0.0.1","name": null,' - '"online": true,"status": "online"},{"active": true,' - 
'"description": "test-2-20150125","id": 2,"ip_address": "127.0.0.1",' - '"is_shared": false,"name": null,"online": false,"status": "offline"}]') + headers = { + "content-type": "application/json", + "X-Page": 1, + "X-Next-Page": 2, + "X-Per-Page": 1, + "X-Total-Pages": 1, + "X-Total": 2, + } + content = ( + '[{"active": true,"description": "test-1-20150125","id": 1,' + '"is_shared": false,"ip_address": "127.0.0.1","name": null,' + '"online": true,"status": "online"},{"active": true,' + '"description": "test-2-20150125","id": 2,"ip_address": "127.0.0.1",' + '"is_shared": false,"name": null,"online": false,"status": "offline"}]' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) -@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/runners$', method="get") +@urlmatch(scheme="http", netloc="localhost", path=r"/api/v4/runners$", method="get") def resp_find_runners_list(url, request): - headers = {'content-type': 'application/json', - "X-Page": 1, - "X-Next-Page": 2, - "X-Per-Page": 1, - "X-Total-Pages": 1, - "X-Total": 2} - content = ('[{"active": true,"description": "test-1-20201214","id": 1,' - '"is_shared": false,"ip_address": "127.0.0.1","name": null,' - '"online": true,"status": "online"},{"active": true,' - '"description": "test-2-20201214","id": 2,"ip_address": "127.0.0.1",' - '"is_shared": false,"name": null,"online": false,"status": "offline"}]') + headers = { + "content-type": "application/json", + "X-Page": 1, + "X-Next-Page": 2, + "X-Per-Page": 1, + "X-Total-Pages": 1, + "X-Total": 2, + } + content = ( + '[{"active": true,"description": "test-1-20201214","id": 1,' + '"is_shared": false,"ip_address": "127.0.0.1","name": null,' + '"online": true,"status": "online"},{"active": true,' + '"description": "test-2-20201214","id": 2,"ip_address": "127.0.0.1",' + '"is_shared": false,"name": null,"online": false,"status": "offline"}]' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) -@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/projects/1/runners$', method="get") +@urlmatch(scheme="http", netloc="localhost", path=r"/api/v4/projects/1/runners$", method="get") def resp_find_project_runners(url, request): - headers = {'content-type': 'application/json', - "X-Page": 1, - "X-Next-Page": 2, - "X-Per-Page": 1, - "X-Total-Pages": 1, - "X-Total": 2} - content = ('[{"active": true,"description": "test-1-20220210","id": 1,' - '"is_shared": false,"ip_address": "127.0.0.1","name": null,' - '"online": true,"status": "online"},{"active": true,' - '"description": "test-2-20220210","id": 2,"ip_address": "127.0.0.1",' - '"is_shared": false,"name": null,"online": false,"status": "offline"}]') + headers = { + "content-type": "application/json", + "X-Page": 1, + "X-Next-Page": 2, + "X-Per-Page": 1, + "X-Total-Pages": 1, + "X-Total": 2, + } + content = ( + '[{"active": true,"description": "test-1-20220210","id": 1,' + '"is_shared": false,"ip_address": "127.0.0.1","name": null,' + '"online": true,"status": "online"},{"active": true,' + '"description": "test-2-20220210","id": 2,"ip_address": "127.0.0.1",' + '"is_shared": false,"name": null,"online": false,"status": "offline"}]' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) -@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/groups/1/runners$', method="get") +@urlmatch(scheme="http", netloc="localhost", path=r"/api/v4/groups/1/runners$", method="get") def resp_find_group_runners(url, request): - 
headers = {'content-type': 'application/json', - "X-Page": 1, - "X-Next-Page": 2, - "X-Per-Page": 1, - "X-Total-Pages": 1, - "X-Total": 2} - content = ('[{"active": true,"description": "test-3-20220210","id": 1,' - '"is_shared": false,"ip_address": "127.0.0.1","name": null,' - '"online": true,"status": "online"},{"active": true,' - '"description": "test-4-20220210","id": 2,"ip_address": "127.0.0.1",' - '"is_shared": false,"name": null,"online": false,"status": "offline"}]') + headers = { + "content-type": "application/json", + "X-Page": 1, + "X-Next-Page": 2, + "X-Per-Page": 1, + "X-Total-Pages": 1, + "X-Total": 2, + } + content = ( + '[{"active": true,"description": "test-3-20220210","id": 1,' + '"is_shared": false,"ip_address": "127.0.0.1","name": null,' + '"online": true,"status": "online"},{"active": true,' + '"description": "test-4-20220210","id": 2,"ip_address": "127.0.0.1",' + '"is_shared": false,"name": null,"online": false,"status": "offline"}]' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) -@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/runners/1$', method="put") +@urlmatch(scheme="http", netloc="localhost", path=r"/api/v4/runners/1$", method="put") def resp_update_runner(url, request): - headers = {'content-type': 'application/json', - "X-Page": 1, - "X-Next-Page": 2, - "X-Per-Page": 1, - "X-Total-Pages": 1, - "X-Total": 2} - content = ('[{"active": true,"description": "test-1-20201214","id": 1,' - '"is_shared": false,"ip_address": "127.0.0.1","name": null,' - '"online": true,"status": "online"},{"active": true,' - '"description": "test-2-20201214","id": 2,"ip_address": "127.0.0.1",' - '"is_shared": false,"name": null,"online": false,"status": "offline"}]') + headers = { + "content-type": "application/json", + "X-Page": 1, + "X-Next-Page": 2, + "X-Per-Page": 1, + "X-Total-Pages": 1, + "X-Total": 2, + } + content = ( + '[{"active": true,"description": "test-1-20201214","id": 1,' + '"is_shared": false,"ip_address": "127.0.0.1","name": null,' + '"online": true,"status": "online"},{"active": true,' + '"description": "test-2-20201214","id": 2,"ip_address": "127.0.0.1",' + '"is_shared": false,"name": null,"online": false,"status": "offline"}]' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) -@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/runners/1$', method="get") +@urlmatch(scheme="http", netloc="localhost", path=r"/api/v4/runners/1$", method="get") def resp_get_runner(url, request): - headers = {'content-type': 'application/json'} - content = ('{"active": true,"description": "test-1-20150125","id": 1,' - '"is_shared": false,"ip_address": "127.0.0.1","name": null,' - '"online": true,"status": "online"}') + headers = {"content-type": "application/json"} + content = ( + '{"active": true,"description": "test-1-20150125","id": 1,' + '"is_shared": false,"ip_address": "127.0.0.1","name": null,' + '"online": true,"status": "online"}' + ) content = content.encode("utf-8") return response(200, content, headers, None, 5, request) -@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/runners$', method="post") +@urlmatch(scheme="http", netloc="localhost", path=r"/api/v4/runners$", method="post") def resp_create_runner(url, request): - headers = {'content-type': 'application/json'} - content = ('{"active": true,"description": "test-1-20150125","id": 1,' - '"is_shared": false,"ip_address": "127.0.0.1","name": null,' - '"online": true,"status": "online"}') + headers = 
{"content-type": "application/json"} + content = ( + '{"active": true,"description": "test-1-20150125","id": 1,' + '"is_shared": false,"ip_address": "127.0.0.1","name": null,' + '"online": true,"status": "online"}' + ) content = content.encode("utf-8") return response(201, content, headers, None, 5, request) -@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/runners/1$', method="delete") +@urlmatch(scheme="http", netloc="localhost", path=r"/api/v4/runners/1$", method="delete") def resp_delete_runner(url, request): - headers = {'content-type': 'application/json'} - content = ('{}') + headers = {"content-type": "application/json"} + content = "{}" content = content.encode("utf-8") return response(204, content, headers, None, 5, request) diff --git a/tests/unit/plugins/modules/hpe_test_utils.py b/tests/unit/plugins/modules/hpe_test_utils.py index 96bed5aa89d..883dc650ef4 100644 --- a/tests/unit/plugins/modules/hpe_test_utils.py +++ b/tests/unit/plugins/modules/hpe_test_utils.py @@ -17,23 +17,23 @@ class OneViewBaseTest: @pytest.fixture(autouse=True) def setUp(self, mock_ansible_module, mock_ov_client, request): - marker = request.node.get_marker('resource') + marker = request.node.get_marker("resource") self.resource = getattr(mock_ov_client, f"{marker.args}") self.mock_ov_client = mock_ov_client self.mock_ansible_module = mock_ansible_module @pytest.fixture def testing_module(self): - resource_name = type(self).__name__.replace('Test', '') - resource_module_path_name = resource_name.replace('Module', '') - resource_module_path_name = re.findall('[A-Z][^A-Z]*', resource_module_path_name) + resource_name = type(self).__name__.replace("Test", "") + resource_module_path_name = resource_name.replace("Module", "") + resource_module_path_name = re.findall("[A-Z][^A-Z]*", resource_module_path_name) resource_module_path_name = f"oneview_{str.join('_', resource_module_path_name).lower()}" - ansible_collections = __import__('ansible_collections') + ansible_collections = __import__("ansible_collections") oneview_module = ansible_collections.community.general.plugins.modules resource_module = getattr(oneview_module, resource_module_path_name) self.testing_class = getattr(resource_module, resource_name) - testing_module = self.testing_class.__module__.split('.')[-1] + testing_module = self.testing_class.__module__.split(".")[-1] testing_module = getattr(oneview_module, testing_module) try: # Load scenarios from module examples (Also checks if it is a valid yaml) @@ -45,9 +45,9 @@ def testing_module(self): return testing_module def test_main_function_should_call_run_method(self, testing_module, mock_ansible_module): - mock_ansible_module.params = {'config': 'config.json'} + mock_ansible_module.params = {"config": "config.json"} - main_func = getattr(testing_module, 'main') + main_func = getattr(testing_module, "main") with patch.object(self.testing_class, "run") as mock_run: main_func() @@ -59,28 +59,28 @@ def test_should_get_all_using_filters(self, testing_module): self.resource.get_all.return_value = [] params_get_all_with_filters = dict( - config='config.json', + config="config.json", name=None, params={ - 'start': 1, - 'count': 3, - 'sort': 'name:descending', - 'filter': 'purpose=General', - 'query': 'imported eq true' - }) + "start": 1, + "count": 3, + "sort": "name:descending", + "filter": "purpose=General", + "query": "imported eq true", + }, + ) self.mock_ansible_module.params = params_get_all_with_filters self.testing_class().run() - self.resource.get_all.assert_called_once_with(start=1, 
count=3, sort='name:descending', filter='purpose=General', query='imported eq true') + self.resource.get_all.assert_called_once_with( + start=1, count=3, sort="name:descending", filter="purpose=General", query="imported eq true" + ) def test_should_get_all_without_params(self, testing_module): self.resource.get_all.return_value = [] - params_get_all_with_filters = dict( - config='config.json', - name=None - ) + params_get_all_with_filters = dict(config="config.json", name=None) self.mock_ansible_module.params = params_get_all_with_filters self.testing_class().run() @@ -106,7 +106,7 @@ def configure_mocks(self, test_case, testing_class): self.testing_class = testing_class # Define OneView Client Mock (FILE) - patcher_json_file = patch.object(OneViewClient, 'from_json_file') + patcher_json_file = patch.object(OneViewClient, "from_json_file") test_case.addCleanup(patcher_json_file.stop) self.mock_ov_client_from_json_file = patcher_json_file.start() @@ -123,9 +123,9 @@ def configure_mocks(self, test_case, testing_class): self.__set_module_examples() def test_main_function_should_call_run_method(self): - self.mock_ansible_module.params = {'config': 'config.json'} + self.mock_ansible_module.params = {"config": "config.json"} - main_func = getattr(self.testing_module, 'main') + main_func = getattr(self.testing_module, "main") with patch.object(self.testing_class, "run") as mock_run: main_func() @@ -133,8 +133,8 @@ def test_main_function_should_call_run_method(self): def __set_module_examples(self): # Load scenarios from module examples (Also checks if it is a valid yaml) - ansible_collections = __import__('ansible_collections') - testing_module = self.testing_class.__module__.split('.')[-1] + ansible_collections = __import__("ansible_collections") + testing_module = self.testing_class.__module__.split(".")[-1] self.testing_module = getattr(ansible_collections.community.general.plugins.modules, testing_module) try: @@ -165,38 +165,37 @@ def __validations(self): if not self.resource_client: raise Exception( - "Mock for the client not configured, you must call 'configure_client_mock' before running this test.") + "Mock for the client not configured, you must call 'configure_client_mock' before running this test." 
+ ) def test_should_get_all_using_filters(self): self.__validations() self.resource_client.get_all.return_value = [] params_get_all_with_filters = dict( - config='config.json', + config="config.json", name=None, params={ - 'start': 1, - 'count': 3, - 'sort': 'name:descending', - 'filter': 'purpose=General', - 'query': 'imported eq true' - }) + "start": 1, + "count": 3, + "sort": "name:descending", + "filter": "purpose=General", + "query": "imported eq true", + }, + ) self.mock_ansible_module.params = params_get_all_with_filters self.testing_class().run() - self.resource_client.get_all.assert_called_once_with(start=1, count=3, sort='name:descending', - filter='purpose=General', - query='imported eq true') + self.resource_client.get_all.assert_called_once_with( + start=1, count=3, sort="name:descending", filter="purpose=General", query="imported eq true" + ) def test_should_get_all_without_params(self): self.__validations() self.resource_client.get_all.return_value = [] - params_get_all_with_filters = dict( - config='config.json', - name=None - ) + params_get_all_with_filters = dict(config="config.json", name=None) self.mock_ansible_module.params = params_get_all_with_filters self.testing_class().run() diff --git a/tests/unit/plugins/modules/interfaces_file/test_interfaces_file.py b/tests/unit/plugins/modules/interfaces_file/test_interfaces_file.py index 4f98fbe0e93..bdcc09a7f10 100644 --- a/tests/unit/plugins/modules/interfaces_file/test_interfaces_file.py +++ b/tests/unit/plugins/modules/interfaces_file/test_interfaces_file.py @@ -38,8 +38,8 @@ def fail_json(self, msg): module = ModuleMocked() -fixture_path = os.path.join(os.path.dirname(__file__), 'interfaces_file_fixtures', 'input') -golden_output_path = os.path.join(os.path.dirname(__file__), 'interfaces_file_fixtures', 'golden_output') +fixture_path = os.path.join(os.path.dirname(__file__), "interfaces_file_fixtures", "input") +golden_output_path = os.path.join(os.path.dirname(__file__), "interfaces_file_fixtures", "golden_output") class TestInterfacesFileModule(unittest.TestCase): @@ -47,7 +47,7 @@ class TestInterfacesFileModule(unittest.TestCase): def getTestFiles(self, include_filter=None, exclude_filter=None): flist = next(os.walk(fixture_path))[2] - flist = [file for file in flist if not file.endswith('.license')] + flist = [file for file in flist if not file.endswith(".license")] if include_filter: flist = filter(lambda x: re.match(include_filter, x), flist) if exclude_filter: @@ -57,10 +57,9 @@ def getTestFiles(self, include_filter=None, exclude_filter=None): def compareFileToBackup(self, path, backup): with open(path) as f1: with open(backup) as f2: - diffs = difflib.context_diff(f1.readlines(), - f2.readlines(), - fromfile=os.path.basename(path), - tofile=os.path.basename(backup)) + diffs = difflib.context_diff( + f1.readlines(), f2.readlines(), fromfile=os.path.basename(path), tofile=os.path.basename(backup) + ) # Restore backup move(backup, path) deltas = list(diffs) @@ -69,37 +68,37 @@ def compareFileToBackup(self, path, backup): def compareInterfacesLinesToFile(self, interfaces_lines, path, testname=None): if not testname: testname = f"{path}.{inspect.stack()[1][3]}" - self.compareStringWithFile("".join([d['line'] for d in interfaces_lines if 'line' in d]), testname) + self.compareStringWithFile("".join([d["line"] for d in interfaces_lines if "line" in d]), testname) def compareInterfacesToFile(self, ifaces, path, testname=None): if not testname: testname = f"{path}.{inspect.stack()[1][3]}.json" testfilepath = 
os.path.join(golden_output_path, testname) - string = json.dumps(ifaces, sort_keys=True, indent=4, separators=(',', ': ')) - if string and not string.endswith('\n'): - string += '\n' + string = json.dumps(ifaces, sort_keys=True, indent=4, separators=(",", ": ")) + if string and not string.endswith("\n"): + string += "\n" goldenstring = string goldenData = ifaces if not os.path.isfile(testfilepath): - with io.open(testfilepath, 'wb') as f: + with io.open(testfilepath, "wb") as f: f.write(string.encode()) else: - with open(testfilepath, 'r') as goldenfile: + with open(testfilepath, "r") as goldenfile: goldenData = json.load(goldenfile) self.assertEqual(goldenData, ifaces) def compareStringWithFile(self, string, path): testfilepath = os.path.join(golden_output_path, path) - if string and not string.endswith('\n'): - string += '\n' + if string and not string.endswith("\n"): + string += "\n" goldenstring = string if not os.path.isfile(testfilepath): - f = io.open(testfilepath, 'wb') + f = io.open(testfilepath, "wb") f.write(string.encode()) f.close() else: - with open(testfilepath, 'r') as goldenfile: + with open(testfilepath, "r") as goldenfile: goldenstring = goldenfile.read() goldenfile.close() self.assertEqual(goldenstring, string) @@ -115,74 +114,74 @@ def test_add_up_option_to_aggi(self): testcases = { "add_aggi_up": [ { - 'iface': 'aggi', - 'option': 'up', - 'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi', - 'state': 'present', + "iface": "aggi", + "option": "up", + "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi", + "state": "present", } ], "add_and_delete_aggi_up": [ { - 'iface': 'aggi', - 'option': 'up', - 'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi', - 'state': 'present', + "iface": "aggi", + "option": "up", + "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi", + "state": "present", }, { - 'iface': 'aggi', - 'option': 'up', - 'value': None, - 'state': 'absent', + "iface": "aggi", + "option": "up", + "value": None, + "state": "absent", }, ], "add_aggi_up_twice": [ { - 'iface': 'aggi', - 'option': 'up', - 'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi', - 'state': 'present', + "iface": "aggi", + "option": "up", + "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi", + "state": "present", }, { - 'iface': 'aggi', - 'option': 'up', - 'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi', - 'state': 'present', + "iface": "aggi", + "option": "up", + "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi", + "state": "present", }, ], "aggi_remove_dup": [ { - 'iface': 'aggi', - 'option': 'up', - 'value': None, - 'state': 'absent', + "iface": "aggi", + "option": "up", + "value": None, + "state": "absent", }, { - 'iface': 'aggi', - 'option': 'up', - 'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi', - 'state': 'present', + "iface": "aggi", + "option": "up", + "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi", + "state": "present", }, ], "set_aggi_slaves": [ { - 'iface': 'aggi', - 'option': 'slaves', - 'value': 'int1 int3', - 'state': 'present', + "iface": "aggi", + "option": "slaves", + "value": "int1 int3", + "state": "present", }, ], "set_aggi_and_eth0_mtu": [ { - 'iface': 'aggi', - 'option': 'mtu', - 'value': '1350', - 'state': 'present', + "iface": "aggi", + "option": "mtu", + "value": "1350", + "state": "present", }, { - 'iface': 'eth0', - 'option': 'mtu', - 'value': '1350', - 'state': 'present', + "iface": "eth0", + "option": "mtu", + "value": "1350", + "state": 
"present", }, ], } @@ -193,12 +192,16 @@ def test_add_up_option_to_aggi(self): fail_json_iterations = [] for i, options in enumerate(options_list): try: - dummy, lines = interfaces_file.set_interface_option(module, lines, options['iface'], options['option'], - options['value'], options['state']) + dummy, lines = interfaces_file.set_interface_option( + module, lines, options["iface"], options["option"], options["value"], options["state"] + ) except AnsibleFailJson as e: fail_json_iterations.append( - f"[{i}] fail_json message: {e!s}\noptions:\n{json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))}") - self.compareStringWithFile("\n=====\n".join(fail_json_iterations), f"{testfile}_{testname}.exceptions.txt") + f"[{i}] fail_json message: {e!s}\noptions:\n{json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))}" + ) + self.compareStringWithFile( + "\n=====\n".join(fail_json_iterations), f"{testfile}_{testname}.exceptions.txt" + ) self.compareInterfacesLinesToFile(lines, testfile, f"{testfile}_{testname}") self.compareInterfacesToFile(ifaces, testfile, f"{testfile}_{testname}.json") @@ -207,9 +210,9 @@ def test_revert(self): testcases = { "revert": [ { - 'iface': 'eth0', - 'option': 'mtu', - 'value': '1350', + "iface": "eth0", + "option": "mtu", + "value": "1350", } ], } @@ -222,18 +225,22 @@ def test_revert(self): lines, ifaces = interfaces_file.read_interfaces_file(module, path) backupp = module.backup_local(path) options = options_list[0] - for state in ['present', 'absent']: + for state in ["present", "absent"]: fail_json_iterations = [] - options['state'] = state + options["state"] = state try: - dummy, lines = interfaces_file.set_interface_option(module, lines, - options['iface'], options['option'], options['value'], options['state']) + dummy, lines = interfaces_file.set_interface_option( + module, lines, options["iface"], options["option"], options["value"], options["state"] + ) except AnsibleFailJson as e: fail_json_iterations.append( - f"fail_json message: {e!s}\noptions:\n{json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))}") - interfaces_file.write_changes(module, [d['line'] for d in lines if 'line' in d], path) + f"fail_json message: {e!s}\noptions:\n{json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))}" + ) + interfaces_file.write_changes(module, [d["line"] for d in lines if "line" in d], path) - self.compareStringWithFile("\n=====\n".join(fail_json_iterations), f"{testfile}_{testname}.exceptions.txt") + self.compareStringWithFile( + "\n=====\n".join(fail_json_iterations), f"{testfile}_{testname}.exceptions.txt" + ) self.compareInterfacesLinesToFile(lines, testfile, f"{testfile}_{testname}") self.compareInterfacesToFile(ifaces, testfile, f"{testfile}_{testname}.json") @@ -245,10 +252,10 @@ def test_change_method(self): testcases = { "change_method": [ { - 'iface': 'eth1', - 'option': 'method', - 'value': 'dhcp', - 'state': 'present', + "iface": "eth1", + "option": "method", + "value": "dhcp", + "state": "present", } ], } @@ -263,20 +270,27 @@ def test_change_method(self): options = options_list[0] fail_json_iterations = [] try: - changed, lines = interfaces_file.set_interface_option(module, lines, options['iface'], options['option'], - options['value'], options['state']) + changed, lines = interfaces_file.set_interface_option( + module, lines, options["iface"], options["option"], options["value"], options["state"] + ) # When a changed is made try running it again for proper idempotency if changed: - changed_again, 
lines = interfaces_file.set_interface_option(module, lines, options['iface'], - options['option'], options['value'], options['state']) - self.assertFalse(changed_again, - msg=f'Second request for change should return false for {testname} running on {testfile}') + changed_again, lines = interfaces_file.set_interface_option( + module, lines, options["iface"], options["option"], options["value"], options["state"] + ) + self.assertFalse( + changed_again, + msg=f"Second request for change should return false for {testname} running on {testfile}", + ) except AnsibleFailJson as e: fail_json_iterations.append( - f"fail_json message: {e!s}\noptions:\n{json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))}") - interfaces_file.write_changes(module, [d['line'] for d in lines if 'line' in d], path) + f"fail_json message: {e!s}\noptions:\n{json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))}" + ) + interfaces_file.write_changes(module, [d["line"] for d in lines if "line" in d], path) - self.compareStringWithFile("\n=====\n".join(fail_json_iterations), f"{testfile}_{testname}.exceptions.txt") + self.compareStringWithFile( + "\n=====\n".join(fail_json_iterations), f"{testfile}_{testname}.exceptions.txt" + ) self.compareInterfacesLinesToFile(lines, testfile, f"{testfile}_{testname}") self.compareInterfacesToFile(ifaces, testfile, f"{testfile}_{testname}.json") @@ -312,8 +326,8 @@ def test_get_interface_options(self): "netmask": "", "post-up": [], "pre-up": [], - "up": [] - } + "up": [], + }, }, { "address_family": "inet", @@ -321,7 +335,7 @@ def test_get_interface_options(self): "line": " address 1.2.3.5", "line_type": "option", "option": "address", - "value": "1.2.3.5" + "value": "1.2.3.5", }, { "address_family": "inet", @@ -329,7 +343,7 @@ def test_get_interface_options(self): "line": " netmask 255.255.255.0", "line_type": "option", "option": "netmask", - "value": "255.255.255.0" + "value": "255.255.255.0", }, { "address_family": "inet", @@ -337,8 +351,8 @@ def test_get_interface_options(self): "line": " gateway 1.2.3.1", "line_type": "option", "option": "gateway", - "value": "1.2.3.1" - } + "value": "1.2.3.1", + }, ], "iface_options": [ { @@ -347,7 +361,7 @@ def test_get_interface_options(self): "line": " address 1.2.3.5", "line_type": "option", "option": "address", - "value": "1.2.3.5" + "value": "1.2.3.5", }, { "address_family": "inet", @@ -355,7 +369,7 @@ def test_get_interface_options(self): "line": " netmask 255.255.255.0", "line_type": "option", "option": "netmask", - "value": "255.255.255.0" + "value": "255.255.255.0", }, { "address_family": "inet", @@ -363,9 +377,9 @@ def test_get_interface_options(self): "line": " gateway 1.2.3.1", "line_type": "option", "option": "gateway", - "value": "1.2.3.1" - } - ] + "value": "1.2.3.1", + }, + ], }, } @@ -383,7 +397,7 @@ def test_get_interface_options_2(self): "line": " address 1.2.3.5", "line_type": "option", "option": "address", - "value": "1.2.3.5" + "value": "1.2.3.5", }, { "address_family": "inet", @@ -391,7 +405,7 @@ def test_get_interface_options_2(self): "line": " netmask 255.255.255.0", "line_type": "option", "option": "netmask", - "value": "255.255.255.0" + "value": "255.255.255.0", }, { "address_family": "inet", @@ -399,8 +413,8 @@ def test_get_interface_options_2(self): "line": " gateway 1.2.3.1", "line_type": "option", "option": "gateway", - "value": "1.2.3.1" - } + "value": "1.2.3.1", + }, ], "target_options": [ { @@ -409,15 +423,17 @@ def test_get_interface_options_2(self): "line": " address 1.2.3.5", 
"line_type": "option", "option": "address", - "value": "1.2.3.5" + "value": "1.2.3.5", } ], - "option": "address" + "option": "address", }, } for testname in testcases.keys(): - target_options = interfaces_file.get_target_options(testcases[testname]["iface_options"], testcases[testname]["option"]) + target_options = interfaces_file.get_target_options( + testcases[testname]["iface_options"], testcases[testname]["option"] + ) self.assertEqual(testcases[testname]["target_options"], target_options) def test_update_existing_option_line(self): @@ -429,7 +445,7 @@ def test_update_existing_option_line(self): "line": " address 1.2.3.5", "line_type": "option", "option": "address", - "value": "1.2.3.5" + "value": "1.2.3.5", }, "value": "1.2.3.4", "result": " address 1.2.3.4", @@ -437,7 +453,9 @@ def test_update_existing_option_line(self): } for testname in testcases.keys(): - updated = interfaces_file.update_existing_option_line(testcases[testname]["target_option"], testcases[testname]["value"]) + updated = interfaces_file.update_existing_option_line( + testcases[testname]["target_option"], testcases[testname]["value"] + ) self.assertEqual(testcases[testname]["result"], updated) def test_predefined(self): @@ -453,7 +471,7 @@ def test_predefined(self): "iface": "eno1", "option": "address", "value": "1.2.3.5", - 'state': 'present', + "state": "present", }, "result_lines": [ "iface eno1 inet static", @@ -467,65 +485,71 @@ def test_predefined(self): for testname in testcases.keys(): lines, ifaces = interfaces_file.read_interfaces_lines(module, testcases[testname]["source_lines"]) - changed, lines = interfaces_file.set_interface_option(module, lines, testcases[testname]["input"]['iface'], testcases[testname]["input"]['option'], - testcases[testname]["input"]['value'], testcases[testname]["input"]['state']) - self.assertEqual(testcases[testname]["result_lines"], [d['line'] for d in lines if 'line' in d]) - assert testcases[testname]['changed'] == changed + changed, lines = interfaces_file.set_interface_option( + module, + lines, + testcases[testname]["input"]["iface"], + testcases[testname]["input"]["option"], + testcases[testname]["input"]["value"], + testcases[testname]["input"]["state"], + ) + self.assertEqual(testcases[testname]["result_lines"], [d["line"] for d in lines if "line" in d]) + assert testcases[testname]["changed"] == changed def test_inet_inet6(self): testcases = { "change_ipv4": [ { - 'iface': 'eth0', - 'address_family': 'inet', - 'option': 'address', - 'value': '192.168.0.42', - 'state': 'present', + "iface": "eth0", + "address_family": "inet", + "option": "address", + "value": "192.168.0.42", + "state": "present", } ], "change_ipv6": [ { - 'iface': 'eth0', - 'address_family': 'inet6', - 'option': 'address', - 'value': 'fc00::42', - 'state': 'present', + "iface": "eth0", + "address_family": "inet6", + "option": "address", + "value": "fc00::42", + "state": "present", } ], "change_ipv4_pre_up": [ { - 'iface': 'eth0', - 'address_family': 'inet', - 'option': 'pre-up', - 'value': 'XXXX_ipv4', - 'state': 'present', + "iface": "eth0", + "address_family": "inet", + "option": "pre-up", + "value": "XXXX_ipv4", + "state": "present", } ], "change_ipv6_pre_up": [ { - 'iface': 'eth0', - 'address_family': 'inet6', - 'option': 'pre-up', - 'value': 'XXXX_ipv6', - 'state': 'present', + "iface": "eth0", + "address_family": "inet6", + "option": "pre-up", + "value": "XXXX_ipv6", + "state": "present", } ], "change_ipv4_post_up": [ { - 'iface': 'eth0', - 'address_family': 'inet', - 'option': 'post-up', - 
'value': 'XXXX_ipv4', - 'state': 'present', + "iface": "eth0", + "address_family": "inet", + "option": "post-up", + "value": "XXXX_ipv4", + "state": "present", } ], "change_ipv6_post_up": [ { - 'iface': 'eth0', - 'address_family': 'inet6', - 'option': 'post-up', - 'value': 'XXXX_ipv6', - 'state': 'present', + "iface": "eth0", + "address_family": "inet6", + "option": "post-up", + "value": "XXXX_ipv6", + "state": "present", } ], } @@ -540,14 +564,24 @@ def test_inet_inet6(self): options = options_list[0] fail_json_iterations = [] try: - dummy, lines = interfaces_file.set_interface_option(module, lines, options['iface'], options['option'], - options['value'], options['state'], options['address_family']) + dummy, lines = interfaces_file.set_interface_option( + module, + lines, + options["iface"], + options["option"], + options["value"], + options["state"], + options["address_family"], + ) except AnsibleFailJson as e: fail_json_iterations.append( - f"fail_json message: {e!s}\noptions:\n{json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))}") - interfaces_file.write_changes(module, [d['line'] for d in lines if 'line' in d], path) + f"fail_json message: {e!s}\noptions:\n{json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))}" + ) + interfaces_file.write_changes(module, [d["line"] for d in lines if "line" in d], path) - self.compareStringWithFile("\n=====\n".join(fail_json_iterations), f"{testfile}_{testname}.exceptions.txt") + self.compareStringWithFile( + "\n=====\n".join(fail_json_iterations), f"{testfile}_{testname}.exceptions.txt" + ) self.compareInterfacesLinesToFile(lines, testfile, f"{testfile}_{testname}") self.compareInterfacesToFile(ifaces, testfile, f"{testfile}_{testname}.json") diff --git a/tests/unit/plugins/modules/linode_conftest.py b/tests/unit/plugins/modules/linode_conftest.py index 4813c9a083f..97d9ea40292 100644 --- a/tests/unit/plugins/modules/linode_conftest.py +++ b/tests/unit/plugins/modules/linode_conftest.py @@ -9,78 +9,67 @@ @pytest.fixture def api_key(monkeypatch): - monkeypatch.setenv('LINODE_API_KEY', 'foobar') + monkeypatch.setenv("LINODE_API_KEY", "foobar") @pytest.fixture def auth(monkeypatch): def patched_test_echo(dummy): return [] - monkeypatch.setattr('linode.api.Api.test_echo', patched_test_echo) + + monkeypatch.setattr("linode.api.Api.test_echo", patched_test_echo) @pytest.fixture def access_token(monkeypatch): - monkeypatch.setenv('LINODE_ACCESS_TOKEN', 'barfoo') + monkeypatch.setenv("LINODE_ACCESS_TOKEN", "barfoo") @pytest.fixture def no_access_token_in_env(monkeypatch): try: - monkeypatch.delenv('LINODE_ACCESS_TOKEN') + monkeypatch.delenv("LINODE_ACCESS_TOKEN") except KeyError: pass @pytest.fixture def default_args(): - return {'state': 'present', 'label': 'foo'} + return {"state": "present", "label": "foo"} @pytest.fixture def mock_linode(): - class Linode(): + class Linode: def delete(self, *args, **kwargs): pass @property def _raw_json(self): return { - "alerts": { - "cpu": 90, - "io": 10000, - "network_in": 10, - "network_out": 10, - "transfer_quota": 80 - }, + "alerts": {"cpu": 90, "io": 10000, "network_in": 10, "network_out": 10, "transfer_quota": 80}, "backups": { "enabled": False, "schedule": { "day": None, "window": None, - } + }, }, "created": "2018-09-26T08:12:33", "group": "Foobar Group", "hypervisor": "kvm", "id": 10480444, "image": "linode/centos7", - "ipv4": [ - "130.132.285.233" - ], + "ipv4": ["130.132.285.233"], "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64", "label": "lin-foo", "region": "eu-west", - 
"specs": { - "disk": 25600, - "memory": 1024, - "transfer": 1000, - "vcpus": 1 - }, + "specs": {"disk": 25600, "memory": 1024, "transfer": 1000, "vcpus": 1}, "status": "running", "tags": [], "type": "g6-nanode-1", "updated": "2018-09-26T10:10:14", - "watchdog_enabled": True + "watchdog_enabled": True, } + return Linode() diff --git a/tests/unit/plugins/modules/oneview_conftest.py b/tests/unit/plugins/modules/oneview_conftest.py index 85e8eedaec5..a0598239b21 100644 --- a/tests/unit/plugins/modules/oneview_conftest.py +++ b/tests/unit/plugins/modules/oneview_conftest.py @@ -13,7 +13,7 @@ @pytest.fixture def mock_ov_client(): - patcher_json_file = patch.object(OneViewClient, 'from_json_file') + patcher_json_file = patch.object(OneViewClient, "from_json_file") client = patcher_json_file.start() return client.return_value diff --git a/tests/unit/plugins/modules/oneview_module_loader.py b/tests/unit/plugins/modules/oneview_module_loader.py index c46306ede4e..f979424be08 100644 --- a/tests/unit/plugins/modules/oneview_module_loader.py +++ b/tests/unit/plugins/modules/oneview_module_loader.py @@ -9,11 +9,11 @@ # FIXME: These should be done inside of a fixture so that they're only mocked during # these unittests -if 'hpOneView' not in sys.modules: - sys.modules['hpOneView'] = Mock() - sys.modules['hpOneView.oneview_client'] = Mock() +if "hpOneView" not in sys.modules: + sys.modules["hpOneView"] = Mock() + sys.modules["hpOneView.oneview_client"] = Mock() -ONEVIEW_MODULE_UTILS_PATH = 'ansible_collections.community.general.plugins.module_utils.oneview' +ONEVIEW_MODULE_UTILS_PATH = "ansible_collections.community.general.plugins.module_utils.oneview" from ansible_collections.community.general.plugins.module_utils.oneview import ( # noqa: F401, pylint: disable=unused-import OneViewModuleException, OneViewModuleTaskError, diff --git a/tests/unit/plugins/modules/test_alerta_customer.py b/tests/unit/plugins/modules/test_alerta_customer.py index e68ebe5b4e5..94f141847a9 100644 --- a/tests/unit/plugins/modules/test_alerta_customer.py +++ b/tests/unit/plugins/modules/test_alerta_customer.py @@ -9,7 +9,12 @@ import pytest from ansible_collections.community.general.plugins.modules import alerta_customer -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) class MockedReponse: @@ -21,53 +26,62 @@ def read(self): def customer_response_page1(): - server_response = json.dumps({"customers": [ + server_response = json.dumps( { - "customer": "admin", - "href": "http://localhost:8080/api/customer/d89664a7-9c87-4ab9-8be8-830e7e5f0616", - "id": "d89664a7-9c87-4ab9-8be8-830e7e5f0616", - "match": "admin@example.com" - }, - { - "customer": "Developer", - "href": "http://localhost:8080/api/customer/188ed093-84cc-4f46-bf80-4c9127180d9c", - "id": "188ed093-84cc-4f46-bf80-4c9127180d9c", - "match": "dev@example.com" - }], - "more": True, - "page": 1, - "pageSize": 50, - "pages": 1, - "status": "ok", - "total": 2}) + "customers": [ + { + "customer": "admin", + "href": "http://localhost:8080/api/customer/d89664a7-9c87-4ab9-8be8-830e7e5f0616", + "id": "d89664a7-9c87-4ab9-8be8-830e7e5f0616", + "match": "admin@example.com", + }, + { + "customer": "Developer", + "href": "http://localhost:8080/api/customer/188ed093-84cc-4f46-bf80-4c9127180d9c", + "id": 
"188ed093-84cc-4f46-bf80-4c9127180d9c", + "match": "dev@example.com", + }, + ], + "more": True, + "page": 1, + "pageSize": 50, + "pages": 1, + "status": "ok", + "total": 2, + } + ) return (MockedReponse(server_response), {"status": 200}) def customer_response_page2(): - server_response = json.dumps({"customers": [ - { - "customer": "admin", - "href": "http://localhost:8080/api/customer/d89664a7-9c87-4ab9-8be8-830e7e5f0616", - "id": "d89664a7-9c87-4ab9-8be8-830e7e5f0616", - "match": "admin@example.com" - }, + server_response = json.dumps( { - "customer": "Developer", - "href": "http://localhost:8080/api/customer/188ed093-84cc-4f46-bf80-4c9127180d9c", - "id": "188ed093-84cc-4f46-bf80-4c9127180d9c", - "match": "dev@example.com" - }], - "more": True, - "page": 2, - "pageSize": 50, - "pages": 2, - "status": "ok", - "total": 52}) + "customers": [ + { + "customer": "admin", + "href": "http://localhost:8080/api/customer/d89664a7-9c87-4ab9-8be8-830e7e5f0616", + "id": "d89664a7-9c87-4ab9-8be8-830e7e5f0616", + "match": "admin@example.com", + }, + { + "customer": "Developer", + "href": "http://localhost:8080/api/customer/188ed093-84cc-4f46-bf80-4c9127180d9c", + "id": "188ed093-84cc-4f46-bf80-4c9127180d9c", + "match": "dev@example.com", + }, + ], + "more": True, + "page": 2, + "pageSize": 50, + "pages": 2, + "status": "ok", + "total": 52, + } + ) return (MockedReponse(server_response), {"status": 200}) class TestAlertaCustomerModule(ModuleTestCase): - def setUp(self): super().setUp() self.module = alerta_customer @@ -87,23 +101,23 @@ def test_without_parameters(self): def test_without_content(self): """Failure if customer and match are missing""" - with set_module_args({ - 'alerta_url': "http://localhost:8080", - 'api_username': "admin@example.com", - 'api_password': "password" - }): + with set_module_args( + {"alerta_url": "http://localhost:8080", "api_username": "admin@example.com", "api_password": "password"} + ): with self.assertRaises(AnsibleFailJson): self.module.main() def test_successful_existing_customer_creation(self): """Test the customer creation (already exists).""" - with set_module_args({ - 'alerta_url': "http://localhost:8080", - 'api_username': "admin@example.com", - 'api_password': "password", - 'customer': 'Developer', - 'match': 'dev@example.com' - }): + with set_module_args( + { + "alerta_url": "http://localhost:8080", + "api_username": "admin@example.com", + "api_password": "password", + "customer": "Developer", + "match": "dev@example.com", + } + ): with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: fetch_url_mock.return_value = customer_response_page1() with self.assertRaises(AnsibleExitJson): @@ -112,97 +126,118 @@ def test_successful_existing_customer_creation(self): def test_successful_customer_creation(self): """Test the customer creation.""" - with set_module_args({ - 'alerta_url': "http://localhost:8080", - 'api_username': "admin@example.com", - 'api_password': "password", - 'customer': 'Developer', - 'match': 'dev2@example.com' - }): + with set_module_args( + { + "alerta_url": "http://localhost:8080", + "api_username": "admin@example.com", + "api_password": "password", + "customer": "Developer", + "match": "dev2@example.com", + } + ): with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: fetch_url_mock.return_value = customer_response_page1() with self.assertRaises(AnsibleExitJson): self.module.main() self.assertTrue(fetch_url_mock.call_count, 1) - call_data = json.loads(fetch_url_mock.call_args[1]['data']) - assert call_data['match'] == 
"dev2@example.com" - assert call_data['customer'] == "Developer" + call_data = json.loads(fetch_url_mock.call_args[1]["data"]) + assert call_data["match"] == "dev2@example.com" + assert call_data["customer"] == "Developer" def test_successful_customer_creation_key(self): """Test the customer creation using api_key.""" - with set_module_args({ - 'alerta_url': "http://localhost:8080", - 'api_key': "demo-key", - 'customer': 'Developer', - 'match': 'dev2@example.com' - }): + with set_module_args( + { + "alerta_url": "http://localhost:8080", + "api_key": "demo-key", + "customer": "Developer", + "match": "dev2@example.com", + } + ): with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: fetch_url_mock.return_value = customer_response_page1() with self.assertRaises(AnsibleExitJson): self.module.main() self.assertTrue(fetch_url_mock.call_count, 1) - call_data = json.loads(fetch_url_mock.call_args[1]['data']) - assert call_data['match'] == "dev2@example.com" - assert call_data['customer'] == "Developer" + call_data = json.loads(fetch_url_mock.call_args[1]["data"]) + assert call_data["match"] == "dev2@example.com" + assert call_data["customer"] == "Developer" def test_failed_not_found(self): """Test failure with wrong URL.""" - with set_module_args({ - 'alerta_url': "http://localhost:8080/s", - 'api_username': "admin@example.com", - 'api_password': "password", - 'customer': 'Developer', - 'match': 'dev@example.com' - }): + with set_module_args( + { + "alerta_url": "http://localhost:8080/s", + "api_username": "admin@example.com", + "api_password": "password", + "customer": "Developer", + "match": "dev@example.com", + } + ): with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'Not found for request GET on http://localhost:8080/a/api/customers'}) + fetch_url_mock.return_value = ( + None, + {"status": 404, "msg": "Not found for request GET on http://localhost:8080/a/api/customers"}, + ) with self.assertRaises(AnsibleFailJson): self.module.main() def test_failed_forbidden(self): """Test failure with wrong user.""" - with set_module_args({ - 'alerta_url': "http://localhost:8080", - 'api_username': "dev@example.com", - 'api_password': "password", - 'customer': 'Developer', - 'match': 'dev@example.com' - }): + with set_module_args( + { + "alerta_url": "http://localhost:8080", + "api_username": "dev@example.com", + "api_password": "password", + "customer": "Developer", + "match": "dev@example.com", + } + ): with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 403, 'msg': 'Permission Denied for GET on http://localhost:8080/api/customers'}) + fetch_url_mock.return_value = ( + None, + {"status": 403, "msg": "Permission Denied for GET on http://localhost:8080/api/customers"}, + ) with self.assertRaises(AnsibleFailJson): self.module.main() def test_failed_unauthorized(self): """Test failure with wrong username or password.""" - with set_module_args({ - 'alerta_url': "http://localhost:8080", - 'api_username': "admin@example.com", - 'api_password': "password_wrong", - 'customer': 'Developer', - 'match': 'dev@example.com' - }): + with set_module_args( + { + "alerta_url": "http://localhost:8080", + "api_username": "admin@example.com", + "api_password": "password_wrong", + "customer": "Developer", + "match": "dev@example.com", + } + ): with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 401, 'msg': 
'Unauthorized to request GET on http://localhost:8080/api/customers'}) + fetch_url_mock.return_value = ( + None, + {"status": 401, "msg": "Unauthorized to request GET on http://localhost:8080/api/customers"}, + ) with self.assertRaises(AnsibleFailJson): self.module.main() def test_successful_customer_deletion(self): """Test the customer deletion.""" - with set_module_args({ - 'alerta_url': "http://localhost:8080", - 'api_username': "admin@example.com", - 'api_password': "password", - 'customer': 'Developer', - 'match': 'dev@example.com', - 'state': 'absent' - }): + with set_module_args( + { + "alerta_url": "http://localhost:8080", + "api_username": "admin@example.com", + "api_password": "password", + "customer": "Developer", + "match": "dev@example.com", + "state": "absent", + } + ): with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: fetch_url_mock.return_value = customer_response_page1() with self.assertRaises(AnsibleExitJson): @@ -211,14 +246,16 @@ def test_successful_customer_deletion(self): def test_successful_customer_deletion_page2(self): """Test the customer deletion on the second page.""" - with set_module_args({ - 'alerta_url': "http://localhost:8080", - 'api_username': "admin@example.com", - 'api_password': "password", - 'customer': 'Developer', - 'match': 'dev@example.com', - 'state': 'absent' - }): + with set_module_args( + { + "alerta_url": "http://localhost:8080", + "api_username": "admin@example.com", + "api_password": "password", + "customer": "Developer", + "match": "dev@example.com", + "state": "absent", + } + ): with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: fetch_url_mock.return_value = customer_response_page2() with self.assertRaises(AnsibleExitJson): @@ -227,14 +264,16 @@ def test_successful_customer_deletion_page2(self): def test_successful_nonexisting_customer_deletion(self): """Test the customer deletion (non existing).""" - with set_module_args({ - 'alerta_url': "http://localhost:8080", - 'api_username': "admin@example.com", - 'api_password': "password", - 'customer': 'Billing', - 'match': 'dev@example.com', - 'state': 'absent' - }): + with set_module_args( + { + "alerta_url": "http://localhost:8080", + "api_username": "admin@example.com", + "api_password": "password", + "customer": "Billing", + "match": "dev@example.com", + "state": "absent", + } + ): with patch.object(alerta_customer, "fetch_url") as fetch_url_mock: fetch_url_mock.return_value = customer_response_page1() with self.assertRaises(AnsibleExitJson): diff --git a/tests/unit/plugins/modules/test_apache2_module.py b/tests/unit/plugins/modules/test_apache2_module.py index ec549377c14..dc655743a8e 100644 --- a/tests/unit/plugins/modules/test_apache2_module.py +++ b/tests/unit/plugins/modules/test_apache2_module.py @@ -9,11 +9,11 @@ from ansible_collections.community.general.plugins.modules.apache2_module import create_apache_identifier REPLACEMENTS = [ - ('php7.1', 'php7_module'), - ('php5.6', 'php5_module'), - ('shib2', 'mod_shib'), - ('evasive', 'evasive20_module'), - ('thismoduledoesnotexist', 'thismoduledoesnotexist_module'), # the default + ("php7.1", "php7_module"), + ("php5.6", "php5_module"), + ("shib2", "mod_shib"), + ("evasive", "evasive20_module"), + ("thismoduledoesnotexist", "thismoduledoesnotexist_module"), # the default ] diff --git a/tests/unit/plugins/modules/test_apk.py b/tests/unit/plugins/modules/test_apk.py index 25ba0fce229..46a4aacb54f 100644 --- a/tests/unit/plugins/modules/test_apk.py +++ b/tests/unit/plugins/modules/test_apk.py @@ -11,14 +11,13 
@@ class TestApkQueryLatest(unittest.TestCase): - def setUp(self): self.module_names = [ - 'bash', - 'g++', + "bash", + "g++", ] - @mock.patch('ansible_collections.community.general.plugins.modules.apk.AnsibleModule') + @mock.patch("ansible_collections.community.general.plugins.modules.apk.AnsibleModule") def test_not_latest(self, mock_module): apk.APK_PATH = [""] for module_name in self.module_names: @@ -27,7 +26,7 @@ def test_not_latest(self, mock_module): command_result = apk.query_latest(mock_module, module_name) self.assertFalse(command_result) - @mock.patch('ansible_collections.community.general.plugins.modules.apk.AnsibleModule') + @mock.patch("ansible_collections.community.general.plugins.modules.apk.AnsibleModule") def test_latest(self, mock_module): apk.APK_PATH = [""] for module_name in self.module_names: diff --git a/tests/unit/plugins/modules/test_archive.py b/tests/unit/plugins/modules/test_archive.py index b4656824dab..510ef95b91b 100644 --- a/tests/unit/plugins/modules/test_archive.py +++ b/tests/unit/plugins/modules/test_archive.py @@ -8,7 +8,10 @@ from unittest.mock import Mock, patch from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules.archive import get_archive, common_path @@ -16,29 +19,23 @@ class TestArchive(ModuleTestCase): def setUp(self): super().setUp() - self.mock_os_path_isdir = patch('os.path.isdir') + self.mock_os_path_isdir = patch("os.path.isdir") self.os_path_isdir = self.mock_os_path_isdir.start() def tearDown(self): self.os_path_isdir = self.mock_os_path_isdir.stop() def test_archive_removal_safety(self): - with set_module_args( - dict( - path=['/foo', '/bar', '/baz'], - dest='/foo/destination.tgz', - remove=True - ) - ): + with set_module_args(dict(path=["/foo", "/bar", "/baz"], dest="/foo/destination.tgz", remove=True)): module = AnsibleModule( argument_spec=dict( - path=dict(type='list', elements='path', required=True), - format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']), - dest=dict(type='path'), - exclude_path=dict(type='list', elements='path', default=[]), - exclusion_patterns=dict(type='list', elements='path'), - force_archive=dict(type='bool', default=False), - remove=dict(type='bool', default=False), + path=dict(type="list", elements="path", required=True), + format=dict(type="str", default="gz", choices=["bz2", "gz", "tar", "xz", "zip"]), + dest=dict(type="path"), + exclude_path=dict(type="list", elements="path", default=[]), + exclusion_patterns=dict(type="list", elements="path"), + force_archive=dict(type="bool", default=False), + remove=dict(type="bool", default=False), ), add_file_common_args=True, supports_check_mode=True, @@ -51,19 +48,19 @@ def test_archive_removal_safety(self): archive = get_archive(module) module.fail_json.assert_called_once_with( - path=b', '.join(archive.paths), - msg='Error, created archive can not be contained in source paths when remove=true' + path=b", ".join(archive.paths), + msg="Error, created archive can not be contained in source paths when remove=true", ) PATHS: tuple[tuple[list[str | bytes], str | bytes], ...] 
= ( - ([], ''), - (['/'], '/'), - ([b'/'], b'/'), - (['/foo', '/bar', '/baz', '/foobar', '/barbaz', '/foo/bar'], '/'), - ([b'/foo', b'/bar', b'/baz', b'/foobar', b'/barbaz', b'/foo/bar'], b'/'), - (['/foo/bar/baz', '/foo/bar'], '/foo/'), - (['/foo/bar/baz', '/foo/bar/'], '/foo/bar/'), + ([], ""), + (["/"], "/"), + ([b"/"], b"/"), + (["/foo", "/bar", "/baz", "/foobar", "/barbaz", "/foo/bar"], "/"), + ([b"/foo", b"/bar", b"/baz", b"/foobar", b"/barbaz", b"/foo/bar"], b"/"), + (["/foo/bar/baz", "/foo/bar"], "/foo/"), + (["/foo/bar/baz", "/foo/bar/"], "/foo/bar/"), ) diff --git a/tests/unit/plugins/modules/test_bitbucket_access_key.py b/tests/unit/plugins/modules/test_bitbucket_access_key.py index bbf478c6fc8..58cafa5981e 100644 --- a/tests/unit/plugins/modules/test_bitbucket_access_key.py +++ b/tests/unit/plugins/modules/test_bitbucket_access_key.py @@ -9,7 +9,12 @@ from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper from ansible_collections.community.general.plugins.modules import bitbucket_access_key -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleFailJson, + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) class TestBucketAccessKeyModule(ModuleTestCase): @@ -19,325 +24,313 @@ def setUp(self): def test_missing_key_with_present_state(self): with self.assertRaises(AnsibleFailJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'label': 'key name', - 'state': 'present', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "label": "key name", + "state": "present", + } + ): self.module.main() - self.assertEqual(exec_info.exception.args[0]['msg'], self.module.error_messages['required_key']) + self.assertEqual(exec_info.exception.args[0]["msg"], self.module.error_messages["required_key"]) - @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value=None) + @patch.object(bitbucket_access_key, "get_existing_deploy_key", return_value=None) def test_create_deploy_key(self, *args): - with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock: + with patch.object(self.module, "create_deploy_key") as create_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'user': 'ABC', - 'password': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'key': 'public_key', - 'label': 'key name', - 'state': 'present', - }): + with set_module_args( + { + "user": "ABC", + "password": "XXX", + "workspace": "name", + "repository": "repo", + "key": "public_key", + "label": "key name", + "state": "present", + } + ): self.module.main() self.assertEqual(create_deploy_key_mock.call_count, 1) - self.assertEqual(exec_info.exception.args[0]['changed'], True) + self.assertEqual(exec_info.exception.args[0]["changed"], True) - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value=None) + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object(bitbucket_access_key, "get_existing_deploy_key", return_value=None) def test_create_deploy_key_check_mode(self, *args): - with 
patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock: + with patch.object(self.module, "create_deploy_key") as create_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'key': 'public_key', - 'label': 'key name', - 'state': 'present', - '_ansible_check_mode': True, - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "key": "public_key", + "label": "key name", + "state": "present", + "_ansible_check_mode": True, + } + ): self.module.main() self.assertEqual(create_deploy_key_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], True) + self.assertEqual(exec_info.exception.args[0]["changed"], True) - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={ - "id": 123, - "label": "mykey", - "created_on": "2019-03-23T10:15:21.517377+00:00", - "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5", - "type": "deploy_key", - "comment": "", - "last_used": None, - "repository": { - "links": { - "self": { - "href": "https://api.bitbucket.org/2.0/repositories/mleu/test" - }, - "html": { - "href": "https://bitbucket.org/mleu/test" + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_access_key, + "get_existing_deploy_key", + return_value={ + "id": 123, + "label": "mykey", + "created_on": "2019-03-23T10:15:21.517377+00:00", + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5", + "type": "deploy_key", + "comment": "", + "last_used": None, + "repository": { + "links": { + "self": {"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"}, + "html": {"href": "https://bitbucket.org/mleu/test"}, + "avatar": {"href": "..."}, }, - "avatar": { - "href": "..." 
- } + "type": "repository", + "name": "test", + "full_name": "mleu/test", + "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}", }, - "type": "repository", - "name": "test", - "full_name": "mleu/test", - "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}" - }, - "links": { - "self": { - "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123" - } + "links": {"self": {"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"}}, }, - }) + ) def test_update_deploy_key(self, *args): - with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock: - with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock: + with patch.object(self.module, "delete_deploy_key") as delete_deploy_key_mock: + with patch.object(self.module, "create_deploy_key") as create_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'key': 'new public key', - 'label': 'mykey', - 'state': 'present', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "key": "new public key", + "label": "mykey", + "state": "present", + } + ): self.module.main() self.assertEqual(delete_deploy_key_mock.call_count, 1) self.assertEqual(create_deploy_key_mock.call_count, 1) - self.assertEqual(exec_info.exception.args[0]['changed'], True) + self.assertEqual(exec_info.exception.args[0]["changed"], True) - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={ - "id": 123, - "label": "mykey", - "created_on": "2019-03-23T10:15:21.517377+00:00", - "key": "new public key", - "type": "deploy_key", - "comment": "", - "last_used": None, - "repository": { - "links": { - "self": { - "href": "https://api.bitbucket.org/2.0/repositories/mleu/test" + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_access_key, + "get_existing_deploy_key", + return_value={ + "id": 123, + "label": "mykey", + "created_on": "2019-03-23T10:15:21.517377+00:00", + "key": "new public key", + "type": "deploy_key", + "comment": "", + "last_used": None, + "repository": { + "links": { + "self": {"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"}, + "html": {"href": "https://bitbucket.org/mleu/test"}, + "avatar": {"href": "..."}, }, - "html": { - "href": "https://bitbucket.org/mleu/test" - }, - "avatar": { - "href": "..." 
- } + "type": "repository", + "name": "test", + "full_name": "mleu/test", + "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}", }, - "type": "repository", - "name": "test", - "full_name": "mleu/test", - "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}" + "links": {"self": {"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"}}, }, - "links": { - "self": { - "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123" - } - }, - }) + ) def test_dont_update_same_value(self, *args): - with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock: - with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock: + with patch.object(self.module, "delete_deploy_key") as delete_deploy_key_mock: + with patch.object(self.module, "create_deploy_key") as create_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'key': 'new public key', - 'label': 'mykey', - 'state': 'present', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "key": "new public key", + "label": "mykey", + "state": "present", + } + ): self.module.main() self.assertEqual(delete_deploy_key_mock.call_count, 0) self.assertEqual(create_deploy_key_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], False) + self.assertEqual(exec_info.exception.args[0]["changed"], False) - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={ - "id": 123, - "label": "mykey", - "created_on": "2019-03-23T10:15:21.517377+00:00", - "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5", - "type": "deploy_key", - "comment": "", - "last_used": None, - "repository": { - "links": { - "self": { - "href": "https://api.bitbucket.org/2.0/repositories/mleu/test" - }, - "html": { - "href": "https://bitbucket.org/mleu/test" + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_access_key, + "get_existing_deploy_key", + return_value={ + "id": 123, + "label": "mykey", + "created_on": "2019-03-23T10:15:21.517377+00:00", + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5", + "type": "deploy_key", + "comment": "", + "last_used": None, + "repository": { + "links": { + "self": {"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"}, + "html": {"href": "https://bitbucket.org/mleu/test"}, + "avatar": {"href": "..."}, }, - "avatar": { - "href": "..." 
- } + "type": "repository", + "name": "test", + "full_name": "mleu/test", + "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}", }, - "type": "repository", - "name": "test", - "full_name": "mleu/test", - "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}" - }, - "links": { - "self": { - "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123" - } + "links": {"self": {"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"}}, }, - }) + ) def test_update_deploy_key_check_mode(self, *args): - with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock: - with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock: + with patch.object(self.module, "delete_deploy_key") as delete_deploy_key_mock: + with patch.object(self.module, "create_deploy_key") as create_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'key': 'new public key', - 'label': 'mykey', - 'state': 'present', - '_ansible_check_mode': True, - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "key": "new public key", + "label": "mykey", + "state": "present", + "_ansible_check_mode": True, + } + ): self.module.main() self.assertEqual(delete_deploy_key_mock.call_count, 0) self.assertEqual(create_deploy_key_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], True) + self.assertEqual(exec_info.exception.args[0]["changed"], True) - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={ - "id": 123, - "label": "mykey", - "created_on": "2019-03-23T10:15:21.517377+00:00", - "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5", - "type": "deploy_key", - "comment": "", - "last_used": None, - "repository": { - "links": { - "self": { - "href": "https://api.bitbucket.org/2.0/repositories/mleu/test" + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_access_key, + "get_existing_deploy_key", + return_value={ + "id": 123, + "label": "mykey", + "created_on": "2019-03-23T10:15:21.517377+00:00", + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5", + "type": "deploy_key", + "comment": "", + "last_used": None, + "repository": { + "links": { + "self": {"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"}, + "html": {"href": "https://bitbucket.org/mleu/test"}, + "avatar": {"href": "..."}, }, - "html": { - "href": "https://bitbucket.org/mleu/test" - }, - "avatar": { - "href": "..." 
- } + "type": "repository", + "name": "test", + "full_name": "mleu/test", + "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}", }, - "type": "repository", - "name": "test", - "full_name": "mleu/test", - "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}" + "links": {"self": {"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"}}, }, - "links": { - "self": { - "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123" - } - }, - }) + ) def test_delete_deploy_key(self, *args): - with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock: + with patch.object(self.module, "delete_deploy_key") as delete_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'label': 'mykey', - 'state': 'absent', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "label": "mykey", + "state": "absent", + } + ): self.module.main() self.assertEqual(delete_deploy_key_mock.call_count, 1) - self.assertEqual(exec_info.exception.args[0]['changed'], True) + self.assertEqual(exec_info.exception.args[0]["changed"], True) - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value=None) + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object(bitbucket_access_key, "get_existing_deploy_key", return_value=None) def test_delete_absent_deploy_key(self, *args): - with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock: + with patch.object(self.module, "delete_deploy_key") as delete_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'label': 'mykey', - 'state': 'absent', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "label": "mykey", + "state": "absent", + } + ): self.module.main() self.assertEqual(delete_deploy_key_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], False) + self.assertEqual(exec_info.exception.args[0]["changed"], False) - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={ - "id": 123, - "label": "mykey", - "created_on": "2019-03-23T10:15:21.517377+00:00", - "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5", - "type": "deploy_key", - "comment": "", - "last_used": None, - "repository": { - "links": { - "self": { - "href": "https://api.bitbucket.org/2.0/repositories/mleu/test" - }, - "html": { - "href": "https://bitbucket.org/mleu/test" + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_access_key, + "get_existing_deploy_key", + return_value={ + "id": 123, + "label": "mykey", + "created_on": "2019-03-23T10:15:21.517377+00:00", + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5", + "type": "deploy_key", + "comment": "", + "last_used": None, + "repository": { + "links": { + "self": {"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"}, + "html": {"href": "https://bitbucket.org/mleu/test"}, + "avatar": {"href": "..."}, }, - 
"avatar": { - "href": "..." - } + "type": "repository", + "name": "test", + "full_name": "mleu/test", + "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}", }, - "type": "repository", - "name": "test", - "full_name": "mleu/test", - "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}" - }, - "links": { - "self": { - "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123" - } + "links": {"self": {"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"}}, }, - }) + ) def test_delete_deploy_key_check_mode(self, *args): - with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock: + with patch.object(self.module, "delete_deploy_key") as delete_deploy_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'label': 'mykey', - 'state': 'absent', - '_ansible_check_mode': True, - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "label": "mykey", + "state": "absent", + "_ansible_check_mode": True, + } + ): self.module.main() self.assertEqual(delete_deploy_key_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], True) + self.assertEqual(exec_info.exception.args[0]["changed"], True) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_bitbucket_pipeline_key_pair.py b/tests/unit/plugins/modules/test_bitbucket_pipeline_key_pair.py index ab140d40da2..444127e8864 100644 --- a/tests/unit/plugins/modules/test_bitbucket_pipeline_key_pair.py +++ b/tests/unit/plugins/modules/test_bitbucket_pipeline_key_pair.py @@ -9,7 +9,12 @@ from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper from ansible_collections.community.general.plugins.modules import bitbucket_pipeline_key_pair -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleFailJson, + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) class TestBucketPipelineKeyPairModule(ModuleTestCase): @@ -19,180 +24,218 @@ def setUp(self): def test_missing_keys_with_present_state(self): with self.assertRaises(AnsibleFailJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'state': 'present', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "state": "present", + } + ): self.module.main() - self.assertEqual(exec_info.exception.args[0]['msg'], self.module.error_messages['required_keys']) + self.assertEqual(exec_info.exception.args[0]["msg"], self.module.error_messages["required_keys"]) - @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value=None) + @patch.object(bitbucket_pipeline_key_pair, "get_existing_ssh_key_pair", return_value=None) def test_create_keys(self, *args): - with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock: + with patch.object(self.module, "update_ssh_key_pair") as update_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'user': 'ABC', - 'password': 
'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'public_key': 'public', - 'private_key': 'PRIVATE', - 'state': 'present', - }): + with set_module_args( + { + "user": "ABC", + "password": "XXX", + "workspace": "name", + "repository": "repo", + "public_key": "public", + "private_key": "PRIVATE", + "state": "present", + } + ): self.module.main() self.assertEqual(update_ssh_key_pair_mock.call_count, 1) - self.assertEqual(exec_info.exception.args[0]['changed'], True) + self.assertEqual(exec_info.exception.args[0]["changed"], True) - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value=None) + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object(bitbucket_pipeline_key_pair, "get_existing_ssh_key_pair", return_value=None) def test_create_keys_check_mode(self, *args): - with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock: + with patch.object(self.module, "update_ssh_key_pair") as update_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'public_key': 'public', - 'private_key': 'PRIVATE', - 'state': 'present', - '_ansible_check_mode': True, - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "public_key": "public", + "private_key": "PRIVATE", + "state": "present", + "_ansible_check_mode": True, + } + ): self.module.main() self.assertEqual(update_ssh_key_pair_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], True) - - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value={ - 'public_key': 'unknown', - 'type': 'pipeline_ssh_key_pair', - }) + self.assertEqual(exec_info.exception.args[0]["changed"], True) + + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_pipeline_key_pair, + "get_existing_ssh_key_pair", + return_value={ + "public_key": "unknown", + "type": "pipeline_ssh_key_pair", + }, + ) def test_update_keys(self, *args): - with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock: + with patch.object(self.module, "update_ssh_key_pair") as update_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'public_key': 'public', - 'private_key': 'PRIVATE', - 'state': 'present', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "public_key": "public", + "private_key": "PRIVATE", + "state": "present", + } + ): self.module.main() self.assertEqual(update_ssh_key_pair_mock.call_count, 1) - self.assertEqual(exec_info.exception.args[0]['changed'], True) - - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value={ - 'public_key': 'public', - 'type': 'pipeline_ssh_key_pair', - }) + self.assertEqual(exec_info.exception.args[0]["changed"], True) + + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_pipeline_key_pair, + 
"get_existing_ssh_key_pair", + return_value={ + "public_key": "public", + "type": "pipeline_ssh_key_pair", + }, + ) def test_dont_update_same_key(self, *args): - with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock: + with patch.object(self.module, "update_ssh_key_pair") as update_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'public_key': 'public', - 'private_key': 'PRIVATE', - 'state': 'present', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "public_key": "public", + "private_key": "PRIVATE", + "state": "present", + } + ): self.module.main() self.assertEqual(update_ssh_key_pair_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], False) - - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value={ - 'public_key': 'unknown', - 'type': 'pipeline_ssh_key_pair', - }) + self.assertEqual(exec_info.exception.args[0]["changed"], False) + + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_pipeline_key_pair, + "get_existing_ssh_key_pair", + return_value={ + "public_key": "unknown", + "type": "pipeline_ssh_key_pair", + }, + ) def test_update_keys_check_mode(self, *args): - with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock: + with patch.object(self.module, "update_ssh_key_pair") as update_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'public_key': 'public', - 'private_key': 'PRIVATE', - 'state': 'present', - '_ansible_check_mode': True, - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "public_key": "public", + "private_key": "PRIVATE", + "state": "present", + "_ansible_check_mode": True, + } + ): self.module.main() self.assertEqual(update_ssh_key_pair_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], True) - - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value={ - 'public_key': 'public', - 'type': 'pipeline_ssh_key_pair', - }) + self.assertEqual(exec_info.exception.args[0]["changed"], True) + + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_pipeline_key_pair, + "get_existing_ssh_key_pair", + return_value={ + "public_key": "public", + "type": "pipeline_ssh_key_pair", + }, + ) def test_delete_keys(self, *args): - with patch.object(self.module, 'delete_ssh_key_pair') as delete_ssh_key_pair_mock: + with patch.object(self.module, "delete_ssh_key_pair") as delete_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'state': 'absent', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "state": "absent", + } + ): self.module.main() self.assertEqual(delete_ssh_key_pair_mock.call_count, 1) - 
self.assertEqual(exec_info.exception.args[0]['changed'], True) + self.assertEqual(exec_info.exception.args[0]["changed"], True) - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value=None) + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object(bitbucket_pipeline_key_pair, "get_existing_ssh_key_pair", return_value=None) def test_delete_absent_keys(self, *args): - with patch.object(self.module, 'delete_ssh_key_pair') as delete_ssh_key_pair_mock: + with patch.object(self.module, "delete_ssh_key_pair") as delete_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'state': 'absent', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "state": "absent", + } + ): self.module.main() self.assertEqual(delete_ssh_key_pair_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], False) - - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value={ - 'public_key': 'public', - 'type': 'pipeline_ssh_key_pair', - }) + self.assertEqual(exec_info.exception.args[0]["changed"], False) + + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_pipeline_key_pair, + "get_existing_ssh_key_pair", + return_value={ + "public_key": "public", + "type": "pipeline_ssh_key_pair", + }, + ) def test_delete_keys_check_mode(self, *args): - with patch.object(self.module, 'delete_ssh_key_pair') as delete_ssh_key_pair_mock: + with patch.object(self.module, "delete_ssh_key_pair") as delete_ssh_key_pair_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'state': 'absent', - '_ansible_check_mode': True, - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "state": "absent", + "_ansible_check_mode": True, + } + ): self.module.main() self.assertEqual(delete_ssh_key_pair_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], True) + self.assertEqual(exec_info.exception.args[0]["changed"], True) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_bitbucket_pipeline_known_host.py b/tests/unit/plugins/modules/test_bitbucket_pipeline_known_host.py index be85d9d4854..7e2180ab031 100644 --- a/tests/unit/plugins/modules/test_bitbucket_pipeline_known_host.py +++ b/tests/unit/plugins/modules/test_bitbucket_pipeline_known_host.py @@ -12,7 +12,11 @@ from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper from ansible_collections.community.general.plugins.modules import bitbucket_pipeline_known_host from ansible_collections.community.general.plugins.modules.bitbucket_pipeline_known_host import HAS_PARAMIKO -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, 
+ ModuleTestCase, + set_module_args, +) class TestBucketPipelineKnownHostModule(ModuleTestCase): @@ -20,174 +24,200 @@ def setUp(self): super().setUp() self.module = bitbucket_pipeline_known_host - @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation') - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None) + @pytest.mark.skipif(not HAS_PARAMIKO, reason="paramiko must be installed to test key creation") + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object(bitbucket_pipeline_known_host, "get_existing_known_host", return_value=None) def test_create_known_host(self, *args): - with patch.object(self.module, 'create_known_host') as create_known_host_mock: + with patch.object(self.module, "create_known_host") as create_known_host_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'bitbucket.org', - 'state': 'present', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "bitbucket.org", + "state": "present", + } + ): self.module.main() self.assertEqual(create_known_host_mock.call_count, 1) - self.assertEqual(exec_info.exception.args[0]['changed'], True) + self.assertEqual(exec_info.exception.args[0]["changed"], True) - @patch.object(BitbucketHelper, 'request', return_value=(dict(status=201), dict())) - @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None) + @patch.object(BitbucketHelper, "request", return_value=(dict(status=201), dict())) + @patch.object(bitbucket_pipeline_known_host, "get_existing_known_host", return_value=None) def test_create_known_host_with_key(self, *args): - with patch.object(self.module, 'get_host_key') as get_host_key_mock: + with patch.object(self.module, "get_host_key") as get_host_key_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'user': 'ABC', - 'password': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'bitbucket.org', - 'key': 'ssh-rsa public', - 'state': 'present', - }): + with set_module_args( + { + "user": "ABC", + "password": "XXX", + "workspace": "name", + "repository": "repo", + "name": "bitbucket.org", + "key": "ssh-rsa public", + "state": "present", + } + ): self.module.main() self.assertEqual(get_host_key_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], True) - - @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation') - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value={ - 'type': 'pipeline_known_host', - 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}', - 'hostname': 'bitbucket.org', - 'public_key': { - 'type': 'pipeline_ssh_public_key', - 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40', - 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A', - 'key_type': 'ssh-rsa', - 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw==' - } - }) + self.assertEqual(exec_info.exception.args[0]["changed"], True) + + @pytest.mark.skipif(not HAS_PARAMIKO, reason="paramiko must be installed to test key creation") + 
@patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_pipeline_known_host, + "get_existing_known_host", + return_value={ + "type": "pipeline_known_host", + "uuid": "{21cc0590-bebe-4fae-8baf-03722704119a7}", + "hostname": "bitbucket.org", + "public_key": { + "type": "pipeline_ssh_public_key", + "md5_fingerprint": "md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40", + "sha256_fingerprint": "SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A", + "key_type": "ssh-rsa", + "key": "AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw==", + }, + }, + ) def test_dont_create_same_value(self, *args): - with patch.object(self.module, 'create_known_host') as create_known_host_mock: + with patch.object(self.module, "create_known_host") as create_known_host_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'bitbucket.org', - 'state': 'present', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "bitbucket.org", + "state": "present", + } + ): self.module.main() self.assertEqual(create_known_host_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], False) + self.assertEqual(exec_info.exception.args[0]["changed"], False) - @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation') - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None) + @pytest.mark.skipif(not HAS_PARAMIKO, reason="paramiko must be installed to test key creation") + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object(bitbucket_pipeline_known_host, "get_existing_known_host", return_value=None) def test_create_known_host_check_mode(self, *args): - with patch.object(self.module, 'create_known_host') as create_known_host_mock: + with patch.object(self.module, "create_known_host") as create_known_host_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'bitbucket.org', - 'state': 'present', - '_ansible_check_mode': True, - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "bitbucket.org", + "state": "present", + "_ansible_check_mode": True, + } + ): self.module.main() self.assertEqual(create_known_host_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], True) - - @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation') - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value={ - 'type': 'pipeline_known_host', - 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}', - 'hostname': 'bitbucket.org', - 'public_key': { - 'type': 'pipeline_ssh_public_key', - 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40', - 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A', - 'key_type': 'ssh-rsa', - 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw==' - } - }) + 
self.assertEqual(exec_info.exception.args[0]["changed"], True) + + @pytest.mark.skipif(not HAS_PARAMIKO, reason="paramiko must be installed to test key creation") + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_pipeline_known_host, + "get_existing_known_host", + return_value={ + "type": "pipeline_known_host", + "uuid": "{21cc0590-bebe-4fae-8baf-03722704119a7}", + "hostname": "bitbucket.org", + "public_key": { + "type": "pipeline_ssh_public_key", + "md5_fingerprint": "md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40", + "sha256_fingerprint": "SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A", + "key_type": "ssh-rsa", + "key": "AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw==", + }, + }, + ) def test_delete_known_host(self, *args): - with patch.object(self.module, 'delete_known_host') as delete_known_host_mock: + with patch.object(self.module, "delete_known_host") as delete_known_host_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'bitbucket.org', - 'state': 'absent', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "bitbucket.org", + "state": "absent", + } + ): self.module.main() self.assertEqual(delete_known_host_mock.call_count, 1) - self.assertEqual(exec_info.exception.args[0]['changed'], True) + self.assertEqual(exec_info.exception.args[0]["changed"], True) - @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation') - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None) + @pytest.mark.skipif(not HAS_PARAMIKO, reason="paramiko must be installed to test key creation") + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object(bitbucket_pipeline_known_host, "get_existing_known_host", return_value=None) def test_delete_absent_known_host(self, *args): - with patch.object(self.module, 'delete_known_host') as delete_known_host_mock: + with patch.object(self.module, "delete_known_host") as delete_known_host_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'bitbucket.org', - 'state': 'absent', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "bitbucket.org", + "state": "absent", + } + ): self.module.main() self.assertEqual(delete_known_host_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], False) - - @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation') - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value={ - 'type': 'pipeline_known_host', - 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}', - 'hostname': 'bitbucket.org', - 'public_key': { - 'type': 'pipeline_ssh_public_key', - 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40', - 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A', - 'key_type': 'ssh-rsa', - 'key': 
'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw==' - } - }) + self.assertEqual(exec_info.exception.args[0]["changed"], False) + + @pytest.mark.skipif(not HAS_PARAMIKO, reason="paramiko must be installed to test key creation") + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_pipeline_known_host, + "get_existing_known_host", + return_value={ + "type": "pipeline_known_host", + "uuid": "{21cc0590-bebe-4fae-8baf-03722704119a7}", + "hostname": "bitbucket.org", + "public_key": { + "type": "pipeline_ssh_public_key", + "md5_fingerprint": "md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40", + "sha256_fingerprint": "SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A", + "key_type": "ssh-rsa", + "key": "AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw==", + }, + }, + ) def test_delete_known_host_check_mode(self, *args): - with patch.object(self.module, 'delete_known_host') as delete_known_host_mock: + with patch.object(self.module, "delete_known_host") as delete_known_host_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'bitbucket.org', - 'state': 'absent', - '_ansible_check_mode': True, - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "bitbucket.org", + "state": "absent", + "_ansible_check_mode": True, + } + ): self.module.main() self.assertEqual(delete_known_host_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], True) + self.assertEqual(exec_info.exception.args[0]["changed"], True) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_bitbucket_pipeline_variable.py b/tests/unit/plugins/modules/test_bitbucket_pipeline_variable.py index 7a898f36c04..8df34b9894a 100644 --- a/tests/unit/plugins/modules/test_bitbucket_pipeline_variable.py +++ b/tests/unit/plugins/modules/test_bitbucket_pipeline_variable.py @@ -9,7 +9,12 @@ from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper from ansible_collections.community.general.plugins.modules import bitbucket_pipeline_variable -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleFailJson, + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) class TestBucketPipelineVariableModule(ModuleTestCase): @@ -19,293 +24,355 @@ def setUp(self): def test_without_required_parameters(self): with self.assertRaises(AnsibleFailJson) as exec_info: - with set_module_args({ - 'workspace': 'name', - 'repository': 'repo', - 'name': 'PIPELINE_VAR_NAME', - 'state': 'absent', - }): + with set_module_args( + { + "workspace": "name", + "repository": "repo", + "name": "PIPELINE_VAR_NAME", + "state": "absent", + } + ): self.module.main() - self.assertEqual(exec_info.exception.args[0]['failed'], True) + self.assertEqual(exec_info.exception.args[0]["failed"], True) def test_missing_value_with_present_state(self): with self.assertRaises(AnsibleFailJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 
'PIPELINE_VAR_NAME', - 'state': 'present', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "PIPELINE_VAR_NAME", + "state": "present", + } + ): self.module.main() - self.assertEqual(exec_info.exception.args[0]['msg'], self.module.error_messages['required_value']) - - @patch.dict('os.environ', { - 'BITBUCKET_CLIENT_ID': 'ABC', - 'BITBUCKET_CLIENT_SECRET': 'XXX', - }) - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None) + self.assertEqual(exec_info.exception.args[0]["msg"], self.module.error_messages["required_value"]) + + @patch.dict( + "os.environ", + { + "BITBUCKET_CLIENT_ID": "ABC", + "BITBUCKET_CLIENT_SECRET": "XXX", + }, + ) + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object(bitbucket_pipeline_variable, "get_existing_pipeline_variable", return_value=None) def test_oauth_env_vars_params(self, *args): with self.assertRaises(AnsibleExitJson): - with set_module_args({ - 'workspace': 'name', - 'repository': 'repo', - 'name': 'PIPELINE_VAR_NAME', - 'state': 'absent', - }): + with set_module_args( + { + "workspace": "name", + "repository": "repo", + "name": "PIPELINE_VAR_NAME", + "state": "absent", + } + ): self.module.main() - @patch.dict('os.environ', { - 'BITBUCKET_USERNAME': 'ABC', - 'BITBUCKET_PASSWORD': 'XXX', - }) - @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None) + @patch.dict( + "os.environ", + { + "BITBUCKET_USERNAME": "ABC", + "BITBUCKET_PASSWORD": "XXX", + }, + ) + @patch.object(bitbucket_pipeline_variable, "get_existing_pipeline_variable", return_value=None) def test_basic_auth_env_vars_params(self, *args): with self.assertRaises(AnsibleExitJson): - with set_module_args({ - 'workspace': 'name', - 'repository': 'repo', - 'name': 'PIPELINE_VAR_NAME', - 'state': 'absent', - }): + with set_module_args( + { + "workspace": "name", + "repository": "repo", + "name": "PIPELINE_VAR_NAME", + "state": "absent", + } + ): self.module.main() - @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None) + @patch.object(bitbucket_pipeline_variable, "get_existing_pipeline_variable", return_value=None) def test_create_variable(self, *args): - with patch.object(self.module, 'create_pipeline_variable') as create_pipeline_variable_mock: + with patch.object(self.module, "create_pipeline_variable") as create_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'user': 'ABC', - 'password': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'PIPELINE_VAR_NAME', - 'value': '42', - 'state': 'present', - }): + with set_module_args( + { + "user": "ABC", + "password": "XXX", + "workspace": "name", + "repository": "repo", + "name": "PIPELINE_VAR_NAME", + "value": "42", + "state": "present", + } + ): self.module.main() self.assertEqual(create_pipeline_variable_mock.call_count, 1) - self.assertEqual(exec_info.exception.args[0]['changed'], True) + self.assertEqual(exec_info.exception.args[0]["changed"], True) - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None) + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object(bitbucket_pipeline_variable, 
"get_existing_pipeline_variable", return_value=None) def test_create_variable_check_mode(self, *args): - with patch.object(self.module, 'create_pipeline_variable') as create_pipeline_variable_mock: + with patch.object(self.module, "create_pipeline_variable") as create_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'PIPELINE_VAR_NAME', - 'value': '42', - 'state': 'present', - '_ansible_check_mode': True, - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "PIPELINE_VAR_NAME", + "value": "42", + "state": "present", + "_ansible_check_mode": True, + } + ): self.module.main() self.assertEqual(create_pipeline_variable_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], True) - - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={ - 'name': 'PIPELINE_VAR_NAME', - 'value': 'Im alive', - 'type': 'pipeline_variable', - 'secured': False, - 'uuid': '{9ddb0507-439a-495a- 99f3 - 564f15138127}' - }) + self.assertEqual(exec_info.exception.args[0]["changed"], True) + + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_pipeline_variable, + "get_existing_pipeline_variable", + return_value={ + "name": "PIPELINE_VAR_NAME", + "value": "Im alive", + "type": "pipeline_variable", + "secured": False, + "uuid": "{9ddb0507-439a-495a- 99f3 - 564f15138127}", + }, + ) def test_update_variable(self, *args): - with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock: + with patch.object(self.module, "update_pipeline_variable") as update_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'PIPELINE_VAR_NAME', - 'value': '42', - 'state': 'present', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "PIPELINE_VAR_NAME", + "value": "42", + "state": "present", + } + ): self.module.main() self.assertEqual(update_pipeline_variable_mock.call_count, 1) - self.assertEqual(exec_info.exception.args[0]['changed'], True) - - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={ - 'name': 'PIPELINE_VAR_NAME', - 'type': 'pipeline_variable', - 'secured': True, - 'uuid': '{9ddb0507-439a-495a- 99f3 - 564f15138127}' - }) + self.assertEqual(exec_info.exception.args[0]["changed"], True) + + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_pipeline_variable, + "get_existing_pipeline_variable", + return_value={ + "name": "PIPELINE_VAR_NAME", + "type": "pipeline_variable", + "secured": True, + "uuid": "{9ddb0507-439a-495a- 99f3 - 564f15138127}", + }, + ) def test_update_secured_variable(self, *args): - with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock: + with patch.object(self.module, "update_pipeline_variable") as update_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 
'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'PIPELINE_VAR_NAME', - 'value': '42', - 'secured': True, - 'state': 'present', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "PIPELINE_VAR_NAME", + "value": "42", + "secured": True, + "state": "present", + } + ): self.module.main() self.assertEqual(update_pipeline_variable_mock.call_count, 1) - self.assertEqual(exec_info.exception.args[0]['changed'], True) - - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={ - 'name': 'PIPELINE_VAR_NAME', - 'value': '42', - 'type': 'pipeline_variable', - 'secured': False, - 'uuid': '{9ddb0507-439a-495a- 99f3 - 564f15138127}' - }) + self.assertEqual(exec_info.exception.args[0]["changed"], True) + + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_pipeline_variable, + "get_existing_pipeline_variable", + return_value={ + "name": "PIPELINE_VAR_NAME", + "value": "42", + "type": "pipeline_variable", + "secured": False, + "uuid": "{9ddb0507-439a-495a- 99f3 - 564f15138127}", + }, + ) def test_update_secured_state(self, *args): - with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock: + with patch.object(self.module, "update_pipeline_variable") as update_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'PIPELINE_VAR_NAME', - 'value': '42', - 'secured': True, - 'state': 'present', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "PIPELINE_VAR_NAME", + "value": "42", + "secured": True, + "state": "present", + } + ): self.module.main() self.assertEqual(update_pipeline_variable_mock.call_count, 1) - self.assertEqual(exec_info.exception.args[0]['changed'], True) - - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={ - 'name': 'PIPELINE_VAR_NAME', - 'value': '42', - 'type': 'pipeline_variable', - 'secured': False, - 'uuid': '{9ddb0507-439a-495a- 99f3 - 564f15138127}' - }) + self.assertEqual(exec_info.exception.args[0]["changed"], True) + + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_pipeline_variable, + "get_existing_pipeline_variable", + return_value={ + "name": "PIPELINE_VAR_NAME", + "value": "42", + "type": "pipeline_variable", + "secured": False, + "uuid": "{9ddb0507-439a-495a- 99f3 - 564f15138127}", + }, + ) def test_dont_update_same_value(self, *args): - with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock: + with patch.object(self.module, "update_pipeline_variable") as update_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'PIPELINE_VAR_NAME', - 'value': '42', - 'state': 'present', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "PIPELINE_VAR_NAME", + "value": 
"42", + "state": "present", + } + ): self.module.main() self.assertEqual(update_pipeline_variable_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], False) - - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={ - 'name': 'PIPELINE_VAR_NAME', - 'value': 'Im alive', - 'type': 'pipeline_variable', - 'secured': False, - 'uuid': '{9ddb0507-439a-495a- 99f3 - 564f15138127}' - }) + self.assertEqual(exec_info.exception.args[0]["changed"], False) + + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_pipeline_variable, + "get_existing_pipeline_variable", + return_value={ + "name": "PIPELINE_VAR_NAME", + "value": "Im alive", + "type": "pipeline_variable", + "secured": False, + "uuid": "{9ddb0507-439a-495a- 99f3 - 564f15138127}", + }, + ) def test_update_variable_check_mode(self, *args): - with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock: + with patch.object(self.module, "update_pipeline_variable") as update_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'PIPELINE_VAR_NAME', - 'value': '42', - 'state': 'present', - '_ansible_check_mode': True, - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "PIPELINE_VAR_NAME", + "value": "42", + "state": "present", + "_ansible_check_mode": True, + } + ): self.module.main() self.assertEqual(update_pipeline_variable_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], True) - - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={ - 'name': 'PIPELINE_VAR_NAME', - 'value': 'Im alive', - 'type': 'pipeline_variable', - 'secured': False, - 'uuid': '{9ddb0507-439a-495a- 99f3 - 564f15138127}' - }) + self.assertEqual(exec_info.exception.args[0]["changed"], True) + + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_pipeline_variable, + "get_existing_pipeline_variable", + return_value={ + "name": "PIPELINE_VAR_NAME", + "value": "Im alive", + "type": "pipeline_variable", + "secured": False, + "uuid": "{9ddb0507-439a-495a- 99f3 - 564f15138127}", + }, + ) def test_delete_variable(self, *args): - with patch.object(self.module, 'delete_pipeline_variable') as delete_pipeline_variable_mock: + with patch.object(self.module, "delete_pipeline_variable") as delete_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'PIPELINE_VAR_NAME', - 'state': 'absent', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "PIPELINE_VAR_NAME", + "state": "absent", + } + ): self.module.main() self.assertEqual(delete_pipeline_variable_mock.call_count, 1) - self.assertEqual(exec_info.exception.args[0]['changed'], True) + self.assertEqual(exec_info.exception.args[0]["changed"], True) - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - 
@patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None) + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object(bitbucket_pipeline_variable, "get_existing_pipeline_variable", return_value=None) def test_delete_absent_variable(self, *args): - with patch.object(self.module, 'delete_pipeline_variable') as delete_pipeline_variable_mock: + with patch.object(self.module, "delete_pipeline_variable") as delete_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'PIPELINE_VAR_NAME', - 'state': 'absent', - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "PIPELINE_VAR_NAME", + "state": "absent", + } + ): self.module.main() self.assertEqual(delete_pipeline_variable_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], False) - - @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token') - @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={ - 'name': 'PIPELINE_VAR_NAME', - 'value': 'Im alive', - 'type': 'pipeline_variable', - 'secured': False, - 'uuid': '{9ddb0507-439a-495a- 99f3 - 564f15138127}' - }) + self.assertEqual(exec_info.exception.args[0]["changed"], False) + + @patch.object(BitbucketHelper, "fetch_access_token", return_value="token") + @patch.object( + bitbucket_pipeline_variable, + "get_existing_pipeline_variable", + return_value={ + "name": "PIPELINE_VAR_NAME", + "value": "Im alive", + "type": "pipeline_variable", + "secured": False, + "uuid": "{9ddb0507-439a-495a- 99f3 - 564f15138127}", + }, + ) def test_delete_variable_check_mode(self, *args): - with patch.object(self.module, 'delete_pipeline_variable') as delete_pipeline_variable_mock: + with patch.object(self.module, "delete_pipeline_variable") as delete_pipeline_variable_mock: with self.assertRaises(AnsibleExitJson) as exec_info: - with set_module_args({ - 'client_id': 'ABC', - 'client_secret': 'XXX', - 'workspace': 'name', - 'repository': 'repo', - 'name': 'PIPELINE_VAR_NAME', - 'state': 'absent', - '_ansible_check_mode': True, - }): + with set_module_args( + { + "client_id": "ABC", + "client_secret": "XXX", + "workspace": "name", + "repository": "repo", + "name": "PIPELINE_VAR_NAME", + "state": "absent", + "_ansible_check_mode": True, + } + ): self.module.main() self.assertEqual(delete_pipeline_variable_mock.call_count, 0) - self.assertEqual(exec_info.exception.args[0]['changed'], True) + self.assertEqual(exec_info.exception.args[0]["changed"], True) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_bootc_manage.py b/tests/unit/plugins/modules/test_bootc_manage.py index 16dea94cc98..af072eacf15 100644 --- a/tests/unit/plugins/modules/test_bootc_manage.py +++ b/tests/unit/plugins/modules/test_bootc_manage.py @@ -6,11 +6,15 @@ from unittest.mock import patch from ansible_collections.community.general.plugins.modules import bootc_manage -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) class 
TestBootcManageModule(ModuleTestCase): - def setUp(self): super().setUp() self.module = bootc_manage @@ -20,52 +24,52 @@ def tearDown(self): def test_switch_without_image(self): """Failure if state is 'switch' but no image provided""" - with set_module_args({'state': 'switch'}): + with set_module_args({"state": "switch"}): with self.assertRaises(AnsibleFailJson) as result: self.module.main() - self.assertEqual(result.exception.args[0]['msg'], "state is switch but all of the following are missing: image") + self.assertEqual(result.exception.args[0]["msg"], "state is switch but all of the following are missing: image") def test_switch_with_image(self): """Test successful switch with image provided""" - with set_module_args({'state': 'switch', 'image': 'example.com/image:latest'}): - with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: - run_command_mock.return_value = (0, 'Queued for next boot: ', '') + with set_module_args({"state": "switch", "image": "example.com/image:latest"}): + with patch("ansible.module_utils.basic.AnsibleModule.run_command") as run_command_mock: + run_command_mock.return_value = (0, "Queued for next boot: ", "") with self.assertRaises(AnsibleExitJson) as result: self.module.main() - self.assertTrue(result.exception.args[0]['changed']) + self.assertTrue(result.exception.args[0]["changed"]) def test_latest_state(self): """Test successful upgrade to the latest state""" - with set_module_args({'state': 'latest'}): - with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: - run_command_mock.return_value = (0, 'Queued for next boot: ', '') + with set_module_args({"state": "latest"}): + with patch("ansible.module_utils.basic.AnsibleModule.run_command") as run_command_mock: + run_command_mock.return_value = (0, "Queued for next boot: ", "") with self.assertRaises(AnsibleExitJson) as result: self.module.main() - self.assertTrue(result.exception.args[0]['changed']) + self.assertTrue(result.exception.args[0]["changed"]) def test_latest_state_no_change(self): """Test no change for latest state""" - with set_module_args({'state': 'latest'}): - with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: - run_command_mock.return_value = (0, 'No changes in ', '') + with set_module_args({"state": "latest"}): + with patch("ansible.module_utils.basic.AnsibleModule.run_command") as run_command_mock: + run_command_mock.return_value = (0, "No changes in ", "") with self.assertRaises(AnsibleExitJson) as result: self.module.main() - self.assertFalse(result.exception.args[0]['changed']) + self.assertFalse(result.exception.args[0]["changed"]) def test_switch_image_failure(self): """Test failure during image switch""" - with set_module_args({'state': 'switch', 'image': 'example.com/image:latest'}): - with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: - run_command_mock.return_value = (1, '', 'ERROR') + with set_module_args({"state": "switch", "image": "example.com/image:latest"}): + with patch("ansible.module_utils.basic.AnsibleModule.run_command") as run_command_mock: + run_command_mock.return_value = (1, "", "ERROR") with self.assertRaises(AnsibleFailJson) as result: self.module.main() - self.assertEqual(result.exception.args[0]['msg'], 'ERROR: Command execution failed.') + self.assertEqual(result.exception.args[0]["msg"], "ERROR: Command execution failed.") def test_latest_state_failure(self): """Test failure during upgrade""" - with set_module_args({'state': 
'latest'}): - with patch('ansible.module_utils.basic.AnsibleModule.run_command') as run_command_mock: - run_command_mock.return_value = (1, '', 'ERROR') + with set_module_args({"state": "latest"}): + with patch("ansible.module_utils.basic.AnsibleModule.run_command") as run_command_mock: + run_command_mock.return_value = (1, "", "ERROR") with self.assertRaises(AnsibleFailJson) as result: self.module.main() - self.assertEqual(result.exception.args[0]['msg'], 'ERROR: Command execution failed.') + self.assertEqual(result.exception.args[0]["msg"], "ERROR: Command execution failed.") diff --git a/tests/unit/plugins/modules/test_campfire.py b/tests/unit/plugins/modules/test_campfire.py index 50ef3446fab..00cc8ad95cb 100644 --- a/tests/unit/plugins/modules/test_campfire.py +++ b/tests/unit/plugins/modules/test_campfire.py @@ -7,11 +7,15 @@ import pytest from unittest.mock import patch from ansible_collections.community.general.plugins.modules import campfire -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) class TestCampfireModule(ModuleTestCase): - def setUp(self): super().setUp() self.module = campfire @@ -21,7 +25,7 @@ def tearDown(self): @pytest.fixture def fetch_url_mock(self, mocker): - return mocker.patch('ansible.module_utils.notification.campfire.fetch_url') + return mocker.patch("ansible.module_utils.notification.campfire.fetch_url") def test_without_required_parameters(self): """Failure must occurs when all parameters are missing""" @@ -31,12 +35,7 @@ def test_without_required_parameters(self): def test_successful_message(self): """Test failure message""" - with set_module_args({ - 'subscription': 'test', - 'token': 'abc', - 'room': 'test', - 'msg': 'test' - }): + with set_module_args({"subscription": "test", "token": "abc", "room": "test", "msg": "test"}): with patch.object(campfire, "fetch_url") as fetch_url_mock: fetch_url_mock.return_value = (None, {"status": 200}) with self.assertRaises(AnsibleExitJson): @@ -44,20 +43,14 @@ def test_successful_message(self): assert fetch_url_mock.call_count == 1 url = fetch_url_mock.call_args[0][1] - data = fetch_url_mock.call_args[1]['data'] + data = fetch_url_mock.call_args[1]["data"] - assert url == 'https://test.campfirenow.com/room/test/speak.xml' - assert data == 'test' + assert url == "https://test.campfirenow.com/room/test/speak.xml" + assert data == "test" def test_successful_message_with_notify(self): """Test failure message""" - with set_module_args({ - 'subscription': 'test', - 'token': 'abc', - 'room': 'test', - 'msg': 'test', - 'notify': 'bell' - }): + with set_module_args({"subscription": "test", "token": "abc", "room": "test", "msg": "test", "notify": "bell"}): with patch.object(campfire, "fetch_url") as fetch_url_mock: fetch_url_mock.return_value = (None, {"status": 200}) with self.assertRaises(AnsibleExitJson): @@ -66,26 +59,21 @@ def test_successful_message_with_notify(self): assert fetch_url_mock.call_count == 2 notify_call = fetch_url_mock.mock_calls[0] url = notify_call[1][1] - data = notify_call[2]['data'] + data = notify_call[2]["data"] - assert url == 'https://test.campfirenow.com/room/test/speak.xml' - assert data == 'SoundMessagebell' + assert url == "https://test.campfirenow.com/room/test/speak.xml" + assert data == "SoundMessagebell" 
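
# Aside: a minimal standalone sketch (standard library only) of the
# unittest.mock behaviour the surrounding campfire assertions depend on.
# Entries of `mock_calls` are (name, args, kwargs) triples, which is why the
# test reads the request URL via call[1][1] and the payload via
# call[2]["data"]. The URL below is a hypothetical placeholder.
from unittest.mock import MagicMock

mock = MagicMock()
mock(None, "https://example.invalid/speak.xml", data="payload")
first_call = mock.mock_calls[0]
assert first_call[1][1] == "https://example.invalid/speak.xml"  # second positional arg
assert first_call[2]["data"] == "payload"  # keyword argument
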
message_call = fetch_url_mock.mock_calls[1] url = message_call[1][1] - data = message_call[2]['data'] + data = message_call[2]["data"] - assert url == 'https://test.campfirenow.com/room/test/speak.xml' - assert data == 'test' + assert url == "https://test.campfirenow.com/room/test/speak.xml" + assert data == "test" def test_failure_message(self): """Test failure message""" - with set_module_args({ - 'subscription': 'test', - 'token': 'abc', - 'room': 'test', - 'msg': 'test' - }): + with set_module_args({"subscription": "test", "token": "abc", "room": "test", "msg": "test"}): with patch.object(campfire, "fetch_url") as fetch_url_mock: fetch_url_mock.return_value = (None, {"status": 403}) with self.assertRaises(AnsibleFailJson): diff --git a/tests/unit/plugins/modules/test_circonus_annotation.py b/tests/unit/plugins/modules/test_circonus_annotation.py index 31a162334ef..8372cdeeda4 100644 --- a/tests/unit/plugins/modules/test_circonus_annotation.py +++ b/tests/unit/plugins/modules/test_circonus_annotation.py @@ -13,11 +13,15 @@ from ansible.module_utils.common.text.converters import to_bytes from ansible_collections.community.general.plugins.modules import circonus_annotation -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) class TestCirconusAnnotation(ModuleTestCase): - def setUp(self): super().setUp() self.module = circonus_annotation @@ -33,122 +37,125 @@ def test_without_required_parameters(self): def test_add_annotation(self): """Check that result is changed""" - with set_module_args({ - 'category': 'test category', - 'description': 'test description', - 'title': 'test title', - 'api_key': str(uuid.uuid4()), - }): - - cid = '/annotation/100000' + with set_module_args( + { + "category": "test category", + "description": "test description", + "title": "test title", + "api_key": str(uuid.uuid4()), + } + ): + cid = "/annotation/100000" def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): data = { - '_cid': cid, - '_created': 1502146995, - '_last_modified': 1502146995, - '_last_modified_by': '/user/1000', - 'category': 'test category', - 'description': 'test description', - 'rel_metrics': [], - 'start': 1502145480, - 'stop': None, - 'title': 'test title', + "_cid": cid, + "_created": 1502146995, + "_last_modified": 1502146995, + "_last_modified_by": "/user/1000", + "category": "test category", + "description": "test description", + "rel_metrics": [], + "start": 1502145480, + "stop": None, + "title": "test title", } raw = to_bytes(json.dumps(data)) resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False) resp.status = 200 - resp.reason = 'OK' - resp.headers = {'X-Circonus-API-Version': '2.00'} + resp.reason = "OK" + resp.headers = {"X-Circonus-API-Version": "2.00"} return self.build_response(request, resp) - with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send: + with patch("requests.adapters.HTTPAdapter.send", autospec=True, side_effect=send) as send: with self.assertRaises(AnsibleExitJson) as result: self.module.main() - self.assertTrue(result.exception.args[0]['changed']) - self.assertEqual(result.exception.args[0]['annotation']['_cid'], cid) + self.assertTrue(result.exception.args[0]["changed"]) + 
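
# Aside: a minimal standalone sketch of the transport-level stubbing used by
# these circonus tests, assuming `requests` and `urllib3` are installed.
# Patching requests.adapters.HTTPAdapter.send with autospec=True passes the
# adapter instance through as `self`, so the fake can feed a urllib3
# HTTPResponse to self.build_response() and return a real requests.Response
# without touching the network. The URL is a hypothetical placeholder.
import io
import json
from unittest.mock import patch

import requests
from urllib3.response import HTTPResponse


def fake_send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
    raw = json.dumps({"ok": True}).encode("utf-8")
    resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False)
    resp.status = 200
    resp.reason = "OK"
    resp.headers = {}
    return self.build_response(request, resp)


with patch("requests.adapters.HTTPAdapter.send", autospec=True, side_effect=fake_send):
    assert requests.get("https://example.invalid/").json() == {"ok": True}
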
self.assertEqual(result.exception.args[0]["annotation"]["_cid"], cid) self.assertEqual(send.call_count, 1) def test_add_annotation_unicode(self): """Check that result is changed. - Note: it seems there is a bug which prevent to create an annotation - with a non-ASCII category if this category already exists, in such - case an Internal Server Error (500) occurs.""" - with set_module_args({ - 'category': 'new catégorÿ', - 'description': 'test description', - 'title': 'test title', - 'api_key': str(uuid.uuid4()), - }): - - cid = '/annotation/100000' + Note: it seems there is a bug which prevent to create an annotation + with a non-ASCII category if this category already exists, in such + case an Internal Server Error (500) occurs.""" + with set_module_args( + { + "category": "new catégorÿ", + "description": "test description", + "title": "test title", + "api_key": str(uuid.uuid4()), + } + ): + cid = "/annotation/100000" def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): data = { - '_cid': '/annotation/100000', - '_created': 1502236928, - '_last_modified': 1502236928, - '_last_modified_by': '/user/1000', + "_cid": "/annotation/100000", + "_created": 1502236928, + "_last_modified": 1502236928, + "_last_modified_by": "/user/1000", # use res['annotation']['category'].encode('latin1').decode('utf8') - 'category': 'new cat\xc3\xa9gor\xc3\xbf', - 'description': 'test description', - 'rel_metrics': [], - 'start': 1502236927, - 'stop': 1502236927, - 'title': 'test title', + "category": "new cat\xc3\xa9gor\xc3\xbf", + "description": "test description", + "rel_metrics": [], + "start": 1502236927, + "stop": 1502236927, + "title": "test title", } - raw = to_bytes(json.dumps(data), encoding='latin1') + raw = to_bytes(json.dumps(data), encoding="latin1") resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False) resp.status = 200 - resp.reason = 'OK' - resp.headers = {'X-Circonus-API-Version': '2.00'} + resp.reason = "OK" + resp.headers = {"X-Circonus-API-Version": "2.00"} return self.build_response(request, resp) - with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send: + with patch("requests.adapters.HTTPAdapter.send", autospec=True, side_effect=send) as send: with self.assertRaises(AnsibleExitJson) as result: self.module.main() - self.assertTrue(result.exception.args[0]['changed']) - self.assertEqual(result.exception.args[0]['annotation']['_cid'], cid) + self.assertTrue(result.exception.args[0]["changed"]) + self.assertEqual(result.exception.args[0]["annotation"]["_cid"], cid) self.assertEqual(send.call_count, 1) def test_auth_failure(self): """Check that an error is raised when authentication failed""" - with set_module_args({ - 'category': 'test category', - 'description': 'test description', - 'title': 'test title', - 'api_key': str(uuid.uuid4()), - }): - - cid = '/annotation/100000' + with set_module_args( + { + "category": "test category", + "description": "test description", + "title": "test title", + "api_key": str(uuid.uuid4()), + } + ): + cid = "/annotation/100000" def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): data = { - '_cid': cid, - '_created': 1502146995, - '_last_modified': 1502146995, - '_last_modified_by': '/user/1000', - 'category': 'test category', - 'description': 'test description', - 'rel_metrics': [], - 'start': 1502145480, - 'stop': None, - 'title': 'test title', + "_cid": cid, + "_created": 1502146995, + "_last_modified": 1502146995, + "_last_modified_by": 
"/user/1000", + "category": "test category", + "description": "test description", + "rel_metrics": [], + "start": 1502145480, + "stop": None, + "title": "test title", } raw = to_bytes(json.dumps(data)) resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False) resp.status = 403 - resp.reason = 'Forbidden' - resp.headers = {'X-Circonus-API-Version': '2.00'} + resp.reason = "Forbidden" + resp.headers = {"X-Circonus-API-Version": "2.00"} return self.build_response(request, resp) - with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send: + with patch("requests.adapters.HTTPAdapter.send", autospec=True, side_effect=send) as send: with self.assertRaises(AnsibleFailJson) as result: self.module.main() - self.assertTrue(result.exception.args[0]['failed']) - self.assertTrue(re.match(r'\b403\b', result.exception.args[0]['reason'])) + self.assertTrue(result.exception.args[0]["failed"]) + self.assertTrue(re.match(r"\b403\b", result.exception.args[0]["reason"])) self.assertEqual(send.call_count, 1) diff --git a/tests/unit/plugins/modules/test_datadog_downtime.py b/tests/unit/plugins/modules/test_datadog_downtime.py index 9a102af3290..193858f9a88 100644 --- a/tests/unit/plugins/modules/test_datadog_downtime.py +++ b/tests/unit/plugins/modules/test_datadog_downtime.py @@ -8,7 +8,10 @@ from ansible_collections.community.general.plugins.modules import datadog_downtime from unittest.mock import MagicMock, patch from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( - AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, ) from pytest import importorskip @@ -20,7 +23,6 @@ class TestDatadogDowntime(ModuleTestCase): - def setUp(self): super().setUp() self.module = datadog_downtime @@ -36,18 +38,20 @@ def test_without_required_parameters(self): @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_create_downtime_when_no_id(self, downtimes_api_mock): - with set_module_args({ - "monitor_tags": ["foo:bar"], - "scope": ["*"], - "monitor_id": 12345, - "downtime_message": "Message", - "start": 1111, - "end": 2222, - "timezone": "UTC", - "rrule": "rrule", - "api_key": "an_api_key", - "app_key": "an_app_key", - }): + with set_module_args( + { + "monitor_tags": ["foo:bar"], + "scope": ["*"], + "monitor_id": 12345, + "downtime_message": "Message", + "start": 1111, + "end": 2222, + "timezone": "UTC", + "rrule": "rrule", + "api_key": "an_api_key", + "app_key": "an_app_key", + } + ): downtime = Downtime() downtime.monitor_tags = ["foo:bar"] downtime.scope = ["*"] @@ -56,35 +60,34 @@ def test_create_downtime_when_no_id(self, downtimes_api_mock): downtime.start = 1111 downtime.end = 2222 downtime.timezone = "UTC" - downtime.recurrence = DowntimeRecurrence( - rrule="rrule", - type="rrule" - ) + downtime.recurrence = DowntimeRecurrence(rrule="rrule", type="rrule") create_downtime_mock = MagicMock(return_value=self.__downtime_with_id(12345)) downtimes_api_mock.return_value = MagicMock(create_downtime=create_downtime_mock) with self.assertRaises(AnsibleExitJson) as result: self.module.main() - self.assertTrue(result.exception.args[0]['changed']) - self.assertEqual(result.exception.args[0]['downtime']['id'], 12345) + self.assertTrue(result.exception.args[0]["changed"]) + self.assertEqual(result.exception.args[0]["downtime"]["id"], 12345) create_downtime_mock.assert_called_once_with(downtime) 
@patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_create_downtime_when_id_and_disabled(self, downtimes_api_mock): - with set_module_args({ - "id": 1212, - "monitor_tags": ["foo:bar"], - "scope": ["*"], - "monitor_id": 12345, - "downtime_message": "Message", - "start": 1111, - "end": 2222, - "timezone": "UTC", - "rrule": "rrule", - "api_key": "an_api_key", - "app_key": "an_app_key", - }): + with set_module_args( + { + "id": 1212, + "monitor_tags": ["foo:bar"], + "scope": ["*"], + "monitor_id": 12345, + "downtime_message": "Message", + "start": 1111, + "end": 2222, + "timezone": "UTC", + "rrule": "rrule", + "api_key": "an_api_key", + "app_key": "an_app_key", + } + ): downtime = Downtime() downtime.monitor_tags = ["foo:bar"] downtime.scope = ["*"] @@ -93,10 +96,7 @@ def test_create_downtime_when_id_and_disabled(self, downtimes_api_mock): downtime.start = 1111 downtime.end = 2222 downtime.timezone = "UTC" - downtime.recurrence = DowntimeRecurrence( - rrule="rrule", - type="rrule" - ) + downtime.recurrence = DowntimeRecurrence(rrule="rrule", type="rrule") disabled_downtime = Downtime() disabled_downtime.disabled = True @@ -110,26 +110,28 @@ def test_create_downtime_when_id_and_disabled(self, downtimes_api_mock): with self.assertRaises(AnsibleExitJson) as result: self.module.main() - self.assertTrue(result.exception.args[0]['changed']) - self.assertEqual(result.exception.args[0]['downtime']['id'], 12345) + self.assertTrue(result.exception.args[0]["changed"]) + self.assertEqual(result.exception.args[0]["downtime"]["id"], 12345) create_downtime_mock.assert_called_once_with(downtime) get_downtime_mock.assert_called_once_with(1212) @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_update_downtime_when_not_disabled(self, downtimes_api_mock): - with set_module_args({ - "id": 1212, - "monitor_tags": ["foo:bar"], - "scope": ["*"], - "monitor_id": 12345, - "downtime_message": "Message", - "start": 1111, - "end": 2222, - "timezone": "UTC", - "rrule": "rrule", - "api_key": "an_api_key", - "app_key": "an_app_key", - }): + with set_module_args( + { + "id": 1212, + "monitor_tags": ["foo:bar"], + "scope": ["*"], + "monitor_id": 12345, + "downtime_message": "Message", + "start": 1111, + "end": 2222, + "timezone": "UTC", + "rrule": "rrule", + "api_key": "an_api_key", + "app_key": "an_app_key", + } + ): downtime = Downtime() downtime.monitor_tags = ["foo:bar"] downtime.scope = ["*"] @@ -138,10 +140,7 @@ def test_update_downtime_when_not_disabled(self, downtimes_api_mock): downtime.start = 1111 downtime.end = 2222 downtime.timezone = "UTC" - downtime.recurrence = DowntimeRecurrence( - rrule="rrule", - type="rrule" - ) + downtime.recurrence = DowntimeRecurrence(rrule="rrule", type="rrule") enabled_downtime = Downtime() enabled_downtime.disabled = False @@ -155,26 +154,28 @@ def test_update_downtime_when_not_disabled(self, downtimes_api_mock): with self.assertRaises(AnsibleExitJson) as result: self.module.main() - self.assertTrue(result.exception.args[0]['changed']) - self.assertEqual(result.exception.args[0]['downtime']['id'], 1212) + self.assertTrue(result.exception.args[0]["changed"]) + self.assertEqual(result.exception.args[0]["downtime"]["id"], 1212) update_downtime_mock.assert_called_once_with(1212, downtime) get_downtime_mock.assert_called_once_with(1212) @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_update_downtime_no_change(self, 
downtimes_api_mock): - with set_module_args({ - "id": 1212, - "monitor_tags": ["foo:bar"], - "scope": ["*"], - "monitor_id": 12345, - "downtime_message": "Message", - "start": 1111, - "end": 2222, - "timezone": "UTC", - "rrule": "rrule", - "api_key": "an_api_key", - "app_key": "an_app_key", - }): + with set_module_args( + { + "id": 1212, + "monitor_tags": ["foo:bar"], + "scope": ["*"], + "monitor_id": 12345, + "downtime_message": "Message", + "start": 1111, + "end": 2222, + "timezone": "UTC", + "rrule": "rrule", + "api_key": "an_api_key", + "app_key": "an_app_key", + } + ): downtime = Downtime() downtime.monitor_tags = ["foo:bar"] downtime.scope = ["*"] @@ -183,10 +184,7 @@ def test_update_downtime_no_change(self, downtimes_api_mock): downtime.start = 1111 downtime.end = 2222 downtime.timezone = "UTC" - downtime.recurrence = DowntimeRecurrence( - rrule="rrule", - type="rrule" - ) + downtime.recurrence = DowntimeRecurrence(rrule="rrule", type="rrule") downtime_get = Downtime() downtime_get.id = 1212 @@ -198,9 +196,7 @@ def test_update_downtime_no_change(self, downtimes_api_mock): downtime_get.start = 1111 downtime_get.end = 2222 downtime_get.timezone = "UTC" - downtime_get.recurrence = DowntimeRecurrence( - rrule="rrule" - ) + downtime_get.recurrence = DowntimeRecurrence(rrule="rrule") update_downtime_mock = MagicMock(return_value=downtime_get) get_downtime_mock = MagicMock(return_value=downtime_get) @@ -210,28 +206,29 @@ def test_update_downtime_no_change(self, downtimes_api_mock): with self.assertRaises(AnsibleExitJson) as result: self.module.main() - self.assertFalse(result.exception.args[0]['changed']) - self.assertEqual(result.exception.args[0]['downtime']['id'], 1212) + self.assertFalse(result.exception.args[0]["changed"]) + self.assertEqual(result.exception.args[0]["downtime"]["id"], 1212) update_downtime_mock.assert_called_once_with(1212, downtime) get_downtime_mock.assert_called_once_with(1212) @patch("ansible_collections.community.general.plugins.modules.datadog_downtime.DowntimesApi") def test_delete_downtime(self, downtimes_api_mock): - with set_module_args({ - "id": 1212, - "state": "absent", - "api_key": "an_api_key", - "app_key": "an_app_key", - }): + with set_module_args( + { + "id": 1212, + "state": "absent", + "api_key": "an_api_key", + "app_key": "an_app_key", + } + ): cancel_downtime_mock = MagicMock() downtimes_api_mock.return_value = MagicMock( - get_downtime=self.__downtime_with_id, - cancel_downtime=cancel_downtime_mock + get_downtime=self.__downtime_with_id, cancel_downtime=cancel_downtime_mock ) with self.assertRaises(AnsibleExitJson) as result: self.module.main() - self.assertTrue(result.exception.args[0]['changed']) + self.assertTrue(result.exception.args[0]["changed"]) cancel_downtime_mock.assert_called_once_with(1212) def __downtime_with_id(self, id): diff --git a/tests/unit/plugins/modules/test_dconf.py b/tests/unit/plugins/modules/test_dconf.py index 085a87ab174..562760249c2 100644 --- a/tests/unit/plugins/modules/test_dconf.py +++ b/tests/unit/plugins/modules/test_dconf.py @@ -19,25 +19,26 @@ @pytest.mark.parametrize( "v1,v2,expected,fallback_expected", - (("'foo'", "'foo'", True, True), - ('"foo"', "'foo'", True, False), - ("'foo'", '"foo"', True, False), - ("'foo'", '"bar"', False, False), - ("[1, 2, 3]", "[1, 2, 3]", True, True), - ("[1, 2, 3]", "[3, 2, 1]", False, False), - ('1234', '1234', True, True), - ('1234', '1235', False, False), - ('1.0', '1.0', True, True), - ('1.000', '1.0', True, False), - ('2.0', '4.0', False, False), - # GVariants with 
different types aren't equal! - ('1', '1.0', False, False), - # Explicit types - ('@as []', '[]', True, False), - )) + ( + ("'foo'", "'foo'", True, True), + ('"foo"', "'foo'", True, False), + ("'foo'", '"foo"', True, False), + ("'foo'", '"bar"', False, False), + ("[1, 2, 3]", "[1, 2, 3]", True, True), + ("[1, 2, 3]", "[3, 2, 1]", False, False), + ("1234", "1234", True, True), + ("1234", "1235", False, False), + ("1.0", "1.0", True, True), + ("1.000", "1.0", True, False), + ("2.0", "4.0", False, False), + # GVariants with different types aren't equal! + ("1", "1.0", False, False), + # Explicit types + ("@as []", "[]", True, False), + ), +) def test_gvariant_equality(mocker, v1, v2, expected, fallback_expected): - assert DconfPreference.variants_are_equal(v1, v2) is \ - (expected if Variant else fallback_expected) - mocker.patch.object(dconf, 'Variant', None) + assert DconfPreference.variants_are_equal(v1, v2) is (expected if Variant else fallback_expected) + mocker.patch.object(dconf, "Variant", None) mocker.patch.object(dconf, "GError", AttributeError) assert DconfPreference.variants_are_equal(v1, v2) is fallback_expected diff --git a/tests/unit/plugins/modules/test_discord.py b/tests/unit/plugins/modules/test_discord.py index 8ce6101a482..c43b1ffdd3f 100644 --- a/tests/unit/plugins/modules/test_discord.py +++ b/tests/unit/plugins/modules/test_discord.py @@ -10,11 +10,15 @@ import pytest from ansible_collections.community.general.plugins.modules import discord -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) class TestDiscordModule(ModuleTestCase): - def setUp(self): super().setUp() self.module = discord @@ -24,7 +28,7 @@ def tearDown(self): @pytest.fixture def fetch_url_mock(self, mocker): - return mocker.patch('ansible.module_utils.notification.discord.fetch_url') + return mocker.patch("ansible.module_utils.notification.discord.fetch_url") def test_without_parameters(self): """Failure if no parameters set""" @@ -34,62 +38,49 @@ def test_without_parameters(self): def test_without_content(self): """Failure if content and embeds both are missing""" - with set_module_args({ - 'webhook_id': 'xxx', - 'webhook_token': 'xxx' - }): + with set_module_args({"webhook_id": "xxx", "webhook_token": "xxx"}): with self.assertRaises(AnsibleFailJson): self.module.main() def test_successful_message(self): """Test a basic message successfully.""" - with set_module_args({ - 'webhook_id': 'xxx', - 'webhook_token': 'xxx', - 'content': 'test' - }): - + with set_module_args({"webhook_id": "xxx", "webhook_token": "xxx", "content": "test"}): with patch.object(discord, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 204, 'msg': 'OK (0 bytes)'}) + fetch_url_mock.return_value = (None, {"status": 204, "msg": "OK (0 bytes)"}) with self.assertRaises(AnsibleExitJson): self.module.main() self.assertTrue(fetch_url_mock.call_count, 1) - call_data = json.loads(fetch_url_mock.call_args[1]['data']) - assert call_data['content'] == "test" + call_data = json.loads(fetch_url_mock.call_args[1]["data"]) + assert call_data["content"] == "test" def test_message_with_username(self): """Test a message with username set successfully.""" - with set_module_args({ - 'webhook_id': 'xxx', - 'webhook_token': 'xxx', - 
'content': 'test', - 'username': 'Ansible Bot' - }): - + with set_module_args( + {"webhook_id": "xxx", "webhook_token": "xxx", "content": "test", "username": "Ansible Bot"} + ): with patch.object(discord, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 204, 'msg': 'OK (0 bytes)'}) + fetch_url_mock.return_value = (None, {"status": 204, "msg": "OK (0 bytes)"}) with self.assertRaises(AnsibleExitJson): self.module.main() self.assertTrue(fetch_url_mock.call_count, 1) - call_data = json.loads(fetch_url_mock.call_args[1]['data']) - assert call_data['username'] == "Ansible Bot" - assert call_data['content'] == "test" + call_data = json.loads(fetch_url_mock.call_args[1]["data"]) + assert call_data["username"] == "Ansible Bot" + assert call_data["content"] == "test" def test_failed_message(self): """Test failure because webhook id is wrong.""" - with set_module_args({ - 'webhook_id': 'wrong', - 'webhook_token': 'xxx', - 'content': 'test' - }): - + with set_module_args({"webhook_id": "wrong", "webhook_token": "xxx", "content": "test"}): with patch.object(discord, "fetch_url") as fetch_url_mock: fetch_url_mock.return_value = ( None, - {"status": 404, 'msg': 'HTTP Error 404: Not Found', 'body': '{"message": "Unknown Webhook", "code": 10015}'}, + { + "status": 404, + "msg": "HTTP Error 404: Not Found", + "body": '{"message": "Unknown Webhook", "code": 10015}', + }, ) with self.assertRaises(AnsibleFailJson): self.module.main() @@ -97,13 +88,8 @@ def test_failed_message(self): def test_failed_message_without_body(self): """Test failure with empty response body.""" - with set_module_args({ - 'webhook_id': 'wrong', - 'webhook_token': 'xxx', - 'content': 'test' - }): - + with set_module_args({"webhook_id": "wrong", "webhook_token": "xxx", "content": "test"}): with patch.object(discord, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'HTTP Error 404: Not Found'}) + fetch_url_mock.return_value = (None, {"status": 404, "msg": "HTTP Error 404: Not Found"}) with self.assertRaises(AnsibleFailJson): self.module.main() diff --git a/tests/unit/plugins/modules/test_dnf_config_manager.py b/tests/unit/plugins/modules/test_dnf_config_manager.py index 94d246e85fa..c68041f762e 100644 --- a/tests/unit/plugins/modules/test_dnf_config_manager.py +++ b/tests/unit/plugins/modules/test_dnf_config_manager.py @@ -1,4 +1,3 @@ - # Copyright (c) 2023, Andrew Hyatt # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -6,8 +5,12 @@ from unittest.mock import patch, call from ansible_collections.community.general.plugins.modules import dnf_config_manager as dnf_config_manager_module -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, \ - ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) # Return value on all-default arguments mock_repolist_crb_enabled = """Loaded plugins: builddep, changelog, config-manager, copr, debug, debuginfo-install @@ -230,37 +233,38 @@ Repo-status : disabled """ -expected_repo_states_crb_enabled = {'disabled': ['appstream-debuginfo', - 'appstream-source', - 'baseos-debuginfo', - 'baseos-source'], - 'enabled': ['appstream', - 'baseos', - 'copr:copr.fedorainfracloud.org:uriesk:dracut-crypt-ssh', - 
'crb', - 'rpmfusion-nonfree-updates']} - -expected_repo_states_crb_disabled = {'disabled': ['appstream-debuginfo', - 'appstream-source', - 'baseos-debuginfo', - 'baseos-source', - 'crb'], - 'enabled': ['appstream', - 'baseos', - 'copr:copr.fedorainfracloud.org:uriesk:dracut-crypt-ssh', - 'rpmfusion-nonfree-updates']} - -call_get_repo_states = call(['/usr/bin/dnf', 'repolist', '--all', '--verbose'], check_rc=True) -call_disable_crb = call(['/usr/bin/dnf', 'config-manager', '--assumeyes', '--set-disabled', 'crb'], check_rc=True) -call_enable_crb = call(['/usr/bin/dnf', 'config-manager', '--assumeyes', '--set-enabled', 'crb'], check_rc=True) +expected_repo_states_crb_enabled = { + "disabled": ["appstream-debuginfo", "appstream-source", "baseos-debuginfo", "baseos-source"], + "enabled": [ + "appstream", + "baseos", + "copr:copr.fedorainfracloud.org:uriesk:dracut-crypt-ssh", + "crb", + "rpmfusion-nonfree-updates", + ], +} + +expected_repo_states_crb_disabled = { + "disabled": ["appstream-debuginfo", "appstream-source", "baseos-debuginfo", "baseos-source", "crb"], + "enabled": [ + "appstream", + "baseos", + "copr:copr.fedorainfracloud.org:uriesk:dracut-crypt-ssh", + "rpmfusion-nonfree-updates", + ], +} + +call_get_repo_states = call(["/usr/bin/dnf", "repolist", "--all", "--verbose"], check_rc=True) +call_disable_crb = call(["/usr/bin/dnf", "config-manager", "--assumeyes", "--set-disabled", "crb"], check_rc=True) +call_enable_crb = call(["/usr/bin/dnf", "config-manager", "--assumeyes", "--set-enabled", "crb"], check_rc=True) class TestDNFConfigManager(ModuleTestCase): def setUp(self): super().setUp() - self.mock_run_command = (patch('ansible.module_utils.basic.AnsibleModule.run_command')) + self.mock_run_command = patch("ansible.module_utils.basic.AnsibleModule.run_command") self.run_command = self.mock_run_command.start() - self.mock_path_exists = (patch('os.path.exists')) + self.mock_path_exists = patch("os.path.exists") self.path_exists = self.mock_path_exists.start() self.path_exists.return_value = True self.module = dnf_config_manager_module @@ -270,7 +274,7 @@ def tearDown(self): self.mock_run_command.stop() self.mock_path_exists.stop() - def set_command_mock(self, execute_return=(0, '', ''), execute_side_effect=None): + def set_command_mock(self, execute_return=(0, "", ""), execute_side_effect=None): self.run_command.reset_mock() self.run_command.return_value = execute_return self.run_command.side_effect = execute_side_effect @@ -278,10 +282,10 @@ def set_command_mock(self, execute_return=(0, '', ''), execute_side_effect=None) def execute_module(self, failed=False, changed=False): if failed: result = self.failed() - self.assertTrue(result['failed']) + self.assertTrue(result["failed"]) else: result = self.changed(changed) - self.assertEqual(result['changed'], changed) + self.assertEqual(result["changed"], changed) return result @@ -290,7 +294,7 @@ def failed(self): self.module.main() result = exc.exception.args[0] - self.assertTrue(result['failed']) + self.assertTrue(result["failed"]) return result def changed(self, changed=False): @@ -298,102 +302,84 @@ def changed(self, changed=False): self.module.main() result = exc.exception.args[0] - self.assertEqual(result['changed'], changed) + self.assertEqual(result["changed"], changed) return result def test_get_repo_states(self): with set_module_args({}): - self.set_command_mock(execute_return=(0, mock_repolist_crb_enabled, '')) + self.set_command_mock(execute_return=(0, mock_repolist_crb_enabled, "")) result = self.execute_module(changed=False) 
- self.assertEqual(result['repo_states_pre'], expected_repo_states_crb_enabled) - self.assertEqual(result['repo_states_post'], expected_repo_states_crb_enabled) - self.assertEqual(result['changed_repos'], []) + self.assertEqual(result["repo_states_pre"], expected_repo_states_crb_enabled) + self.assertEqual(result["repo_states_post"], expected_repo_states_crb_enabled) + self.assertEqual(result["changed_repos"], []) self.run_command.assert_has_calls(calls=[call_get_repo_states, call_get_repo_states], any_order=False) def test_enable_disabled_repo(self): - with set_module_args({ - 'name': ['crb'], - 'state': 'enabled' - }): - side_effects = [(0, mock_repolist_crb_disabled, ''), (0, '', ''), (0, mock_repolist_crb_enabled, '')] + with set_module_args({"name": ["crb"], "state": "enabled"}): + side_effects = [(0, mock_repolist_crb_disabled, ""), (0, "", ""), (0, mock_repolist_crb_enabled, "")] self.set_command_mock(execute_side_effect=side_effects) result = self.execute_module(changed=True) - self.assertEqual(result['repo_states_pre'], expected_repo_states_crb_disabled) - self.assertEqual(result['repo_states_post'], expected_repo_states_crb_enabled) - self.assertEqual(result['changed_repos'], ['crb']) + self.assertEqual(result["repo_states_pre"], expected_repo_states_crb_disabled) + self.assertEqual(result["repo_states_post"], expected_repo_states_crb_enabled) + self.assertEqual(result["changed_repos"], ["crb"]) expected_calls = [call_get_repo_states, call_enable_crb, call_get_repo_states] self.run_command.assert_has_calls(calls=expected_calls, any_order=False) def test_enable_disabled_repo_check_mode(self): - with set_module_args({ - 'name': ['crb'], - 'state': 'enabled', - '_ansible_check_mode': True - }): - side_effects = [(0, mock_repolist_crb_disabled, ''), (0, mock_repolist_crb_disabled, '')] + with set_module_args({"name": ["crb"], "state": "enabled", "_ansible_check_mode": True}): + side_effects = [(0, mock_repolist_crb_disabled, ""), (0, mock_repolist_crb_disabled, "")] self.set_command_mock(execute_side_effect=side_effects) result = self.execute_module(changed=True) - self.assertEqual(result['changed_repos'], ['crb']) + self.assertEqual(result["changed_repos"], ["crb"]) self.run_command.assert_has_calls(calls=[call_get_repo_states], any_order=False) def test_disable_enabled_repo(self): - with set_module_args({ - 'name': ['crb'], - 'state': 'disabled' - }): - side_effects = [(0, mock_repolist_crb_enabled, ''), (0, '', ''), (0, mock_repolist_crb_disabled, '')] + with set_module_args({"name": ["crb"], "state": "disabled"}): + side_effects = [(0, mock_repolist_crb_enabled, ""), (0, "", ""), (0, mock_repolist_crb_disabled, "")] self.set_command_mock(execute_side_effect=side_effects) result = self.execute_module(changed=True) - self.assertEqual(result['repo_states_pre'], expected_repo_states_crb_enabled) - self.assertEqual(result['repo_states_post'], expected_repo_states_crb_disabled) - self.assertEqual(result['changed_repos'], ['crb']) + self.assertEqual(result["repo_states_pre"], expected_repo_states_crb_enabled) + self.assertEqual(result["repo_states_post"], expected_repo_states_crb_disabled) + self.assertEqual(result["changed_repos"], ["crb"]) expected_calls = [call_get_repo_states, call_disable_crb, call_get_repo_states] self.run_command.assert_has_calls(calls=expected_calls, any_order=False) def test_crb_already_enabled(self): - with set_module_args({ - 'name': ['crb'], - 'state': 'enabled' - }): - side_effects = [(0, mock_repolist_crb_enabled, ''), (0, mock_repolist_crb_enabled, '')] + 
with set_module_args({"name": ["crb"], "state": "enabled"}): + side_effects = [(0, mock_repolist_crb_enabled, ""), (0, mock_repolist_crb_enabled, "")] self.set_command_mock(execute_side_effect=side_effects) result = self.execute_module(changed=False) - self.assertEqual(result['repo_states_pre'], expected_repo_states_crb_enabled) - self.assertEqual(result['repo_states_post'], expected_repo_states_crb_enabled) - self.assertEqual(result['changed_repos'], []) + self.assertEqual(result["repo_states_pre"], expected_repo_states_crb_enabled) + self.assertEqual(result["repo_states_post"], expected_repo_states_crb_enabled) + self.assertEqual(result["changed_repos"], []) self.run_command.assert_has_calls(calls=[call_get_repo_states, call_get_repo_states], any_order=False) def test_get_repo_states_fail_no_status(self): with set_module_args({}): - self.set_command_mock(execute_return=(0, mock_repolist_no_status, '')) + self.set_command_mock(execute_return=(0, mock_repolist_no_status, "")) result = self.execute_module(failed=True) - self.assertEqual(result['msg'], 'dnf repolist parse failure: parsed another repo id before next status') + self.assertEqual(result["msg"], "dnf repolist parse failure: parsed another repo id before next status") self.run_command.assert_has_calls(calls=[call_get_repo_states], any_order=False) def test_get_repo_states_fail_status_before_id(self): with set_module_args({}): - self.set_command_mock(execute_return=(0, mock_repolist_status_before_id, '')) + self.set_command_mock(execute_return=(0, mock_repolist_status_before_id, "")) result = self.execute_module(failed=True) - self.assertEqual(result['msg'], 'dnf repolist parse failure: parsed status before repo id') + self.assertEqual(result["msg"], "dnf repolist parse failure: parsed status before repo id") self.run_command.assert_has_calls(calls=[call_get_repo_states], any_order=False) def test_failed__unknown_repo_id(self): - with set_module_args({ - 'name': ['fake'] - }): - self.set_command_mock(execute_return=(0, mock_repolist_crb_disabled, '')) + with set_module_args({"name": ["fake"]}): + self.set_command_mock(execute_return=(0, mock_repolist_crb_disabled, "")) result = self.execute_module(failed=True) - self.assertEqual(result['msg'], "did not find repo with ID 'fake' in dnf repolist --all --verbose") + self.assertEqual(result["msg"], "did not find repo with ID 'fake' in dnf repolist --all --verbose") self.run_command.assert_has_calls(calls=[call_get_repo_states], any_order=False) def test_failed_state_change_ineffective(self): - with set_module_args({ - 'name': ['crb'], - 'state': 'enabled' - }): - side_effects = [(0, mock_repolist_crb_disabled, ''), (0, '', ''), (0, mock_repolist_crb_disabled, '')] + with set_module_args({"name": ["crb"], "state": "enabled"}): + side_effects = [(0, mock_repolist_crb_disabled, ""), (0, "", ""), (0, mock_repolist_crb_disabled, "")] self.set_command_mock(execute_side_effect=side_effects) result = self.execute_module(failed=True) - self.assertEqual(result['msg'], "dnf config-manager failed to make 'crb' enabled") + self.assertEqual(result["msg"], "dnf config-manager failed to make 'crb' enabled") expected_calls = [call_get_repo_states, call_enable_crb, call_get_repo_states] self.run_command.assert_has_calls(calls=expected_calls, any_order=False) diff --git a/tests/unit/plugins/modules/test_dnsimple.py b/tests/unit/plugins/modules/test_dnsimple.py index afc858df764..ccfc85b1f60 100644 --- a/tests/unit/plugins/modules/test_dnsimple.py +++ b/tests/unit/plugins/modules/test_dnsimple.py @@ -1,4 
+1,3 @@ - # Copyright (c) Ansible project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -6,15 +5,18 @@ from __future__ import annotations from ansible_collections.community.general.plugins.modules import dnsimple as dnsimple_module -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) from unittest.mock import patch import pytest import sys -dnsimple = pytest.importorskip('dnsimple') +dnsimple = pytest.importorskip("dnsimple") mandatory_py_version = pytest.mark.skipif( - sys.version_info < (3, 6), - reason='The dnsimple dependency requires python3.6 or higher' + sys.version_info < (3, 6), reason="The dnsimple dependency requires python3.6 or higher" ) from dnsimple import DNSimpleException @@ -38,24 +40,24 @@ def test_without_required_parameters(self): with set_module_args({}): self.module.main() - @patch('dnsimple.service.Identity.whoami') + @patch("dnsimple.service.Identity.whoami") def test_account_token(self, mock_whoami): mock_whoami.return_value.data.account = 42 - ds = self.module.DNSimpleV2('fake', 'fake', True, self.module) + ds = self.module.DNSimpleV2("fake", "fake", True, self.module) self.assertEqual(ds.account, 42) - @patch('dnsimple.service.Accounts.list_accounts') - @patch('dnsimple.service.Identity.whoami') + @patch("dnsimple.service.Accounts.list_accounts") + @patch("dnsimple.service.Identity.whoami") def test_user_token_multiple_accounts(self, mock_whoami, mock_accounts): mock_accounts.return_value.data = [1, 2, 3] mock_whoami.return_value.data.account = None with self.assertRaises(DNSimpleException): - self.module.DNSimpleV2('fake', 'fake', True, self.module) + self.module.DNSimpleV2("fake", "fake", True, self.module) - @patch('dnsimple.service.Accounts.list_accounts') - @patch('dnsimple.service.Identity.whoami') + @patch("dnsimple.service.Accounts.list_accounts") + @patch("dnsimple.service.Identity.whoami") def test_user_token_single_account(self, mock_whoami, mock_accounts): mock_accounts.return_value.data = [42] mock_whoami.return_value.data.account = None - ds = self.module.DNSimpleV2('fake', 'fake', True, self.module) + ds = self.module.DNSimpleV2("fake", "fake", True, self.module) self.assertEqual(ds.account, 42) diff --git a/tests/unit/plugins/modules/test_dnsimple_info.py b/tests/unit/plugins/modules/test_dnsimple_info.py index 4c89ff17ffd..271900a2a34 100644 --- a/tests/unit/plugins/modules/test_dnsimple_info.py +++ b/tests/unit/plugins/modules/test_dnsimple_info.py @@ -1,4 +1,3 @@ - # Copyright (c) Ansible project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,33 +6,38 @@ from ansible_collections.community.general.plugins.modules import dnsimple_info -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args, AnsibleExitJson +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleFailJson, + ModuleTestCase, + set_module_args, + AnsibleExitJson, +) from httmock import response from httmock import with_httmock from httmock import urlmatch 
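The dnsimple_info tests that follow rely on httmock: a handler decorated with @urlmatch is matched by regex against components of each outgoing URL and returns a canned response object, so nothing ever reaches the real API. A minimal, self-contained sketch of that pattern — the zones_stub handler and the URL below are illustrative stand-ins, not taken from the collection's test suite, but the urlmatch/response/HTTMock calls are the same httmock APIs the tests use:

import requests
from httmock import HTTMock, response, urlmatch


@urlmatch(netloc=r"(.*\.)?dnsimple\.com$", path=r"^/v2/.*/zones/$")
def zones_stub(url, request):
    # Answer any matching request with a canned JSON payload; httmock's
    # response() helper JSON-encodes dict content for us.
    headers = {"content-type": "application/json"}
    body = {"data": [{"account_id": "1234"}], "pagination": {"total_pages": 1}}
    return response(200, body, headers, None, 5, request)


with HTTMock(zones_stub):
    # requests never touches the network here; zones_stub answers instead.
    resp = requests.get("https://api.dnsimple.com/v2/1234/zones/")
assert resp.json()["data"][0]["account_id"] == "1234"

The @with_httmock(handler) decorator seen throughout these test files is shorthand for wrapping an entire test method in the equivalent HTTMock context.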
-@urlmatch(netloc='(.)*dnsimple.com(.)*', - path='/v2/[0-9]*/zones/') +@urlmatch(netloc="(.)*dnsimple.com(.)*", path="/v2/[0-9]*/zones/") def zones_resp(url, request): """return domains""" - headers = {'content-type': 'application/json'} - data_content = {"data": - [{"account_id": "1234", }, ], - "pagination": {"total_pages": 1}} + headers = {"content-type": "application/json"} + data_content = { + "data": [ + { + "account_id": "1234", + }, + ], + "pagination": {"total_pages": 1}, + } content = data_content return response(200, content, headers, None, 5, request) -@urlmatch(netloc='(.)*dnsimple.com(.)*', - path='/v2/[0-9]*/zones/(.)*/records(.*)') +@urlmatch(netloc="(.)*dnsimple.com(.)*", path="/v2/[0-9]*/zones/(.)*/records(.*)") def records_resp(url, request): """return record(s)""" - headers = {'content-type': 'application/json'} - data_content = {"data": - [{"content": "example", - "name": "example.com"}], - "pagination": {"total_pages": 1}} + headers = {"content-type": "application/json"} + data_content = {"data": [{"content": "example", "name": "example.com"}], "pagination": {"total_pages": 1}} content = data_content return response(200, content, headers, None, 5, request) @@ -42,7 +46,6 @@ class TestDNSimple_Info(ModuleTestCase): """Main class for testing dnsimple module.""" def setUp(self): - """Setup.""" super().setUp() self.module = dnsimple_info @@ -62,48 +65,38 @@ def test_only_key_and_account(self): """key and account will pass, returns domains""" account_id = "1234" with self.assertRaises(AnsibleExitJson) as exc_info: - with set_module_args({ - "api_key": "abcd1324", - "account_id": account_id - }): + with set_module_args({"api_key": "abcd1324", "account_id": account_id}): self.module.main() result = exc_info.exception.args[0] # nothing should change - self.assertFalse(result['changed']) + self.assertFalse(result["changed"]) # we should return at least one item with the matching account ID - assert result['dnsimple_domain_info'][0]["account_id"] == account_id + assert result["dnsimple_domain_info"][0]["account_id"] == account_id @with_httmock(records_resp) def test_only_name_without_record(self): """name and no record should not fail, returns the record""" name = "example.com" with self.assertRaises(AnsibleExitJson) as exc_info: - with set_module_args({ - "api_key": "abcd1324", - "name": "example.com", - "account_id": "1234" - }): + with set_module_args({"api_key": "abcd1324", "name": "example.com", "account_id": "1234"}): self.module.main() result = exc_info.exception.args[0] # nothing should change - self.assertFalse(result['changed']) + self.assertFalse(result["changed"]) # we should return at least one item with matching domain - assert result['dnsimple_records_info'][0]['name'] == name + assert result["dnsimple_records_info"][0]["name"] == name @with_httmock(records_resp) def test_name_and_record(self): """name and record should not fail, returns the record""" record = "example" with self.assertRaises(AnsibleExitJson) as exc_info: - with set_module_args({ - "api_key": "abcd1324", - "account_id": "1234", - "name": "example.com", - "record": "example" - }): + with set_module_args( + {"api_key": "abcd1324", "account_id": "1234", "name": "example.com", "record": "example"} + ): self.module.main() result = exc_info.exception.args[0] # nothing should change - self.assertFalse(result['changed']) + self.assertFalse(result["changed"]) # we should return at least one item and content should match - assert result['dnsimple_record_info'][0]['content'] == record + assert 
result["dnsimple_record_info"][0]["content"] == record diff --git a/tests/unit/plugins/modules/test_gem.py b/tests/unit/plugins/modules/test_gem.py index c25045f8121..d29f1366c9e 100644 --- a/tests/unit/plugins/modules/test_gem.py +++ b/tests/unit/plugins/modules/test_gem.py @@ -8,22 +8,27 @@ import pytest from ansible_collections.community.general.plugins.modules import gem -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) def get_command(run_command): """Generate the command line string from the patched run_command""" args = run_command.call_args[0] command = args[0] - return ' '.join(command) + return " ".join(command) class TestGem(ModuleTestCase): def setUp(self): super().setUp() - self.rubygems_path = ['/usr/bin/gem'] + self.rubygems_path = ["/usr/bin/gem"] self.mocker.patch( - 'ansible_collections.community.general.plugins.modules.gem.get_rubygems_path', + "ansible_collections.community.general.plugins.modules.gem.get_rubygems_path", lambda module: copy.deepcopy(self.rubygems_path), ) @@ -34,7 +39,7 @@ def _mocker(self, mocker): def patch_installed_versions(self, versions): """Mocks the versions of the installed package""" - target = 'ansible_collections.community.general.plugins.modules.gem.get_installed_versions' + target = "ansible_collections.community.general.plugins.modules.gem.get_installed_versions" def new(module, remote=False): return versions @@ -42,7 +47,7 @@ def new(module, remote=False): return self.mocker.patch(target, new) def patch_rubygems_version(self, version=None): - target = 'ansible_collections.community.general.plugins.modules.gem.get_rubygems_version' + target = "ansible_collections.community.general.plugins.modules.gem.get_rubygems_version" def new(module): return version @@ -50,23 +55,25 @@ def new(module): return self.mocker.patch(target, new) def patch_run_command(self): - target = 'ansible.module_utils.basic.AnsibleModule.run_command' + target = "ansible.module_utils.basic.AnsibleModule.run_command" mock = self.mocker.patch(target) - mock.return_value = (0, '', '') + mock.return_value = (0, "", "") return mock def test_fails_when_user_install_and_install_dir_are_combined(self): - with set_module_args({ - 'name': 'dummy', - 'user_install': True, - 'install_dir': '/opt/dummy', - }): + with set_module_args( + { + "name": "dummy", + "user_install": True, + "install_dir": "/opt/dummy", + } + ): with pytest.raises(AnsibleFailJson) as exc: gem.main() result = exc.value.args[0] - assert result['failed'] - assert result['msg'] == "install_dir requires user_install=false" + assert result["failed"] + assert result["msg"] == "install_dir requires user_install=false" def test_passes_install_dir_to_gem(self): # XXX: This test is extremely fragile, and makes assumptions about the module code, and how @@ -75,11 +82,13 @@ def test_passes_install_dir_to_gem(self): # test mocks. The only thing that matters is the assertion that this 'gem install' is # invoked with '--install-dir'. 
- with set_module_args({ - 'name': 'dummy', - 'user_install': False, - 'install_dir': '/opt/dummy', - }): + with set_module_args( + { + "name": "dummy", + "user_install": False, + "install_dir": "/opt/dummy", + } + ): self.patch_rubygems_version() self.patch_installed_versions([]) run_command = self.patch_run_command() @@ -88,23 +97,25 @@ def test_passes_install_dir_to_gem(self): gem.main() result = exc.value.args[0] - assert result['changed'] + assert result["changed"] assert run_command.called - assert '--install-dir /opt/dummy' in get_command(run_command) + assert "--install-dir /opt/dummy" in get_command(run_command) def test_passes_install_dir_and_gem_home_when_uninstall_gem(self): # XXX: This test is also extremely fragile because of mocking. # If this breaks, the only that matters is to check whether '--install-dir' is # in the run command, and that GEM_HOME is passed to the command. - with set_module_args({ - 'name': 'dummy', - 'user_install': False, - 'install_dir': '/opt/dummy', - 'state': 'absent', - }): + with set_module_args( + { + "name": "dummy", + "user_install": False, + "install_dir": "/opt/dummy", + "state": "absent", + } + ): self.patch_rubygems_version() - self.patch_installed_versions(['1.0.0']) + self.patch_installed_versions(["1.0.0"]) run_command = self.patch_run_command() @@ -112,19 +123,21 @@ def test_passes_install_dir_and_gem_home_when_uninstall_gem(self): gem.main() result = exc.value.args[0] - assert result['failed'] + assert result["failed"] assert run_command.called - assert '--install-dir /opt/dummy' in get_command(run_command) + assert "--install-dir /opt/dummy" in get_command(run_command) - update_environ = run_command.call_args[1].get('environ_update', {}) - assert update_environ.get('GEM_HOME') == '/opt/dummy' + update_environ = run_command.call_args[1].get("environ_update", {}) + assert update_environ.get("GEM_HOME") == "/opt/dummy" def test_passes_add_force_option(self): - with set_module_args({ - 'name': 'dummy', - 'force': True, - }): + with set_module_args( + { + "name": "dummy", + "force": True, + } + ): self.patch_rubygems_version() self.patch_installed_versions([]) run_command = self.patch_run_command() @@ -133,7 +146,7 @@ def test_passes_add_force_option(self): gem.main() result = exc.value.args[0] - assert result['changed'] + assert result["changed"] assert run_command.called - assert '--force' in get_command(run_command) + assert "--force" in get_command(run_command) diff --git a/tests/unit/plugins/modules/test_github_repo.py b/tests/unit/plugins/modules/test_github_repo.py index 28e973251a4..7df757ac44e 100644 --- a/tests/unit/plugins/modules/test_github_repo.py +++ b/tests/unit/plugins/modules/test_github_repo.py @@ -12,55 +12,48 @@ from ansible_collections.community.general.plugins.modules import github_repo -pytest.importorskip('github') +pytest.importorskip("github") -@urlmatch(netloc=r'.*') +@urlmatch(netloc=r".*") def debug_mock(url, request): print(request.original.__dict__) -@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/orgs/.*', method="get") +@urlmatch(netloc=r"api\.github\.com(:[0-9]+)?$", path=r"/orgs/.*", method="get") def get_orgs_mock(url, request): match = re.search(r"api\.github\.com(:[0-9]+)?/orgs/(?P[^/]+)", request.url) org = match.group("org") # https://docs.github.com/en/rest/reference/orgs#get-an-organization - headers = {'content-type': 'application/json'} - content = { - "login": org, - "url": f"https://api.github.com/orgs/{org}" - } + headers = {"content-type": "application/json"} + content = {"login": 
org, "url": f"https://api.github.com/orgs/{org}"} content = json.dumps(content).encode("utf-8") return response(200, content, headers, None, 5, request) -@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/user', method="get") +@urlmatch(netloc=r"api\.github\.com(:[0-9]+)?$", path=r"/user", method="get") def get_user_mock(url, request): # https://docs.github.com/en/rest/reference/users#get-the-authenticated-user - headers = {'content-type': 'application/json'} - content = { - "login": "octocat", - "url": "https://api.github.com/users/octocat" - } + headers = {"content-type": "application/json"} + content = {"login": "octocat", "url": "https://api.github.com/users/octocat"} content = json.dumps(content).encode("utf-8") return response(200, content, headers, None, 5, request) -@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="get") +@urlmatch(netloc=r"api\.github\.com(:[0-9]+)?$", path=r"/repos/.*/.*", method="get") def get_repo_notfound_mock(url, request): - return response(404, "{\"message\": \"Not Found\"}", "", "Not Found", 5, request) + return response(404, '{"message": "Not Found"}', "", "Not Found", 5, request) -@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="get") +@urlmatch(netloc=r"api\.github\.com(:[0-9]+)?$", path=r"/repos/.*/.*", method="get") def get_repo_mock(url, request): - match = re.search( - r"api\.github\.com(:[0-9]+)?/repos/(?P[^/]+)/(?P[^/]+)", request.url) + match = re.search(r"api\.github\.com(:[0-9]+)?/repos/(?P[^/]+)/(?P[^/]+)", request.url) org = match.group("org") repo = match.group("repo") # https://docs.github.com/en/rest/reference/repos#get-a-repository - headers = {'content-type': 'application/json'} + headers = {"content-type": "application/json"} content = { "name": repo, "full_name": f"{org}/{repo}", @@ -68,21 +61,20 @@ def get_repo_mock(url, request): "private": False, "description": "This your first repo!", "default_branch": "master", - "allow_rebase_merge": True + "allow_rebase_merge": True, } content = json.dumps(content).encode("utf-8") return response(200, content, headers, None, 5, request) -@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="get") +@urlmatch(netloc=r"api\.github\.com(:[0-9]+)?$", path=r"/repos/.*/.*", method="get") def get_private_repo_mock(url, request): - match = re.search( - r"api\.github\.com(:[0-9]+)?/repos/(?P[^/]+)/(?P[^/]+)", request.url) + match = re.search(r"api\.github\.com(:[0-9]+)?/repos/(?P[^/]+)/(?P[^/]+)", request.url) org = match.group("org") repo = match.group("repo") # https://docs.github.com/en/rest/reference/repos#get-a-repository - headers = {'content-type': 'application/json'} + headers = {"content-type": "application/json"} content = { "name": repo, "full_name": f"{org}/{repo}", @@ -90,80 +82,78 @@ def get_private_repo_mock(url, request): "private": True, "description": "This your first repo!", "default_branch": "master", - "allow_rebase_merge": True + "allow_rebase_merge": True, } content = json.dumps(content).encode("utf-8") return response(200, content, headers, None, 5, request) -@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/orgs/.*/repos', method="post") +@urlmatch(netloc=r"api\.github\.com(:[0-9]+)?$", path=r"/orgs/.*/repos", method="post") def create_new_org_repo_mock(url, request): - match = re.search( - r"api\.github\.com(:[0-9]+)?/orgs/(?P[^/]+)/repos", request.url) + match = re.search(r"api\.github\.com(:[0-9]+)?/orgs/(?P[^/]+)/repos", request.url) org = match.group("org") repo = 
json.loads(request.body) - headers = {'content-type': 'application/json'} + headers = {"content-type": "application/json"} # https://docs.github.com/en/rest/reference/repos#create-an-organization-repository content = { - "name": repo['name'], + "name": repo["name"], "full_name": f"{org}/{repo['name']}", - "private": repo.get('private', False), - "description": repo.get('description') + "private": repo.get("private", False), + "description": repo.get("description"), } content = json.dumps(content).encode("utf-8") return response(201, content, headers, None, 5, request) -@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/user/repos', method="post") +@urlmatch(netloc=r"api\.github\.com(:[0-9]+)?$", path=r"/user/repos", method="post") def create_new_user_repo_mock(url, request): repo = json.loads(request.body) - headers = {'content-type': 'application/json'} + headers = {"content-type": "application/json"} # https://docs.github.com/en/rest/reference/repos#create-a-repository-for-the-authenticated-user content = { - "name": repo['name'], + "name": repo["name"], "full_name": f"octocat/{repo['name']}", - "private": repo.get('private', False), - "description": repo.get('description') + "private": repo.get("private", False), + "description": repo.get("description"), } content = json.dumps(content).encode("utf-8") return response(201, content, headers, None, 5, request) -@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="patch") +@urlmatch(netloc=r"api\.github\.com(:[0-9]+)?$", path=r"/repos/.*/.*", method="patch") def patch_repo_mock(url, request): - match = re.search( - r"api\.github\.com(:[0-9]+)?/repos/(?P[^/]+)/(?P[^/]+)", request.url) + match = re.search(r"api\.github\.com(:[0-9]+)?/repos/(?P[^/]+)/(?P[^/]+)", request.url) org = match.group("org") repo = match.group("repo") body = json.loads(request.body) - headers = {'content-type': 'application/json'} + headers = {"content-type": "application/json"} # https://docs.github.com/en/rest/reference/repos#update-a-repository content = { "name": repo, "full_name": f"{org}/{repo}", "url": f"https://api.github.com/repos/{org}/{repo}", - "private": body.get('private', False), - "description": body.get('description'), + "private": body.get("private", False), + "description": body.get("description"), "default_branch": "master", - "allow_rebase_merge": True + "allow_rebase_merge": True, } content = json.dumps(content).encode("utf-8") return response(200, content, headers, None, 5, request) -@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="delete") +@urlmatch(netloc=r"api\.github\.com(:[0-9]+)?$", path=r"/repos/.*/.*", method="delete") def delete_repo_mock(url, request): # https://docs.github.com/en/rest/reference/repos#delete-a-repository return response(204, None, None, None, 5, request) -@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="delete") +@urlmatch(netloc=r"api\.github\.com(:[0-9]+)?$", path=r"/repos/.*/.*", method="delete") def delete_repo_notfound_mock(url, request): # https://docs.github.com/en/rest/reference/repos#delete-a-repository - return response(404, "{\"message\": \"Not Found\"}", "", "Not Found", 5, request) + return response(404, '{"message": "Not Found"}', "", "Not Found", 5, request) class TestGithubRepo(unittest.TestCase): @@ -171,154 +161,170 @@ class TestGithubRepo(unittest.TestCase): @with_httmock(get_repo_notfound_mock) @with_httmock(create_new_org_repo_mock) def test_create_new_org_repo(self): - result = github_repo.run_module({ - 
'username': None, - 'password': None, - "access_token": "mytoken", - "organization": "MyOrganization", - "name": "myrepo", - "description": "Just for fun", - "private": False, - "state": "present", - "api_url": "https://api.github.com", - "force_defaults": False, - }) - - self.assertEqual(result['changed'], True) - self.assertEqual(result['repo']['private'], False) - self.assertEqual(result['repo']['description'], 'Just for fun') + result = github_repo.run_module( + { + "username": None, + "password": None, + "access_token": "mytoken", + "organization": "MyOrganization", + "name": "myrepo", + "description": "Just for fun", + "private": False, + "state": "present", + "api_url": "https://api.github.com", + "force_defaults": False, + } + ) + + self.assertEqual(result["changed"], True) + self.assertEqual(result["repo"]["private"], False) + self.assertEqual(result["repo"]["description"], "Just for fun") @with_httmock(get_orgs_mock) @with_httmock(get_repo_notfound_mock) @with_httmock(create_new_org_repo_mock) def test_create_new_org_repo_incomplete(self): - result = github_repo.run_module({ - 'username': None, - 'password': None, - "access_token": "mytoken", - "organization": "MyOrganization", - "name": "myrepo", - "description": None, - "private": None, - "state": "present", - "api_url": "https://api.github.com", - "force_defaults": False, - }) - - self.assertEqual(result['changed'], True) - self.assertEqual(result['repo']['private'], False) - self.assertEqual(result['repo']['description'], None) + result = github_repo.run_module( + { + "username": None, + "password": None, + "access_token": "mytoken", + "organization": "MyOrganization", + "name": "myrepo", + "description": None, + "private": None, + "state": "present", + "api_url": "https://api.github.com", + "force_defaults": False, + } + ) + + self.assertEqual(result["changed"], True) + self.assertEqual(result["repo"]["private"], False) + self.assertEqual(result["repo"]["description"], None) @with_httmock(get_user_mock) @with_httmock(get_repo_notfound_mock) @with_httmock(create_new_user_repo_mock) def test_create_new_user_repo(self): - result = github_repo.run_module({ - 'username': None, - 'password': None, - "access_token": "mytoken", - "organization": None, - "name": "myrepo", - "description": "Just for fun", - "private": True, - "state": "present", - "api_url": "https://api.github.com", - "force_defaults": False, - }) - self.assertEqual(result['changed'], True) - self.assertEqual(result['repo']['private'], True) + result = github_repo.run_module( + { + "username": None, + "password": None, + "access_token": "mytoken", + "organization": None, + "name": "myrepo", + "description": "Just for fun", + "private": True, + "state": "present", + "api_url": "https://api.github.com", + "force_defaults": False, + } + ) + self.assertEqual(result["changed"], True) + self.assertEqual(result["repo"]["private"], True) @with_httmock(get_orgs_mock) @with_httmock(get_repo_mock) @with_httmock(patch_repo_mock) def test_patch_existing_org_repo(self): - result = github_repo.run_module({ - 'username': None, - 'password': None, - "access_token": "mytoken", - "organization": "MyOrganization", - "name": "myrepo", - "description": "Just for fun", - "private": True, - "state": "present", - "api_url": "https://api.github.com", - "force_defaults": False, - }) - self.assertEqual(result['changed'], True) - self.assertEqual(result['repo']['private'], True) + result = github_repo.run_module( + { + "username": None, + "password": None, + "access_token": "mytoken", + 
"organization": "MyOrganization", + "name": "myrepo", + "description": "Just for fun", + "private": True, + "state": "present", + "api_url": "https://api.github.com", + "force_defaults": False, + } + ) + self.assertEqual(result["changed"], True) + self.assertEqual(result["repo"]["private"], True) @with_httmock(get_orgs_mock) @with_httmock(get_private_repo_mock) def test_idempotency_existing_org_private_repo(self): - result = github_repo.run_module({ - 'username': None, - 'password': None, - "access_token": "mytoken", - "organization": "MyOrganization", - "name": "myrepo", - "description": None, - "private": None, - "state": "present", - "api_url": "https://api.github.com", - "force_defaults": False, - }) - self.assertEqual(result['changed'], False) - self.assertEqual(result['repo']['private'], True) - self.assertEqual(result['repo']['description'], 'This your first repo!') + result = github_repo.run_module( + { + "username": None, + "password": None, + "access_token": "mytoken", + "organization": "MyOrganization", + "name": "myrepo", + "description": None, + "private": None, + "state": "present", + "api_url": "https://api.github.com", + "force_defaults": False, + } + ) + self.assertEqual(result["changed"], False) + self.assertEqual(result["repo"]["private"], True) + self.assertEqual(result["repo"]["description"], "This your first repo!") @with_httmock(get_orgs_mock) @with_httmock(get_repo_mock) @with_httmock(delete_repo_mock) def test_delete_org_repo(self): - result = github_repo.run_module({ - 'username': None, - 'password': None, - "access_token": "mytoken", - "organization": "MyOrganization", - "name": "myrepo", - "description": "Just for fun", - "private": False, - "state": "absent", - "api_url": "https://api.github.com", - "force_defaults": False, - }) - self.assertEqual(result['changed'], True) + result = github_repo.run_module( + { + "username": None, + "password": None, + "access_token": "mytoken", + "organization": "MyOrganization", + "name": "myrepo", + "description": "Just for fun", + "private": False, + "state": "absent", + "api_url": "https://api.github.com", + "force_defaults": False, + } + ) + self.assertEqual(result["changed"], True) @with_httmock(get_user_mock) @with_httmock(get_repo_mock) @with_httmock(delete_repo_mock) def test_delete_user_repo(self): - result = github_repo.run_module({ - 'username': None, - 'password': None, - "access_token": "mytoken", - "organization": None, - "name": "myrepo", - "description": "Just for fun", - "private": False, - "state": "absent", - "api_url": "https://api.github.com", - "force_defaults": False, - }) - self.assertEqual(result['changed'], True) + result = github_repo.run_module( + { + "username": None, + "password": None, + "access_token": "mytoken", + "organization": None, + "name": "myrepo", + "description": "Just for fun", + "private": False, + "state": "absent", + "api_url": "https://api.github.com", + "force_defaults": False, + } + ) + self.assertEqual(result["changed"], True) @with_httmock(get_orgs_mock) @with_httmock(get_repo_notfound_mock) @with_httmock(delete_repo_notfound_mock) def test_delete_org_repo_notfound(self): - result = github_repo.run_module({ - 'username': None, - 'password': None, - "access_token": "mytoken", - "organization": "MyOrganization", - "name": "myrepo", - "description": "Just for fun", - "private": True, - "state": "absent", - "api_url": "https://api.github.com", - "force_defaults": False, - }) - self.assertEqual(result['changed'], False) + result = github_repo.run_module( + { + "username": None, + 
"password": None, + "access_token": "mytoken", + "organization": "MyOrganization", + "name": "myrepo", + "description": "Just for fun", + "private": True, + "state": "absent", + "api_url": "https://api.github.com", + "force_defaults": False, + } + ) + self.assertEqual(result["changed"], False) if __name__ == "__main__": diff --git a/tests/unit/plugins/modules/test_gitlab_deploy_key.py b/tests/unit/plugins/modules/test_gitlab_deploy_key.py index 9ea0d3cedf5..bb90aaa1acb 100644 --- a/tests/unit/plugins/modules/test_gitlab_deploy_key.py +++ b/tests/unit/plugins/modules/test_gitlab_deploy_key.py @@ -1,4 +1,3 @@ - # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -18,10 +17,14 @@ def _dummy(x): pytestmark = [] try: - from .gitlab import (GitlabModuleTestCase, - python_version_match_requirement, - resp_get_project, resp_find_project_deploy_key, - resp_create_project_deploy_key, resp_delete_project_deploy_key) + from .gitlab import ( + GitlabModuleTestCase, + python_version_match_requirement, + resp_get_project, + resp_find_project_deploy_key, + resp_create_project_deploy_key, + resp_delete_project_deploy_key, + ) # GitLab module requirements if python_version_match_requirement(): @@ -67,11 +70,16 @@ def test_deploy_key_exist(self): def test_create_deploy_key(self): project = self.gitlab_instance.projects.get(1) - deploy_key = self.moduleUtil.create_deploy_key(project, {"title": "Public key", - "key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM" - "4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc" - "KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfD" - "zpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0="}) + deploy_key = self.moduleUtil.create_deploy_key( + project, + { + "title": "Public key", + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM" + "4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc" + "KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfD" + "zpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=", + }, + ) self.assertEqual(type(deploy_key), ProjectKey) self.assertEqual(deploy_key.title, "Public key") diff --git a/tests/unit/plugins/modules/test_gitlab_group.py b/tests/unit/plugins/modules/test_gitlab_group.py index 25262060078..1d44cb5d449 100644 --- a/tests/unit/plugins/modules/test_gitlab_group.py +++ b/tests/unit/plugins/modules/test_gitlab_group.py @@ -1,4 +1,3 @@ - # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -18,10 +17,16 @@ def _dummy(x): pytestmark = [] try: - from .gitlab import (GitlabModuleTestCase, - python_version_match_requirement, - resp_get_group, resp_get_missing_group, resp_create_group, - resp_create_subgroup, resp_delete_group, resp_find_group_project) + from .gitlab import ( + GitlabModuleTestCase, + python_version_match_requirement, + resp_get_group, + resp_get_missing_group, + resp_create_group, + resp_create_subgroup, + resp_delete_group, + resp_find_group_project, + ) # GitLab module requirements if python_version_match_requirement(): @@ -65,13 +70,16 @@ def test_exist_group_2(self): @with_httmock(resp_create_group) def test_create_group(self): - group = self.moduleUtil.create_group({'name': "Foobar Group", - 'path': "foo-bar", - 'description': "An interesting group", - 
'project_creation_level': "developer", - 'subgroup_creation_level': "maintainer", - 'require_two_factor_authentication': True, - }) + group = self.moduleUtil.create_group( + { + "name": "Foobar Group", + "path": "foo-bar", + "description": "An interesting group", + "project_creation_level": "developer", + "subgroup_creation_level": "maintainer", + "require_two_factor_authentication": True, + } + ) self.assertEqual(type(group), Group) self.assertEqual(group.name, "Foobar Group") @@ -84,12 +92,15 @@ def test_create_group(self): @with_httmock(resp_create_subgroup) def test_create_subgroup(self): - group = self.moduleUtil.create_group({'name': "BarFoo Group", - 'path': "bar-foo", - 'parent_id': 1, - 'project_creation_level': "noone", - 'require_two_factor_authentication': True, - }) + group = self.moduleUtil.create_group( + { + "name": "BarFoo Group", + "path": "bar-foo", + "parent_id": 1, + "project_creation_level": "noone", + "require_two_factor_authentication": True, + } + ) self.assertEqual(type(group), Group) self.assertEqual(group.name, "BarFoo Group") @@ -102,11 +113,15 @@ def test_create_subgroup(self): @with_httmock(resp_get_group) def test_update_group(self): group = self.gitlab_instance.groups.get(1) - changed, newGroup = self.moduleUtil.update_group(group, {'name': "BarFoo Group", - 'visibility': "private", - 'project_creation_level': "maintainer", - 'require_two_factor_authentication': True, - }) + changed, newGroup = self.moduleUtil.update_group( + group, + { + "name": "BarFoo Group", + "visibility": "private", + "project_creation_level": "maintainer", + "require_two_factor_authentication": True, + }, + ) self.assertEqual(changed, True) self.assertEqual(newGroup.name, "BarFoo Group") @@ -114,7 +129,7 @@ def test_update_group(self): self.assertEqual(newGroup.project_creation_level, "maintainer") self.assertEqual(newGroup.require_two_factor_authentication, True) - changed, newGroup = self.moduleUtil.update_group(group, {'name': "BarFoo Group"}) + changed, newGroup = self.moduleUtil.update_group(group, {"name": "BarFoo Group"}) self.assertEqual(changed, False) diff --git a/tests/unit/plugins/modules/test_gitlab_group_access_token.py b/tests/unit/plugins/modules/test_gitlab_group_access_token.py index f728d0250fc..5473623708e 100644 --- a/tests/unit/plugins/modules/test_gitlab_group_access_token.py +++ b/tests/unit/plugins/modules/test_gitlab_group_access_token.py @@ -1,4 +1,3 @@ - # Copyright (c) 2023, Zoran Krleza (zoran.krleza@true-north.hr) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -15,7 +14,7 @@ def python_gitlab_version_match_requirement(): - return tuple(map(int, gitlab.__version__.split('.'))) >= PYTHON_GITLAB_MINIMAL_VERSION + return tuple(map(int, gitlab.__version__.split("."))) >= PYTHON_GITLAB_MINIMAL_VERSION def _dummy(x): @@ -26,12 +25,14 @@ def _dummy(x): pytestmark = [] try: - from .gitlab import (GitlabModuleTestCase, - resp_get_user, - resp_get_group, - resp_list_group_access_tokens, - resp_create_group_access_tokens, - resp_revoke_group_access_tokens) + from .gitlab import ( + GitlabModuleTestCase, + resp_get_user, + resp_get_group, + resp_list_group_access_tokens, + resp_create_group_access_tokens, + resp_revoke_group_access_tokens, + ) except ImportError: pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing")) @@ -56,7 +57,9 @@ class TestGitlabGroupAccessToken(GitlabModuleTestCase): def setUp(self): 
super().setUp() if not python_gitlab_version_match_requirement(): - self.skipTest(f"python-gitlab {'.'.join(map(str, PYTHON_GITLAB_MINIMAL_VERSION))}+ is needed for gitlab_group_access_token") + self.skipTest( + f"python-gitlab {'.'.join(map(str, PYTHON_GITLAB_MINIMAL_VERSION))}+ is needed for gitlab_group_access_token" + ) self.moduleUtil = GitLabGroupAccessToken(module=self.mock_module, gitlab_instance=self.gitlab_instance) @@ -110,7 +113,9 @@ def test_create_access_token(self): groups = self.gitlab_instance.groups.get(1) self.assertIsNotNone(groups) - rvalue = self.moduleUtil.create_access_token(groups, {'name': "tokenXYZ", 'scopes': ["api"], 'access_level': 20, 'expires_at': "2024-12-31"}) + rvalue = self.moduleUtil.create_access_token( + groups, {"name": "tokenXYZ", "scopes": ["api"], "access_level": 20, "expires_at": "2024-12-31"} + ) self.assertEqual(rvalue, True) self.assertIsNotNone(self.moduleUtil.access_token_object) diff --git a/tests/unit/plugins/modules/test_gitlab_hook.py b/tests/unit/plugins/modules/test_gitlab_hook.py index fc4a76c90db..71743a51b1a 100644 --- a/tests/unit/plugins/modules/test_gitlab_hook.py +++ b/tests/unit/plugins/modules/test_gitlab_hook.py @@ -1,4 +1,3 @@ - # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -18,10 +17,14 @@ def _dummy(x): pytestmark = [] try: - from .gitlab import (GitlabModuleTestCase, - python_version_match_requirement, - resp_get_project, resp_find_project_hook, - resp_create_project_hook, resp_delete_project_hook) + from .gitlab import ( + GitlabModuleTestCase, + python_version_match_requirement, + resp_get_project, + resp_find_project_hook, + resp_create_project_hook, + resp_delete_project_hook, + ) # GitLab module requirements if python_version_match_requirement(): diff --git a/tests/unit/plugins/modules/test_gitlab_project.py b/tests/unit/plugins/modules/test_gitlab_project.py index 2ef67fe553b..fd7460d7e27 100644 --- a/tests/unit/plugins/modules/test_gitlab_project.py +++ b/tests/unit/plugins/modules/test_gitlab_project.py @@ -1,4 +1,3 @@ - # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -18,10 +17,16 @@ def _dummy(x): pytestmark = [] try: - from .gitlab import (GitlabModuleTestCase, - python_version_match_requirement, - resp_get_group, resp_get_project_by_name, resp_create_project, - resp_get_project, resp_delete_project, resp_get_user) + from .gitlab import ( + GitlabModuleTestCase, + python_version_match_requirement, + resp_get_group, + resp_get_project_by_name, + resp_create_project, + resp_get_project, + resp_delete_project, + resp_get_user, + ) # GitLab module requirements if python_version_match_requirement(): @@ -70,7 +75,9 @@ def test_project_exist(self): @with_httmock(resp_create_project) def test_create_project(self): group = self.gitlab_instance.groups.get(1) - project = self.moduleUtil.create_project(group, {"name": "Diaspora Client", "path": "diaspora-client", "namespace_id": group.id}) + project = self.moduleUtil.create_project( + group, {"name": "Diaspora Client", "path": "diaspora-client", "namespace_id": group.id} + ) self.assertEqual(type(project), Project) self.assertEqual(project.name, "Diaspora Client") @@ -97,14 +104,18 @@ def test_update_project_merge_method(self): 
# merge_method should be 'merge' by default self.assertEqual(project.merge_method, "merge") - changed, newProject = self.moduleUtil.update_project(project, {"name": "New Name", "merge_method": "rebase_merge"}) + changed, newProject = self.moduleUtil.update_project( + project, {"name": "New Name", "merge_method": "rebase_merge"} + ) self.assertEqual(changed, True) self.assertEqual(type(newProject), Project) self.assertEqual(newProject.name, "New Name") self.assertEqual(newProject.merge_method, "rebase_merge") - changed, newProject = self.moduleUtil.update_project(project, {"name": "New Name", "merge_method": "rebase_merge"}) + changed, newProject = self.moduleUtil.update_project( + project, {"name": "New Name", "merge_method": "rebase_merge"} + ) self.assertEqual(changed, False) self.assertEqual(newProject.name, "New Name") diff --git a/tests/unit/plugins/modules/test_gitlab_project_access_token.py b/tests/unit/plugins/modules/test_gitlab_project_access_token.py index df7bd4ecf3c..3eccae9c5ff 100644 --- a/tests/unit/plugins/modules/test_gitlab_project_access_token.py +++ b/tests/unit/plugins/modules/test_gitlab_project_access_token.py @@ -1,4 +1,3 @@ - # Copyright (c) 2023, Zoran Krleza (zoran.krleza@true-north.hr) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -15,7 +14,7 @@ def python_gitlab_version_match_requirement(): - return tuple(map(int, gitlab.__version__.split('.'))) >= PYTHON_GITLAB_MINIMAL_VERSION + return tuple(map(int, gitlab.__version__.split("."))) >= PYTHON_GITLAB_MINIMAL_VERSION def _dummy(x): @@ -26,12 +25,14 @@ def _dummy(x): pytestmark = [] try: - from .gitlab import (GitlabModuleTestCase, - resp_get_user, - resp_get_project, - resp_list_project_access_tokens, - resp_create_project_access_tokens, - resp_revoke_project_access_tokens) + from .gitlab import ( + GitlabModuleTestCase, + resp_get_user, + resp_get_project, + resp_list_project_access_tokens, + resp_create_project_access_tokens, + resp_revoke_project_access_tokens, + ) except ImportError: pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing")) @@ -56,7 +57,9 @@ class TestGitlabProjectAccessToken(GitlabModuleTestCase): def setUp(self): super().setUp() if not python_gitlab_version_match_requirement(): - self.skipTest(f"python-gitlab {'.'.join(map(str, PYTHON_GITLAB_MINIMAL_VERSION))}+ is needed for gitlab_project_access_token") + self.skipTest( + f"python-gitlab {'.'.join(map(str, PYTHON_GITLAB_MINIMAL_VERSION))}+ is needed for gitlab_project_access_token" + ) self.moduleUtil = GitLabProjectAccessToken(module=self.mock_module, gitlab_instance=self.gitlab_instance) @@ -110,7 +113,9 @@ def test_create_access_token(self): project = self.gitlab_instance.projects.get(1) self.assertIsNotNone(project) - rvalue = self.moduleUtil.create_access_token(project, {'name': "tokenXYZ", 'scopes': ["api"], 'access_level': 20, 'expires_at': "2024-12-31"}) + rvalue = self.moduleUtil.create_access_token( + project, {"name": "tokenXYZ", "scopes": ["api"], "access_level": 20, "expires_at": "2024-12-31"} + ) self.assertEqual(rvalue, True) self.assertIsNotNone(self.moduleUtil.access_token_object) diff --git a/tests/unit/plugins/modules/test_gitlab_protected_branch.py b/tests/unit/plugins/modules/test_gitlab_protected_branch.py index bc83b01946e..39917d8d41c 100644 --- a/tests/unit/plugins/modules/test_gitlab_protected_branch.py +++ b/tests/unit/plugins/modules/test_gitlab_protected_branch.py 
@@ -1,4 +1,3 @@ - # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -20,12 +19,17 @@ def _dummy(x): pytestmark = [] try: - from .gitlab import (GitlabModuleTestCase, - python_version_match_requirement, python_gitlab_module_version, - python_gitlab_version_match_requirement, - resp_get_protected_branch, resp_get_project_by_name, - resp_get_protected_branch_not_exist, - resp_delete_protected_branch, resp_get_user) + from .gitlab import ( + GitlabModuleTestCase, + python_version_match_requirement, + python_gitlab_module_version, + python_gitlab_version_match_requirement, + resp_get_protected_branch, + resp_get_project_by_name, + resp_get_protected_branch_not_exist, + resp_delete_protected_branch, + resp_get_user, + ) # GitLab module requirements if python_version_match_requirement(): @@ -45,7 +49,7 @@ def _dummy(x): with_httmock = _dummy -class MockProtectedBranch(): +class MockProtectedBranch: def __init__(self, merge_access_levels, push_access_levels): self.merge_access_levels = merge_access_levels self.push_access_levels = push_access_levels @@ -58,7 +62,9 @@ def setUp(self): super().setUp() self.gitlab_instance.user = self.gitlab_instance.users.get(1) - self.moduleUtil = GitlabProtectedBranch(module=self.mock_module, project="foo-bar/diaspora-client", gitlab_instance=self.gitlab_instance) + self.moduleUtil = GitlabProtectedBranch( + module=self.mock_module, project="foo-bar/diaspora-client", gitlab_instance=self.gitlab_instance + ) @with_httmock(resp_get_protected_branch) def test_protected_branch_exist(self): @@ -75,10 +81,7 @@ def test_can_update_zero_delta(self): merge_access_levels=[{"access_level": 40}], push_access_levels=[{"access_level": 40}], ) - options = { - "merge_access_levels": 40, - "push_access_level": 40 - } + options = {"merge_access_levels": 40, "push_access_level": 40} rvalue = self.moduleUtil.can_update(protected_branch, options) self.assertEqual(rvalue, True) @@ -87,10 +90,7 @@ def test_can_update_no_configured(self): merge_access_levels=[{"access_level": 40}], push_access_levels=[{"access_level": 40}], ) - options = { - "merge_access_levels": None, - "push_access_level": None - } + options = {"merge_access_levels": None, "push_access_level": None} rvalue = self.moduleUtil.can_update(protected_branch, options) self.assertEqual(rvalue, True) @@ -99,10 +99,7 @@ def test_can_update_different_settings(self): merge_access_levels=[{"access_level": 40}], push_access_levels=[{"access_level": 40}], ) - options = { - "merge_access_levels": 40, - "push_access_level": 30 - } + options = {"merge_access_levels": 40, "push_access_level": 30} rvalue = self.moduleUtil.can_update(protected_branch, options) self.assertEqual(rvalue, False) diff --git a/tests/unit/plugins/modules/test_gitlab_runner.py b/tests/unit/plugins/modules/test_gitlab_runner.py index 47829f0d966..37ce965bbc6 100644 --- a/tests/unit/plugins/modules/test_gitlab_runner.py +++ b/tests/unit/plugins/modules/test_gitlab_runner.py @@ -1,4 +1,3 @@ - # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -20,14 +19,20 @@ def _dummy(x): pytestmark = [] try: - from .gitlab import (FakeAnsibleModule, - GitlabModuleTestCase, - python_version_match_requirement, - resp_find_runners_all, 
resp_find_runners_list, - resp_find_project_runners, resp_find_group_runners, - resp_get_runner, - resp_create_runner, resp_delete_runner, - resp_get_project_by_name, resp_get_group_by_name) + from .gitlab import ( + FakeAnsibleModule, + GitlabModuleTestCase, + python_version_match_requirement, + resp_find_runners_all, + resp_find_runners_list, + resp_find_project_runners, + resp_find_group_runners, + resp_get_runner, + resp_create_runner, + resp_delete_runner, + resp_get_project_by_name, + resp_get_group_by_name, + ) # GitLab module requirements if python_version_match_requirement(): @@ -53,8 +58,12 @@ class TestGitlabRunner(GitlabModuleTestCase): def setUp(self): super().setUp() - self.module_util_all = GitLabRunner(module=FakeAnsibleModule({"owned": False}), gitlab_instance=self.gitlab_instance) - self.module_util_owned = GitLabRunner(module=FakeAnsibleModule({"owned": True}), gitlab_instance=self.gitlab_instance) + self.module_util_all = GitLabRunner( + module=FakeAnsibleModule({"owned": False}), gitlab_instance=self.gitlab_instance + ) + self.module_util_owned = GitLabRunner( + module=FakeAnsibleModule({"owned": True}), gitlab_instance=self.gitlab_instance + ) @with_httmock(resp_find_runners_all) @with_httmock(resp_get_runner) @@ -82,8 +91,10 @@ def test_runner_exist_owned(self): @with_httmock(resp_get_runner) @with_httmock(resp_get_project_by_name) def test_project_runner_exist(self): - gitlab_project = self.gitlab_instance.projects.get('foo-bar/diaspora-client') - module_util = GitLabRunner(module=FakeAnsibleModule(), gitlab_instance=self.gitlab_instance, project=gitlab_project) + gitlab_project = self.gitlab_instance.projects.get("foo-bar/diaspora-client") + module_util = GitLabRunner( + module=FakeAnsibleModule(), gitlab_instance=self.gitlab_instance, project=gitlab_project + ) rvalue = module_util.exists_runner("test-1-20220210") @@ -98,7 +109,7 @@ def test_project_runner_exist(self): @with_httmock(resp_get_runner) @pytest.mark.skipif(gitlab.__version__ < "2.3.0", reason="require python-gitlab >= 2.3.0") def test_group_runner_exist(self): - gitlab_group = self.gitlab_instance.groups.get('foo-bar') + gitlab_group = self.gitlab_instance.groups.get("foo-bar") module_util = GitLabRunner(module=FakeAnsibleModule(), gitlab_instance=self.gitlab_instance, group=gitlab_group) rvalue = module_util.exists_runner("test-3-20220210") diff --git a/tests/unit/plugins/modules/test_gitlab_user.py b/tests/unit/plugins/modules/test_gitlab_user.py index da0a30512aa..25c8e042173 100644 --- a/tests/unit/plugins/modules/test_gitlab_user.py +++ b/tests/unit/plugins/modules/test_gitlab_user.py @@ -1,4 +1,3 @@ - # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -18,12 +17,21 @@ def _dummy(x): pytestmark = [] try: - from .gitlab import (GitlabModuleTestCase, - python_version_match_requirement, - resp_find_user, resp_get_user, resp_get_user_keys, - resp_create_user_keys, resp_create_user, resp_delete_user, - resp_get_member, resp_get_group, resp_add_member, - resp_update_member, resp_get_member) + from .gitlab import ( + GitlabModuleTestCase, + python_version_match_requirement, + resp_find_user, + resp_get_user, + resp_get_user_keys, + resp_create_user_keys, + resp_create_user, + resp_delete_user, + resp_get_member, + resp_get_group, + resp_add_member, + resp_update_member, + resp_get_member, + ) # GitLab module requirements if 
python_version_match_requirement(): @@ -78,8 +86,9 @@ def test_find_user(self): @with_httmock(resp_create_user) def test_create_user(self): - user = self.moduleUtil.create_user({'email': 'john@example.com', 'password': 's3cur3s3cr3T', - 'username': 'john_smith', 'name': 'John Smith'}) + user = self.moduleUtil.create_user( + {"email": "john@example.com", "password": "s3cur3s3cr3T", "username": "john_smith", "name": "John Smith"} + ) self.assertEqual(type(user), User) self.assertEqual(user.name, "John Smith") self.assertEqual(user.id, 1) @@ -89,30 +98,30 @@ def test_update_user(self): user = self.gitlab_instance.users.get(1) changed, newUser = self.moduleUtil.update_user( - user, - {'name': {'value': "Jack Smith"}, "is_admin": {'value': "true", 'setter': 'admin'}}, {} + user, {"name": {"value": "Jack Smith"}, "is_admin": {"value": "true", "setter": "admin"}}, {} ) self.assertEqual(changed, True) self.assertEqual(newUser.name, "Jack Smith") self.assertEqual(newUser.admin, "true") - changed, newUser = self.moduleUtil.update_user(user, {'name': {'value': "Jack Smith"}}, {}) + changed, newUser = self.moduleUtil.update_user(user, {"name": {"value": "Jack Smith"}}, {}) self.assertEqual(changed, False) changed, newUser = self.moduleUtil.update_user( user, - {}, { - 'skip_reconfirmation': {'value': True}, - 'password': {'value': 'super_secret-super_secret'}, - } + {}, + { + "skip_reconfirmation": {"value": True}, + "password": {"value": "super_secret-super_secret"}, + }, ) # note: uncheckable parameters dont set changed state self.assertEqual(changed, False) self.assertEqual(newUser.skip_reconfirmation, True) - self.assertEqual(newUser.password, 'super_secret-super_secret') + self.assertEqual(newUser.password, "super_secret-super_secret") @with_httmock(resp_find_user) @with_httmock(resp_delete_user) @@ -139,22 +148,30 @@ def test_sshkey_exist(self): def test_create_sshkey(self): user = self.gitlab_instance.users.get(1) - rvalue = self.moduleUtil.add_ssh_key_to_user(user, { - 'name': "Public key", - 'file': "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJe" - "jgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4" - "soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=", - 'expires_at': ""}) + rvalue = self.moduleUtil.add_ssh_key_to_user( + user, + { + "name": "Public key", + "file": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJe" + "jgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4" + "soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=", + "expires_at": "", + }, + ) self.assertEqual(rvalue, False) - rvalue = self.moduleUtil.add_ssh_key_to_user(user, { - 'name': "Private key", - 'file': "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDA1YotVDm2mAyk2tPt4E7AHm01sS6JZmcU" - "dRuSuA5zszUJzYPPUSRAX3BCgTqLqYx//UuVncK7YqLVSbbwjKR2Ez5lISgCnVfLVEXzwhv+" - "xawxKWmI7hJ5S0tOv6MJ+IxyTa4xcKwJTwB86z22n9fVOQeJTR2dSOH1WJrf0PvRk+KVNY2j" - "TiGHTi9AIjLnyD/jWRpOgtdfkLRc8EzAWrWlgNmH2WOKBw6za0az6XoG75obUdFVdW3qcD0x" - "c809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF", - 'expires_at': "2027-01-01"}) + rvalue = self.moduleUtil.add_ssh_key_to_user( + user, + { + "name": "Private key", + "file": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDA1YotVDm2mAyk2tPt4E7AHm01sS6JZmcU" + "dRuSuA5zszUJzYPPUSRAX3BCgTqLqYx//UuVncK7YqLVSbbwjKR2Ez5lISgCnVfLVEXzwhv+" + "xawxKWmI7hJ5S0tOv6MJ+IxyTa4xcKwJTwB86z22n9fVOQeJTR2dSOH1WJrf0PvRk+KVNY2j" + 
"TiGHTi9AIjLnyD/jWRpOgtdfkLRc8EzAWrWlgNmH2WOKBw6za0az6XoG75obUdFVdW3qcD0x" + "c809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF", + "expires_at": "2027-01-01", + }, + ) self.assertEqual(rvalue, True) @with_httmock(resp_get_group) diff --git a/tests/unit/plugins/modules/test_homebrew.py b/tests/unit/plugins/modules/test_homebrew.py index e96bf6bce11..395f2f9201a 100644 --- a/tests/unit/plugins/modules/test_homebrew.py +++ b/tests/unit/plugins/modules/test_homebrew.py @@ -9,7 +9,6 @@ class TestHomebrewModule(unittest.TestCase): - def setUp(self): self.brew_app_names = ["git-ssh", "awscli@1", "bash"] diff --git a/tests/unit/plugins/modules/test_icinga2_feature.py b/tests/unit/plugins/modules/test_icinga2_feature.py index b67619d5ac5..c818e1462cd 100644 --- a/tests/unit/plugins/modules/test_icinga2_feature.py +++ b/tests/unit/plugins/modules/test_icinga2_feature.py @@ -1,4 +1,3 @@ - # Copyright (c) 2018, Ansible Project # Copyright (c) 2018, Abhijeet Kasurde # @@ -9,7 +8,12 @@ from unittest.mock import patch from ansible_collections.community.general.plugins.modules import icinga2_feature -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) from ansible.module_utils import basic @@ -25,7 +29,7 @@ def setUp(self): """Setup.""" super().setUp() self.module = icinga2_feature - self.mock_get_bin_path = patch.object(basic.AnsibleModule, 'get_bin_path', get_bin_path) + self.mock_get_bin_path = patch.object(basic.AnsibleModule, "get_bin_path", get_bin_path) self.mock_get_bin_path.start() self.addCleanup(self.mock_get_bin_path.stop) # ensure that the patching is 'undone' @@ -41,58 +45,61 @@ def test_without_required_parameters(self): def test_enable_feature(self): """Check that result is changed.""" - with set_module_args({ - 'name': 'api', - }): - with patch.object(basic.AnsibleModule, 'run_command') as run_command: - run_command.return_value = 0, '', '' # successful execution, no output + with set_module_args( + { + "name": "api", + } + ): + with patch.object(basic.AnsibleModule, "run_command") as run_command: + run_command.return_value = 0, "", "" # successful execution, no output with self.assertRaises(AnsibleExitJson) as result: icinga2_feature.main() - self.assertTrue(result.exception.args[0]['changed']) + self.assertTrue(result.exception.args[0]["changed"]) self.assertEqual(run_command.call_count, 2) - self.assertEqual(run_command.call_args[0][0][-1], 'api') + self.assertEqual(run_command.call_args[0][0][-1], "api") def test_enable_feature_with_check_mode(self): """Check that result is changed in check mode.""" - with set_module_args({ - 'name': 'api', - '_ansible_check_mode': True, - }): - with patch.object(basic.AnsibleModule, 'run_command') as run_command: - run_command.return_value = 0, '', '' # successful execution, no output + with set_module_args( + { + "name": "api", + "_ansible_check_mode": True, + } + ): + with patch.object(basic.AnsibleModule, "run_command") as run_command: + run_command.return_value = 0, "", "" # successful execution, no output with self.assertRaises(AnsibleExitJson) as result: icinga2_feature.main() - self.assertTrue(result.exception.args[0]['changed']) + self.assertTrue(result.exception.args[0]["changed"]) 
self.assertEqual(run_command.call_count, 1) def test_disable_feature(self): """Check that result is changed.""" - with set_module_args({ - 'name': 'api', - 'state': 'absent' - }): - with patch.object(basic.AnsibleModule, 'run_command') as run_command: - run_command.return_value = 0, '', '' # successful execution, no output + with set_module_args({"name": "api", "state": "absent"}): + with patch.object(basic.AnsibleModule, "run_command") as run_command: + run_command.return_value = 0, "", "" # successful execution, no output with self.assertRaises(AnsibleExitJson) as result: icinga2_feature.main() - self.assertTrue(result.exception.args[0]['changed']) + self.assertTrue(result.exception.args[0]["changed"]) self.assertEqual(run_command.call_count, 2) - self.assertEqual(run_command.call_args[0][0][-1], 'api') + self.assertEqual(run_command.call_args[0][0][-1], "api") def test_disable_feature_with_check_mode(self): """Check that result is changed in check mode.""" - with set_module_args({ - 'name': 'api', - 'state': 'absent', - '_ansible_check_mode': True, - }): - with patch.object(basic.AnsibleModule, 'run_command') as run_command: - run_command.return_value = 0, '', '' # successful execution, no output + with set_module_args( + { + "name": "api", + "state": "absent", + "_ansible_check_mode": True, + } + ): + with patch.object(basic.AnsibleModule, "run_command") as run_command: + run_command.return_value = 0, "", "" # successful execution, no output with self.assertRaises(AnsibleExitJson) as result: icinga2_feature.main() - self.assertTrue(result.exception.args[0]['changed']) + self.assertTrue(result.exception.args[0]["changed"]) self.assertEqual(run_command.call_count, 1) diff --git a/tests/unit/plugins/modules/test_ini_file.py b/tests/unit/plugins/modules/test_ini_file.py index 071bc55dd22..821b13a4b04 100644 --- a/tests/unit/plugins/modules/test_ini_file.py +++ b/tests/unit/plugins/modules/test_ini_file.py @@ -8,13 +8,12 @@ from ansible_collections.community.general.plugins.modules import ini_file -def do_test(option, ignore_spaces, newline, before, expected_after, - expected_changed, expected_msg): +def do_test(option, ignore_spaces, newline, before, expected_after, expected_changed, expected_msg): section_lines = [before] changed_lines = [0] changed, msg = ini_file.update_section_line( - option, None, section_lines, 0, changed_lines, ignore_spaces, - newline, None) + option, None, section_lines, 0, changed_lines, ignore_spaces, newline, None + ) assert section_lines[0] == expected_after assert changed == expected_changed assert changed_lines[0] == 1 @@ -22,29 +21,29 @@ def do_test(option, ignore_spaces, newline, before, expected_after, def test_ignore_spaces_comment(): - oldline = ';foobar=baz' - newline = 'foobar = baz' - do_test('foobar', True, newline, oldline, newline, True, 'option changed') + oldline = ";foobar=baz" + newline = "foobar = baz" + do_test("foobar", True, newline, oldline, newline, True, "option changed") def test_ignore_spaces_changed(): - oldline = 'foobar=baz' - newline = 'foobar = freeble' - do_test('foobar', True, newline, oldline, newline, True, 'option changed') + oldline = "foobar=baz" + newline = "foobar = freeble" + do_test("foobar", True, newline, oldline, newline, True, "option changed") def test_ignore_spaces_unchanged(): - oldline = 'foobar=baz' - newline = 'foobar = baz' - do_test('foobar', True, newline, oldline, oldline, False, None) + oldline = "foobar=baz" + newline = "foobar = baz" + do_test("foobar", True, newline, oldline, oldline, False, None) def 
test_no_ignore_spaces_changed(): - oldline = 'foobar=baz' - newline = 'foobar = baz' - do_test('foobar', False, newline, oldline, newline, True, 'option changed') + oldline = "foobar=baz" + newline = "foobar = baz" + do_test("foobar", False, newline, oldline, newline, True, "option changed") def test_no_ignore_spaces_unchanged(): - newline = 'foobar=baz' - do_test('foobar', False, newline, newline, newline, False, None) + newline = "foobar=baz" + do_test("foobar", False, newline, newline, newline, False, None) diff --git a/tests/unit/plugins/modules/test_ipa_getkeytab.py b/tests/unit/plugins/modules/test_ipa_getkeytab.py index af03275fd43..1bd35cecb68 100644 --- a/tests/unit/plugins/modules/test_ipa_getkeytab.py +++ b/tests/unit/plugins/modules/test_ipa_getkeytab.py @@ -7,7 +7,11 @@ from unittest.mock import call, patch from ansible_collections.community.general.plugins.modules import ipa_getkeytab -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) class IPAKeytabModuleTestCase(ModuleTestCase): @@ -16,11 +20,11 @@ class IPAKeytabModuleTestCase(ModuleTestCase): def setUp(self): super().setUp() ansible_module_path = "ansible_collections.community.general.plugins.modules.ipa_getkeytab.AnsibleModule" - self.mock_run_command = patch(f'{ansible_module_path}.run_command') + self.mock_run_command = patch(f"{ansible_module_path}.run_command") self.module_main_command = self.mock_run_command.start() - self.mock_get_bin_path = patch(f'{ansible_module_path}.get_bin_path') + self.mock_get_bin_path = patch(f"{ansible_module_path}.get_bin_path") self.get_bin_path = self.mock_get_bin_path.start() - self.get_bin_path.return_value = '/testbin/ipa_getkeytab' + self.get_bin_path.return_value = "/testbin/ipa_getkeytab" def tearDown(self): self.mock_run_command.stop() @@ -33,26 +37,35 @@ def module_main(self, exit_exc): return exc.exception.args[0] def test_present(self): - with set_module_args({ - 'path': '/tmp/test.keytab', - 'principal': 'HTTP/freeipa-dc02.ipa.test', - 'ipa_host': 'freeipa-dc01.ipa.test', - 'state': 'present' - }): + with set_module_args( + { + "path": "/tmp/test.keytab", + "principal": "HTTP/freeipa-dc02.ipa.test", + "ipa_host": "freeipa-dc01.ipa.test", + "state": "present", + } + ): self.module_main_command.side_effect = [ - (0, '{}', ''), + (0, "{}", ""), ] result = self.module_main(AnsibleExitJson) - self.assertTrue(result['changed']) - self.module_main_command.assert_has_calls([ - call(['/testbin/ipa_getkeytab', - '--keytab', '/tmp/test.keytab', - '--server', 'freeipa-dc01.ipa.test', - '--principal', 'HTTP/freeipa-dc02.ipa.test' - ], - check_rc=True, - environ_update={'LC_ALL': 'C', 'LANGUAGE': 'C'} - ), - ]) + self.assertTrue(result["changed"]) + self.module_main_command.assert_has_calls( + [ + call( + [ + "/testbin/ipa_getkeytab", + "--keytab", + "/tmp/test.keytab", + "--server", + "freeipa-dc01.ipa.test", + "--principal", + "HTTP/freeipa-dc02.ipa.test", + ], + check_rc=True, + environ_update={"LC_ALL": "C", "LANGUAGE": "C"}, + ), + ] + ) diff --git a/tests/unit/plugins/modules/test_ipa_otpconfig.py b/tests/unit/plugins/modules/test_ipa_otpconfig.py index f08a56c7fd7..0ef64a1b9ff 100644 --- a/tests/unit/plugins/modules/test_ipa_otpconfig.py +++ b/tests/unit/plugins/modules/test_ipa_otpconfig.py @@ -1,4 +1,3 @@ - # Copyright (c) 2020, 
Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -9,7 +8,12 @@ from contextlib import contextmanager from unittest.mock import call, patch -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules import ipa_otpconfig @@ -30,8 +34,8 @@ def patch_ipa(**kwargs): ... """ obj = ipa_otpconfig.OTPConfigIPAClient - with patch.object(obj, 'login') as mock_login: - with patch.object(obj, '_post_json', **kwargs) as mock_post: + with patch.object(obj, "login") as mock_login: + with patch.object(obj, "_post_json", **kwargs) as mock_post: yield mock_login, mock_post @@ -82,182 +86,110 @@ def _test_base(self, module_args, return_value, mock_calls, changed): mock_post.assert_not_called() # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_set_all_no_adjustment(self): """Set values requiring no adjustment""" module_args = { - 'ipatokentotpauthwindow': 11, - 'ipatokentotpsyncwindow': 12, - 'ipatokenhotpauthwindow': 13, - 'ipatokenhotpsyncwindow': 14 + "ipatokentotpauthwindow": 11, + "ipatokentotpsyncwindow": 12, + "ipatokenhotpauthwindow": 13, + "ipatokenhotpsyncwindow": 14, } return_value = { - 'ipatokentotpauthwindow': ['11'], - 'ipatokentotpsyncwindow': ['12'], - 'ipatokenhotpauthwindow': ['13'], - 'ipatokenhotpsyncwindow': ['14']} - mock_calls = ( - { - 'method': 'otpconfig_show', - 'name': None - }, - { - 'method': 'otpconfig_show', - 'name': None - } - ) + "ipatokentotpauthwindow": ["11"], + "ipatokentotpsyncwindow": ["12"], + "ipatokenhotpauthwindow": ["13"], + "ipatokenhotpsyncwindow": ["14"], + } + mock_calls = ({"method": "otpconfig_show", "name": None}, {"method": "otpconfig_show", "name": None}) changed = False self._test_base(module_args, return_value, mock_calls, changed) def test_set_all_aliases_no_adjustment(self): """Set values requiring no adjustment on all using aliases values""" - module_args = { - 'totpauthwindow': 11, - 'totpsyncwindow': 12, - 'hotpauthwindow': 13, - 'hotpsyncwindow': 14 - } + module_args = {"totpauthwindow": 11, "totpsyncwindow": 12, "hotpauthwindow": 13, "hotpsyncwindow": 14} return_value = { - 'ipatokentotpauthwindow': ['11'], - 'ipatokentotpsyncwindow': ['12'], - 'ipatokenhotpauthwindow': ['13'], - 'ipatokenhotpsyncwindow': ['14']} - mock_calls = ( - { - 'method': 'otpconfig_show', - 'name': None - }, - { - 'method': 'otpconfig_show', - 'name': None - } - ) + "ipatokentotpauthwindow": ["11"], + "ipatokentotpsyncwindow": ["12"], + "ipatokenhotpauthwindow": ["13"], + "ipatokenhotpsyncwindow": ["14"], + } + mock_calls = ({"method": "otpconfig_show", "name": None}, {"method": "otpconfig_show", "name": None}) changed = False self._test_base(module_args, return_value, mock_calls, changed) def test_set_totp_auth_window_no_adjustment(self): """Set values requiring no adjustment on totpauthwindow""" - module_args = { - 'totpauthwindow': 11 - } + module_args = {"totpauthwindow": 11} return_value = { - 'ipatokentotpauthwindow': ['11'], - 'ipatokentotpsyncwindow': 
['12'], - 'ipatokenhotpauthwindow': ['13'], - 'ipatokenhotpsyncwindow': ['14']} - mock_calls = ( - { - 'method': 'otpconfig_show', - 'name': None - }, - { - 'method': 'otpconfig_show', - 'name': None - } - ) + "ipatokentotpauthwindow": ["11"], + "ipatokentotpsyncwindow": ["12"], + "ipatokenhotpauthwindow": ["13"], + "ipatokenhotpsyncwindow": ["14"], + } + mock_calls = ({"method": "otpconfig_show", "name": None}, {"method": "otpconfig_show", "name": None}) changed = False self._test_base(module_args, return_value, mock_calls, changed) def test_set_totp_sync_window_no_adjustment(self): """Set values requiring no adjustment on totpsyncwindow""" - module_args = { - 'totpsyncwindow': 12 - } + module_args = {"totpsyncwindow": 12} return_value = { - 'ipatokentotpauthwindow': ['11'], - 'ipatokentotpsyncwindow': ['12'], - 'ipatokenhotpauthwindow': ['13'], - 'ipatokenhotpsyncwindow': ['14']} - mock_calls = ( - { - 'method': 'otpconfig_show', - 'name': None - }, - { - 'method': 'otpconfig_show', - 'name': None - } - ) + "ipatokentotpauthwindow": ["11"], + "ipatokentotpsyncwindow": ["12"], + "ipatokenhotpauthwindow": ["13"], + "ipatokenhotpsyncwindow": ["14"], + } + mock_calls = ({"method": "otpconfig_show", "name": None}, {"method": "otpconfig_show", "name": None}) changed = False self._test_base(module_args, return_value, mock_calls, changed) def test_set_hotp_auth_window_no_adjustment(self): """Set values requiring no adjustment on hotpauthwindow""" - module_args = { - 'hotpauthwindow': 13 - } + module_args = {"hotpauthwindow": 13} return_value = { - 'ipatokentotpauthwindow': ['11'], - 'ipatokentotpsyncwindow': ['12'], - 'ipatokenhotpauthwindow': ['13'], - 'ipatokenhotpsyncwindow': ['14']} - mock_calls = ( - { - 'method': 'otpconfig_show', - 'name': None - }, - { - 'method': 'otpconfig_show', - 'name': None - } - ) + "ipatokentotpauthwindow": ["11"], + "ipatokentotpsyncwindow": ["12"], + "ipatokenhotpauthwindow": ["13"], + "ipatokenhotpsyncwindow": ["14"], + } + mock_calls = ({"method": "otpconfig_show", "name": None}, {"method": "otpconfig_show", "name": None}) changed = False self._test_base(module_args, return_value, mock_calls, changed) def test_set_hotp_sync_window_no_adjustment(self): """Set values requiring no adjustment on hotpsyncwindow""" - module_args = { - 'hotpsyncwindow': 14 - } + module_args = {"hotpsyncwindow": 14} return_value = { - 'ipatokentotpauthwindow': ['11'], - 'ipatokentotpsyncwindow': ['12'], - 'ipatokenhotpauthwindow': ['13'], - 'ipatokenhotpsyncwindow': ['14']} - mock_calls = ( - { - 'method': 'otpconfig_show', - 'name': None - }, - { - 'method': 'otpconfig_show', - 'name': None - } - ) + "ipatokentotpauthwindow": ["11"], + "ipatokentotpsyncwindow": ["12"], + "ipatokenhotpauthwindow": ["13"], + "ipatokenhotpsyncwindow": ["14"], + } + mock_calls = ({"method": "otpconfig_show", "name": None}, {"method": "otpconfig_show", "name": None}) changed = False self._test_base(module_args, return_value, mock_calls, changed) def test_set_totp_auth_window(self): """Set values requiring adjustment on totpauthwindow""" - module_args = { - 'totpauthwindow': 10 - } + module_args = {"totpauthwindow": 10} return_value = { - 'ipatokentotpauthwindow': ['11'], - 'ipatokentotpsyncwindow': ['12'], - 'ipatokenhotpauthwindow': ['13'], - 'ipatokenhotpsyncwindow': ['14']} + "ipatokentotpauthwindow": ["11"], + "ipatokentotpsyncwindow": ["12"], + "ipatokenhotpauthwindow": ["13"], + "ipatokenhotpsyncwindow": ["14"], + } mock_calls = ( - { - 'method': 'otpconfig_show', - 'name': None - }, - { - 
'method': 'otpconfig_mod', - 'name': None, - 'item': {'ipatokentotpauthwindow': '10'} - }, - { - 'method': 'otpconfig_show', - 'name': None - } + {"method": "otpconfig_show", "name": None}, + {"method": "otpconfig_mod", "name": None, "item": {"ipatokentotpauthwindow": "10"}}, + {"method": "otpconfig_show", "name": None}, ) changed = True @@ -265,28 +197,17 @@ def test_set_totp_auth_window(self): def test_set_totp_sync_window(self): """Set values requiring adjustment on totpsyncwindow""" - module_args = { - 'totpsyncwindow': 10 - } + module_args = {"totpsyncwindow": 10} return_value = { - 'ipatokentotpauthwindow': ['11'], - 'ipatokentotpsyncwindow': ['12'], - 'ipatokenhotpauthwindow': ['13'], - 'ipatokenhotpsyncwindow': ['14']} + "ipatokentotpauthwindow": ["11"], + "ipatokentotpsyncwindow": ["12"], + "ipatokenhotpauthwindow": ["13"], + "ipatokenhotpsyncwindow": ["14"], + } mock_calls = ( - { - 'method': 'otpconfig_show', - 'name': None - }, - { - 'method': 'otpconfig_mod', - 'name': None, - 'item': {'ipatokentotpsyncwindow': '10'} - }, - { - 'method': 'otpconfig_show', - 'name': None - } + {"method": "otpconfig_show", "name": None}, + {"method": "otpconfig_mod", "name": None, "item": {"ipatokentotpsyncwindow": "10"}}, + {"method": "otpconfig_show", "name": None}, ) changed = True @@ -294,28 +215,17 @@ def test_set_totp_sync_window(self): def test_set_hotp_auth_window(self): """Set values requiring adjustment on hotpauthwindow""" - module_args = { - 'hotpauthwindow': 10 - } + module_args = {"hotpauthwindow": 10} return_value = { - 'ipatokentotpauthwindow': ['11'], - 'ipatokentotpsyncwindow': ['12'], - 'ipatokenhotpauthwindow': ['13'], - 'ipatokenhotpsyncwindow': ['14']} + "ipatokentotpauthwindow": ["11"], + "ipatokentotpsyncwindow": ["12"], + "ipatokenhotpauthwindow": ["13"], + "ipatokenhotpsyncwindow": ["14"], + } mock_calls = ( - { - 'method': 'otpconfig_show', - 'name': None - }, - { - 'method': 'otpconfig_mod', - 'name': None, - 'item': {'ipatokenhotpauthwindow': '10'} - }, - { - 'method': 'otpconfig_show', - 'name': None - } + {"method": "otpconfig_show", "name": None}, + {"method": "otpconfig_mod", "name": None, "item": {"ipatokenhotpauthwindow": "10"}}, + {"method": "otpconfig_show", "name": None}, ) changed = True @@ -323,28 +233,17 @@ def test_set_hotp_auth_window(self): def test_set_hotp_sync_window(self): """Set values requiring adjustment on hotpsyncwindow""" - module_args = { - 'hotpsyncwindow': 10 - } + module_args = {"hotpsyncwindow": 10} return_value = { - 'ipatokentotpauthwindow': ['11'], - 'ipatokentotpsyncwindow': ['12'], - 'ipatokenhotpauthwindow': ['13'], - 'ipatokenhotpsyncwindow': ['14']} + "ipatokentotpauthwindow": ["11"], + "ipatokentotpsyncwindow": ["12"], + "ipatokenhotpauthwindow": ["13"], + "ipatokenhotpsyncwindow": ["14"], + } mock_calls = ( - { - 'method': 'otpconfig_show', - 'name': None - }, - { - 'method': 'otpconfig_mod', - 'name': None, - 'item': {'ipatokenhotpsyncwindow': '10'} - }, - { - 'method': 'otpconfig_show', - 'name': None - } + {"method": "otpconfig_show", "name": None}, + {"method": "otpconfig_mod", "name": None, "item": {"ipatokenhotpsyncwindow": "10"}}, + {"method": "otpconfig_show", "name": None}, ) changed = True @@ -353,33 +252,30 @@ def test_set_hotp_sync_window(self): def test_set_all(self): """Set values requiring adjustment on all""" module_args = { - 'ipatokentotpauthwindow': 11, - 'ipatokentotpsyncwindow': 12, - 'ipatokenhotpauthwindow': 13, - 'ipatokenhotpsyncwindow': 14 + "ipatokentotpauthwindow": 11, + "ipatokentotpsyncwindow": 12, 
+ "ipatokenhotpauthwindow": 13, + "ipatokenhotpsyncwindow": 14, } return_value = { - 'ipatokentotpauthwindow': ['1'], - 'ipatokentotpsyncwindow': ['2'], - 'ipatokenhotpauthwindow': ['3'], - 'ipatokenhotpsyncwindow': ['4']} + "ipatokentotpauthwindow": ["1"], + "ipatokentotpsyncwindow": ["2"], + "ipatokenhotpauthwindow": ["3"], + "ipatokenhotpsyncwindow": ["4"], + } mock_calls = ( + {"method": "otpconfig_show", "name": None}, { - 'method': 'otpconfig_show', - 'name': None + "method": "otpconfig_mod", + "name": None, + "item": { + "ipatokentotpauthwindow": "11", + "ipatokentotpsyncwindow": "12", + "ipatokenhotpauthwindow": "13", + "ipatokenhotpsyncwindow": "14", + }, }, - { - 'method': 'otpconfig_mod', - 'name': None, - 'item': {'ipatokentotpauthwindow': '11', - 'ipatokentotpsyncwindow': '12', - 'ipatokenhotpauthwindow': '13', - 'ipatokenhotpsyncwindow': '14'} - }, - { - 'method': 'otpconfig_show', - 'name': None - } + {"method": "otpconfig_show", "name": None}, ) changed = True @@ -387,18 +283,20 @@ def test_set_all(self): def test_fail_post(self): """Fail due to an exception raised from _post_json""" - with set_module_args({ - 'ipatokentotpauthwindow': 11, - 'ipatokentotpsyncwindow': 12, - 'ipatokenhotpauthwindow': 13, - 'ipatokenhotpsyncwindow': 14 - }): - with patch_ipa(side_effect=Exception('ERROR MESSAGE')) as (mock_login, mock_post): + with set_module_args( + { + "ipatokentotpauthwindow": 11, + "ipatokentotpsyncwindow": 12, + "ipatokenhotpauthwindow": 13, + "ipatokenhotpsyncwindow": 14, + } + ): + with patch_ipa(side_effect=Exception("ERROR MESSAGE")) as (mock_login, mock_post): with self.assertRaises(AnsibleFailJson) as exec_info: self.module.main() - self.assertEqual(exec_info.exception.args[0]['msg'], 'ERROR MESSAGE') + self.assertEqual(exec_info.exception.args[0]["msg"], "ERROR MESSAGE") -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_ipa_otptoken.py b/tests/unit/plugins/modules/test_ipa_otptoken.py index 382571d9132..0f7e3b40fab 100644 --- a/tests/unit/plugins/modules/test_ipa_otptoken.py +++ b/tests/unit/plugins/modules/test_ipa_otptoken.py @@ -1,4 +1,3 @@ - # Copyright (c) 2020, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -9,7 +8,12 @@ from contextlib import contextmanager from unittest.mock import call, patch -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules import ipa_otptoken @@ -30,8 +34,8 @@ def patch_ipa(**kwargs): ... 
""" obj = ipa_otptoken.OTPTokenIPAClient - with patch.object(obj, 'login') as mock_login: - with patch.object(obj, '_post_json', **kwargs) as mock_post: + with patch.object(obj, "login") as mock_login: + with patch.object(obj, "_post_json", **kwargs) as mock_post: yield mock_login, mock_post @@ -81,29 +85,19 @@ def _test_base(self, module_args, return_value, mock_calls, changed): mock_post.assert_not_called() # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_add_new_all_default(self): """Add a new OTP with all default values""" - module_args = { - 'uniqueid': 'NewToken1' - } + module_args = {"uniqueid": "NewToken1"} return_value = {} mock_calls = ( { - 'method': 'otptoken_find', - 'name': None, - 'item': {'all': True, - 'ipatokenuniqueid': 'NewToken1', - 'timelimit': '0', - 'sizelimit': '0'} + "method": "otptoken_find", + "name": None, + "item": {"all": True, "ipatokenuniqueid": "NewToken1", "timelimit": "0", "sizelimit": "0"}, }, - { - 'method': 'otptoken_add', - 'name': 'NewToken1', - 'item': {'ipatokendisabled': False, - 'all': True} - } + {"method": "otptoken_add", "name": "NewToken1", "item": {"ipatokendisabled": False, "all": True}}, ) changed = True @@ -111,25 +105,15 @@ def test_add_new_all_default(self): def test_add_new_all_default_with_aliases(self): """Add a new OTP with all default values using alias values""" - module_args = { - 'name': 'NewToken1' - } + module_args = {"name": "NewToken1"} return_value = {} mock_calls = ( { - 'method': 'otptoken_find', - 'name': None, - 'item': {'all': True, - 'ipatokenuniqueid': 'NewToken1', - 'timelimit': '0', - 'sizelimit': '0'} + "method": "otptoken_find", + "name": None, + "item": {"all": True, "ipatokenuniqueid": "NewToken1", "timelimit": "0", "sizelimit": "0"}, }, - { - 'method': 'otptoken_add', - 'name': 'NewToken1', - 'item': {'ipatokendisabled': False, - 'all': True} - } + {"method": "otptoken_add", "name": "NewToken1", "item": {"ipatokendisabled": False, "all": True}}, ) changed = True @@ -138,54 +122,53 @@ def test_add_new_all_default_with_aliases(self): def test_add_new_all_specified(self): """Add a new OTP with all default values""" module_args = { - 'uniqueid': 'NewToken1', - 'otptype': 'hotp', - 'secretkey': 'VGVzdFNlY3JldDE=', - 'description': 'Test description', - 'owner': 'pinky', - 'enabled': True, - 'notbefore': '20200101010101', - 'notafter': '20900101010101', - 'vendor': 'Acme', - 'model': 'ModelT', - 'serial': 'Number1', - 'state': 'present', - 'algorithm': 'sha256', - 'digits': 6, - 'offset': 10, - 'interval': 30, - 'counter': 30, + "uniqueid": "NewToken1", + "otptype": "hotp", + "secretkey": "VGVzdFNlY3JldDE=", + "description": "Test description", + "owner": "pinky", + "enabled": True, + "notbefore": "20200101010101", + "notafter": "20900101010101", + "vendor": "Acme", + "model": "ModelT", + "serial": "Number1", + "state": "present", + "algorithm": "sha256", + "digits": 6, + "offset": 10, + "interval": 30, + "counter": 30, } return_value = {} mock_calls = ( { - 'method': 'otptoken_find', - 'name': None, - 'item': {'all': True, - 'ipatokenuniqueid': 'NewToken1', - 'timelimit': '0', - 'sizelimit': '0'} + "method": "otptoken_find", + "name": None, + "item": {"all": True, "ipatokenuniqueid": "NewToken1", "timelimit": "0", "sizelimit": "0"}, }, { - 'method': 'otptoken_add', - 'name': 'NewToken1', - 'item': {'type': 'HOTP', - 'ipatokenotpkey': 
'KRSXG5CTMVRXEZLUGE======', - 'description': 'Test description', - 'ipatokenowner': 'pinky', - 'ipatokendisabled': False, - 'ipatokennotbefore': '20200101010101Z', - 'ipatokennotafter': '20900101010101Z', - 'ipatokenvendor': 'Acme', - 'ipatokenmodel': 'ModelT', - 'ipatokenserial': 'Number1', - 'ipatokenotpalgorithm': 'sha256', - 'ipatokenotpdigits': '6', - 'ipatokentotpclockoffset': '10', - 'ipatokentotptimestep': '30', - 'ipatokenhotpcounter': '30', - 'all': True} - } + "method": "otptoken_add", + "name": "NewToken1", + "item": { + "type": "HOTP", + "ipatokenotpkey": "KRSXG5CTMVRXEZLUGE======", + "description": "Test description", + "ipatokenowner": "pinky", + "ipatokendisabled": False, + "ipatokennotbefore": "20200101010101Z", + "ipatokennotafter": "20900101010101Z", + "ipatokenvendor": "Acme", + "ipatokenmodel": "ModelT", + "ipatokenserial": "Number1", + "ipatokenotpalgorithm": "sha256", + "ipatokenotpdigits": "6", + "ipatokentotpclockoffset": "10", + "ipatokentotptimestep": "30", + "ipatokenhotpcounter": "30", + "all": True, + }, + }, ) changed = True @@ -194,48 +177,47 @@ def test_add_new_all_specified(self): def test_already_existing_no_change_all_specified(self): """Add a new OTP with all values specified but needing no change""" module_args = { - 'uniqueid': 'NewToken1', - 'otptype': 'hotp', - 'secretkey': 'VGVzdFNlY3JldDE=', - 'description': 'Test description', - 'owner': 'pinky', - 'enabled': True, - 'notbefore': '20200101010101', - 'notafter': '20900101010101', - 'vendor': 'Acme', - 'model': 'ModelT', - 'serial': 'Number1', - 'state': 'present', - 'algorithm': 'sha256', - 'digits': 6, - 'offset': 10, - 'interval': 30, - 'counter': 30, + "uniqueid": "NewToken1", + "otptype": "hotp", + "secretkey": "VGVzdFNlY3JldDE=", + "description": "Test description", + "owner": "pinky", + "enabled": True, + "notbefore": "20200101010101", + "notafter": "20900101010101", + "vendor": "Acme", + "model": "ModelT", + "serial": "Number1", + "state": "present", + "algorithm": "sha256", + "digits": 6, + "offset": 10, + "interval": 30, + "counter": 30, + } + return_value = { + "ipatokenuniqueid": "NewToken1", + "type": "HOTP", + "ipatokenotpkey": [{"__base64__": "VGVzdFNlY3JldDE="}], + "description": ["Test description"], + "ipatokenowner": ["pinky"], + "ipatokendisabled": [False], + "ipatokennotbefore": ["20200101010101Z"], + "ipatokennotafter": ["20900101010101Z"], + "ipatokenvendor": ["Acme"], + "ipatokenmodel": ["ModelT"], + "ipatokenserial": ["Number1"], + "ipatokenotpalgorithm": ["sha256"], + "ipatokenotpdigits": ["6"], + "ipatokentotpclockoffset": ["10"], + "ipatokentotptimestep": ["30"], + "ipatokenhotpcounter": ["30"], } - return_value = {'ipatokenuniqueid': 'NewToken1', - 'type': 'HOTP', - 'ipatokenotpkey': [{'__base64__': 'VGVzdFNlY3JldDE='}], - 'description': ['Test description'], - 'ipatokenowner': ['pinky'], - 'ipatokendisabled': [False], - 'ipatokennotbefore': ['20200101010101Z'], - 'ipatokennotafter': ['20900101010101Z'], - 'ipatokenvendor': ['Acme'], - 'ipatokenmodel': ['ModelT'], - 'ipatokenserial': ['Number1'], - 'ipatokenotpalgorithm': ['sha256'], - 'ipatokenotpdigits': ['6'], - 'ipatokentotpclockoffset': ['10'], - 'ipatokentotptimestep': ['30'], - 'ipatokenhotpcounter': ['30']} mock_calls = [ { - 'method': 'otptoken_find', - 'name': None, - 'item': {'all': True, - 'ipatokenuniqueid': 'NewToken1', - 'timelimit': '0', - 'sizelimit': '0'} + "method": "otptoken_find", + "name": None, + "item": {"all": True, "ipatokenuniqueid": "NewToken1", "timelimit": "0", "sizelimit": "0"}, } ] 
changed = False @@ -245,62 +227,63 @@ def test_already_existing_no_change_all_specified(self): def test_already_existing_one_change_all_specified(self): """Modify an existing OTP with one value specified needing change""" module_args = { - 'uniqueid': 'NewToken1', - 'otptype': 'hotp', - 'secretkey': 'VGVzdFNlY3JldDE=', - 'description': 'Test description', - 'owner': 'brain', - 'enabled': True, - 'notbefore': '20200101010101', - 'notafter': '20900101010101', - 'vendor': 'Acme', - 'model': 'ModelT', - 'serial': 'Number1', - 'state': 'present', - 'algorithm': 'sha256', - 'digits': 6, - 'offset': 10, - 'interval': 30, - 'counter': 30, + "uniqueid": "NewToken1", + "otptype": "hotp", + "secretkey": "VGVzdFNlY3JldDE=", + "description": "Test description", + "owner": "brain", + "enabled": True, + "notbefore": "20200101010101", + "notafter": "20900101010101", + "vendor": "Acme", + "model": "ModelT", + "serial": "Number1", + "state": "present", + "algorithm": "sha256", + "digits": 6, + "offset": 10, + "interval": 30, + "counter": 30, + } + return_value = { + "ipatokenuniqueid": "NewToken1", + "type": "HOTP", + "ipatokenotpkey": [{"__base64__": "VGVzdFNlY3JldDE="}], + "description": ["Test description"], + "ipatokenowner": ["pinky"], + "ipatokendisabled": [False], + "ipatokennotbefore": ["20200101010101Z"], + "ipatokennotafter": ["20900101010101Z"], + "ipatokenvendor": ["Acme"], + "ipatokenmodel": ["ModelT"], + "ipatokenserial": ["Number1"], + "ipatokenotpalgorithm": ["sha256"], + "ipatokenotpdigits": ["6"], + "ipatokentotpclockoffset": ["10"], + "ipatokentotptimestep": ["30"], + "ipatokenhotpcounter": ["30"], } - return_value = {'ipatokenuniqueid': 'NewToken1', - 'type': 'HOTP', - 'ipatokenotpkey': [{'__base64__': 'VGVzdFNlY3JldDE='}], - 'description': ['Test description'], - 'ipatokenowner': ['pinky'], - 'ipatokendisabled': [False], - 'ipatokennotbefore': ['20200101010101Z'], - 'ipatokennotafter': ['20900101010101Z'], - 'ipatokenvendor': ['Acme'], - 'ipatokenmodel': ['ModelT'], - 'ipatokenserial': ['Number1'], - 'ipatokenotpalgorithm': ['sha256'], - 'ipatokenotpdigits': ['6'], - 'ipatokentotpclockoffset': ['10'], - 'ipatokentotptimestep': ['30'], - 'ipatokenhotpcounter': ['30']} mock_calls = ( { - 'method': 'otptoken_find', - 'name': None, - 'item': {'all': True, - 'ipatokenuniqueid': 'NewToken1', - 'timelimit': '0', - 'sizelimit': '0'} + "method": "otptoken_find", + "name": None, + "item": {"all": True, "ipatokenuniqueid": "NewToken1", "timelimit": "0", "sizelimit": "0"}, }, { - 'method': 'otptoken_mod', - 'name': 'NewToken1', - 'item': {'description': 'Test description', - 'ipatokenowner': 'brain', - 'ipatokendisabled': False, - 'ipatokennotbefore': '20200101010101Z', - 'ipatokennotafter': '20900101010101Z', - 'ipatokenvendor': 'Acme', - 'ipatokenmodel': 'ModelT', - 'ipatokenserial': 'Number1', - 'all': True} - } + "method": "otptoken_mod", + "name": "NewToken1", + "item": { + "description": "Test description", + "ipatokenowner": "brain", + "ipatokendisabled": False, + "ipatokennotbefore": "20200101010101Z", + "ipatokennotafter": "20900101010101Z", + "ipatokenvendor": "Acme", + "ipatokenmodel": "ModelT", + "ipatokenserial": "Number1", + "all": True, + }, + }, ) changed = True @@ -309,62 +292,63 @@ def test_already_existing_one_change_all_specified(self): def test_already_existing_all_valid_change_all_specified(self): """Modify an existing OTP with all valid values specified needing change""" module_args = { - 'uniqueid': 'NewToken1', - 'otptype': 'hotp', - 'secretkey': 'VGVzdFNlY3JldDE=', - 
'description': 'New Test description', - 'owner': 'pinky', - 'enabled': False, - 'notbefore': '20200101010102', - 'notafter': '20900101010102', - 'vendor': 'NewAcme', - 'model': 'NewModelT', - 'serial': 'Number2', - 'state': 'present', - 'algorithm': 'sha256', - 'digits': 6, - 'offset': 10, - 'interval': 30, - 'counter': 30, + "uniqueid": "NewToken1", + "otptype": "hotp", + "secretkey": "VGVzdFNlY3JldDE=", + "description": "New Test description", + "owner": "pinky", + "enabled": False, + "notbefore": "20200101010102", + "notafter": "20900101010102", + "vendor": "NewAcme", + "model": "NewModelT", + "serial": "Number2", + "state": "present", + "algorithm": "sha256", + "digits": 6, + "offset": 10, + "interval": 30, + "counter": 30, + } + return_value = { + "ipatokenuniqueid": "NewToken1", + "type": "HOTP", + "ipatokenotpkey": [{"__base64__": "VGVzdFNlY3JldDE="}], + "description": ["Test description"], + "ipatokenowner": ["pinky"], + "ipatokendisabled": [False], + "ipatokennotbefore": ["20200101010101Z"], + "ipatokennotafter": ["20900101010101Z"], + "ipatokenvendor": ["Acme"], + "ipatokenmodel": ["ModelT"], + "ipatokenserial": ["Number1"], + "ipatokenotpalgorithm": ["sha256"], + "ipatokenotpdigits": ["6"], + "ipatokentotpclockoffset": ["10"], + "ipatokentotptimestep": ["30"], + "ipatokenhotpcounter": ["30"], } - return_value = {'ipatokenuniqueid': 'NewToken1', - 'type': 'HOTP', - 'ipatokenotpkey': [{'__base64__': 'VGVzdFNlY3JldDE='}], - 'description': ['Test description'], - 'ipatokenowner': ['pinky'], - 'ipatokendisabled': [False], - 'ipatokennotbefore': ['20200101010101Z'], - 'ipatokennotafter': ['20900101010101Z'], - 'ipatokenvendor': ['Acme'], - 'ipatokenmodel': ['ModelT'], - 'ipatokenserial': ['Number1'], - 'ipatokenotpalgorithm': ['sha256'], - 'ipatokenotpdigits': ['6'], - 'ipatokentotpclockoffset': ['10'], - 'ipatokentotptimestep': ['30'], - 'ipatokenhotpcounter': ['30']} mock_calls = ( { - 'method': 'otptoken_find', - 'name': None, - 'item': {'all': True, - 'ipatokenuniqueid': 'NewToken1', - 'timelimit': '0', - 'sizelimit': '0'} + "method": "otptoken_find", + "name": None, + "item": {"all": True, "ipatokenuniqueid": "NewToken1", "timelimit": "0", "sizelimit": "0"}, }, { - 'method': 'otptoken_mod', - 'name': 'NewToken1', - 'item': {'description': 'New Test description', - 'ipatokenowner': 'pinky', - 'ipatokendisabled': True, - 'ipatokennotbefore': '20200101010102Z', - 'ipatokennotafter': '20900101010102Z', - 'ipatokenvendor': 'NewAcme', - 'ipatokenmodel': 'NewModelT', - 'ipatokenserial': 'Number2', - 'all': True} - } + "method": "otptoken_mod", + "name": "NewToken1", + "item": { + "description": "New Test description", + "ipatokenowner": "pinky", + "ipatokendisabled": True, + "ipatokennotbefore": "20200101010102Z", + "ipatokennotafter": "20900101010102Z", + "ipatokenvendor": "NewAcme", + "ipatokenmodel": "NewModelT", + "ipatokenserial": "Number2", + "all": True, + }, + }, ) changed = True @@ -372,39 +356,32 @@ def test_already_existing_all_valid_change_all_specified(self): def test_delete_existing_token(self): """Delete an existing OTP""" - module_args = { - 'uniqueid': 'NewToken1', - 'state': 'absent' + module_args = {"uniqueid": "NewToken1", "state": "absent"} + return_value = { + "ipatokenuniqueid": "NewToken1", + "type": "HOTP", + "ipatokenotpkey": [{"__base64__": "KRSXG5CTMVRXEZLUGE======"}], + "description": ["Test description"], + "ipatokenowner": ["pinky"], + "ipatokendisabled": [False], + "ipatokennotbefore": ["20200101010101Z"], + "ipatokennotafter": ["20900101010101Z"], + 
"ipatokenvendor": ["Acme"], + "ipatokenmodel": ["ModelT"], + "ipatokenserial": ["Number1"], + "ipatokenotpalgorithm": ["sha256"], + "ipatokenotpdigits": ["6"], + "ipatokentotpclockoffset": ["10"], + "ipatokentotptimestep": ["30"], + "ipatokenhotpcounter": ["30"], } - return_value = {'ipatokenuniqueid': 'NewToken1', - 'type': 'HOTP', - 'ipatokenotpkey': [{'__base64__': 'KRSXG5CTMVRXEZLUGE======'}], - 'description': ['Test description'], - 'ipatokenowner': ['pinky'], - 'ipatokendisabled': [False], - 'ipatokennotbefore': ['20200101010101Z'], - 'ipatokennotafter': ['20900101010101Z'], - 'ipatokenvendor': ['Acme'], - 'ipatokenmodel': ['ModelT'], - 'ipatokenserial': ['Number1'], - 'ipatokenotpalgorithm': ['sha256'], - 'ipatokenotpdigits': ['6'], - 'ipatokentotpclockoffset': ['10'], - 'ipatokentotptimestep': ['30'], - 'ipatokenhotpcounter': ['30']} mock_calls = ( { - 'method': 'otptoken_find', - 'name': None, - 'item': {'all': True, - 'ipatokenuniqueid': 'NewToken1', - 'timelimit': '0', - 'sizelimit': '0'} + "method": "otptoken_find", + "name": None, + "item": {"all": True, "ipatokenuniqueid": "NewToken1", "timelimit": "0", "sizelimit": "0"}, }, - { - 'method': 'otptoken_del', - 'name': 'NewToken1' - } + {"method": "otptoken_del", "name": "NewToken1"}, ) changed = True @@ -412,42 +389,32 @@ def test_delete_existing_token(self): def test_disable_existing_token(self): """Disable an existing OTP""" - module_args = { - 'uniqueid': 'NewToken1', - 'otptype': 'hotp', - 'enabled': False + module_args = {"uniqueid": "NewToken1", "otptype": "hotp", "enabled": False} + return_value = { + "ipatokenuniqueid": "NewToken1", + "type": "HOTP", + "ipatokenotpkey": [{"__base64__": "KRSXG5CTMVRXEZLUGE======"}], + "description": ["Test description"], + "ipatokenowner": ["pinky"], + "ipatokendisabled": [False], + "ipatokennotbefore": ["20200101010101Z"], + "ipatokennotafter": ["20900101010101Z"], + "ipatokenvendor": ["Acme"], + "ipatokenmodel": ["ModelT"], + "ipatokenserial": ["Number1"], + "ipatokenotpalgorithm": ["sha256"], + "ipatokenotpdigits": ["6"], + "ipatokentotpclockoffset": ["10"], + "ipatokentotptimestep": ["30"], + "ipatokenhotpcounter": ["30"], } - return_value = {'ipatokenuniqueid': 'NewToken1', - 'type': 'HOTP', - 'ipatokenotpkey': [{'__base64__': 'KRSXG5CTMVRXEZLUGE======'}], - 'description': ['Test description'], - 'ipatokenowner': ['pinky'], - 'ipatokendisabled': [False], - 'ipatokennotbefore': ['20200101010101Z'], - 'ipatokennotafter': ['20900101010101Z'], - 'ipatokenvendor': ['Acme'], - 'ipatokenmodel': ['ModelT'], - 'ipatokenserial': ['Number1'], - 'ipatokenotpalgorithm': ['sha256'], - 'ipatokenotpdigits': ['6'], - 'ipatokentotpclockoffset': ['10'], - 'ipatokentotptimestep': ['30'], - 'ipatokenhotpcounter': ['30']} mock_calls = ( { - 'method': 'otptoken_find', - 'name': None, - 'item': {'all': True, - 'ipatokenuniqueid': 'NewToken1', - 'timelimit': '0', - 'sizelimit': '0'} + "method": "otptoken_find", + "name": None, + "item": {"all": True, "ipatokenuniqueid": "NewToken1", "timelimit": "0", "sizelimit": "0"}, }, - { - 'method': 'otptoken_mod', - 'name': 'NewToken1', - 'item': {'ipatokendisabled': True, - 'all': True} - } + {"method": "otptoken_mod", "name": "NewToken1", "item": {"ipatokendisabled": True, "all": True}}, ) changed = True @@ -455,20 +422,14 @@ def test_disable_existing_token(self): def test_delete_not_existing_token(self): """Delete a OTP that does not exist""" - module_args = { - 'uniqueid': 'NewToken1', - 'state': 'absent' - } + module_args = {"uniqueid": "NewToken1", "state": 
"absent"} return_value = {} mock_calls = [ { - 'method': 'otptoken_find', - 'name': None, - 'item': {'all': True, - 'ipatokenuniqueid': 'NewToken1', - 'timelimit': '0', - 'sizelimit': '0'} + "method": "otptoken_find", + "name": None, + "item": {"all": True, "ipatokenuniqueid": "NewToken1", "timelimit": "0", "sizelimit": "0"}, } ] @@ -478,15 +439,13 @@ def test_delete_not_existing_token(self): def test_fail_post(self): """Fail due to an exception raised from _post_json""" - with set_module_args({ - 'uniqueid': 'NewToken1' - }): - with patch_ipa(side_effect=Exception('ERROR MESSAGE')) as (mock_login, mock_post): + with set_module_args({"uniqueid": "NewToken1"}): + with patch_ipa(side_effect=Exception("ERROR MESSAGE")) as (mock_login, mock_post): with self.assertRaises(AnsibleFailJson) as exec_info: self.module.main() - self.assertEqual(exec_info.exception.args[0]['msg'], 'ERROR MESSAGE') + self.assertEqual(exec_info.exception.args[0]["msg"], "ERROR MESSAGE") -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_ipa_pwpolicy.py b/tests/unit/plugins/modules/test_ipa_pwpolicy.py index be680dd0924..2d9e3bc2660 100644 --- a/tests/unit/plugins/modules/test_ipa_pwpolicy.py +++ b/tests/unit/plugins/modules/test_ipa_pwpolicy.py @@ -1,4 +1,3 @@ - # Copyright (c) 2020, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -9,7 +8,12 @@ from contextlib import contextmanager from unittest.mock import call, patch -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules import ipa_pwpolicy @@ -30,8 +34,8 @@ def patch_ipa(**kwargs): ... 
""" obj = ipa_pwpolicy.PwPolicyIPAClient - with patch.object(obj, 'login') as mock_login: - with patch.object(obj, '_post_json', **kwargs) as mock_post: + with patch.object(obj, "login") as mock_login: + with patch.object(obj, "_post_json", **kwargs) as mock_post: yield mock_login, mock_post @@ -82,58 +86,51 @@ def _test_base(self, module_args, return_value, mock_calls, changed): mock_post.assert_not_called() # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_add(self): """Add a new policy""" module_args = { - 'group': 'admins', - 'state': 'present', - 'priority': '10', - 'maxpwdlife': '90', - 'minpwdlife': '1', - 'historylength': '8', - 'minclasses': '3', - 'minlength': '16', - 'maxfailcount': '6', - 'failinterval': '60', - 'lockouttime': '600', - 'gracelimit': 3, - 'maxrepeat': 3, - 'maxsequence': 3, - 'dictcheck': True, - 'usercheck': True, + "group": "admins", + "state": "present", + "priority": "10", + "maxpwdlife": "90", + "minpwdlife": "1", + "historylength": "8", + "minclasses": "3", + "minlength": "16", + "maxfailcount": "6", + "failinterval": "60", + "lockouttime": "600", + "gracelimit": 3, + "maxrepeat": 3, + "maxsequence": 3, + "dictcheck": True, + "usercheck": True, } return_value = {} mock_calls = ( + {"method": "pwpolicy_find", "name": None, "item": {"all": True, "cn": "admins"}}, { - 'method': 'pwpolicy_find', - 'name': None, - 'item': { - 'all': True, - 'cn': 'admins' - } + "method": "pwpolicy_add", + "name": "admins", + "item": { + "cospriority": "10", + "krbmaxpwdlife": "90", + "krbminpwdlife": "1", + "krbpwdhistorylength": "8", + "krbpwdmindiffchars": "3", + "krbpwdminlength": "16", + "krbpwdmaxfailure": "6", + "krbpwdfailurecountinterval": "60", + "krbpwdlockoutduration": "600", + "passwordgracelimit": "3", + "ipapwdmaxrepeat": "3", + "ipapwdmaxsequence": "3", + "ipapwddictcheck": True, + "ipapwdusercheck": True, + }, }, - { - 'method': 'pwpolicy_add', - 'name': 'admins', - 'item': { - 'cospriority': '10', - 'krbmaxpwdlife': '90', - 'krbminpwdlife': '1', - 'krbpwdhistorylength': '8', - 'krbpwdmindiffchars': '3', - 'krbpwdminlength': '16', - 'krbpwdmaxfailure': '6', - 'krbpwdfailurecountinterval': '60', - 'krbpwdlockoutduration': '600', - 'passwordgracelimit': '3', - 'ipapwdmaxrepeat': '3', - 'ipapwdmaxsequence': '3', - 'ipapwddictcheck': True, - 'ipapwdusercheck': True, - } - } ) changed = True @@ -142,53 +139,46 @@ def test_add(self): def test_aliases(self): """Same as test_add, but uses the `name` alias for the `group` option""" module_args = { - 'name': 'admins', - 'state': 'present', - 'priority': '10', - 'maxpwdlife': '90', - 'minpwdlife': '1', - 'historylength': '8', - 'minclasses': '3', - 'minlength': '16', - 'maxfailcount': '6', - 'failinterval': '60', - 'lockouttime': '600', - 'gracelimit': 3, - 'maxrepeat': 3, - 'maxsequence': 3, - 'dictcheck': True, - 'usercheck': True, + "name": "admins", + "state": "present", + "priority": "10", + "maxpwdlife": "90", + "minpwdlife": "1", + "historylength": "8", + "minclasses": "3", + "minlength": "16", + "maxfailcount": "6", + "failinterval": "60", + "lockouttime": "600", + "gracelimit": 3, + "maxrepeat": 3, + "maxsequence": 3, + "dictcheck": True, + "usercheck": True, } return_value = {} mock_calls = ( + {"method": "pwpolicy_find", "name": None, "item": {"all": True, "cn": "admins"}}, { - 'method': 'pwpolicy_find', - 'name': None, - 'item': { - 'all': True, - 'cn': 
'admins' - } + "method": "pwpolicy_add", + "name": "admins", + "item": { + "cospriority": "10", + "krbmaxpwdlife": "90", + "krbminpwdlife": "1", + "krbpwdhistorylength": "8", + "krbpwdmindiffchars": "3", + "krbpwdminlength": "16", + "krbpwdmaxfailure": "6", + "krbpwdfailurecountinterval": "60", + "krbpwdlockoutduration": "600", + "passwordgracelimit": "3", + "ipapwdmaxrepeat": "3", + "ipapwdmaxsequence": "3", + "ipapwddictcheck": True, + "ipapwdusercheck": True, + }, }, - { - 'method': 'pwpolicy_add', - 'name': 'admins', - 'item': { - 'cospriority': '10', - 'krbmaxpwdlife': '90', - 'krbminpwdlife': '1', - 'krbpwdhistorylength': '8', - 'krbpwdmindiffchars': '3', - 'krbpwdminlength': '16', - 'krbpwdmaxfailure': '6', - 'krbpwdfailurecountinterval': '60', - 'krbpwdlockoutduration': '600', - 'passwordgracelimit': '3', - 'ipapwdmaxrepeat': '3', - 'ipapwdmaxsequence': '3', - 'ipapwddictcheck': True, - 'ipapwdusercheck': True, - } - } ) changed = True @@ -197,71 +187,64 @@ def test_aliases(self): def test_mod_different_args(self): """Policy exists, but some of the args are different and need to be modified""" module_args = { - 'group': 'sysops', - 'state': 'present', - 'priority': '10', - 'maxpwdlife': '60', - 'minpwdlife': '24', - 'historylength': '8', - 'minclasses': '3', - 'minlength': '12', - 'maxfailcount': '8', - 'failinterval': '60', - 'lockouttime': '600', - 'gracelimit': 3, - 'maxrepeat': 3, - 'maxsequence': 3, - 'dictcheck': True, - 'usercheck': True, + "group": "sysops", + "state": "present", + "priority": "10", + "maxpwdlife": "60", + "minpwdlife": "24", + "historylength": "8", + "minclasses": "3", + "minlength": "12", + "maxfailcount": "8", + "failinterval": "60", + "lockouttime": "600", + "gracelimit": 3, + "maxrepeat": 3, + "maxsequence": 3, + "dictcheck": True, + "usercheck": True, } return_value = { - 'cn': ['sysops'], - 'cospriority': ['10'], - 'krbmaxpwdlife': ['90'], - 'krbminpwdlife': ['1'], - 'krbpwdhistorylength': ['8'], - 'krbpwdmindiffchars': ['3'], - 'krbpwdminlength': ['16'], - 'krbpwdmaxfailure': ['6'], - 'krbpwdfailurecountinterval': ['60'], - 'krbpwdlockoutduration': ['600'], - 'passwordgracelimit': ['3'], - 'ipapwdmaxrepeat': ['3'], - 'ipapwdmaxsequence': ['3'], - 'ipapwddictcheck': [True], - 'ipapwdusercheck': [True], - 'dn': 'cn=sysops,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com', - 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy'] + "cn": ["sysops"], + "cospriority": ["10"], + "krbmaxpwdlife": ["90"], + "krbminpwdlife": ["1"], + "krbpwdhistorylength": ["8"], + "krbpwdmindiffchars": ["3"], + "krbpwdminlength": ["16"], + "krbpwdmaxfailure": ["6"], + "krbpwdfailurecountinterval": ["60"], + "krbpwdlockoutduration": ["600"], + "passwordgracelimit": ["3"], + "ipapwdmaxrepeat": ["3"], + "ipapwdmaxsequence": ["3"], + "ipapwddictcheck": [True], + "ipapwdusercheck": [True], + "dn": "cn=sysops,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com", + "objectclass": ["top", "nscontainer", "krbpwdpolicy"], } mock_calls = ( + {"method": "pwpolicy_find", "name": None, "item": {"all": True, "cn": "sysops"}}, { - 'method': 'pwpolicy_find', - 'name': None, - 'item': { - 'all': True, - 'cn': 'sysops' - } + "method": "pwpolicy_mod", + "name": "sysops", + "item": { + "cospriority": "10", + "krbmaxpwdlife": "60", + "krbminpwdlife": "24", + "krbpwdhistorylength": "8", + "krbpwdmindiffchars": "3", + "krbpwdminlength": "12", + "krbpwdmaxfailure": "8", + "krbpwdfailurecountinterval": "60", + "krbpwdlockoutduration": "600", + "passwordgracelimit": "3", + "ipapwdmaxrepeat": "3", + 
"ipapwdmaxsequence": "3", + "ipapwddictcheck": True, + "ipapwdusercheck": True, + }, }, - { - 'method': 'pwpolicy_mod', - 'name': 'sysops', - 'item': { - 'cospriority': '10', - 'krbmaxpwdlife': '60', - 'krbminpwdlife': '24', - 'krbpwdhistorylength': '8', - 'krbpwdmindiffchars': '3', - 'krbpwdminlength': '12', - 'krbpwdmaxfailure': '8', - 'krbpwdfailurecountinterval': '60', - 'krbpwdlockoutduration': '600', - 'passwordgracelimit': '3', - 'ipapwdmaxrepeat': '3', - 'ipapwdmaxsequence': '3', - 'ipapwddictcheck': True, - 'ipapwdusercheck': True, - } - } ) changed = True @@ -270,62 +253,55 @@ def test_mod_different_args(self): def test_mod_missing_args(self): """Policy exists, but some of the args aren't set, so need to be added""" module_args = { - 'group': 'sysops', - 'state': 'present', - 'priority': '10', - 'maxpwdlife': '90', - 'minpwdlife': '1', - 'historylength': '8', - 'minclasses': '3', - 'minlength': '16', - 'maxfailcount': '6', - 'failinterval': '60', - 'lockouttime': '600', - 'gracelimit': 3, - 'maxrepeat': 3, - 'maxsequence': 3, - 'dictcheck': True, - 'usercheck': True, + "group": "sysops", + "state": "present", + "priority": "10", + "maxpwdlife": "90", + "minpwdlife": "1", + "historylength": "8", + "minclasses": "3", + "minlength": "16", + "maxfailcount": "6", + "failinterval": "60", + "lockouttime": "600", + "gracelimit": 3, + "maxrepeat": 3, + "maxsequence": 3, + "dictcheck": True, + "usercheck": True, } return_value = { - 'cn': ['sysops'], - 'cospriority': ['10'], - 'krbmaxpwdlife': ['90'], - 'krbpwdhistorylength': ['8'], - 'krbpwdminlength': ['16'], - 'krbpwdmaxfailure': ['6'], - 'dn': 'cn=sysops,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com', - 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy'] + "cn": ["sysops"], + "cospriority": ["10"], + "krbmaxpwdlife": ["90"], + "krbpwdhistorylength": ["8"], + "krbpwdminlength": ["16"], + "krbpwdmaxfailure": ["6"], + "dn": "cn=sysops,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com", + "objectclass": ["top", "nscontainer", "krbpwdpolicy"], } mock_calls = ( + {"method": "pwpolicy_find", "name": None, "item": {"all": True, "cn": "sysops"}}, { - 'method': 'pwpolicy_find', - 'name': None, - 'item': { - 'all': True, - 'cn': 'sysops' - } + "method": "pwpolicy_mod", + "name": "sysops", + "item": { + "cospriority": "10", + "krbmaxpwdlife": "90", + "krbminpwdlife": "1", + "krbpwdhistorylength": "8", + "krbpwdmindiffchars": "3", + "krbpwdminlength": "16", + "krbpwdmaxfailure": "6", + "krbpwdfailurecountinterval": "60", + "krbpwdlockoutduration": "600", + "passwordgracelimit": "3", + "ipapwdmaxrepeat": "3", + "ipapwdmaxsequence": "3", + "ipapwddictcheck": True, + "ipapwdusercheck": True, + }, }, - { - 'method': 'pwpolicy_mod', - 'name': 'sysops', - 'item': { - 'cospriority': '10', - 'krbmaxpwdlife': '90', - 'krbminpwdlife': '1', - 'krbpwdhistorylength': '8', - 'krbpwdmindiffchars': '3', - 'krbpwdminlength': '16', - 'krbpwdmaxfailure': '6', - 'krbpwdfailurecountinterval': '60', - 'krbpwdlockoutduration': '600', - 'passwordgracelimit': '3', - 'ipapwdmaxrepeat': '3', - 'ipapwdmaxsequence': '3', - 'ipapwddictcheck': True, - 'ipapwdusercheck': True, - } - } ) changed = True @@ -334,38 +310,31 @@ def test_mod_missing_args(self): def test_del(self): """Policy exists, and state is absent. 
Needs to be deleted""" module_args = { - 'group': 'sysops', - 'state': 'absent', + "group": "sysops", + "state": "absent", # other arguments are ignored when state is `absent` - 'priority': '10', - 'maxpwdlife': '90', - 'historylength': '8', - 'minlength': '16', - 'maxfailcount': '6' + "priority": "10", + "maxpwdlife": "90", + "historylength": "8", + "minlength": "16", + "maxfailcount": "6", } return_value = { - 'cn': ['sysops'], - 'cospriority': ['10'], - 'krbmaxpwdlife': ['90'], - 'krbpwdhistorylength': ['8'], - 'krbpwdminlength': ['16'], - 'krbpwdmaxfailure': ['6'], - 'dn': 'cn=sysops,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com', - 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy'] + "cn": ["sysops"], + "cospriority": ["10"], + "krbmaxpwdlife": ["90"], + "krbpwdhistorylength": ["8"], + "krbpwdminlength": ["16"], + "krbpwdmaxfailure": ["6"], + "dn": "cn=sysops,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com", + "objectclass": ["top", "nscontainer", "krbpwdpolicy"], } mock_calls = ( + {"method": "pwpolicy_find", "name": None, "item": {"all": True, "cn": "sysops"}}, { - 'method': 'pwpolicy_find', - 'name': None, - 'item': { - 'all': True, - 'cn': 'sysops' - } + "method": "pwpolicy_del", + "name": "sysops", }, - { - 'method': 'pwpolicy_del', - 'name': 'sysops', - } ) changed = True @@ -374,52 +343,43 @@ def test_del(self): def test_no_change(self): """Policy already exists. No changes needed""" module_args = { - 'group': 'admins', - 'state': 'present', - 'priority': '10', - 'maxpwdlife': '90', - 'minpwdlife': '1', - 'historylength': '8', - 'minclasses': '3', - 'minlength': '16', - 'maxfailcount': '6', - 'failinterval': '60', - 'lockouttime': '600', - 'gracelimit': 3, - 'maxrepeat': 3, - 'maxsequence': 3, - 'dictcheck': True, - 'usercheck': True, + "group": "admins", + "state": "present", + "priority": "10", + "maxpwdlife": "90", + "minpwdlife": "1", + "historylength": "8", + "minclasses": "3", + "minlength": "16", + "maxfailcount": "6", + "failinterval": "60", + "lockouttime": "600", + "gracelimit": 3, + "maxrepeat": 3, + "maxsequence": 3, + "dictcheck": True, + "usercheck": True, } return_value = { - 'cn': ['admins'], - 'cospriority': ['10'], - 'krbmaxpwdlife': ['90'], - 'krbminpwdlife': ['1'], - 'krbpwdhistorylength': ['8'], - 'krbpwdmindiffchars': ['3'], - 'krbpwdminlength': ['16'], - 'krbpwdmaxfailure': ['6'], - 'krbpwdfailurecountinterval': ['60'], - 'krbpwdlockoutduration': ['600'], - 'passwordgracelimit': ['3'], - 'ipapwdmaxrepeat': ['3'], - 'ipapwdmaxsequence': ['3'], - 'ipapwddictcheck': [True], - 'ipapwdusercheck': [True], - 'dn': 'cn=admins,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com', - 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy'] + "cn": ["admins"], + "cospriority": ["10"], + "krbmaxpwdlife": ["90"], + "krbminpwdlife": ["1"], + "krbpwdhistorylength": ["8"], + "krbpwdmindiffchars": ["3"], + "krbpwdminlength": ["16"], + "krbpwdmaxfailure": ["6"], + "krbpwdfailurecountinterval": ["60"], + "krbpwdlockoutduration": ["600"], + "passwordgracelimit": ["3"], + "ipapwdmaxrepeat": ["3"], + "ipapwdmaxsequence": ["3"], + "ipapwddictcheck": [True], + "ipapwdusercheck": [True], + "dn": "cn=admins,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com", + "objectclass": ["top", "nscontainer", "krbpwdpolicy"], } - mock_calls = [ - { - 'method': 'pwpolicy_find', - 'name': None, - 'item': { - 'all': True, - 'cn': 'admins' - } - } - ] + mock_calls = [{"method": "pwpolicy_find", "name": None, "item": {"all": True, "cn": "admins"}}] changed = False self._test_base(module_args, return_value, 
mock_calls, changed) @@ -427,26 +387,17 @@ def test_no_change(self): def test_del_no_change(self): """Policy doesn't exist, and state is absent. No change needed""" module_args = { - 'group': 'sysops', - 'state': 'absent', + "group": "sysops", + "state": "absent", # other arguments are ignored when state is `absent` - 'priority': '10', - 'maxpwdlife': '90', - 'historylength': '8', - 'minlength': '16', - 'maxfailcount': '6' + "priority": "10", + "maxpwdlife": "90", + "historylength": "8", + "minlength": "16", + "maxfailcount": "6", } return_value = {} - mock_calls = [ - { - 'method': 'pwpolicy_find', - 'name': None, - 'item': { - 'all': True, - 'cn': 'sysops' - } - } - ] + mock_calls = [{"method": "pwpolicy_find", "name": None, "item": {"all": True, "cn": "sysops"}}] changed = False self._test_base(module_args, return_value, mock_calls, changed) @@ -454,65 +405,58 @@ def test_del_no_change(self): def test_global(self): """Modify the global policy""" module_args = { - 'maxpwdlife': '60', - 'minpwdlife': '24', - 'historylength': '8', - 'minclasses': '3', - 'minlength': '12', - 'maxfailcount': '8', - 'failinterval': '60', - 'lockouttime': '600', - 'gracelimit': 3, - 'maxrepeat': 3, - 'maxsequence': 3, - 'dictcheck': True, - 'usercheck': True, + "maxpwdlife": "60", + "minpwdlife": "24", + "historylength": "8", + "minclasses": "3", + "minlength": "12", + "maxfailcount": "8", + "failinterval": "60", + "lockouttime": "600", + "gracelimit": 3, + "maxrepeat": 3, + "maxsequence": 3, + "dictcheck": True, + "usercheck": True, } return_value = { - 'cn': ['global_policy'], - 'krbmaxpwdlife': ['90'], - 'krbminpwdlife': ['1'], - 'krbpwdmindiffchars': ['3'], - 'krbpwdminlength': ['16'], - 'krbpwdmaxfailure': ['6'], - 'krbpwdfailurecountinterval': ['60'], - 'krbpwdlockoutduration': ['600'], - 'passwordgracelimit': ['3'], - 'ipapwdmaxrepeat': ['3'], - 'ipapwdmaxsequence': ['3'], - 'ipapwddictcheck': [True], - 'ipapwdusercheck': [True], - 'dn': 'cn=global_policy,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com', - 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy'] + "cn": ["global_policy"], + "krbmaxpwdlife": ["90"], + "krbminpwdlife": ["1"], + "krbpwdmindiffchars": ["3"], + "krbpwdminlength": ["16"], + "krbpwdmaxfailure": ["6"], + "krbpwdfailurecountinterval": ["60"], + "krbpwdlockoutduration": ["600"], + "passwordgracelimit": ["3"], + "ipapwdmaxrepeat": ["3"], + "ipapwdmaxsequence": ["3"], + "ipapwddictcheck": [True], + "ipapwdusercheck": [True], + "dn": "cn=global_policy,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com", + "objectclass": ["top", "nscontainer", "krbpwdpolicy"], } mock_calls = ( + {"method": "pwpolicy_find", "name": None, "item": {"all": True, "cn": "global_policy"}}, { - 'method': 'pwpolicy_find', - 'name': None, - 'item': { - 'all': True, - 'cn': 'global_policy' - } + "method": "pwpolicy_mod", + "name": None, + "item": { + "krbmaxpwdlife": "60", + "krbminpwdlife": "24", + "krbpwdhistorylength": "8", + "krbpwdmindiffchars": "3", + "krbpwdminlength": "12", + "krbpwdmaxfailure": "8", + "krbpwdfailurecountinterval": "60", + "krbpwdlockoutduration": "600", + "passwordgracelimit": "3", + "ipapwdmaxrepeat": "3", + "ipapwdmaxsequence": "3", + "ipapwddictcheck": True, + "ipapwdusercheck": True, + }, }, - { - 'method': 'pwpolicy_mod', - 'name': None, - 'item': { - 'krbmaxpwdlife': '60', - 'krbminpwdlife': '24', - 'krbpwdhistorylength': '8', - 'krbpwdmindiffchars': '3', - 'krbpwdminlength': '12', - 'krbpwdmaxfailure': '8', - 'krbpwdfailurecountinterval': '60', - 'krbpwdlockoutduration': '600', - 
'passwordgracelimit': '3', - 'ipapwdmaxrepeat': '3', - 'ipapwdmaxsequence': '3', - 'ipapwddictcheck': True, - 'ipapwdusercheck': True, - } - } ) changed = True @@ -521,48 +465,39 @@ def test_global(self): def test_global_no_change(self): """Global policy already matches the given arguments. No change needed""" module_args = { - 'maxpwdlife': '90', - 'minpwdlife': '1', - 'historylength': '8', - 'minclasses': '3', - 'minlength': '16', - 'maxfailcount': '6', - 'failinterval': '60', - 'lockouttime': '600', - 'gracelimit': 3, - 'maxrepeat': 3, - 'maxsequence': 3, - 'dictcheck': True, - 'usercheck': True, + "maxpwdlife": "90", + "minpwdlife": "1", + "historylength": "8", + "minclasses": "3", + "minlength": "16", + "maxfailcount": "6", + "failinterval": "60", + "lockouttime": "600", + "gracelimit": 3, + "maxrepeat": 3, + "maxsequence": 3, + "dictcheck": True, + "usercheck": True, } return_value = { - 'cn': ['global_policy'], - 'krbmaxpwdlife': ['90'], - 'krbminpwdlife': ['1'], - 'krbpwdhistorylength': ['8'], - 'krbpwdmindiffchars': ['3'], - 'krbpwdminlength': ['16'], - 'krbpwdmaxfailure': ['6'], - 'krbpwdfailurecountinterval': ['60'], - 'krbpwdlockoutduration': ['600'], - 'passwordgracelimit': ['3'], - 'ipapwdmaxrepeat': ['3'], - 'ipapwdmaxsequence': ['3'], - 'ipapwddictcheck': [True], - 'ipapwdusercheck': [True], - 'dn': 'cn=global_policy,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com', - 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy'] + "cn": ["global_policy"], + "krbmaxpwdlife": ["90"], + "krbminpwdlife": ["1"], + "krbpwdhistorylength": ["8"], + "krbpwdmindiffchars": ["3"], + "krbpwdminlength": ["16"], + "krbpwdmaxfailure": ["6"], + "krbpwdfailurecountinterval": ["60"], + "krbpwdlockoutduration": ["600"], + "passwordgracelimit": ["3"], + "ipapwdmaxrepeat": ["3"], + "ipapwdmaxsequence": ["3"], + "ipapwddictcheck": [True], + "ipapwdusercheck": [True], + "dn": "cn=global_policy,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com", + "objectclass": ["top", "nscontainer", "krbpwdpolicy"], } - mock_calls = [ - { - 'method': 'pwpolicy_find', - 'name': None, - 'item': { - 'all': True, - 'cn': 'global_policy' - } - } - ] + mock_calls = [{"method": "pwpolicy_find", "name": None, "item": {"all": True, "cn": "global_policy"}}] changed = False self._test_base(module_args, return_value, mock_calls, changed) @@ -570,35 +505,26 @@ def test_global_no_change(self): def test_check_add(self): """Add a new policy in check mode. 
pwpolicy_add shouldn't be called""" module_args = { - '_ansible_check_mode': True, - 'group': 'admins', - 'state': 'present', - 'priority': '10', - 'maxpwdlife': '90', - 'minpwdlife': '1', - 'historylength': '8', - 'minclasses': '3', - 'minlength': '16', - 'maxfailcount': '6', - 'failinterval': '60', - 'lockouttime': '600', - 'gracelimit': 3, - 'maxrepeat': 3, - 'maxsequence': 3, - 'dictcheck': True, - 'usercheck': True, + "_ansible_check_mode": True, + "group": "admins", + "state": "present", + "priority": "10", + "maxpwdlife": "90", + "minpwdlife": "1", + "historylength": "8", + "minclasses": "3", + "minlength": "16", + "maxfailcount": "6", + "failinterval": "60", + "lockouttime": "600", + "gracelimit": 3, + "maxrepeat": 3, + "maxsequence": 3, + "dictcheck": True, + "usercheck": True, } return_value = {} - mock_calls = [ - { - 'method': 'pwpolicy_find', - 'name': None, - 'item': { - 'all': True, - 'cn': 'admins' - } - } - ] + mock_calls = [{"method": "pwpolicy_find", "name": None, "item": {"all": True, "cn": "admins"}}] changed = True self._test_base(module_args, return_value, mock_calls, changed) @@ -606,100 +532,75 @@ def test_check_add(self): def test_check_mod(self): """Modify a policy in check mode. pwpolicy_mod shouldn't be called""" module_args = { - '_ansible_check_mode': True, - 'group': 'sysops', - 'state': 'present', - 'priority': '10', - 'maxpwdlife': '60', - 'minpwdlife': '24', - 'historylength': '8', - 'minclasses': '3', - 'minlength': '12', - 'maxfailcount': '8', - 'failinterval': '60', - 'lockouttime': '600', - 'gracelimit': 3, - 'maxrepeat': 3, - 'maxsequence': 3, - 'dictcheck': True, - 'usercheck': True, + "_ansible_check_mode": True, + "group": "sysops", + "state": "present", + "priority": "10", + "maxpwdlife": "60", + "minpwdlife": "24", + "historylength": "8", + "minclasses": "3", + "minlength": "12", + "maxfailcount": "8", + "failinterval": "60", + "lockouttime": "600", + "gracelimit": 3, + "maxrepeat": 3, + "maxsequence": 3, + "dictcheck": True, + "usercheck": True, } return_value = { - 'cn': ['sysops'], - 'cospriority': ['10'], - 'krbmaxpwdlife': ['90'], - 'krbminpwdlife': ['1'], - 'krbpwdhistorylength': ['8'], - 'krbpwdmindiffchars': ['3'], - 'krbpwdminlength': ['16'], - 'krbpwdmaxfailure': ['6'], - 'krbpwdfailurecountinterval': ['60'], - 'krbpwdlockoutduration': ['600'], - 'passwordgracelimit': ['3'], - 'ipapwdmaxrepeat': ['3'], - 'ipapwdmaxsequence': ['3'], - 'ipapwddictcheck': [True], - 'ipapwdusercheck': [True], - 'dn': 'cn=sysops,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com', - 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy'] + "cn": ["sysops"], + "cospriority": ["10"], + "krbmaxpwdlife": ["90"], + "krbminpwdlife": ["1"], + "krbpwdhistorylength": ["8"], + "krbpwdmindiffchars": ["3"], + "krbpwdminlength": ["16"], + "krbpwdmaxfailure": ["6"], + "krbpwdfailurecountinterval": ["60"], + "krbpwdlockoutduration": ["600"], + "passwordgracelimit": ["3"], + "ipapwdmaxrepeat": ["3"], + "ipapwdmaxsequence": ["3"], + "ipapwddictcheck": [True], + "ipapwdusercheck": [True], + "dn": "cn=sysops,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com", + "objectclass": ["top", "nscontainer", "krbpwdpolicy"], } - mock_calls = [ - { - 'method': 'pwpolicy_find', - 'name': None, - 'item': { - 'all': True, - 'cn': 'sysops' - } - } - ] + mock_calls = [{"method": "pwpolicy_find", "name": None, "item": {"all": True, "cn": "sysops"}}] changed = True self._test_base(module_args, return_value, mock_calls, changed) def test_check_del(self): """Delete a policy in check mode. 
pwpolicy_del shouldn't be called""" - module_args = { - '_ansible_check_mode': True, - 'group': 'sysops', - 'state': 'absent' - } + module_args = {"_ansible_check_mode": True, "group": "sysops", "state": "absent"} return_value = { - 'cn': ['sysops'], - 'cospriority': ['10'], - 'krbmaxpwdlife': ['90'], - 'krbpwdhistorylength': ['8'], - 'krbpwdminlength': ['16'], - 'krbpwdmaxfailure': ['6'], - 'dn': 'cn=sysops,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com', - 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy'] + "cn": ["sysops"], + "cospriority": ["10"], + "krbmaxpwdlife": ["90"], + "krbpwdhistorylength": ["8"], + "krbpwdminlength": ["16"], + "krbpwdmaxfailure": ["6"], + "dn": "cn=sysops,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com", + "objectclass": ["top", "nscontainer", "krbpwdpolicy"], } - mock_calls = [ - { - 'method': 'pwpolicy_find', - 'name': None, - 'item': { - 'all': True, - 'cn': 'sysops' - } - } - ] + mock_calls = [{"method": "pwpolicy_find", "name": None, "item": {"all": True, "cn": "sysops"}}] changed = True self._test_base(module_args, return_value, mock_calls, changed) def test_fail_post(self): """Fail due to an exception raised from _post_json""" - with set_module_args({ - 'group': 'admins', - 'state': 'absent' - }): - with patch_ipa(side_effect=Exception('ERROR MESSAGE')) as (mock_login, mock_post): + with set_module_args({"group": "admins", "state": "absent"}): + with patch_ipa(side_effect=Exception("ERROR MESSAGE")) as (mock_login, mock_post): with self.assertRaises(AnsibleFailJson) as exec_info: self.module.main() - self.assertEqual(exec_info.exception.args[0]['msg'], 'ERROR MESSAGE') + self.assertEqual(exec_info.exception.args[0]["msg"], "ERROR MESSAGE") -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_ipbase.py b/tests/unit/plugins/modules/test_ipbase.py index 65eb249f674..4a96df1bf6c 100644 --- a/tests/unit/plugins/modules/test_ipbase.py +++ b/tests/unit/plugins/modules/test_ipbase.py @@ -161,7 +161,9 @@ class TestIpbase(unittest.TestCase): - def test_info(self,): + def test_info( + self, + ): "test the json data extraction" params = { @@ -173,7 +175,7 @@ def test_info(self,): module = Mock() module.params = params - data = json.loads(IPBASE_DATA['response'].decode("utf-8")) + data = json.loads(IPBASE_DATA["response"].decode("utf-8")) IpbaseInfo._get_url_data = Mock() IpbaseInfo._get_url_data.return_value = data diff --git a/tests/unit/plugins/modules/test_java_keystore.py b/tests/unit/plugins/modules/test_java_keystore.py index 689faaa4427..f0af6dac7aa 100644 --- a/tests/unit/plugins/modules/test_java_keystore.py +++ b/tests/unit/plugins/modules/test_java_keystore.py @@ -1,4 +1,3 @@ - # Copyright (c) 2018, Ansible Project # Copyright (c) 2018, Abhijeet Kasurde # @@ -10,27 +9,29 @@ import os from unittest.mock import patch, Mock -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + ModuleTestCase, + set_module_args, +) from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.modules.java_keystore import JavaKeystore module_argument_spec = dict( - name=dict(type='str', required=True), - dest=dict(type='path', required=True), - certificate=dict(type='str', no_log=True), - certificate_path=dict(type='path'), - private_key=dict(type='str', no_log=True), - 
private_key_path=dict(type='path', no_log=False), - private_key_passphrase=dict(type='str', no_log=True), - password=dict(type='str', required=True, no_log=True), - ssl_backend=dict(type='str', default='openssl', choices=['openssl', 'cryptography']), - keystore_type=dict(type='str', choices=['jks', 'pkcs12']), - force=dict(type='bool', default=False), + name=dict(type="str", required=True), + dest=dict(type="path", required=True), + certificate=dict(type="str", no_log=True), + certificate_path=dict(type="path"), + private_key=dict(type="str", no_log=True), + private_key_path=dict(type="path", no_log=False), + private_key_passphrase=dict(type="str", no_log=True), + password=dict(type="str", required=True, no_log=True), + ssl_backend=dict(type="str", default="openssl", choices=["openssl", "cryptography"]), + keystore_type=dict(type="str", choices=["jks", "pkcs12"]), + force=dict(type="bool", default=False), ) module_supports_check_mode = True -module_choose_between = (['certificate', 'certificate_path'], - ['private_key', 'private_key_path']) +module_choose_between = (["certificate", "certificate_path"], ["private_key", "private_key_path"]) class TestCreateJavaKeystore(ModuleTestCase): @@ -41,19 +42,25 @@ def setUp(self): super().setUp() orig_exists = os.path.exists - self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.java_keystore.create_file') - self.mock_create_path = patch('ansible_collections.community.general.plugins.modules.java_keystore.create_path') - self.mock_current_type = patch('ansible_collections.community.general.plugins.modules.java_keystore.JavaKeystore.current_type') - self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command') - self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') - self.mock_preserved_copy = patch('ansible.module_utils.basic.AnsibleModule.preserved_copy') - self.mock_atomic_move = patch('ansible.module_utils.basic.AnsibleModule.atomic_move') - self.mock_os_path_exists = patch('os.path.exists', - side_effect=lambda path: True if path == '/path/to/keystore.jks' else orig_exists(path)) - self.mock_selinux_context = patch('ansible.module_utils.basic.AnsibleModule.selinux_context', - side_effect=lambda path: ['unconfined_u', 'object_r', 'user_home_t', 's0']) - self.mock_is_special_selinux_path = patch('ansible.module_utils.basic.AnsibleModule.is_special_selinux_path', - side_effect=lambda path: (False, None)) + self.mock_create_file = patch("ansible_collections.community.general.plugins.modules.java_keystore.create_file") + self.mock_create_path = patch("ansible_collections.community.general.plugins.modules.java_keystore.create_path") + self.mock_current_type = patch( + "ansible_collections.community.general.plugins.modules.java_keystore.JavaKeystore.current_type" + ) + self.mock_run_command = patch("ansible.module_utils.basic.AnsibleModule.run_command") + self.mock_get_bin_path = patch("ansible.module_utils.basic.AnsibleModule.get_bin_path") + self.mock_preserved_copy = patch("ansible.module_utils.basic.AnsibleModule.preserved_copy") + self.mock_atomic_move = patch("ansible.module_utils.basic.AnsibleModule.atomic_move") + self.mock_os_path_exists = patch( + "os.path.exists", side_effect=lambda path: True if path == "/path/to/keystore.jks" else orig_exists(path) + ) + self.mock_selinux_context = patch( + "ansible.module_utils.basic.AnsibleModule.selinux_context", + side_effect=lambda path: ["unconfined_u", "object_r", "user_home_t", "s0"], + ) + 
self.mock_is_special_selinux_path = patch( + "ansible.module_utils.basic.AnsibleModule.is_special_selinux_path", side_effect=lambda path: (False, None) + ) self.run_command = self.mock_run_command.start() self.get_bin_path = self.mock_get_bin_path.start() self.preserved_copy = self.mock_preserved_copy.start() @@ -80,147 +87,190 @@ def tearDown(self): self.mock_os_path_exists.stop() def test_create_jks_success(self): - with set_module_args(dict( - certificate='cert-foo', - private_key='private-foo', - dest='/path/to/keystore.jks', - name='test', - password='changeit' - )): - + with set_module_args( + dict( + certificate="cert-foo", + private_key="private-foo", + dest="/path/to/keystore.jks", + name="test", + password="changeit", + ) + ): module = AnsibleModule( argument_spec=module_argument_spec, supports_check_mode=module_supports_check_mode, mutually_exclusive=module_choose_between, - required_one_of=module_choose_between + required_one_of=module_choose_between, ) - with patch('os.remove', return_value=True): - self.create_path.side_effect = ['/tmp/tmpgrzm2ah7'] - self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp', ''] - self.run_command.side_effect = [(0, '', ''), (0, '', '')] - self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + with patch("os.remove", return_value=True): + self.create_path.side_effect = ["/tmp/tmpgrzm2ah7"] + self.create_file.side_effect = ["/tmp/etacifitrec", "/tmp/yek_etavirp", ""] + self.run_command.side_effect = [(0, "", ""), (0, "", "")] + self.get_bin_path.side_effect = ["keytool", "openssl", ""] jks = JavaKeystore(module) assert jks.create() == { - 'changed': True, - 'cmd': ["keytool", "-importkeystore", - "-destkeystore", "/path/to/keystore.jks", - "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", - "-noprompt"], - 'msg': '', - 'rc': 0 + "changed": True, + "cmd": [ + "keytool", + "-importkeystore", + "-destkeystore", + "/path/to/keystore.jks", + "-srckeystore", + "/tmp/tmpgrzm2ah7", + "-srcstoretype", + "pkcs12", + "-alias", + "test", + "-noprompt", + ], + "msg": "", + "rc": 0, } def test_create_jks_keypass_fail_export_pkcs12(self): - with set_module_args(dict( - certificate='cert-foo', - private_key='private-foo', - private_key_passphrase='passphrase-foo', - dest='/path/to/keystore.jks', - name='test', - password='changeit' - )): - + with set_module_args( + dict( + certificate="cert-foo", + private_key="private-foo", + private_key_passphrase="passphrase-foo", + dest="/path/to/keystore.jks", + name="test", + password="changeit", + ) + ): module = AnsibleModule( argument_spec=module_argument_spec, supports_check_mode=module_supports_check_mode, mutually_exclusive=module_choose_between, - required_one_of=module_choose_between + required_one_of=module_choose_between, ) module.exit_json = Mock() module.fail_json = Mock() - with patch('os.remove', return_value=True): - self.create_path.side_effect = ['/tmp/tmp1cyp12xa'] - self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c', ''] - self.run_command.side_effect = [(1, '', 'Oops'), (0, '', '')] - self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + with patch("os.remove", return_value=True): + self.create_path.side_effect = ["/tmp/tmp1cyp12xa"] + self.create_file.side_effect = ["/tmp/tmpvalcrt32", "/tmp/tmpwh4key0c", ""] + self.run_command.side_effect = [(1, "", "Oops"), (0, "", "")] + self.get_bin_path.side_effect = ["keytool", "openssl", ""] jks = JavaKeystore(module) jks.create() module.fail_json.assert_called_once_with( - 
cmd=["openssl", "pkcs12", "-export", "-name", "test", - "-in", "/tmp/tmpvalcrt32", - "-inkey", "/tmp/tmpwh4key0c", - "-out", "/tmp/tmp1cyp12xa", - "-passout", "stdin", - "-passin", "stdin"], - msg='', - err='Oops', - rc=1 + cmd=[ + "openssl", + "pkcs12", + "-export", + "-name", + "test", + "-in", + "/tmp/tmpvalcrt32", + "-inkey", + "/tmp/tmpwh4key0c", + "-out", + "/tmp/tmp1cyp12xa", + "-passout", + "stdin", + "-passin", + "stdin", + ], + msg="", + err="Oops", + rc=1, ) def test_create_jks_fail_export_pkcs12(self): - with set_module_args(dict( - certificate='cert-foo', - private_key='private-foo', - dest='/path/to/keystore.jks', - name='test', - password='changeit' - )): - + with set_module_args( + dict( + certificate="cert-foo", + private_key="private-foo", + dest="/path/to/keystore.jks", + name="test", + password="changeit", + ) + ): module = AnsibleModule( argument_spec=module_argument_spec, supports_check_mode=module_supports_check_mode, mutually_exclusive=module_choose_between, - required_one_of=module_choose_between + required_one_of=module_choose_between, ) module.exit_json = Mock() module.fail_json = Mock() - with patch('os.remove', return_value=True): - self.create_path.side_effect = ['/tmp/tmp1cyp12xa'] - self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c', ''] - self.run_command.side_effect = [(1, '', 'Oops'), (0, '', '')] - self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + with patch("os.remove", return_value=True): + self.create_path.side_effect = ["/tmp/tmp1cyp12xa"] + self.create_file.side_effect = ["/tmp/tmpvalcrt32", "/tmp/tmpwh4key0c", ""] + self.run_command.side_effect = [(1, "", "Oops"), (0, "", "")] + self.get_bin_path.side_effect = ["keytool", "openssl", ""] jks = JavaKeystore(module) jks.create() module.fail_json.assert_called_once_with( - cmd=["openssl", "pkcs12", "-export", "-name", "test", - "-in", "/tmp/tmpvalcrt32", - "-inkey", "/tmp/tmpwh4key0c", - "-out", "/tmp/tmp1cyp12xa", - "-passout", "stdin"], - msg='', - err='Oops', - rc=1 + cmd=[ + "openssl", + "pkcs12", + "-export", + "-name", + "test", + "-in", + "/tmp/tmpvalcrt32", + "-inkey", + "/tmp/tmpwh4key0c", + "-out", + "/tmp/tmp1cyp12xa", + "-passout", + "stdin", + ], + msg="", + err="Oops", + rc=1, ) def test_create_jks_fail_import_key(self): - with set_module_args(dict( - certificate='cert-foo', - private_key='private-foo', - dest='/path/to/keystore.jks', - name='test', - password='changeit' - )): - + with set_module_args( + dict( + certificate="cert-foo", + private_key="private-foo", + dest="/path/to/keystore.jks", + name="test", + password="changeit", + ) + ): module = AnsibleModule( argument_spec=module_argument_spec, supports_check_mode=module_supports_check_mode, mutually_exclusive=module_choose_between, - required_one_of=module_choose_between + required_one_of=module_choose_between, ) module.exit_json = Mock() module.fail_json = Mock() - with patch('os.remove', return_value=True): - self.create_path.side_effect = ['/tmp/tmpgrzm2ah7'] - self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp', ''] - self.run_command.side_effect = [(0, '', ''), (1, '', 'Oops')] - self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + with patch("os.remove", return_value=True): + self.create_path.side_effect = ["/tmp/tmpgrzm2ah7"] + self.create_file.side_effect = ["/tmp/etacifitrec", "/tmp/yek_etavirp", ""] + self.run_command.side_effect = [(0, "", ""), (1, "", "Oops")] + self.get_bin_path.side_effect = ["keytool", "openssl", ""] jks = JavaKeystore(module) jks.create() 
module.fail_json.assert_called_once_with( - cmd=["keytool", "-importkeystore", - "-destkeystore", "/path/to/keystore.jks", - "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", - "-noprompt"], - msg='', - err='Oops', - rc=1 + cmd=[ + "keytool", + "-importkeystore", + "-destkeystore", + "/path/to/keystore.jks", + "-srckeystore", + "/tmp/tmpgrzm2ah7", + "-srcstoretype", + "pkcs12", + "-alias", + "test", + "-noprompt", + ], + msg="", + err="Oops", + rc=1, ) @@ -230,12 +280,14 @@ class TestCertChanged(ModuleTestCase): def setUp(self): """Setup.""" super().setUp() - self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.java_keystore.create_file') - self.mock_current_type = patch('ansible_collections.community.general.plugins.modules.java_keystore.JavaKeystore.current_type') - self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command') - self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') - self.mock_preserved_copy = patch('ansible.module_utils.basic.AnsibleModule.preserved_copy') - self.mock_atomic_move = patch('ansible.module_utils.basic.AnsibleModule.atomic_move') + self.mock_create_file = patch("ansible_collections.community.general.plugins.modules.java_keystore.create_file") + self.mock_current_type = patch( + "ansible_collections.community.general.plugins.modules.java_keystore.JavaKeystore.current_type" + ) + self.mock_run_command = patch("ansible.module_utils.basic.AnsibleModule.run_command") + self.mock_get_bin_path = patch("ansible.module_utils.basic.AnsibleModule.get_bin_path") + self.mock_preserved_copy = patch("ansible.module_utils.basic.AnsibleModule.preserved_copy") + self.mock_atomic_move = patch("ansible.module_utils.basic.AnsibleModule.atomic_move") self.run_command = self.mock_run_command.start() self.create_file = self.mock_create_file.start() self.get_bin_path = self.mock_get_bin_path.start() @@ -254,166 +306,176 @@ def tearDown(self): self.mock_atomic_move.stop() def test_cert_unchanged_same_fingerprint(self): - with set_module_args(dict( - certificate='cert-foo', - private_key='private-foo', - dest='/path/to/keystore.jks', - name='foo', - password='changeit' - )): - + with set_module_args( + dict( + certificate="cert-foo", + private_key="private-foo", + dest="/path/to/keystore.jks", + name="foo", + password="changeit", + ) + ): module = AnsibleModule( argument_spec=module_argument_spec, supports_check_mode=module_supports_check_mode, mutually_exclusive=module_choose_between, - required_one_of=module_choose_between + required_one_of=module_choose_between, ) - with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/placeholder', ''] - self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: abcd:1234:efgh', '')] - self.get_bin_path.side_effect = ['keytool', 'openssl', ''] - self.current_type.side_effect = ['jks'] + with patch("os.remove", return_value=True): + self.create_file.side_effect = ["/tmp/placeholder", ""] + self.run_command.side_effect = [(0, "foo=abcd:1234:efgh", ""), (0, "SHA256: abcd:1234:efgh", "")] + self.get_bin_path.side_effect = ["keytool", "openssl", ""] + self.current_type.side_effect = ["jks"] jks = JavaKeystore(module) result = jks.cert_changed() - self.assertFalse(result, 'Fingerprint is identical') + self.assertFalse(result, "Fingerprint is identical") def test_cert_changed_fingerprint_mismatch(self): - with set_module_args(dict( - certificate='cert-foo', - private_key='private-foo', - 
dest='/path/to/keystore.jks', - name='foo', - password='changeit' - )): - + with set_module_args( + dict( + certificate="cert-foo", + private_key="private-foo", + dest="/path/to/keystore.jks", + name="foo", + password="changeit", + ) + ): module = AnsibleModule( argument_spec=module_argument_spec, supports_check_mode=module_supports_check_mode, mutually_exclusive=module_choose_between, - required_one_of=module_choose_between + required_one_of=module_choose_between, ) - with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/placeholder', ''] - self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: wxyz:9876:stuv', '')] - self.get_bin_path.side_effect = ['keytool', 'openssl', ''] - self.current_type.side_effect = ['jks'] + with patch("os.remove", return_value=True): + self.create_file.side_effect = ["/tmp/placeholder", ""] + self.run_command.side_effect = [(0, "foo=abcd:1234:efgh", ""), (0, "SHA256: wxyz:9876:stuv", "")] + self.get_bin_path.side_effect = ["keytool", "openssl", ""] + self.current_type.side_effect = ["jks"] jks = JavaKeystore(module) result = jks.cert_changed() - self.assertTrue(result, 'Fingerprint mismatch') + self.assertTrue(result, "Fingerprint mismatch") def test_cert_changed_alias_does_not_exist(self): - with set_module_args(dict( - certificate='cert-foo', - private_key='private-foo', - dest='/path/to/keystore.jks', - name='foo', - password='changeit' - )): - + with set_module_args( + dict( + certificate="cert-foo", + private_key="private-foo", + dest="/path/to/keystore.jks", + name="foo", + password="changeit", + ) + ): module = AnsibleModule( argument_spec=module_argument_spec, supports_check_mode=module_supports_check_mode, mutually_exclusive=module_choose_between, - required_one_of=module_choose_between + required_one_of=module_choose_between, ) - with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/placeholder', ''] - self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), - (1, 'keytool error: java.lang.Exception: Alias does not exist', '')] - self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + with patch("os.remove", return_value=True): + self.create_file.side_effect = ["/tmp/placeholder", ""] + self.run_command.side_effect = [ + (0, "foo=abcd:1234:efgh", ""), + (1, "keytool error: java.lang.Exception: Alias does not exist", ""), + ] + self.get_bin_path.side_effect = ["keytool", "openssl", ""] jks = JavaKeystore(module) result = jks.cert_changed() - self.assertTrue(result, 'Alias mismatch detected') + self.assertTrue(result, "Alias mismatch detected") def test_cert_changed_password_mismatch(self): - with set_module_args(dict( - certificate='cert-foo', - private_key='private-foo', - dest='/path/to/keystore.jks', - name='foo', - password='changeit' - )): - + with set_module_args( + dict( + certificate="cert-foo", + private_key="private-foo", + dest="/path/to/keystore.jks", + name="foo", + password="changeit", + ) + ): module = AnsibleModule( argument_spec=module_argument_spec, supports_check_mode=module_supports_check_mode, mutually_exclusive=module_choose_between, - required_one_of=module_choose_between + required_one_of=module_choose_between, ) - with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/placeholder', ''] - self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), - (1, 'keytool error: java.io.IOException: Keystore password was incorrect', '')] - self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + with 
patch("os.remove", return_value=True): + self.create_file.side_effect = ["/tmp/placeholder", ""] + self.run_command.side_effect = [ + (0, "foo=abcd:1234:efgh", ""), + (1, "keytool error: java.io.IOException: Keystore password was incorrect", ""), + ] + self.get_bin_path.side_effect = ["keytool", "openssl", ""] jks = JavaKeystore(module) result = jks.cert_changed() - self.assertTrue(result, 'Password mismatch detected') + self.assertTrue(result, "Password mismatch detected") def test_cert_changed_fail_read_cert(self): - with set_module_args(dict( - certificate='cert-foo', - private_key='private-foo', - dest='/path/to/keystore.jks', - name='foo', - password='changeit' - )): - + with set_module_args( + dict( + certificate="cert-foo", + private_key="private-foo", + dest="/path/to/keystore.jks", + name="foo", + password="changeit", + ) + ): module = AnsibleModule( argument_spec=module_argument_spec, supports_check_mode=module_supports_check_mode, mutually_exclusive=module_choose_between, - required_one_of=module_choose_between + required_one_of=module_choose_between, ) module.exit_json = Mock() module.fail_json = Mock() - with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/tmpdj6bvvme', ''] - self.run_command.side_effect = [(1, '', 'Oops'), (0, 'SHA256: wxyz:9876:stuv', '')] - self.get_bin_path.side_effect = ['keytool', 'openssl', ''] - self.current_type.side_effect = ['jks'] + with patch("os.remove", return_value=True): + self.create_file.side_effect = ["/tmp/tmpdj6bvvme", ""] + self.run_command.side_effect = [(1, "", "Oops"), (0, "SHA256: wxyz:9876:stuv", "")] + self.get_bin_path.side_effect = ["keytool", "openssl", ""] + self.current_type.side_effect = ["jks"] jks = JavaKeystore(module) jks.cert_changed() module.fail_json.assert_called_once_with( cmd=["openssl", "x509", "-noout", "-in", "/tmp/tmpdj6bvvme", "-fingerprint", "-sha256"], - msg='', - err='Oops', - rc=1 + msg="", + err="Oops", + rc=1, ) def test_cert_changed_fail_read_keystore(self): - with set_module_args(dict( - certificate='cert-foo', - private_key='private-foo', - dest='/path/to/keystore.jks', - name='foo', - password='changeit' - )): - + with set_module_args( + dict( + certificate="cert-foo", + private_key="private-foo", + dest="/path/to/keystore.jks", + name="foo", + password="changeit", + ) + ): module = AnsibleModule( argument_spec=module_argument_spec, supports_check_mode=module_supports_check_mode, mutually_exclusive=module_choose_between, - required_one_of=module_choose_between + required_one_of=module_choose_between, ) module.exit_json = Mock() module.fail_json = Mock(return_value=True) - with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/placeholder', ''] - self.run_command.side_effect = [(0, 'foo: wxyz:9876:stuv', ''), (1, '', 'Oops')] - self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + with patch("os.remove", return_value=True): + self.create_file.side_effect = ["/tmp/placeholder", ""] + self.run_command.side_effect = [(0, "foo: wxyz:9876:stuv", ""), (1, "", "Oops")] + self.get_bin_path.side_effect = ["keytool", "openssl", ""] jks = JavaKeystore(module) jks.cert_changed() module.fail_json.assert_called_with( cmd=["keytool", "-list", "-alias", "foo", "-keystore", "/path/to/keystore.jks", "-v"], - msg='', - err='Oops', - rc=1 + msg="", + err="Oops", + rc=1, ) diff --git a/tests/unit/plugins/modules/test_jenkins_build.py b/tests/unit/plugins/modules/test_jenkins_build.py index e741e5558c4..781b75a8c5a 100644 --- 
a/tests/unit/plugins/modules/test_jenkins_build.py +++ b/tests/unit/plugins/modules/test_jenkins_build.py @@ -26,39 +26,30 @@ class NotFoundException(JenkinsException): pass -class JenkinsBuildMock(): +class JenkinsBuildMock: def get_build_status(self): try: instance = JenkinsMock() - response = JenkinsMock.get_build_info(instance, 'host-delete', 1234) + response = JenkinsMock.get_build_info(instance, "host-delete", 1234) return response except jenkins.JenkinsException as e: response = {} response["result"] = "ABSENT" return response except Exception as e: - fail_json(msg=f'Unable to fetch build information, {e}') + fail_json(msg=f"Unable to fetch build information, {e}") -class JenkinsMock(): - +class JenkinsMock: def get_job_info(self, name): - return { - "nextBuildNumber": 1234 - } + return {"nextBuildNumber": 1234} def get_build_info(self, name, build_number): if name == "host-delete": raise jenkins.JenkinsException(f"job {name} number {build_number} does not exist") elif name == "create-detached": - return { - "building": True, - "result": None - } - return { - "building": True, - "result": "SUCCESS" - } + return {"building": True, "result": None} + return {"building": True, "result": "SUCCESS"} def build_job(self, *args): return None @@ -70,18 +61,12 @@ def stop_build(self, name, build_number): return None -class JenkinsMockIdempotent(): - +class JenkinsMockIdempotent: def get_job_info(self, name): - return { - "nextBuildNumber": 1235 - } + return {"nextBuildNumber": 1235} def get_build_info(self, name, build_number): - return { - "building": False, - "result": "ABORTED" - } + return {"building": False, "result": "ABORTED"} def build_job(self, *args): return None @@ -94,146 +79,111 @@ def stop_build(self, name, build_number): class TestJenkinsBuild(unittest.TestCase): - def setUp(self): - self.mock_module_helper = patch.multiple(basic.AnsibleModule, - exit_json=exit_json, - fail_json=fail_json) + self.mock_module_helper = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) self.mock_module_helper.start() self.addCleanup(self.mock_module_helper.stop) - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies') + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies") def test_module_fail_when_required_args_missing(self, test_deps): test_deps.return_value = None with self.assertRaises(AnsibleFailJson): with set_module_args({}): jenkins_build.main() - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies') + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies") def test_module_fail_when_missing_build_number(self, test_deps): test_deps.return_value = None with self.assertRaises(AnsibleFailJson): - with set_module_args({ - "name": "required-if", - "state": "stopped" - }): + with set_module_args({"name": "required-if", "state": "stopped"}): jenkins_build.main() - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies') - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection') + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies") + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection") def test_module_create_build(self, jenkins_connection, test_deps): test_deps.return_value = None jenkins_connection.return_value 
= JenkinsMock() with self.assertRaises(AnsibleExitJson): - with set_module_args({ - "name": "host-check", - "user": "abc", - "token": "xyz" - }): + with set_module_args({"name": "host-check", "user": "abc", "token": "xyz"}): jenkins_build.main() - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies') - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection') + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies") + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection") def test_module_stop_build(self, jenkins_connection, test_deps): test_deps.return_value = None jenkins_connection.return_value = JenkinsMock() with self.assertRaises(AnsibleExitJson) as return_json: - with set_module_args({ - "name": "host-check", - "build_number": "1234", - "state": "stopped", - "user": "abc", - "token": "xyz" - }): + with set_module_args( + {"name": "host-check", "build_number": "1234", "state": "stopped", "user": "abc", "token": "xyz"} + ): jenkins_build.main() - self.assertTrue(return_json.exception.args[0]['changed']) + self.assertTrue(return_json.exception.args[0]["changed"]) - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies') - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection') + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies") + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection") def test_module_stop_build_again(self, jenkins_connection, test_deps): test_deps.return_value = None jenkins_connection.return_value = JenkinsMockIdempotent() with self.assertRaises(AnsibleExitJson) as return_json: - with set_module_args({ - "name": "host-check", - "build_number": "1234", - "state": "stopped", - "user": "abc", - "password": "xyz" - }): + with set_module_args( + {"name": "host-check", "build_number": "1234", "state": "stopped", "user": "abc", "password": "xyz"} + ): jenkins_build.main() - self.assertFalse(return_json.exception.args[0]['changed']) + self.assertFalse(return_json.exception.args[0]["changed"]) - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies') - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection') - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_build_status') + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies") + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection") + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_build_status") def test_module_delete_build(self, build_status, jenkins_connection, test_deps): test_deps.return_value = None jenkins_connection.return_value = JenkinsMock() build_status.return_value = JenkinsBuildMock().get_build_status() with self.assertRaises(AnsibleExitJson): - with set_module_args({ - "name": "host-delete", - "build_number": "1234", - "state": "absent", - "user": "abc", - "token": "xyz" - }): + with set_module_args( + {"name": "host-delete", "build_number": "1234", "state": "absent", "user": "abc", "token": "xyz"} + ): jenkins_build.main() - 
@patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies') - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection') + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies") + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection") def test_module_delete_build_again(self, jenkins_connection, test_deps): test_deps.return_value = None jenkins_connection.return_value = JenkinsMockIdempotent() with self.assertRaises(AnsibleFailJson): - with set_module_args({ - "name": "host-delete", - "build_number": "1234", - "state": "absent", - "user": "abc", - "token": "xyz" - }): + with set_module_args( + {"name": "host-delete", "build_number": "1234", "state": "absent", "user": "abc", "token": "xyz"} + ): jenkins_build.main() - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies') - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection') - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_build_status') + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies") + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection") + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_build_status") def test_module_create_build_without_detach(self, build_status, jenkins_connection, test_deps): test_deps.return_value = None jenkins_connection.return_value = JenkinsMock() build_status.return_value = JenkinsBuildMock().get_build_status() with self.assertRaises(AnsibleExitJson) as return_json: - with set_module_args({ - "name": "create-detached", - "user": "abc", - "token": "xyz" - }): + with set_module_args({"name": "create-detached", "user": "abc", "token": "xyz"}): jenkins_build.main() - self.assertFalse(return_json.exception.args[0]['changed']) + self.assertFalse(return_json.exception.args[0]["changed"]) - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies') - @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection') + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies") + @patch("ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection") def test_module_create_build_detached(self, jenkins_connection, test_deps): test_deps.return_value = None jenkins_connection.return_value = JenkinsMock() with self.assertRaises(AnsibleExitJson) as return_json: - with set_module_args({ - "name": "create-detached", - "user": "abc", - "token": "xyz", - "detach": True - }): + with set_module_args({"name": "create-detached", "user": "abc", "token": "xyz", "detach": True}): jenkins_build.main() - self.assertTrue(return_json.exception.args[0]['changed']) + self.assertTrue(return_json.exception.args[0]["changed"]) diff --git a/tests/unit/plugins/modules/test_jenkins_build_info.py b/tests/unit/plugins/modules/test_jenkins_build_info.py index 333ca620d74..c4868d05be9 100644 --- a/tests/unit/plugins/modules/test_jenkins_build_info.py +++ b/tests/unit/plugins/modules/test_jenkins_build_info.py @@ -23,7 +23,7 @@ class JenkinsException(Exception): pass -class JenkinsBuildMock(): +class 
JenkinsBuildMock: def __init__(self, name, build_number=None): self.name = name self.build_number = build_number @@ -38,118 +38,97 @@ def get_build_status(self): response["result"] = "ABSENT" return response except Exception as e: - fail_json(msg=f'Unable to fetch build information, {e}') + fail_json(msg=f"Unable to fetch build information, {e}") -class JenkinsMock(): - +class JenkinsMock: def get_build_info(self, name, build_number): if name == "job-absent": raise jenkins.JenkinsException() - return { - "result": "SUCCESS", - "build_info": {} - } + return {"result": "SUCCESS", "build_info": {}} def get_job_info(self, name): if name == "job-absent": raise jenkins.JenkinsException() - return { - "lastBuild": { - "number": 123 - } - } + return {"lastBuild": {"number": 123}} class TestJenkinsBuildInfo(unittest.TestCase): - def setUp(self): - self.mock_module_helper = patch.multiple(basic.AnsibleModule, - exit_json=exit_json, - fail_json=fail_json) + self.mock_module_helper = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) self.mock_module_helper.start() self.addCleanup(self.mock_module_helper.stop) - @patch('ansible_collections.community.general.plugins.modules.jenkins_build_info.test_dependencies') + @patch("ansible_collections.community.general.plugins.modules.jenkins_build_info.test_dependencies") def test_module_fail_when_required_args_missing(self, test_deps): test_deps.return_value = None with self.assertRaises(AnsibleFailJson): with set_module_args({}): jenkins_build_info.main() - @patch('ansible_collections.community.general.plugins.modules.jenkins_build_info.test_dependencies') - @patch('ansible_collections.community.general.plugins.modules.jenkins_build_info.JenkinsBuildInfo.get_jenkins_connection') + @patch("ansible_collections.community.general.plugins.modules.jenkins_build_info.test_dependencies") + @patch( + "ansible_collections.community.general.plugins.modules.jenkins_build_info.JenkinsBuildInfo.get_jenkins_connection" + ) def test_module_get_build_info(self, jenkins_connection, test_deps): test_deps.return_value = None jenkins_connection.return_value = JenkinsMock() with self.assertRaises(AnsibleExitJson) as return_json: - with set_module_args({ - "name": "job-present", - "user": "abc", - "token": "xyz", - "build_number": 30 - }): + with set_module_args({"name": "job-present", "user": "abc", "token": "xyz", "build_number": 30}): jenkins_build_info.main() self.assertFalse(return_json.exception.args[0]["changed"]) - @patch('ansible_collections.community.general.plugins.modules.jenkins_build_info.test_dependencies') - @patch('ansible_collections.community.general.plugins.modules.jenkins_build_info.JenkinsBuildInfo.get_jenkins_connection') - @patch('ansible_collections.community.general.plugins.modules.jenkins_build_info.JenkinsBuildInfo.get_build_status') + @patch("ansible_collections.community.general.plugins.modules.jenkins_build_info.test_dependencies") + @patch( + "ansible_collections.community.general.plugins.modules.jenkins_build_info.JenkinsBuildInfo.get_jenkins_connection" + ) + @patch("ansible_collections.community.general.plugins.modules.jenkins_build_info.JenkinsBuildInfo.get_build_status") def test_module_get_build_info_if_build_does_not_exist(self, build_status, jenkins_connection, test_deps): test_deps.return_value = None jenkins_connection.return_value = JenkinsMock() build_status.return_value = JenkinsBuildMock("job-absent", 30).get_build_status() with self.assertRaises(AnsibleExitJson) as return_json: - with set_module_args({ - 
"name": "job-absent", - "user": "abc", - "token": "xyz", - "build_number": 30 - }): + with set_module_args({"name": "job-absent", "user": "abc", "token": "xyz", "build_number": 30}): jenkins_build_info.main() - self.assertFalse(return_json.exception.args[0]['changed']) - self.assertTrue(return_json.exception.args[0]['failed']) - self.assertEqual("ABSENT", return_json.exception.args[0]['build_info']['result']) + self.assertFalse(return_json.exception.args[0]["changed"]) + self.assertTrue(return_json.exception.args[0]["failed"]) + self.assertEqual("ABSENT", return_json.exception.args[0]["build_info"]["result"]) - @patch('ansible_collections.community.general.plugins.modules.jenkins_build_info.test_dependencies') - @patch('ansible_collections.community.general.plugins.modules.jenkins_build_info.JenkinsBuildInfo.get_jenkins_connection') + @patch("ansible_collections.community.general.plugins.modules.jenkins_build_info.test_dependencies") + @patch( + "ansible_collections.community.general.plugins.modules.jenkins_build_info.JenkinsBuildInfo.get_jenkins_connection" + ) def test_module_get_build_info_get_last_build(self, jenkins_connection, test_deps): test_deps.return_value = None jenkins_connection.return_value = JenkinsMock() with self.assertRaises(AnsibleExitJson) as return_json: - with set_module_args({ - "name": "job-present", - "user": "abc", - "token": "xyz" - }): + with set_module_args({"name": "job-present", "user": "abc", "token": "xyz"}): jenkins_build_info.main() - self.assertFalse(return_json.exception.args[0]['changed']) - self.assertEqual("SUCCESS", return_json.exception.args[0]['build_info']['result']) + self.assertFalse(return_json.exception.args[0]["changed"]) + self.assertEqual("SUCCESS", return_json.exception.args[0]["build_info"]["result"]) - @patch('ansible_collections.community.general.plugins.modules.jenkins_build_info.test_dependencies') - @patch('ansible_collections.community.general.plugins.modules.jenkins_build_info.JenkinsBuildInfo.get_jenkins_connection') - @patch('ansible_collections.community.general.plugins.modules.jenkins_build_info.JenkinsBuildInfo.get_build_status') + @patch("ansible_collections.community.general.plugins.modules.jenkins_build_info.test_dependencies") + @patch( + "ansible_collections.community.general.plugins.modules.jenkins_build_info.JenkinsBuildInfo.get_jenkins_connection" + ) + @patch("ansible_collections.community.general.plugins.modules.jenkins_build_info.JenkinsBuildInfo.get_build_status") def test_module_get_build_info_if_job_does_not_exist(self, build_status, jenkins_connection, test_deps): test_deps.return_value = None jenkins_connection.return_value = JenkinsMock() build_status.return_value = JenkinsBuildMock("job-absent").get_build_status() with self.assertRaises(AnsibleExitJson) as return_json: - with set_module_args({ - "name": "job-absent", - "user": "abc", - "token": "xyz" - }): + with set_module_args({"name": "job-absent", "user": "abc", "token": "xyz"}): jenkins_build_info.main() - self.assertFalse(return_json.exception.args[0]['changed']) - self.assertTrue(return_json.exception.args[0]['failed']) - self.assertEqual("ABSENT", return_json.exception.args[0]['build_info']['result']) + self.assertFalse(return_json.exception.args[0]["changed"]) + self.assertTrue(return_json.exception.args[0]["failed"]) + self.assertEqual("ABSENT", return_json.exception.args[0]["build_info"]["result"]) diff --git a/tests/unit/plugins/modules/test_jenkins_credential.py b/tests/unit/plugins/modules/test_jenkins_credential.py index 
fbf04f8c043..150d5345f38 100644 --- a/tests/unit/plugins/modules/test_jenkins_credential.py +++ b/tests/unit/plugins/modules/test_jenkins_credential.py @@ -17,9 +17,11 @@ if sys.version_info[0] == 3: import builtins + open_path = "builtins.open" else: import __builtin__ as builtins + open_path = "__builtin__.open" @@ -34,32 +36,26 @@ def test_validate_file_exist_fails_when_file_missing(): module = MagicMock() with patch("os.path.exists", return_value=False): jenkins_credential.validate_file_exist(module, "/missing/file/path") - module.fail_json.assert_called_once_with( - msg="File not found: /missing/file/path" - ) + module.fail_json.assert_called_once_with(msg="File not found: /missing/file/path") -@patch( - "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" -) +@patch("ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url") def test_get_jenkins_crumb_sets_crumb_header(fetch_mock): module = MagicMock() module.params = {"type": "file", "url": "http://localhost:8080"} headers = {} fake_response = MagicMock() - fake_response.read.return_value = json.dumps( - {"crumbRequestField": "crumb_field", "crumb": "abc123"} - ).encode("utf-8") + fake_response.read.return_value = json.dumps({"crumbRequestField": "crumb_field", "crumb": "abc123"}).encode( + "utf-8" + ) fetch_mock.return_value = ( fake_response, {"status": 200, "set-cookie": "JSESSIONID=something; Path=/"}, ) - crumb_request_field, crumb, session_coockie = jenkins_credential.get_jenkins_crumb( - module, headers - ) + crumb_request_field, crumb, session_coockie = jenkins_credential.get_jenkins_crumb(module, headers) assert "Cookie" not in headers assert "crumb_field" in headers @@ -67,27 +63,23 @@ def test_get_jenkins_crumb_sets_crumb_header(fetch_mock): assert headers[crumb_request_field] == crumb -@patch( - "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" -) +@patch("ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url") def test_get_jenkins_crumb_sets_cookie_if_type_token(fetch_mock): module = MagicMock() module.params = {"type": "token", "url": "http://localhost:8080"} headers = {} fake_response = MagicMock() - fake_response.read.return_value = json.dumps( - {"crumbRequestField": "crumb_field", "crumb": "secure"} - ).encode("utf-8") + fake_response.read.return_value = json.dumps({"crumbRequestField": "crumb_field", "crumb": "secure"}).encode( + "utf-8" + ) fetch_mock.return_value = ( fake_response, {"status": 200, "set-cookie": "JSESSIONID=token-cookie; Path=/"}, ) - crumb_request_field, crumb, session_cookie = jenkins_credential.get_jenkins_crumb( - module, headers - ) + crumb_request_field, crumb, session_cookie = jenkins_credential.get_jenkins_crumb(module, headers) assert "crumb_field" in headers assert crumb == "secure" @@ -95,9 +87,7 @@ def test_get_jenkins_crumb_sets_cookie_if_type_token(fetch_mock): assert headers["Cookie"] == session_cookie -@patch( - "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" -) +@patch("ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url") def test_get_jenkins_crumb_fails_on_non_200_status(fetch_mock): module = MagicMock() module.params = {"type": "file", "url": "http://localhost:8080"} @@ -111,18 +101,14 @@ def test_get_jenkins_crumb_fails_on_non_200_status(fetch_mock): assert "Failed to fetch Jenkins crumb" in module.fail_json.call_args[1]["msg"] -@patch( - 
"ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" -) +@patch("ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url") def test_get_jenkins_crumb_removes_job_from_url(fetch_mock): module = MagicMock() module.params = {"type": "file", "url": "http://localhost:8080/job/test"} headers = {} fake_response = MagicMock() - fake_response.read.return_value = json.dumps( - {"crumbRequestField": "Jenkins-Crumb", "crumb": "xyz"} - ).encode("utf-8") + fake_response.read.return_value = json.dumps({"crumbRequestField": "Jenkins-Crumb", "crumb": "xyz"}).encode("utf-8") fetch_mock.return_value = (fake_response, {"status": 200, "set-cookie": ""}) @@ -146,9 +132,7 @@ def test_clean_data_removes_extraneous_fields(): assert result == expected, f"Expected {expected}, got {result}" -@patch( - "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" -) +@patch("ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url") def test_target_exists_returns_true_on_200(fetch_url_mock): module = MagicMock() module.params = { @@ -165,9 +149,7 @@ def test_target_exists_returns_true_on_200(fetch_url_mock): assert jenkins_credential.target_exists(module) is True -@patch( - "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" -) +@patch("ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url") def test_target_exists_returns_false_on_404(fetch_url_mock): module = MagicMock() module.params = { @@ -184,9 +166,7 @@ def test_target_exists_returns_false_on_404(fetch_url_mock): assert jenkins_credential.target_exists(module) is False -@patch( - "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" -) +@patch("ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url") def test_target_exists_calls_fail_json_on_unexpected_status(fetch_url_mock): module = MagicMock() module.params = { @@ -205,9 +185,7 @@ def test_target_exists_calls_fail_json_on_unexpected_status(fetch_url_mock): assert "Unexpected status code" in module.fail_json.call_args[1]["msg"] -@patch( - "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" -) +@patch("ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url") def test_target_exists_skips_check_for_token_type(fetch_url_mock): module = MagicMock() module.params = { @@ -224,9 +202,7 @@ def test_target_exists_skips_check_for_token_type(fetch_url_mock): fetch_url_mock.assert_not_called() -@patch( - "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url" -) +@patch("ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url") def test_delete_target_fails_deleting(fetch_mock): module = MagicMock() module.params = { @@ -274,9 +250,7 @@ def test_read_privateKey_returns_trimmed_contents(): module = MagicMock() module.params = {"private_key_path": "/fake/path/key.pem"} - mocked_file = mock_open( - read_data="\n \t -----BEGIN PRIVATE KEY-----\nKEYDATA\n-----END PRIVATE KEY----- \n\n" - ) + mocked_file = mock_open(read_data="\n \t -----BEGIN PRIVATE KEY-----\nKEYDATA\n-----END PRIVATE KEY----- \n\n") with patch(open_path, mocked_file): result = jenkins_credential.read_privateKey(module) @@ -306,12 +280,8 @@ def test_embed_file_into_body_returns_multipart_fields(): mock = mock_open() mock.return_value.read.return_value = fake_file_content - with patch("os.path.basename", 
return_value="secret.pem"), patch.object( - builtins, "open", mock - ): - body, content_type = jenkins_credential.embed_file_into_body( - module, file_path, credentials.copy() - ) + with patch("os.path.basename", return_value="secret.pem"), patch.object(builtins, "open", mock): + body, content_type = jenkins_credential.embed_file_into_body(module, file_path, credentials.copy()) assert "multipart/form-data; boundary=" in content_type @@ -337,10 +307,7 @@ def test_embed_file_into_body_injects_file_keys_into_credentials(): file_path = "/fake/path/file.txt" credentials = {"id": "test"} - with patch(open_path, mock_open(read_data=b"1234")), patch( - "os.path.basename", return_value="file.txt" - ): - + with patch(open_path, mock_open(read_data=b"1234")), patch("os.path.basename", return_value="file.txt"): jenkins_credential.embed_file_into_body(module, file_path, credentials) assert credentials["file"] == "file0" diff --git a/tests/unit/plugins/modules/test_jenkins_node.py b/tests/unit/plugins/modules/test_jenkins_node.py index 90456b4e8ef..ef5fb8cd873 100644 --- a/tests/unit/plugins/modules/test_jenkins_node.py +++ b/tests/unit/plugins/modules/test_jenkins_node.py @@ -94,14 +94,15 @@ def get_instance(instance): def test_get_jenkins_instance_with_user_and_token(instance): instance.node_exists.return_value = False - with set_module_args({ - "name": "my-node", - "state": "absent", - "url": "https://localhost:8080", - "user": "admin", - "token": "password", - }): - + with set_module_args( + { + "name": "my-node", + "state": "absent", + "url": "https://localhost:8080", + "user": "admin", + "token": "password", + } + ): with pytest.raises(AnsibleExitJson): jenkins_node.main() @@ -111,13 +112,14 @@ def test_get_jenkins_instance_with_user_and_token(instance): def test_get_jenkins_instance_with_user(instance): instance.node_exists.return_value = False - with set_module_args({ - "name": "my-node", - "state": "absent", - "url": "https://localhost:8080", - "user": "admin", - }): - + with set_module_args( + { + "name": "my-node", + "state": "absent", + "url": "https://localhost:8080", + "user": "admin", + } + ): with pytest.raises(AnsibleExitJson): jenkins_node.main() @@ -127,12 +129,13 @@ def test_get_jenkins_instance_with_user(instance): def test_get_jenkins_instance_with_no_credential(instance): instance.node_exists.return_value = False - with set_module_args({ - "name": "my-node", - "state": "absent", - "url": "https://localhost:8080", - }): - + with set_module_args( + { + "name": "my-node", + "state": "absent", + "url": "https://localhost:8080", + } + ): with pytest.raises(AnsibleExitJson): jenkins_node.main() @@ -147,11 +150,12 @@ def test_state_present_when_absent(get_instance, instance, state): instance.node_exists.return_value = False instance.get_node_config.return_value = "" - with set_module_args({ - "name": "my-node", - "state": state, - }): - + with set_module_args( + { + "name": "my-node", + "state": state, + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -166,12 +170,13 @@ def test_state_present_when_absent_check_mode(get_instance, instance, state): instance.node_exists.return_value = False instance.get_node_config.return_value = "" - with set_module_args({ - "name": "my-node", - "state": state, - "_ansible_check_mode": True, - }): - + with set_module_args( + { + "name": "my-node", + "state": state, + "_ansible_check_mode": True, + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -182,18 +187,17 @@ def 
test_state_present_when_absent_check_mode(get_instance, instance, state): @mark.parametrize(["state"], [param(state) for state in PRESENT_STATES]) -def test_state_present_when_absent_redirect_auth_error_handled( - get_instance, instance, state -): +def test_state_present_when_absent_redirect_auth_error_handled(get_instance, instance, state): instance.node_exists.side_effect = [False, True] instance.get_node_config.return_value = "" instance.create_node.side_effect = jenkins.JenkinsException - with set_module_args({ - "name": "my-node", - "state": state, - }): - + with set_module_args( + { + "name": "my-node", + "state": state, + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -209,11 +213,12 @@ def test_state_present_when_absent_other_error_raised(get_instance, instance, st instance.get_node_config.return_value = "" instance.create_node.side_effect = jenkins.JenkinsException - with set_module_args({ - "name": "my-node", - "state": state, - }): - + with set_module_args( + { + "name": "my-node", + "state": state, + } + ): with raises(AnsibleFailJson) as result: jenkins_node.main() @@ -226,11 +231,12 @@ def test_state_present_when_present(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" - with set_module_args({ - "name": "my-node", - "state": "present", - }): - + with set_module_args( + { + "name": "my-node", + "state": "present", + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -244,11 +250,12 @@ def test_state_absent_when_present(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" - with set_module_args({ - "name": "my-node", - "state": "absent", - }): - + with set_module_args( + { + "name": "my-node", + "state": "absent", + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -262,12 +269,13 @@ def test_state_absent_when_present_check_mode(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" - with set_module_args({ - "name": "my-node", - "state": "absent", - "_ansible_check_mode": True, - }): - + with set_module_args( + { + "name": "my-node", + "state": "absent", + "_ansible_check_mode": True, + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -282,11 +290,12 @@ def test_state_absent_when_present_redirect_auth_error_handled(get_instance, ins instance.get_node_config.return_value = "" instance.delete_node.side_effect = jenkins.JenkinsException - with set_module_args({ - "name": "my-node", - "state": "absent", - }): - + with set_module_args( + { + "name": "my-node", + "state": "absent", + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -301,11 +310,12 @@ def test_state_absent_when_present_other_error_raised(get_instance, instance): instance.get_node_config.return_value = "" instance.delete_node.side_effect = jenkins.JenkinsException - with set_module_args({ - "name": "my-node", - "state": "absent", - }): - + with set_module_args( + { + "name": "my-node", + "state": "absent", + } + ): with raises(AnsibleFailJson) as result: jenkins_node.main() @@ -318,11 +328,12 @@ def test_state_absent_when_absent(get_instance, instance): instance.node_exists.return_value = False instance.get_node_config.return_value = "" - with set_module_args({ - "name": "my-node", - "state": "absent", - }): - + with set_module_args( + { + "name": "my-node", + "state": "absent", + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ 
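All of these jenkins_node tests feed module parameters through the set_module_args() context manager imported from community.internal_test_tools. A rough sketch of the usual implementation idea, offered only as an assumption about how such a helper works (the real one may differ): it serializes the arguments into the global that AnsibleModule parses at construction time.

    import json
    from contextlib import contextmanager
    from unittest.mock import patch

    from ansible.module_utils import basic
    from ansible.module_utils.common.text.converters import to_bytes


    @contextmanager
    def set_module_args(args):
        # AnsibleModule reads its parameters from basic._ANSIBLE_ARGS, so
        # patching that global for the duration of the block is enough to
        # drive module.main() with the given arguments.
        serialized = to_bytes(json.dumps({"ANSIBLE_MODULE_ARGS": args}))
        with patch.object(basic, "_ANSIBLE_ARGS", serialized):
            yield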
-337,11 +348,12 @@ def test_state_enabled_when_offline(get_instance, instance): instance.get_node_config.return_value = "" instance.get_node_info.return_value = {"offline": True} - with set_module_args({ - "name": "my-node", - "state": "enabled", - }): - + with set_module_args( + { + "name": "my-node", + "state": "enabled", + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -356,12 +368,13 @@ def test_state_enabled_when_offline_check_mode(get_instance, instance): instance.get_node_config.return_value = "" instance.get_node_info.return_value = {"offline": True} - with set_module_args({ - "name": "my-node", - "state": "enabled", - "_ansible_check_mode": True, - }): - + with set_module_args( + { + "name": "my-node", + "state": "enabled", + "_ansible_check_mode": True, + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -377,11 +390,12 @@ def test_state_enabled_when_offline_redirect_auth_error_handled(get_instance, in instance.get_node_info.side_effect = [{"offline": True}, {"offline": False}] instance.enable_node.side_effect = jenkins.JenkinsException - with set_module_args({ - "name": "my-node", - "state": "enabled", - }): - + with set_module_args( + { + "name": "my-node", + "state": "enabled", + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -397,11 +411,12 @@ def test_state_enabled_when_offline_other_error_raised(get_instance, instance): instance.get_node_info.side_effect = [{"offline": True}, {"offline": True}] instance.enable_node.side_effect = jenkins.JenkinsException - with set_module_args({ - "name": "my-node", - "state": "enabled", - }): - + with set_module_args( + { + "name": "my-node", + "state": "enabled", + } + ): with raises(AnsibleFailJson) as result: jenkins_node.main() @@ -415,11 +430,12 @@ def test_state_enabled_when_not_offline(get_instance, instance): instance.get_node_config.return_value = "" instance.get_node_info.return_value = {"offline": False} - with set_module_args({ - "name": "my-node", - "state": "enabled", - }): - + with set_module_args( + { + "name": "my-node", + "state": "enabled", + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -437,11 +453,12 @@ def test_state_disabled_when_not_offline(get_instance, instance): "offlineCauseReason": "", } - with set_module_args({ - "name": "my-node", - "state": "disabled", - }): - + with set_module_args( + { + "name": "my-node", + "state": "disabled", + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -451,9 +468,7 @@ def test_state_disabled_when_not_offline(get_instance, instance): assert result.value.args[0]["changed"] is True -def test_state_disabled_when_not_offline_redirect_auth_error_handled( - get_instance, instance -): +def test_state_disabled_when_not_offline_redirect_auth_error_handled(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" instance.get_node_info.side_effect = [ @@ -468,11 +483,12 @@ def test_state_disabled_when_not_offline_redirect_auth_error_handled( ] instance.disable_node.side_effect = jenkins.JenkinsException - with set_module_args({ - "name": "my-node", - "state": "disabled", - }): - + with set_module_args( + { + "name": "my-node", + "state": "disabled", + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -497,11 +513,12 @@ def test_state_disabled_when_not_offline_other_error_raised(get_instance, instan ] instance.disable_node.side_effect = jenkins.JenkinsException - with set_module_args({ - "name": "my-node", - 
"state": "disabled", - }): - + with set_module_args( + { + "name": "my-node", + "state": "disabled", + } + ): with raises(AnsibleFailJson) as result: jenkins_node.main() @@ -518,12 +535,13 @@ def test_state_disabled_when_not_offline_check_mode(get_instance, instance): "offlineCauseReason": "", } - with set_module_args({ - "name": "my-node", - "state": "disabled", - "_ansible_check_mode": True, - }): - + with set_module_args( + { + "name": "my-node", + "state": "disabled", + "_ansible_check_mode": True, + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -541,11 +559,12 @@ def test_state_disabled_when_offline(get_instance, instance): "offlineCauseReason": "", } - with set_module_args({ - "name": "my-node", - "state": "disabled", - }): - + with set_module_args( + { + "name": "my-node", + "state": "disabled", + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -559,21 +578,25 @@ def test_configure_num_executors_when_not_configured(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" - with set_module_args({ - "name": "my-node", - "state": "present", - "num_executors": 3, - }): - + with set_module_args( + { + "name": "my-node", + "state": "present", + "num_executors": 3, + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() assert instance.reconfig_node.call_args[0][0] == "my-node" - assert_xml_equal(instance.reconfig_node.call_args[0][1], """ + assert_xml_equal( + instance.reconfig_node.call_args[0][1], + """ 3 -""") +""", + ) assert result.value.args[0]["configured"] is True assert result.value.args[0]["changed"] is True @@ -587,20 +610,24 @@ def test_configure_num_executors_when_not_equal(get_instance, instance): """ - with set_module_args({ - "name": "my-node", - "state": "present", - "num_executors": 2, - }): - + with set_module_args( + { + "name": "my-node", + "state": "present", + "num_executors": 2, + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() - assert_xml_equal(instance.reconfig_node.call_args[0][1], """ + assert_xml_equal( + instance.reconfig_node.call_args[0][1], + """ 2 -""") +""", + ) assert result.value.args[0]["configured"] is True assert result.value.args[0]["changed"] is True @@ -614,12 +641,13 @@ def test_configure_num_executors_when_equal(get_instance, instance): """ - with set_module_args({ - "name": "my-node", - "state": "present", - "num_executors": 2, - }): - + with set_module_args( + { + "name": "my-node", + "state": "present", + "num_executors": 2, + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -633,25 +661,29 @@ def test_configure_labels_when_not_configured(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" - with set_module_args({ - "name": "my-node", - "state": "present", - "labels": [ - "a", - "b", - "c", - ], - }): - + with set_module_args( + { + "name": "my-node", + "state": "present", + "labels": [ + "a", + "b", + "c", + ], + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() assert instance.reconfig_node.call_args[0][0] == "my-node" - assert_xml_equal(instance.reconfig_node.call_args[0][1], """ + assert_xml_equal( + instance.reconfig_node.call_args[0][1], + """ -""") +""", + ) assert result.value.args[0]["configured"] is True assert result.value.args[0]["changed"] is True @@ -665,25 +697,29 @@ def test_configure_labels_when_not_equal(get_instance, instance): """ - with set_module_args({ - "name": "my-node", - "state": "present", - 
"labels": [ - "a", - "z", - "c", - ], - }): - + with set_module_args( + { + "name": "my-node", + "state": "present", + "labels": [ + "a", + "z", + "c", + ], + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() assert instance.reconfig_node.call_args[0][0] == "my-node" - assert_xml_equal(instance.reconfig_node.call_args[0][1], """ + assert_xml_equal( + instance.reconfig_node.call_args[0][1], + """ -""") +""", + ) assert result.value.args[0]["configured"] is True assert result.value.args[0]["changed"] is True @@ -697,16 +733,17 @@ def test_configure_labels_when_equal(get_instance, instance): """ - with set_module_args({ - "name": "my-node", - "state": "present", - "labels": [ - "a", - "b", - "c", - ], - }): - + with set_module_args( + { + "name": "my-node", + "state": "present", + "labels": [ + "a", + "b", + "c", + ], + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -720,14 +757,15 @@ def test_configure_labels_fail_when_contains_space(get_instance, instance): instance.node_exists.return_value = True instance.get_node_config.return_value = "" - with set_module_args({ - "name": "my-node", - "state": "present", - "labels": [ - "a error", - ], - }): - + with set_module_args( + { + "name": "my-node", + "state": "present", + "labels": [ + "a error", + ], + } + ): with raises(AnsibleFailJson): jenkins_node.main() @@ -736,12 +774,13 @@ def test_configure_labels_fail_when_contains_space(get_instance, instance): @mark.parametrize(["state"], [param(state) for state in ["enabled", "present", "absent"]]) def test_raises_error_if_offline_message_when_state_not_disabled(get_instance, instance, state): - with set_module_args({ - "name": "my-node", - "state": state, - "offline_message": "This is a message...", - }): - + with set_module_args( + { + "name": "my-node", + "state": state, + "offline_message": "This is a message...", + } + ): with raises(AnsibleFailJson): jenkins_node.main() @@ -756,12 +795,13 @@ def test_set_offline_message_when_equal(get_instance, instance): "offlineCauseReason": "This is an old message...", } - with set_module_args({ - "name": "my-node", - "state": "disabled", - "offline_message": "This is an old message...", - }): - + with set_module_args( + { + "name": "my-node", + "state": "disabled", + "offline_message": "This is an old message...", + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -778,12 +818,13 @@ def test_set_offline_message_when_not_equal_not_offline(get_instance, instance): "offlineCauseReason": "This is an old message...", } - with set_module_args({ - "name": "my-node", - "state": "disabled", - "offline_message": "This is a new message...", - }): - + with set_module_args( + { + "name": "my-node", + "state": "disabled", + "offline_message": "This is a new message...", + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() @@ -803,12 +844,13 @@ def test_set_offline_message_when_not_equal_offline(get_instance, instance): "offlineCauseReason": "This is an old message...", } - with set_module_args({ - "name": "my-node", - "state": "disabled", - "offline_message": "This is a new message...", - }): - + with set_module_args( + { + "name": "my-node", + "state": "disabled", + "offline_message": "This is a new message...", + } + ): with raises(AnsibleExitJson) as result: jenkins_node.main() diff --git a/tests/unit/plugins/modules/test_jenkins_plugin.py b/tests/unit/plugins/modules/test_jenkins_plugin.py index 1946ee3013a..8d468785ee7 100644 --- a/tests/unit/plugins/modules/test_jenkins_plugin.py +++ 
b/tests/unit/plugins/modules/test_jenkins_plugin.py @@ -21,8 +21,9 @@ def pass_function(*args, **kwargs): pass -GITHUB_DATA = {"url": 'https://api.github.com/repos/ansible/ansible', - "response": b""" +GITHUB_DATA = { + "url": "https://api.github.com/repos/ansible/ansible", + "response": b""" { "id": 3638964, "name": "ansible", @@ -133,30 +134,25 @@ def pass_function(*args, **kwargs): "network_count": 8893, "subscribers_count": 1733 } -""" - } +""", +} def test__get_json_data(mocker): "test the json conversion of _get_url_data" timeout = 30 - params = { - 'url': GITHUB_DATA['url'], - 'timeout': timeout - } + params = {"url": GITHUB_DATA["url"], "timeout": timeout} module = mocker.Mock() module.params = params JenkinsPlugin._csrf_enabled = pass_function JenkinsPlugin._get_installed_plugins = pass_function JenkinsPlugin._get_url_data = mocker.Mock() - JenkinsPlugin._get_url_data.return_value = BytesIO(GITHUB_DATA['response']) + JenkinsPlugin._get_url_data.return_value = BytesIO(GITHUB_DATA["response"]) jenkins_plugin = JenkinsPlugin(module) - json_data = jenkins_plugin._get_json_data( - f"{GITHUB_DATA['url']}", - 'CSRF') + json_data = jenkins_plugin._get_json_data(f"{GITHUB_DATA['url']}", "CSRF") assert isinstance(json_data, Mapping) @@ -221,12 +217,14 @@ def test__get_latest_compatible_plugin_version(fetch_mock, mocker): plugin_data = { "plugins": { - "git": OrderedDict([ - ("4.8.2", {"requiredCore": "2.263.1"}), - ("4.8.3", {"requiredCore": "2.263.1"}), - ("4.9.0", {"requiredCore": "2.289.1"}), - ("4.9.1", {"requiredCore": "2.289.1"}), - ]) + "git": OrderedDict( + [ + ("4.8.2", {"requiredCore": "2.263.1"}), + ("4.8.3", {"requiredCore": "2.263.1"}), + ("4.9.0", {"requiredCore": "2.289.1"}), + ("4.9.1", {"requiredCore": "2.289.1"}), + ] + ) } } plugin_versions_response = MagicMock() @@ -246,7 +244,7 @@ def fetch_url_side_effect(module, url, **kwargs): jenkins_plugin = JenkinsPlugin(module) latest_version = jenkins_plugin._get_latest_compatible_plugin_version() - assert latest_version == '4.8.3' + assert latest_version == "4.8.3" @patch("ansible_collections.community.general.plugins.modules.jenkins_plugin.fetch_url") diff --git a/tests/unit/plugins/modules/test_keycloak_authentication.py b/tests/unit/plugins/modules/test_keycloak_authentication.py index 735ca41f5dc..8c3df53cab7 100644 --- a/tests/unit/plugins/modules/test_keycloak_authentication.py +++ b/tests/unit/plugins/modules/test_keycloak_authentication.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -9,7 +8,11 @@ import unittest from unittest.mock import patch -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules import keycloak_authentication @@ -18,8 +21,13 @@ @contextmanager -def patch_keycloak_api(get_authentication_flow_by_alias=None, copy_auth_flow=None, create_empty_auth_flow=None, - get_executions_representation=None, delete_authentication_flow_by_id=None): +def patch_keycloak_api( + get_authentication_flow_by_alias=None, + copy_auth_flow=None, + create_empty_auth_flow=None, + get_executions_representation=None, + delete_authentication_flow_by_id=None, +): 
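For orientation, the _get_latest_compatible_plugin_version test above pins down the selection rule under test: among the ordered releases of a plugin, pick the newest one whose requiredCore is not newer than the running Jenkins core, which is why 4.8.3 wins over the 4.9.x entries. A hedged sketch of that rule in plain Python; the core version used here is illustrative only, and the module's real implementation may parse versions differently:

    from collections import OrderedDict

    def latest_compatible(versions, core_version):
        # Illustrative reimplementation of the rule the test data implies.
        def as_tuple(v):
            return tuple(int(part) for part in v.split("."))

        compatible = [
            version
            for version, meta in versions.items()
            if as_tuple(meta["requiredCore"]) <= as_tuple(core_version)
        ]
        return max(compatible, key=as_tuple) if compatible else None

    git_versions = OrderedDict(
        [
            ("4.8.2", {"requiredCore": "2.263.1"}),
            ("4.8.3", {"requiredCore": "2.263.1"}),
            ("4.9.0", {"requiredCore": "2.289.1"}),
            ("4.9.1", {"requiredCore": "2.289.1"}),
        ]
    )
    assert latest_compatible(git_versions, "2.263.4") == "4.8.3"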
"""Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server Patches the `login` and `_post_json` methods @@ -35,39 +43,46 @@ def patch_keycloak_api(get_authentication_flow_by_alias=None, copy_auth_flow=Non """ obj = keycloak_authentication.KeycloakAPI - with patch.object(obj, 'get_authentication_flow_by_alias', side_effect=get_authentication_flow_by_alias) \ - as mock_get_authentication_flow_by_alias: - with patch.object(obj, 'copy_auth_flow', side_effect=copy_auth_flow) \ - as mock_copy_auth_flow: - with patch.object(obj, 'create_empty_auth_flow', side_effect=create_empty_auth_flow) \ - as mock_create_empty_auth_flow: - with patch.object(obj, 'get_executions_representation', return_value=get_executions_representation) \ - as mock_get_executions_representation: - with patch.object(obj, 'delete_authentication_flow_by_id', side_effect=delete_authentication_flow_by_id) \ - as mock_delete_authentication_flow_by_id: - yield mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, \ - mock_get_executions_representation, mock_delete_authentication_flow_by_id + with patch.object( + obj, "get_authentication_flow_by_alias", side_effect=get_authentication_flow_by_alias + ) as mock_get_authentication_flow_by_alias: + with patch.object(obj, "copy_auth_flow", side_effect=copy_auth_flow) as mock_copy_auth_flow: + with patch.object( + obj, "create_empty_auth_flow", side_effect=create_empty_auth_flow + ) as mock_create_empty_auth_flow: + with patch.object( + obj, "get_executions_representation", return_value=get_executions_representation + ) as mock_get_executions_representation: + with patch.object( + obj, "delete_authentication_flow_by_id", side_effect=delete_authentication_flow_by_id + ) as mock_delete_authentication_flow_by_id: + yield ( + mock_get_authentication_flow_by_alias, + mock_copy_auth_flow, + mock_create_empty_auth_flow, + mock_get_executions_representation, + mock_delete_authentication_flow_by_id, + ) def get_response(object_with_future_response, method, get_id_call_count): if callable(object_with_future_response): return object_with_future_response() if isinstance(object_with_future_response, dict): - return get_response( - object_with_future_response[method], method, get_id_call_count) + return get_response(object_with_future_response[method], method, get_id_call_count) if isinstance(object_with_future_response, list): call_number = next(get_id_call_count) - return get_response( - object_with_future_response[call_number], method, get_id_call_count) + return get_response(object_with_future_response[call_number], method, get_id_call_count) return object_with_future_response def build_mocked_request(get_id_user_count, response_dict): def _mocked_requests(*args, **kwargs): url = args[0] - method = kwargs['method'] + method = kwargs["method"] future_response = response_dict.get(url, None) return get_response(future_response, method, get_id_user_count) + return _mocked_requests @@ -75,18 +90,23 @@ def create_wrapper(text_as_string): """Allow to mock many times a call to one address. Without this function, the StringIO is empty for the second call. 
""" + def _create_wrapper(): return StringIO(text_as_string) + return _create_wrapper def mock_good_connection(): token_response = { - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper( + '{"access_token": "alongtoken"}' + ), + } return patch( - 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", side_effect=build_mocked_request(count(), token_response), - autospec=True + autospec=True, ) @@ -99,63 +119,61 @@ def test_create_auth_flow_from_copy(self): """Add a new authentication flow from copy of an other flow""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'realm': 'realm-name', - 'alias': 'Test create authentication flow copy', - 'copyFrom': 'first broker login', - 'authenticationExecutions': [ + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_username": "admin", + "auth_password": "admin", + "auth_realm": "master", + "realm": "realm-name", + "alias": "Test create authentication flow copy", + "copyFrom": "first broker login", + "authenticationExecutions": [ { - 'providerId': 'identity-provider-redirector', - 'requirement': 'ALTERNATIVE', + "providerId": "identity-provider-redirector", + "requirement": "ALTERNATIVE", }, ], - 'state': 'present', + "state": "present", } return_value_auth_flow_before = [{}] - return_value_copied = [{ - 'id': '2ac059fc-c548-414f-9c9e-84d42bd4944e', - 'alias': 'first broker login', - 'description': 'browser based authentication', - 'providerId': 'basic-flow', - 'topLevel': True, - 'builtIn': False, - 'authenticationExecutions': [ - { - 'authenticator': 'auth-cookie', - 'requirement': 'ALTERNATIVE', - 'priority': 10, - 'userSetupAllowed': False, - 'autheticatorFlow': False - }, - ], - }] + return_value_copied = [ + { + "id": "2ac059fc-c548-414f-9c9e-84d42bd4944e", + "alias": "first broker login", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": True, + "builtIn": False, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": False, + "autheticatorFlow": False, + }, + ], + } + ] return_value_executions_after = [ { - 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591', - 'requirement': 'ALTERNATIVE', - 'displayName': 'Identity Provider Redirector', - 'requirementChoices': ['REQUIRED', 'DISABLED'], - 'configurable': True, - 'providerId': 'identity-provider-redirector', - 'level': 0, - 'index': 0 + "id": "b678e30c-8469-40a7-8c21-8d0cda76a591", + "requirement": "ALTERNATIVE", + "displayName": "Identity Provider Redirector", + "requirementChoices": ["REQUIRED", "DISABLED"], + "configurable": True, + "providerId": "identity-provider-redirector", + "level": 0, + "index": 0, }, { - 'id': 'fdc208e9-c292-48b7-b7d1-1d98315ee893', - 'requirement': 'ALTERNATIVE', - 'displayName': 'Cookie', - 'requirementChoices': [ - 'REQUIRED', - 'ALTERNATIVE', - 'DISABLED' - ], - 'configurable': False, - 'providerId': 'auth-cookie', - 'level': 0, - 'index': 1 + "id": "fdc208e9-c292-48b7-b7d1-1d98315ee893", + "requirement": "ALTERNATIVE", + "displayName": "Cookie", + "requirementChoices": ["REQUIRED", "ALTERNATIVE", "DISABLED"], + "configurable": False, 
+ "providerId": "auth-cookie", + "level": 0, + "index": 1, }, ] changed = True @@ -164,10 +182,17 @@ def test_create_auth_flow_from_copy(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, copy_auth_flow=return_value_copied, - get_executions_representation=return_value_executions_after) \ - as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, - mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with patch_keycloak_api( + get_authentication_flow_by_alias=return_value_auth_flow_before, + copy_auth_flow=return_value_copied, + get_executions_representation=return_value_executions_after, + ) as ( + mock_get_authentication_flow_by_alias, + mock_copy_auth_flow, + mock_create_empty_auth_flow, + mock_get_executions_representation, + mock_delete_authentication_flow_by_id, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -179,75 +204,73 @@ def test_create_auth_flow_from_copy(self): self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_create_auth_flow_from_copy_idempotency(self): """Add an already existing authentication flow from copy of an other flow to test idempotency""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'realm': 'realm-name', - 'alias': 'Test create authentication flow copy', - 'copyFrom': 'first broker login', - 'authenticationExecutions': [ + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_username": "admin", + "auth_password": "admin", + "auth_realm": "master", + "realm": "realm-name", + "alias": "Test create authentication flow copy", + "copyFrom": "first broker login", + "authenticationExecutions": [ { - 'providerId': 'identity-provider-redirector', - 'requirement': 'ALTERNATIVE', + "providerId": "identity-provider-redirector", + "requirement": "ALTERNATIVE", }, ], - 'state': 'present', + "state": "present", } - return_value_auth_flow_before = [{ - 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4', - 'alias': 'Test create authentication flow copy', - 'description': '', - 'providerId': 'basic-flow', - 'topLevel': True, - 'builtIn': False, - 'authenticationExecutions': [ - { - 'authenticator': 'identity-provider-redirector', - 'requirement': 'ALTERNATIVE', - 'priority': 0, - 'userSetupAllowed': False, - 'autheticatorFlow': False - }, - { - 'authenticator': 'auth-cookie', - 'requirement': 'ALTERNATIVE', - 'priority': 0, - 'userSetupAllowed': False, - 'autheticatorFlow': False - }, - ], - }] + return_value_auth_flow_before = [ + { + "id": "71275d5e-e11f-4be4-b119-0abfa87987a4", + "alias": "Test create authentication flow copy", + "description": "", + "providerId": "basic-flow", + "topLevel": True, + "builtIn": False, + "authenticationExecutions": [ + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 0, + "userSetupAllowed": False, + "autheticatorFlow": False, + }, + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", + "priority": 0, + "userSetupAllowed": False, + "autheticatorFlow": False, + }, + ], + } + ] return_value_executions_after = [ { - 'id': 
'b678e30c-8469-40a7-8c21-8d0cda76a591', - 'requirement': 'ALTERNATIVE', - 'displayName': 'Identity Provider Redirector', - 'requirementChoices': ['REQUIRED', 'DISABLED'], - 'configurable': True, - 'providerId': 'identity-provider-redirector', - 'level': 0, - 'index': 0 + "id": "b678e30c-8469-40a7-8c21-8d0cda76a591", + "requirement": "ALTERNATIVE", + "displayName": "Identity Provider Redirector", + "requirementChoices": ["REQUIRED", "DISABLED"], + "configurable": True, + "providerId": "identity-provider-redirector", + "level": 0, + "index": 0, }, { - 'id': 'fdc208e9-c292-48b7-b7d1-1d98315ee893', - 'requirement': 'ALTERNATIVE', - 'displayName': 'Cookie', - 'requirementChoices': [ - 'REQUIRED', - 'ALTERNATIVE', - 'DISABLED' - ], - 'configurable': False, - 'providerId': 'auth-cookie', - 'level': 0, - 'index': 1 + "id": "fdc208e9-c292-48b7-b7d1-1d98315ee893", + "requirement": "ALTERNATIVE", + "displayName": "Cookie", + "requirementChoices": ["REQUIRED", "ALTERNATIVE", "DISABLED"], + "configurable": False, + "providerId": "auth-cookie", + "level": 0, + "index": 1, }, ] changed = False @@ -256,10 +279,16 @@ def test_create_auth_flow_from_copy_idempotency(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, - get_executions_representation=return_value_executions_after) \ - as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, - mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with patch_keycloak_api( + get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after, + ) as ( + mock_get_authentication_flow_by_alias, + mock_copy_auth_flow, + mock_create_empty_auth_flow, + mock_get_executions_representation, + mock_delete_authentication_flow_by_id, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -271,31 +300,29 @@ def test_create_auth_flow_from_copy_idempotency(self): self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_create_auth_flow_without_copy(self): """Add authentication without copy""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'realm': 'realm-name', - 'alias': 'Test create authentication flow copy', - 'authenticationExecutions': [ + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_username": "admin", + "auth_password": "admin", + "auth_realm": "master", + "realm": "realm-name", + "alias": "Test create authentication flow copy", + "authenticationExecutions": [ { - 'providerId': 'identity-provider-redirector', - 'requirement': 'ALTERNATIVE', - 'authenticationConfig': { - 'alias': 'name', - 'config': { - 'defaultProvider': 'value' - }, + "providerId": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "authenticationConfig": { + "alias": "name", + "config": {"defaultProvider": "value"}, }, }, ], - 'state': 'present', + "state": "present", } return_value_auth_flow_before = [{}] return_value_created_empty_flow = [ @@ -306,19 +333,19 @@ def test_create_auth_flow_without_copy(self): "description": "", "id": "513f5baa-cc42-47bf-b4b6-1d23ccc0a67f", "providerId": "basic-flow", - 
"topLevel": True + "topLevel": True, }, ] return_value_executions_after = [ { - 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591', - 'requirement': 'ALTERNATIVE', - 'displayName': 'Identity Provider Redirector', - 'requirementChoices': ['REQUIRED', 'DISABLED'], - 'configurable': True, - 'providerId': 'identity-provider-redirector', - 'level': 0, - 'index': 0 + "id": "b678e30c-8469-40a7-8c21-8d0cda76a591", + "requirement": "ALTERNATIVE", + "displayName": "Identity Provider Redirector", + "requirementChoices": ["REQUIRED", "DISABLED"], + "configurable": True, + "providerId": "identity-provider-redirector", + "level": 0, + "index": 0, }, ] changed = True @@ -327,10 +354,17 @@ def test_create_auth_flow_without_copy(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, - get_executions_representation=return_value_executions_after, create_empty_auth_flow=return_value_created_empty_flow) \ - as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, - mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with patch_keycloak_api( + get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after, + create_empty_auth_flow=return_value_created_empty_flow, + ) as ( + mock_get_authentication_flow_by_alias, + mock_copy_auth_flow, + mock_create_empty_auth_flow, + mock_get_executions_representation, + mock_delete_authentication_flow_by_id, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -342,73 +376,69 @@ def test_create_auth_flow_without_copy(self): self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_update_auth_flow_adding_exec(self): """Update authentication flow by adding execution""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'realm': 'realm-name', - 'alias': 'Test create authentication flow copy', - 'authenticationExecutions': [ + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_username": "admin", + "auth_password": "admin", + "auth_realm": "master", + "realm": "realm-name", + "alias": "Test create authentication flow copy", + "authenticationExecutions": [ { - 'providerId': 'identity-provider-redirector', - 'requirement': 'ALTERNATIVE', - 'authenticationConfig': { - 'alias': 'name', - 'config': { - 'defaultProvider': 'value' - }, + "providerId": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "authenticationConfig": { + "alias": "name", + "config": {"defaultProvider": "value"}, }, }, ], - 'state': 'present', + "state": "present", } - return_value_auth_flow_before = [{ - 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4', - 'alias': 'Test create authentication flow copy', - 'description': '', - 'providerId': 'basic-flow', - 'topLevel': True, - 'builtIn': False, - 'authenticationExecutions': [ - { - 'authenticator': 'auth-cookie', - 'requirement': 'ALTERNATIVE', - 'priority': 0, - 'userSetupAllowed': False, - 'autheticatorFlow': False - }, - ], - }] + return_value_auth_flow_before = [ + { + "id": "71275d5e-e11f-4be4-b119-0abfa87987a4", + "alias": "Test create authentication flow copy", + 
"description": "", + "providerId": "basic-flow", + "topLevel": True, + "builtIn": False, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", + "priority": 0, + "userSetupAllowed": False, + "autheticatorFlow": False, + }, + ], + } + ] return_value_executions_after = [ { - 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591', - 'requirement': 'DISABLED', - 'displayName': 'Identity Provider Redirector', - 'requirementChoices': ['REQUIRED', 'DISABLED'], - 'configurable': True, - 'providerId': 'identity-provider-redirector', - 'level': 0, - 'index': 0 + "id": "b678e30c-8469-40a7-8c21-8d0cda76a591", + "requirement": "DISABLED", + "displayName": "Identity Provider Redirector", + "requirementChoices": ["REQUIRED", "DISABLED"], + "configurable": True, + "providerId": "identity-provider-redirector", + "level": 0, + "index": 0, }, { - 'id': 'fdc208e9-c292-48b7-b7d1-1d98315ee893', - 'requirement': 'ALTERNATIVE', - 'displayName': 'Cookie', - 'requirementChoices': [ - 'REQUIRED', - 'ALTERNATIVE', - 'DISABLED' - ], - 'configurable': False, - 'providerId': 'auth-cookie', - 'level': 0, - 'index': 1 + "id": "fdc208e9-c292-48b7-b7d1-1d98315ee893", + "requirement": "ALTERNATIVE", + "displayName": "Cookie", + "requirementChoices": ["REQUIRED", "ALTERNATIVE", "DISABLED"], + "configurable": False, + "providerId": "auth-cookie", + "level": 0, + "index": 1, }, ] changed = True @@ -417,10 +447,16 @@ def test_update_auth_flow_adding_exec(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, - get_executions_representation=return_value_executions_after) \ - as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, - mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with patch_keycloak_api( + get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after, + ) as ( + mock_get_authentication_flow_by_alias, + mock_copy_auth_flow, + mock_create_empty_auth_flow, + mock_get_executions_representation, + mock_delete_authentication_flow_by_id, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -432,46 +468,52 @@ def test_update_auth_flow_adding_exec(self): self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_auth_flow(self): """Delete authentication flow""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'realm': 'realm-name', - 'alias': 'Test create authentication flow copy', - 'state': 'absent', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_username": "admin", + "auth_password": "admin", + "auth_realm": "master", + "realm": "realm-name", + "alias": "Test create authentication flow copy", + "state": "absent", } - return_value_auth_flow_before = [{ - 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4', - 'alias': 'Test create authentication flow copy', - 'description': '', - 'providerId': 'basic-flow', - 'topLevel': True, - 'builtIn': False, - 'authenticationExecutions': [ - { - 'authenticator': 'auth-cookie', - 'requirement': 'ALTERNATIVE', - 'priority': 0, - 'userSetupAllowed': False, - 
'autheticatorFlow': False - }, - ], - }] + return_value_auth_flow_before = [ + { + "id": "71275d5e-e11f-4be4-b119-0abfa87987a4", + "alias": "Test create authentication flow copy", + "description": "", + "providerId": "basic-flow", + "topLevel": True, + "builtIn": False, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", + "priority": 0, + "userSetupAllowed": False, + "autheticatorFlow": False, + }, + ], + } + ] changed = True # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before) \ - as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, - mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before) as ( + mock_get_authentication_flow_by_alias, + mock_copy_auth_flow, + mock_create_empty_auth_flow, + mock_get_executions_representation, + mock_delete_authentication_flow_by_id, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -483,19 +525,19 @@ def test_delete_auth_flow(self): self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 1) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_auth_flow_idempotency(self): """Delete second time authentication flow to test idempotency""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'realm': 'realm-name', - 'alias': 'Test create authentication flow copy', - 'state': 'absent', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_username": "admin", + "auth_password": "admin", + "auth_realm": "master", + "realm": "realm-name", + "alias": "Test create authentication flow copy", + "state": "absent", } return_value_auth_flow_before = [{}] changed = False @@ -504,9 +546,13 @@ def test_delete_auth_flow_idempotency(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before) \ - as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, - mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before) as ( + mock_get_authentication_flow_by_alias, + mock_copy_auth_flow, + mock_create_empty_auth_flow, + mock_get_executions_representation, + mock_delete_authentication_flow_by_id, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -518,50 +564,50 @@ def test_delete_auth_flow_idempotency(self): self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_force_update_auth_flow(self): """Delete authentication flow and create new one""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'realm': 'realm-name', - 'alias': 'Test create authentication flow copy', - 
'authenticationExecutions': [ + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_username": "admin", + "auth_password": "admin", + "auth_realm": "master", + "realm": "realm-name", + "alias": "Test create authentication flow copy", + "authenticationExecutions": [ { - 'providerId': 'identity-provider-redirector', - 'requirement': 'ALTERNATIVE', - 'authenticationConfig': { - 'alias': 'name', - 'config': { - 'defaultProvider': 'value' - }, + "providerId": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "authenticationConfig": { + "alias": "name", + "config": {"defaultProvider": "value"}, }, }, ], - 'state': 'present', - 'force': 'yes', + "state": "present", + "force": "yes", } - return_value_auth_flow_before = [{ - 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4', - 'alias': 'Test create authentication flow copy', - 'description': '', - 'providerId': 'basic-flow', - 'topLevel': True, - 'builtIn': False, - 'authenticationExecutions': [ - { - 'authenticator': 'auth-cookie', - 'requirement': 'ALTERNATIVE', - 'priority': 0, - 'userSetupAllowed': False, - 'autheticatorFlow': False - }, - ], - }] + return_value_auth_flow_before = [ + { + "id": "71275d5e-e11f-4be4-b119-0abfa87987a4", + "alias": "Test create authentication flow copy", + "description": "", + "providerId": "basic-flow", + "topLevel": True, + "builtIn": False, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", + "priority": 0, + "userSetupAllowed": False, + "autheticatorFlow": False, + }, + ], + } + ] return_value_created_empty_flow = [ { "alias": "Test of the keycloak_auth module", @@ -570,19 +616,19 @@ def test_force_update_auth_flow(self): "description": "", "id": "513f5baa-cc42-47bf-b4b6-1d23ccc0a67f", "providerId": "basic-flow", - "topLevel": True + "topLevel": True, }, ] return_value_executions_after = [ { - 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591', - 'requirement': 'DISABLED', - 'displayName': 'Identity Provider Redirector', - 'requirementChoices': ['REQUIRED', 'DISABLED'], - 'configurable': True, - 'providerId': 'identity-provider-redirector', - 'level': 0, - 'index': 0 + "id": "b678e30c-8469-40a7-8c21-8d0cda76a591", + "requirement": "DISABLED", + "displayName": "Identity Provider Redirector", + "requirementChoices": ["REQUIRED", "DISABLED"], + "configurable": True, + "providerId": "identity-provider-redirector", + "level": 0, + "index": 0, }, ] changed = True @@ -591,10 +637,17 @@ def test_force_update_auth_flow(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, - get_executions_representation=return_value_executions_after, create_empty_auth_flow=return_value_created_empty_flow) \ - as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, - mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with patch_keycloak_api( + get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after, + create_empty_auth_flow=return_value_created_empty_flow, + ) as ( + mock_get_authentication_flow_by_alias, + mock_copy_auth_flow, + mock_create_empty_auth_flow, + mock_get_executions_representation, + mock_delete_authentication_flow_by_id, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -606,8 +659,8 @@ def test_force_update_auth_flow(self): self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 
1) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_keycloak_authentication_required_actions.py b/tests/unit/plugins/modules/test_keycloak_authentication_required_actions.py index 167201c93d5..4119b4aaf1b 100644 --- a/tests/unit/plugins/modules/test_keycloak_authentication_required_actions.py +++ b/tests/unit/plugins/modules/test_keycloak_authentication_required_actions.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -9,7 +8,11 @@ from contextlib import contextmanager from unittest.mock import patch -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules import keycloak_authentication_required_actions @@ -40,25 +43,15 @@ def patch_keycloak_api( """ obj = keycloak_authentication_required_actions.KeycloakAPI - with patch.object( - obj, - 'get_required_actions', - side_effect=get_required_actions - ) as mock_get_required_actions: + with patch.object(obj, "get_required_actions", side_effect=get_required_actions) as mock_get_required_actions: with patch.object( - obj, - 'register_required_action', - side_effect=register_required_action + obj, "register_required_action", side_effect=register_required_action ) as mock_register_required_action: with patch.object( - obj, - 'update_required_action', - side_effect=update_required_action + obj, "update_required_action", side_effect=update_required_action ) as mock_update_required_action: with patch.object( - obj, - 'delete_required_action', - side_effect=delete_required_action + obj, "delete_required_action", side_effect=delete_required_action ) as mock_delete_required_action: yield ( mock_get_required_actions, @@ -72,21 +65,20 @@ def get_response(object_with_future_response, method, get_id_call_count): if callable(object_with_future_response): return object_with_future_response() if isinstance(object_with_future_response, dict): - return get_response( - object_with_future_response[method], method, get_id_call_count) + return get_response(object_with_future_response[method], method, get_id_call_count) if isinstance(object_with_future_response, list): call_number = next(get_id_call_count) - return get_response( - object_with_future_response[call_number], method, get_id_call_count) + return get_response(object_with_future_response[call_number], method, get_id_call_count) return object_with_future_response def build_mocked_request(get_id_user_count, response_dict): def _mocked_requests(*args, **kwargs): url = args[0] - method = kwargs['method'] + method = kwargs["method"] future_response = response_dict.get(url, None) return get_response(future_response, method, get_id_user_count) + return _mocked_requests @@ -94,18 +86,23 @@ def create_wrapper(text_as_string): """Allow to mock many times a call to one address. Without this function, the StringIO is empty for the second call. 
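The get_response()/build_mocked_request() pair these Keycloak tests share implements a small dispatch convention: the response table maps a URL to a value, a callable, a dict keyed by HTTP method, or a list consumed in call order through the shared count() iterator. A condensed, self-contained version of the dispatcher:

    from itertools import count

    def get_response(obj, method, call_counter):
        # Resolve callables, then method dicts, then call-ordered lists.
        if callable(obj):
            return obj()
        if isinstance(obj, dict):
            return get_response(obj[method], method, call_counter)
        if isinstance(obj, list):
            return get_response(obj[next(call_counter)], method, call_counter)
        return obj

    table = {"GET": ["first call", "second call"], "DELETE": "deleted"}
    counter = count()
    assert get_response(table, "GET", counter) == "first call"
    assert get_response(table, "GET", counter) == "second call"
    assert get_response(table, "DELETE", count()) == "deleted"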
""" + def _create_wrapper(): return StringIO(text_as_string) + return _create_wrapper def mock_good_connection(): token_response = { - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper( + '{"access_token": "alongtoken"}' + ), + } return patch( - 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", side_effect=build_mocked_request(count(), token_response), - autospec=True + autospec=True, ) @@ -118,115 +115,115 @@ def test_register_required_action(self): """Register a new authentication required action.""" module_args = { - 'auth_client_id': 'admin-cli', - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'realm': 'master', - 'required_actions': [ + "auth_client_id": "admin-cli", + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "realm": "master", + "required_actions": [ { - 'alias': 'test-provider-id', - 'name': 'Test provider ID', - 'providerId': 'test-provider-id', + "alias": "test-provider-id", + "name": "Test provider ID", + "providerId": "test-provider-id", }, { - 'alias': 'test-provider-id', - 'name': 'Test provider ID (DUPLICATE ALIAS)', - 'providerId': 'test-provider-id', + "alias": "test-provider-id", + "name": "Test provider ID (DUPLICATE ALIAS)", + "providerId": "test-provider-id", }, { - 'alias': 'test-provider-id', - 'name': 'Test provider ID (DIFFERENT PROVIDER ID)', - 'providerId': 'test-provider-id-diff', + "alias": "test-provider-id", + "name": "Test provider ID (DIFFERENT PROVIDER ID)", + "providerId": "test-provider-id-diff", }, ], - 'state': 'present', + "state": "present", } return_value_required_actions = [ [ { - 'alias': 'CONFIGURE_TOTP', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Configure OTP', - 'priority': 10, - 'providerId': 'CONFIGURE_TOTP' - }, - { - 'alias': 'TERMS_AND_CONDITIONS', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Terms and conditions', - 'priority': 20, - 'providerId': 'TERMS_AND_CONDITIONS' - }, - { - 'alias': 'UPDATE_PASSWORD', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Update Password', - 'priority': 30, - 'providerId': 'UPDATE_PASSWORD' - }, - { - 'alias': 'UPDATE_PROFILE', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Update Profile', - 'priority': 40, - 'providerId': 'UPDATE_PROFILE' - }, - { - 'alias': 'VERIFY_EMAIL', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Verify Email', - 'priority': 50, - 'providerId': 'VERIFY_EMAIL' - }, - { - 'alias': 'delete_account', - 'config': {}, - 'defaultAction': False, - 'enabled': False, - 'name': 'Delete Account', - 'priority': 60, - 'providerId': 'delete_account' - }, - { - 'alias': 'webauthn-register', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Webauthn Register', - 'priority': 70, - 'providerId': 'webauthn-register' - }, - { - 'alias': 'webauthn-register-passwordless', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Webauthn Register Passwordless', - 'priority': 80, - 'providerId': 'webauthn-register-passwordless' - }, - { - 'alias': 
'update_user_locale', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Update User Locale', - 'priority': 1000, - 'providerId': 'update_user_locale' - } + "alias": "CONFIGURE_TOTP", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Configure OTP", + "priority": 10, + "providerId": "CONFIGURE_TOTP", + }, + { + "alias": "TERMS_AND_CONDITIONS", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Terms and conditions", + "priority": 20, + "providerId": "TERMS_AND_CONDITIONS", + }, + { + "alias": "UPDATE_PASSWORD", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Update Password", + "priority": 30, + "providerId": "UPDATE_PASSWORD", + }, + { + "alias": "UPDATE_PROFILE", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Update Profile", + "priority": 40, + "providerId": "UPDATE_PROFILE", + }, + { + "alias": "VERIFY_EMAIL", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Verify Email", + "priority": 50, + "providerId": "VERIFY_EMAIL", + }, + { + "alias": "delete_account", + "config": {}, + "defaultAction": False, + "enabled": False, + "name": "Delete Account", + "priority": 60, + "providerId": "delete_account", + }, + { + "alias": "webauthn-register", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Webauthn Register", + "priority": 70, + "providerId": "webauthn-register", + }, + { + "alias": "webauthn-register-passwordless", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Webauthn Register Passwordless", + "priority": 80, + "providerId": "webauthn-register-passwordless", + }, + { + "alias": "update_user_locale", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Update User Locale", + "priority": 1000, + "providerId": "update_user_locale", + }, ], ] @@ -253,130 +250,130 @@ def test_register_required_action(self): self.assertEqual(len(mock_delete_required_action.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_register_required_action_idempotency(self): """Register an already existing new authentication required action again.""" module_args = { - 'auth_client_id': 'admin-cli', - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'realm': 'master', - 'required_actions': [ + "auth_client_id": "admin-cli", + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "realm": "master", + "required_actions": [ { - 'alias': 'test-provider-id', - 'name': 'Test provider ID', - 'providerId': 'test-provider-id', + "alias": "test-provider-id", + "name": "Test provider ID", + "providerId": "test-provider-id", }, { - 'alias': 'test-provider-id', - 'name': 'Test provider ID (DUPLICATE ALIAS)', - 'providerId': 'test-provider-id', + "alias": "test-provider-id", + "name": "Test provider ID (DUPLICATE ALIAS)", + "providerId": "test-provider-id", }, { - 'alias': 'test-provider-id', - 'name': 'Test provider ID (DIFFERENT PROVIDER ID)', - 'providerId': 'test-provider-id-diff', + "alias": "test-provider-id", + "name": "Test provider ID (DIFFERENT PROVIDER ID)", + "providerId": "test-provider-id-diff", }, ], - 'state': 'present', + "state": "present", } return_value_required_actions = [ [ { - 
'alias': 'CONFIGURE_TOTP', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Configure OTP', - 'priority': 10, - 'providerId': 'CONFIGURE_TOTP' - }, - { - 'alias': 'TERMS_AND_CONDITIONS', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Terms and conditions', - 'priority': 20, - 'providerId': 'TERMS_AND_CONDITIONS' - }, - { - 'alias': 'UPDATE_PASSWORD', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Update Password', - 'priority': 30, - 'providerId': 'UPDATE_PASSWORD' - }, - { - 'alias': 'UPDATE_PROFILE', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Update Profile', - 'priority': 40, - 'providerId': 'UPDATE_PROFILE' - }, - { - 'alias': 'VERIFY_EMAIL', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Verify Email', - 'priority': 50, - 'providerId': 'VERIFY_EMAIL' - }, - { - 'alias': 'delete_account', - 'config': {}, - 'defaultAction': False, - 'enabled': False, - 'name': 'Delete Account', - 'priority': 60, - 'providerId': 'delete_account' - }, - { - 'alias': 'webauthn-register', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Webauthn Register', - 'priority': 70, - 'providerId': 'webauthn-register' - }, - { - 'alias': 'webauthn-register-passwordless', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Webauthn Register Passwordless', - 'priority': 80, - 'providerId': 'webauthn-register-passwordless' - }, - { - 'alias': 'test-provider-id', - 'config': {}, - 'defaultAction': False, - 'enabled': False, - 'name': 'Test provider ID', - 'priority': 90, - 'providerId': 'test-provider-id' - }, - { - 'alias': 'update_user_locale', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Update User Locale', - 'priority': 1000, - 'providerId': 'update_user_locale' - } + "alias": "CONFIGURE_TOTP", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Configure OTP", + "priority": 10, + "providerId": "CONFIGURE_TOTP", + }, + { + "alias": "TERMS_AND_CONDITIONS", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Terms and conditions", + "priority": 20, + "providerId": "TERMS_AND_CONDITIONS", + }, + { + "alias": "UPDATE_PASSWORD", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Update Password", + "priority": 30, + "providerId": "UPDATE_PASSWORD", + }, + { + "alias": "UPDATE_PROFILE", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Update Profile", + "priority": 40, + "providerId": "UPDATE_PROFILE", + }, + { + "alias": "VERIFY_EMAIL", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Verify Email", + "priority": 50, + "providerId": "VERIFY_EMAIL", + }, + { + "alias": "delete_account", + "config": {}, + "defaultAction": False, + "enabled": False, + "name": "Delete Account", + "priority": 60, + "providerId": "delete_account", + }, + { + "alias": "webauthn-register", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Webauthn Register", + "priority": 70, + "providerId": "webauthn-register", + }, + { + "alias": "webauthn-register-passwordless", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Webauthn Register Passwordless", + "priority": 80, + "providerId": "webauthn-register-passwordless", + }, + { + "alias": "test-provider-id", + "config": {}, + "defaultAction": False, + "enabled": False, + "name": "Test provider ID", + "priority": 90, + "providerId": "test-provider-id", + }, + 
{ + "alias": "update_user_locale", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Update User Locale", + "priority": 1000, + "providerId": "update_user_locale", + }, ], ] @@ -403,130 +400,130 @@ def test_register_required_action_idempotency(self): self.assertEqual(len(mock_delete_required_action.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_update_required_actions(self): """Update an authentication required action.""" module_args = { - 'auth_client_id': 'admin-cli', - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'realm': 'master', - 'required_actions': [ + "auth_client_id": "admin-cli", + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "realm": "master", + "required_actions": [ { - 'alias': 'test-provider-id', - 'name': 'Test provider ID UPDATED', - 'providerId': 'test-provider-id', + "alias": "test-provider-id", + "name": "Test provider ID UPDATED", + "providerId": "test-provider-id", }, { - 'alias': 'test-provider-id', - 'name': 'Test provider ID UPDATED (DUPLICATE ALIAS)', - 'providerId': 'test-provider-id', + "alias": "test-provider-id", + "name": "Test provider ID UPDATED (DUPLICATE ALIAS)", + "providerId": "test-provider-id", }, { - 'alias': 'test-provider-id', - 'name': 'Test provider ID UPDATED (DIFFERENT PROVIDER ID)', - 'providerId': 'test-provider-id-diff', + "alias": "test-provider-id", + "name": "Test provider ID UPDATED (DIFFERENT PROVIDER ID)", + "providerId": "test-provider-id-diff", }, ], - 'state': 'present', + "state": "present", } return_value_required_actions = [ [ { - 'alias': 'CONFIGURE_TOTP', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Configure OTP', - 'priority': 10, - 'providerId': 'CONFIGURE_TOTP' - }, - { - 'alias': 'TERMS_AND_CONDITIONS', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Terms and conditions', - 'priority': 20, - 'providerId': 'TERMS_AND_CONDITIONS' - }, - { - 'alias': 'UPDATE_PASSWORD', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Update Password', - 'priority': 30, - 'providerId': 'UPDATE_PASSWORD' - }, - { - 'alias': 'UPDATE_PROFILE', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Update Profile', - 'priority': 40, - 'providerId': 'UPDATE_PROFILE' - }, - { - 'alias': 'VERIFY_EMAIL', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Verify Email', - 'priority': 50, - 'providerId': 'VERIFY_EMAIL' - }, - { - 'alias': 'delete_account', - 'config': {}, - 'defaultAction': False, - 'enabled': False, - 'name': 'Delete Account', - 'priority': 60, - 'providerId': 'delete_account' - }, - { - 'alias': 'webauthn-register', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Webauthn Register', - 'priority': 70, - 'providerId': 'webauthn-register' - }, - { - 'alias': 'webauthn-register-passwordless', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Webauthn Register Passwordless', - 'priority': 80, - 'providerId': 'webauthn-register-passwordless' - }, - { - 'alias': 'test-provider-id', - 'config': {}, - 'defaultAction': False, - 'enabled': False, - 'name': 'Test provider ID', - 'priority': 90, - 'providerId': 
'test-provider-id' - }, - { - 'alias': 'update_user_locale', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Update User Locale', - 'priority': 1000, - 'providerId': 'update_user_locale' - } + "alias": "CONFIGURE_TOTP", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Configure OTP", + "priority": 10, + "providerId": "CONFIGURE_TOTP", + }, + { + "alias": "TERMS_AND_CONDITIONS", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Terms and conditions", + "priority": 20, + "providerId": "TERMS_AND_CONDITIONS", + }, + { + "alias": "UPDATE_PASSWORD", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Update Password", + "priority": 30, + "providerId": "UPDATE_PASSWORD", + }, + { + "alias": "UPDATE_PROFILE", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Update Profile", + "priority": 40, + "providerId": "UPDATE_PROFILE", + }, + { + "alias": "VERIFY_EMAIL", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Verify Email", + "priority": 50, + "providerId": "VERIFY_EMAIL", + }, + { + "alias": "delete_account", + "config": {}, + "defaultAction": False, + "enabled": False, + "name": "Delete Account", + "priority": 60, + "providerId": "delete_account", + }, + { + "alias": "webauthn-register", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Webauthn Register", + "priority": 70, + "providerId": "webauthn-register", + }, + { + "alias": "webauthn-register-passwordless", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Webauthn Register Passwordless", + "priority": 80, + "providerId": "webauthn-register-passwordless", + }, + { + "alias": "test-provider-id", + "config": {}, + "defaultAction": False, + "enabled": False, + "name": "Test provider ID", + "priority": 90, + "providerId": "test-provider-id", + }, + { + "alias": "update_user_locale", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Update User Locale", + "priority": 1000, + "providerId": "update_user_locale", + }, ], ] @@ -553,118 +550,118 @@ def test_update_required_actions(self): self.assertEqual(len(mock_delete_required_action.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_required_action(self): """Delete a registered authentication required action.""" module_args = { - 'auth_client_id': 'admin-cli', - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'realm': 'master', - 'required_actions': [ + "auth_client_id": "admin-cli", + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "realm": "master", + "required_actions": [ { - 'alias': 'test-provider-id', + "alias": "test-provider-id", }, ], - 'state': 'absent', + "state": "absent", } return_value_required_actions = [ [ { - 'alias': 'CONFIGURE_TOTP', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Configure OTP', - 'priority': 10, - 'providerId': 'CONFIGURE_TOTP' - }, - { - 'alias': 'TERMS_AND_CONDITIONS', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Terms and conditions', - 'priority': 20, - 'providerId': 'TERMS_AND_CONDITIONS' - }, - { - 'alias': 'UPDATE_PASSWORD', - 'config': {}, - 'defaultAction': 
False, - 'enabled': True, - 'name': 'Update Password', - 'priority': 30, - 'providerId': 'UPDATE_PASSWORD' - }, - { - 'alias': 'UPDATE_PROFILE', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Update Profile', - 'priority': 40, - 'providerId': 'UPDATE_PROFILE' - }, - { - 'alias': 'VERIFY_EMAIL', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Verify Email', - 'priority': 50, - 'providerId': 'VERIFY_EMAIL' - }, - { - 'alias': 'delete_account', - 'config': {}, - 'defaultAction': False, - 'enabled': False, - 'name': 'Delete Account', - 'priority': 60, - 'providerId': 'delete_account' - }, - { - 'alias': 'webauthn-register', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Webauthn Register', - 'priority': 70, - 'providerId': 'webauthn-register' - }, - { - 'alias': 'webauthn-register-passwordless', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Webauthn Register Passwordless', - 'priority': 80, - 'providerId': 'webauthn-register-passwordless' - }, - { - 'alias': 'test-provider-id', - 'config': {}, - 'defaultAction': False, - 'enabled': False, - 'name': 'Test provider ID', - 'priority': 90, - 'providerId': 'test-provider-id' - }, - { - 'alias': 'update_user_locale', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Update User Locale', - 'priority': 1000, - 'providerId': 'update_user_locale' - } + "alias": "CONFIGURE_TOTP", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Configure OTP", + "priority": 10, + "providerId": "CONFIGURE_TOTP", + }, + { + "alias": "TERMS_AND_CONDITIONS", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Terms and conditions", + "priority": 20, + "providerId": "TERMS_AND_CONDITIONS", + }, + { + "alias": "UPDATE_PASSWORD", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Update Password", + "priority": 30, + "providerId": "UPDATE_PASSWORD", + }, + { + "alias": "UPDATE_PROFILE", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Update Profile", + "priority": 40, + "providerId": "UPDATE_PROFILE", + }, + { + "alias": "VERIFY_EMAIL", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Verify Email", + "priority": 50, + "providerId": "VERIFY_EMAIL", + }, + { + "alias": "delete_account", + "config": {}, + "defaultAction": False, + "enabled": False, + "name": "Delete Account", + "priority": 60, + "providerId": "delete_account", + }, + { + "alias": "webauthn-register", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Webauthn Register", + "priority": 70, + "providerId": "webauthn-register", + }, + { + "alias": "webauthn-register-passwordless", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Webauthn Register Passwordless", + "priority": 80, + "providerId": "webauthn-register-passwordless", + }, + { + "alias": "test-provider-id", + "config": {}, + "defaultAction": False, + "enabled": False, + "name": "Test provider ID", + "priority": 90, + "providerId": "test-provider-id", + }, + { + "alias": "update_user_locale", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Update User Locale", + "priority": 1000, + "providerId": "update_user_locale", + }, ], ] @@ -691,109 +688,109 @@ def test_delete_required_action(self): self.assertEqual(len(mock_delete_required_action.mock_calls), 1) # Verify that the module's changed status matches what is expected - 
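Throughout these tests, each return_value_* fixture is a list of per-call results: patch.object(..., side_effect=<list>) makes call N of the mock return element N, which is why a single canned API listing is wrapped in one more level of brackets. A minimal sketch of that semantics (names are illustrative):

    from unittest.mock import Mock

    # A list side_effect is consumed one element per call, in order.
    api = Mock(side_effect=[["listing, first call"], ["listing, second call"]])
    assert api() == ["listing, first call"]
    assert api() == ["listing, second call"]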
self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_required_action_idempotency(self): """Delete an already deleted authentication required action.""" module_args = { - 'auth_client_id': 'admin-cli', - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'realm': 'master', - 'required_actions': [ + "auth_client_id": "admin-cli", + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "realm": "master", + "required_actions": [ { - 'alias': 'test-provider-id', + "alias": "test-provider-id", }, ], - 'state': 'absent', + "state": "absent", } return_value_required_actions = [ [ { - 'alias': 'CONFIGURE_TOTP', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Configure OTP', - 'priority': 10, - 'providerId': 'CONFIGURE_TOTP' - }, - { - 'alias': 'TERMS_AND_CONDITIONS', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Terms and conditions', - 'priority': 20, - 'providerId': 'TERMS_AND_CONDITIONS' - }, - { - 'alias': 'UPDATE_PASSWORD', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Update Password', - 'priority': 30, - 'providerId': 'UPDATE_PASSWORD' - }, - { - 'alias': 'UPDATE_PROFILE', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Update Profile', - 'priority': 40, - 'providerId': 'UPDATE_PROFILE' - }, - { - 'alias': 'VERIFY_EMAIL', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Verify Email', - 'priority': 50, - 'providerId': 'VERIFY_EMAIL' - }, - { - 'alias': 'delete_account', - 'config': {}, - 'defaultAction': False, - 'enabled': False, - 'name': 'Delete Account', - 'priority': 60, - 'providerId': 'delete_account' - }, - { - 'alias': 'webauthn-register', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Webauthn Register', - 'priority': 70, - 'providerId': 'webauthn-register' - }, - { - 'alias': 'webauthn-register-passwordless', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Webauthn Register Passwordless', - 'priority': 80, - 'providerId': 'webauthn-register-passwordless' - }, - { - 'alias': 'update_user_locale', - 'config': {}, - 'defaultAction': False, - 'enabled': True, - 'name': 'Update User Locale', - 'priority': 1000, - 'providerId': 'update_user_locale' - } + "alias": "CONFIGURE_TOTP", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Configure OTP", + "priority": 10, + "providerId": "CONFIGURE_TOTP", + }, + { + "alias": "TERMS_AND_CONDITIONS", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Terms and conditions", + "priority": 20, + "providerId": "TERMS_AND_CONDITIONS", + }, + { + "alias": "UPDATE_PASSWORD", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Update Password", + "priority": 30, + "providerId": "UPDATE_PASSWORD", + }, + { + "alias": "UPDATE_PROFILE", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Update Profile", + "priority": 40, + "providerId": "UPDATE_PROFILE", + }, + { + "alias": "VERIFY_EMAIL", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Verify Email", + "priority": 50, + "providerId": "VERIFY_EMAIL", + }, + { + "alias": "delete_account", + "config": {}, + "defaultAction": False, + "enabled": False, + "name": "Delete Account", + 
"priority": 60, + "providerId": "delete_account", + }, + { + "alias": "webauthn-register", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Webauthn Register", + "priority": 70, + "providerId": "webauthn-register", + }, + { + "alias": "webauthn-register-passwordless", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Webauthn Register Passwordless", + "priority": 80, + "providerId": "webauthn-register-passwordless", + }, + { + "alias": "update_user_locale", + "config": {}, + "defaultAction": False, + "enabled": True, + "name": "Update User Locale", + "priority": 1000, + "providerId": "update_user_locale", + }, ], ] @@ -820,8 +817,8 @@ def test_delete_required_action_idempotency(self): self.assertEqual(len(mock_delete_required_action.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_keycloak_client.py b/tests/unit/plugins/modules/test_keycloak_client.py index f8e99afab6b..9fd08bae551 100644 --- a/tests/unit/plugins/modules/test_keycloak_client.py +++ b/tests/unit/plugins/modules/test_keycloak_client.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -10,7 +9,11 @@ import unittest from unittest.mock import patch -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules import keycloak_client @@ -19,8 +22,9 @@ @contextmanager -def patch_keycloak_api(get_client_by_clientid=None, get_client_by_id=None, update_client=None, create_client=None, - delete_client=None): +def patch_keycloak_api( + get_client_by_clientid=None, get_client_by_id=None, update_client=None, create_client=None, delete_client=None +): """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server Patches the `login` and `_post_json` methods @@ -36,31 +40,35 @@ def patch_keycloak_api(get_client_by_clientid=None, get_client_by_id=None, updat """ obj = keycloak_client.KeycloakAPI - with patch.object(obj, 'get_client_by_clientid', side_effect=get_client_by_clientid) as mock_get_client_by_clientid: - with patch.object(obj, 'get_client_by_id', side_effect=get_client_by_id) as mock_get_client_by_id: - with patch.object(obj, 'create_client', side_effect=create_client) as mock_create_client: - with patch.object(obj, 'update_client', side_effect=update_client) as mock_update_client: - with patch.object(obj, 'delete_client', side_effect=delete_client) as mock_delete_client: - yield mock_get_client_by_clientid, mock_get_client_by_id, mock_create_client, mock_update_client, mock_delete_client + with patch.object(obj, "get_client_by_clientid", side_effect=get_client_by_clientid) as mock_get_client_by_clientid: + with patch.object(obj, "get_client_by_id", side_effect=get_client_by_id) as mock_get_client_by_id: + with patch.object(obj, "create_client", side_effect=create_client) as mock_create_client: + with 
patch.object(obj, "update_client", side_effect=update_client) as mock_update_client: + with patch.object(obj, "delete_client", side_effect=delete_client) as mock_delete_client: + yield ( + mock_get_client_by_clientid, + mock_get_client_by_id, + mock_create_client, + mock_update_client, + mock_delete_client, + ) def get_response(object_with_future_response, method, get_id_call_count): if callable(object_with_future_response): return object_with_future_response() if isinstance(object_with_future_response, dict): - return get_response( - object_with_future_response[method], method, get_id_call_count) + return get_response(object_with_future_response[method], method, get_id_call_count) if isinstance(object_with_future_response, list): call_number = next(get_id_call_count) - return get_response( - object_with_future_response[call_number], method, get_id_call_count) + return get_response(object_with_future_response[call_number], method, get_id_call_count) return object_with_future_response def build_mocked_request(get_id_user_count, response_dict): def _mocked_requests(*args, **kwargs): url = args[0] - method = kwargs['method'] + method = kwargs["method"] future_response = response_dict.get(url, None) return get_response(future_response, method, get_id_user_count) @@ -80,12 +88,14 @@ def _create_wrapper(): def mock_good_connection(): token_response = { - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper( - '{"access_token": "alongtoken"}'), } + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper( + '{"access_token": "alongtoken"}' + ), + } return patch( - 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", side_effect=build_mocked_request(count(), token_response), - autospec=True + autospec=True, ) @@ -98,28 +108,22 @@ def test_authentication_flow_binding_overrides_feature(self): """Add a new realm""" module_args = { - 'auth_keycloak_url': 'https: // auth.example.com / auth', - 'token': '{{ access_token }}', - 'state': 'present', - 'realm': 'master', - 'client_id': 'test', - 'authentication_flow_binding_overrides': { - 'browser': '4c90336b-bf1d-4b87-916d-3677ba4e5fbb' - } + "auth_keycloak_url": "https: // auth.example.com / auth", + "token": "{{ access_token }}", + "state": "present", + "realm": "master", + "client_id": "test", + "authentication_flow_binding_overrides": {"browser": "4c90336b-bf1d-4b87-916d-3677ba4e5fbb"}, } return_value_get_client_by_clientid = [ None, { - "authenticationFlowBindingOverrides": { - "browser": "f9502b6d-d76a-4efe-8331-2ddd853c9f9c" - }, + "authenticationFlowBindingOverrides": {"browser": "f9502b6d-d76a-4efe-8331-2ddd853c9f9c"}, "clientId": "onboardingid", "enabled": "true", "protocol": "openid-connect", - "redirectUris": [ - "*" - ] - } + "redirectUris": ["*"], + }, ] changed = True @@ -127,8 +131,13 @@ def test_authentication_flow_binding_overrides_feature(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_client_by_clientid=return_value_get_client_by_clientid) \ - as (mock_get_client_by_clientid, mock_get_client_by_id, mock_create_client, mock_update_client, mock_delete_client): + with patch_keycloak_api(get_client_by_clientid=return_value_get_client_by_clientid) as ( + mock_get_client_by_clientid, + mock_get_client_by_id, + mock_create_client, + mock_update_client, + mock_delete_client, + ): with 
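The get_response helper repeated in each of these files resolves canned responses by type: callables are invoked, dicts are indexed by the HTTP method, lists by a shared call counter, recursing until a plain value remains. A self-contained sketch of the same dispatch (a compressed copy for illustration; the table value is hypothetical):

    from itertools import count

    def get_response(obj, method, counter):
        # Mirrors the helper above: callable -> call it, dict -> index by
        # HTTP method, list -> index by the shared call counter.
        if callable(obj):
            return obj()
        if isinstance(obj, dict):
            return get_response(obj[method], method, counter)
        if isinstance(obj, list):
            return get_response(obj[next(counter)], method, counter)
        return obj

    counter = count()
    table = {"GET": ["first response", "second response"]}
    assert get_response(table, "GET", counter) == "first response"
    assert get_response(table, "GET", counter) == "second response"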
self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -139,8 +148,8 @@ def test_authentication_flow_binding_overrides_feature(self): self.assertEqual(mock_delete_client.call_count, 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_keycloak_client_rolemapping.py b/tests/unit/plugins/modules/test_keycloak_client_rolemapping.py index e6bcb4c4714..0f1c693bfde 100644 --- a/tests/unit/plugins/modules/test_keycloak_client_rolemapping.py +++ b/tests/unit/plugins/modules/test_keycloak_client_rolemapping.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -9,7 +8,11 @@ from contextlib import contextmanager from unittest.mock import patch -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules import keycloak_client_rolemapping @@ -18,10 +21,16 @@ @contextmanager -def patch_keycloak_api(get_group_by_name=None, get_client_id=None, get_client_role_id_by_name=None, - get_client_group_rolemapping_by_id=None, get_client_group_available_rolemappings=None, - get_client_group_composite_rolemappings=None, add_group_rolemapping=None, - delete_group_rolemapping=None): +def patch_keycloak_api( + get_group_by_name=None, + get_client_id=None, + get_client_role_id_by_name=None, + get_client_group_rolemapping_by_id=None, + get_client_group_available_rolemappings=None, + get_client_group_composite_rolemappings=None, + add_group_rolemapping=None, + delete_group_rolemapping=None, +): """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server Patches the `login` and `_post_json` methods @@ -37,46 +46,60 @@ def patch_keycloak_api(get_group_by_name=None, get_client_id=None, get_client_ro """ obj = keycloak_client_rolemapping.KeycloakAPI - with patch.object(obj, 'get_group_by_name', - side_effect=get_group_by_name) as mock_get_group_by_name: - with patch.object(obj, 'get_client_id', - side_effect=get_client_id) as mock_get_client_id: - with patch.object(obj, 'get_client_role_id_by_name', - side_effect=get_client_role_id_by_name) as mock_get_client_role_id_by_name: - with patch.object(obj, 'get_client_group_rolemapping_by_id', - side_effect=get_client_group_rolemapping_by_id) as mock_get_client_group_rolemapping_by_id: - with patch.object(obj, 'get_client_group_available_rolemappings', - side_effect=get_client_group_available_rolemappings) as mock_get_client_group_available_rolemappings: - with patch.object(obj, 'get_client_group_composite_rolemappings', - side_effect=get_client_group_composite_rolemappings) as mock_get_client_group_composite_rolemappings: - with patch.object(obj, 'add_group_rolemapping', - side_effect=add_group_rolemapping) as mock_add_group_rolemapping: - with patch.object(obj, 'delete_group_rolemapping', - side_effect=delete_group_rolemapping) as mock_delete_group_rolemapping: - yield mock_get_group_by_name, 
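The eight-level with nesting that the next hunks reflow is one way to bind each mock to a name; an alternative (not what this patch does, which keeps the nesting and lets the formatter wrap it) is contextlib.ExitStack, sketched here with a hypothetical helper:

    from contextlib import ExitStack, contextmanager
    from unittest.mock import patch

    @contextmanager
    def patch_api(obj, **side_effects):
        # Hypothetical flattening of the nested patch.object blocks: one
        # patch per keyword argument, all entered on a single ExitStack.
        with ExitStack() as stack:
            yield tuple(
                stack.enter_context(patch.object(obj, name, side_effect=effect))
                for name, effect in side_effects.items()
            )

The explicit nesting kept by the patch has one advantage: each mock's name stays visible on the with line that creates it, at the cost of indentation the formatter now has to wrap.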
mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, \ - mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, \ - mock_get_client_group_composite_rolemappings, mock_delete_group_rolemapping + with patch.object(obj, "get_group_by_name", side_effect=get_group_by_name) as mock_get_group_by_name: + with patch.object(obj, "get_client_id", side_effect=get_client_id) as mock_get_client_id: + with patch.object( + obj, "get_client_role_id_by_name", side_effect=get_client_role_id_by_name + ) as mock_get_client_role_id_by_name: + with patch.object( + obj, "get_client_group_rolemapping_by_id", side_effect=get_client_group_rolemapping_by_id + ) as mock_get_client_group_rolemapping_by_id: + with patch.object( + obj, + "get_client_group_available_rolemappings", + side_effect=get_client_group_available_rolemappings, + ) as mock_get_client_group_available_rolemappings: + with patch.object( + obj, + "get_client_group_composite_rolemappings", + side_effect=get_client_group_composite_rolemappings, + ) as mock_get_client_group_composite_rolemappings: + with patch.object( + obj, "add_group_rolemapping", side_effect=add_group_rolemapping + ) as mock_add_group_rolemapping: + with patch.object( + obj, "delete_group_rolemapping", side_effect=delete_group_rolemapping + ) as mock_delete_group_rolemapping: + yield ( + mock_get_group_by_name, + mock_get_client_id, + mock_get_client_role_id_by_name, + mock_add_group_rolemapping, + mock_get_client_group_rolemapping_by_id, + mock_get_client_group_available_rolemappings, + mock_get_client_group_composite_rolemappings, + mock_delete_group_rolemapping, + ) def get_response(object_with_future_response, method, get_id_call_count): if callable(object_with_future_response): return object_with_future_response() if isinstance(object_with_future_response, dict): - return get_response( - object_with_future_response[method], method, get_id_call_count) + return get_response(object_with_future_response[method], method, get_id_call_count) if isinstance(object_with_future_response, list): call_number = next(get_id_call_count) - return get_response( - object_with_future_response[call_number], method, get_id_call_count) + return get_response(object_with_future_response[call_number], method, get_id_call_count) return object_with_future_response def build_mocked_request(get_id_user_count, response_dict): def _mocked_requests(*args, **kwargs): url = args[0] - method = kwargs['method'] + method = kwargs["method"] future_response = response_dict.get(url, None) return get_response(future_response, method, get_id_user_count) + return _mocked_requests @@ -84,18 +107,23 @@ def create_wrapper(text_as_string): """Allow to mock many times a call to one address. Without this function, the StringIO is empty for the second call. 
""" + def _create_wrapper(): return StringIO(text_as_string) + return _create_wrapper def mock_good_connection(): token_response = { - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper( + '{"access_token": "alongtoken"}' + ), + } return patch( - 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", side_effect=build_mocked_request(count(), token_response), - autospec=True + autospec=True, ) @@ -108,61 +136,57 @@ def test_map_clientrole_to_group_with_name(self): """Add a new realm""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_client_id': 'admin-cli', - 'realm': 'realm-name', - 'state': 'present', - 'client_id': 'test_client', - 'group_name': 'test_group', - 'parents': [ - { - 'name': 'parent_group' - } - ], - 'roles': [ + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "auth_client_id": "admin-cli", + "realm": "realm-name", + "state": "present", + "client_id": "test_client", + "group_name": "test_group", + "parents": [{"name": "parent_group"}], + "roles": [ { - 'name': 'test_role1', + "name": "test_role1", }, { - 'name': 'test_role1', + "name": "test_role1", }, ], } - return_value_get_group_by_name = [{ - "access": { - "manage": "true", - "manageMembership": "true", - "view": "true" - }, - "attributes": "{}", - "clientRoles": "{}", - "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", - "name": "test_group", - "path": "/parent_group/test_group", - "realmRoles": "[]", - "subGroups": "[]" - }] - return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727" - return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" - return_value_get_client_group_available_rolemappings = [[ - { - "clientRole": "true", - "composite": "false", - "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", - "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", - "name": "test_role2" - }, + return_value_get_group_by_name = [ { - "clientRole": "true", - "composite": "false", - "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", - "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", - "name": "test_role1" + "access": {"manage": "true", "manageMembership": "true", "view": "true"}, + "attributes": "{}", + "clientRoles": "{}", + "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", + "name": "test_group", + "path": "/parent_group/test_group", + "realmRoles": "[]", + "subGroups": "[]", } - ]] + ] + return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727" + return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" + return_value_get_client_group_available_rolemappings = [ + [ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2", + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1", + }, + ] + ] return_value_get_client_group_composite_rolemappings = [ None, [ @@ -171,16 +195,16 @@ def test_map_clientrole_to_group_with_name(self): 
"composite": "false", "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", - "name": "test_role2" + "name": "test_role2", }, { "clientRole": "true", "composite": "false", "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", - "name": "test_role1" - } - ] + "name": "test_role1", + }, + ], ] changed = True @@ -189,13 +213,22 @@ def test_map_clientrole_to_group_with_name(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, - get_client_role_id_by_name=return_value_get_client_role_id_by_name, - get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, - get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \ - as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, - mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings, - mock_delete_group_rolemapping): + with patch_keycloak_api( + get_group_by_name=return_value_get_group_by_name, + get_client_id=return_value_get_client_id, + get_client_role_id_by_name=return_value_get_client_role_id_by_name, + get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, + get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings, + ) as ( + mock_get_group_by_name, + mock_get_client_id, + mock_get_client_role_id_by_name, + mock_add_group_rolemapping, + mock_get_client_group_rolemapping_by_id, + mock_get_client_group_available_rolemappings, + mock_get_client_group_composite_rolemappings, + mock_delete_group_rolemapping, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -208,63 +241,63 @@ def test_map_clientrole_to_group_with_name(self): self.assertEqual(mock_delete_group_rolemapping.call_count, 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_map_clientrole_to_group_with_name_idempotency(self): """Add a new realm""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_client_id': 'admin-cli', - 'realm': 'realm-name', - 'state': 'present', - 'client_id': 'test_client', - 'group_name': 'test_group', - 'roles': [ + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "auth_client_id": "admin-cli", + "realm": "realm-name", + "state": "present", + "client_id": "test_client", + "group_name": "test_group", + "roles": [ { - 'name': 'test_role1', + "name": "test_role1", }, { - 'name': 'test_role1', + "name": "test_role1", }, ], } - return_value_get_group_by_name = [{ - "access": { - "manage": "true", - "manageMembership": "true", - "view": "true" - }, - "attributes": "{}", - "clientRoles": "{}", - "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", - "name": "test_group", - "path": "/test_group", - "realmRoles": "[]", - "subGroups": "[]" - }] + return_value_get_group_by_name = [ + { + "access": {"manage": "true", "manageMembership": "true", "view": "true"}, + "attributes": "{}", + "clientRoles": "{}", + "id": 
"92f2400e-0ecb-4185-8950-12dcef616c2b", + "name": "test_group", + "path": "/test_group", + "realmRoles": "[]", + "subGroups": "[]", + } + ] return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727" return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" return_value_get_client_group_available_rolemappings = [[]] - return_value_get_client_group_composite_rolemappings = [[ - { - "clientRole": "true", - "composite": "false", - "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", - "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", - "name": "test_role2" - }, - { - "clientRole": "true", - "composite": "false", - "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", - "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", - "name": "test_role1" - } - ]] + return_value_get_client_group_composite_rolemappings = [ + [ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2", + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1", + }, + ] + ] changed = False @@ -272,13 +305,22 @@ def test_map_clientrole_to_group_with_name_idempotency(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, - get_client_role_id_by_name=return_value_get_client_role_id_by_name, - get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, - get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \ - as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, - mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings, - mock_delete_group_rolemapping): + with patch_keycloak_api( + get_group_by_name=return_value_get_group_by_name, + get_client_id=return_value_get_client_id, + get_client_role_id_by_name=return_value_get_client_role_id_by_name, + get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, + get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings, + ) as ( + mock_get_group_by_name, + mock_get_client_id, + mock_get_client_role_id_by_name, + mock_add_group_rolemapping, + mock_get_client_group_rolemapping_by_id, + mock_get_client_group_available_rolemappings, + mock_get_client_group_composite_rolemappings, + mock_delete_group_rolemapping, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -291,62 +333,62 @@ def test_map_clientrole_to_group_with_name_idempotency(self): self.assertEqual(mock_delete_group_rolemapping.call_count, 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_map_clientrole_to_group_with_id(self): """Add a new realm""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_client_id': 'admin-cli', - 'realm': 'realm-name', - 'state': 'present', - 'cid': 'c0f8490c-b224-4737-a567-20223e4c1727', - 'gid': '92f2400e-0ecb-4185-8950-12dcef616c2b', - 
'roles': [ + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "auth_client_id": "admin-cli", + "realm": "realm-name", + "state": "present", + "cid": "c0f8490c-b224-4737-a567-20223e4c1727", + "gid": "92f2400e-0ecb-4185-8950-12dcef616c2b", + "roles": [ { - 'name': 'test_role1', + "name": "test_role1", }, { - 'name': 'test_role1', + "name": "test_role1", }, ], } - return_value_get_group_by_name = [{ - "access": { - "manage": "true", - "manageMembership": "true", - "view": "true" - }, - "attributes": "{}", - "clientRoles": "{}", - "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", - "name": "test_group", - "path": "/test_group", - "realmRoles": "[]", - "subGroups": "[]" - }] - return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727" - return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" - return_value_get_client_group_available_rolemappings = [[ - { - "clientRole": "true", - "composite": "false", - "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", - "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", - "name": "test_role2" - }, + return_value_get_group_by_name = [ { - "clientRole": "true", - "composite": "false", - "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", - "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", - "name": "test_role1" + "access": {"manage": "true", "manageMembership": "true", "view": "true"}, + "attributes": "{}", + "clientRoles": "{}", + "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", + "name": "test_group", + "path": "/test_group", + "realmRoles": "[]", + "subGroups": "[]", } - ]] + ] + return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727" + return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" + return_value_get_client_group_available_rolemappings = [ + [ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2", + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1", + }, + ] + ] return_value_get_client_group_composite_rolemappings = [ None, [ @@ -355,16 +397,16 @@ def test_map_clientrole_to_group_with_id(self): "composite": "false", "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", - "name": "test_role2" + "name": "test_role2", }, { "clientRole": "true", "composite": "false", "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", - "name": "test_role1" - } - ] + "name": "test_role1", + }, + ], ] changed = True @@ -373,13 +415,22 @@ def test_map_clientrole_to_group_with_id(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, - get_client_role_id_by_name=return_value_get_client_role_id_by_name, - get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, - get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \ - as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, - mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings, - 
mock_delete_group_rolemapping): + with patch_keycloak_api( + get_group_by_name=return_value_get_group_by_name, + get_client_id=return_value_get_client_id, + get_client_role_id_by_name=return_value_get_client_role_id_by_name, + get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, + get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings, + ) as ( + mock_get_group_by_name, + mock_get_client_id, + mock_get_client_role_id_by_name, + mock_add_group_rolemapping, + mock_get_client_group_rolemapping_by_id, + mock_get_client_group_available_rolemappings, + mock_get_client_group_composite_rolemappings, + mock_delete_group_rolemapping, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -392,44 +443,42 @@ def test_map_clientrole_to_group_with_id(self): self.assertEqual(mock_delete_group_rolemapping.call_count, 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_remove_clientrole_from_group(self): """Add a new realm""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_client_id': 'admin-cli', - 'realm': 'realm-name', - 'state': 'absent', - 'client_id': 'test_client', - 'group_name': 'test_group', - 'roles': [ + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "auth_client_id": "admin-cli", + "realm": "realm-name", + "state": "absent", + "client_id": "test_client", + "group_name": "test_group", + "roles": [ { - 'name': 'test_role1', + "name": "test_role1", }, { - 'name': 'test_role1', + "name": "test_role1", }, ], } - return_value_get_group_by_name = [{ - "access": { - "manage": "true", - "manageMembership": "true", - "view": "true" - }, - "attributes": "{}", - "clientRoles": "{}", - "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", - "name": "test_group", - "path": "/test_group", - "realmRoles": "[]", - "subGroups": "[]" - }] + return_value_get_group_by_name = [ + { + "access": {"manage": "true", "manageMembership": "true", "view": "true"}, + "attributes": "{}", + "clientRoles": "{}", + "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", + "name": "test_group", + "path": "/test_group", + "realmRoles": "[]", + "subGroups": "[]", + } + ] return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727" return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" return_value_get_client_group_available_rolemappings = [[]] @@ -440,17 +489,17 @@ def test_remove_clientrole_from_group(self): "composite": "false", "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", - "name": "test_role2" + "name": "test_role2", }, { "clientRole": "true", "composite": "false", "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", - "name": "test_role1" - } + "name": "test_role1", + }, ], - [] + [], ] changed = True @@ -459,13 +508,22 @@ def test_remove_clientrole_from_group(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, - get_client_role_id_by_name=return_value_get_client_role_id_by_name, - 
get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, - get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \ - as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, - mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings, - mock_delete_group_rolemapping): + with patch_keycloak_api( + get_group_by_name=return_value_get_group_by_name, + get_client_id=return_value_get_client_id, + get_client_role_id_by_name=return_value_get_client_role_id_by_name, + get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, + get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings, + ) as ( + mock_get_group_by_name, + mock_get_client_id, + mock_get_client_role_id_by_name, + mock_add_group_rolemapping, + mock_get_client_group_rolemapping_by_id, + mock_get_client_group_available_rolemappings, + mock_get_client_group_composite_rolemappings, + mock_delete_group_rolemapping, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -478,44 +536,42 @@ def test_remove_clientrole_from_group(self): self.assertEqual(mock_delete_group_rolemapping.call_count, 1) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_remove_clientrole_from_group_idempotency(self): """Add a new realm""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_client_id': 'admin-cli', - 'realm': 'realm-name', - 'state': 'absent', - 'client_id': 'test_client', - 'group_name': 'test_group', - 'roles': [ + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "auth_client_id": "admin-cli", + "realm": "realm-name", + "state": "absent", + "client_id": "test_client", + "group_name": "test_group", + "roles": [ { - 'name': 'test_role1', + "name": "test_role1", }, { - 'name': 'test_role1', + "name": "test_role1", }, ], } - return_value_get_group_by_name = [{ - "access": { - "manage": "true", - "manageMembership": "true", - "view": "true" - }, - "attributes": "{}", - "clientRoles": "{}", - "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", - "name": "test_group", - "path": "/test_group", - "realmRoles": "[]", - "subGroups": "[]" - }] + return_value_get_group_by_name = [ + { + "access": {"manage": "true", "manageMembership": "true", "view": "true"}, + "attributes": "{}", + "clientRoles": "{}", + "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", + "name": "test_group", + "path": "/test_group", + "realmRoles": "[]", + "subGroups": "[]", + } + ] return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727" return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" return_value_get_client_group_available_rolemappings = [ @@ -525,15 +581,15 @@ def test_remove_clientrole_from_group_idempotency(self): "composite": "false", "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", - "name": "test_role2" + "name": "test_role2", }, { "clientRole": "true", "composite": "false", "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", "id": 
"00a2d9a9-924e-49fa-8cde-c539c010ef6e", - "name": "test_role1" - } + "name": "test_role1", + }, ] ] return_value_get_client_group_composite_rolemappings = [[]] @@ -544,13 +600,22 @@ def test_remove_clientrole_from_group_idempotency(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, - get_client_role_id_by_name=return_value_get_client_role_id_by_name, - get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, - get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \ - as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, - mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings, - mock_delete_group_rolemapping): + with patch_keycloak_api( + get_group_by_name=return_value_get_group_by_name, + get_client_id=return_value_get_client_id, + get_client_role_id_by_name=return_value_get_client_role_id_by_name, + get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings, + get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings, + ) as ( + mock_get_group_by_name, + mock_get_client_id, + mock_get_client_role_id_by_name, + mock_add_group_rolemapping, + mock_get_client_group_rolemapping_by_id, + mock_get_client_group_available_rolemappings, + mock_get_client_group_composite_rolemappings, + mock_delete_group_rolemapping, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -563,8 +628,8 @@ def test_remove_clientrole_from_group_idempotency(self): self.assertEqual(mock_delete_group_rolemapping.call_count, 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_keycloak_clientscope.py b/tests/unit/plugins/modules/test_keycloak_clientscope.py index afcb0397a63..e45b3b67f9d 100644 --- a/tests/unit/plugins/modules/test_keycloak_clientscope.py +++ b/tests/unit/plugins/modules/test_keycloak_clientscope.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -9,7 +8,11 @@ from contextlib import contextmanager from unittest.mock import patch -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules import keycloak_clientscope @@ -18,10 +21,16 @@ @contextmanager -def patch_keycloak_api(get_clientscope_by_name=None, get_clientscope_by_clientscopeid=None, create_clientscope=None, - update_clientscope=None, get_clientscope_protocolmapper_by_name=None, - update_clientscope_protocolmappers=None, create_clientscope_protocolmapper=None, - delete_clientscope=None): +def patch_keycloak_api( + get_clientscope_by_name=None, + get_clientscope_by_clientscopeid=None, + 
create_clientscope=None,
+    update_clientscope=None,
+    get_clientscope_protocolmapper_by_name=None,
+    update_clientscope_protocolmappers=None,
+    create_clientscope_protocolmapper=None,
+    delete_clientscope=None,
+):
     """Mock context manager for patching the methods in KeycloakAPI that contact the Keycloak server

     Patches the client scope CRUD and protocol mapper methods
@@ -42,47 +51,57 @@ def patch_keycloak_api(get_clientscope_by_name=None, get_clientscope_by_clientsc
     """
     obj = keycloak_clientscope.KeycloakAPI

-    with patch.object(obj, 'get_clientscope_by_name', side_effect=get_clientscope_by_name) \
-            as mock_get_clientscope_by_name:
-        with patch.object(obj, 'get_clientscope_by_clientscopeid', side_effect=get_clientscope_by_clientscopeid) \
-                as mock_get_clientscope_by_clientscopeid:
-            with patch.object(obj, 'create_clientscope', side_effect=create_clientscope) \
-                    as mock_create_clientscope:
-                with patch.object(obj, 'update_clientscope', return_value=update_clientscope) \
-                        as mock_update_clientscope:
-                    with patch.object(obj, 'get_clientscope_protocolmapper_by_name',
-                                      side_effect=get_clientscope_protocolmapper_by_name) \
-                            as mock_get_clientscope_protocolmapper_by_name:
-                        with patch.object(obj, 'update_clientscope_protocolmappers',
-                                          side_effect=update_clientscope_protocolmappers) \
-                                as mock_update_clientscope_protocolmappers:
-                            with patch.object(obj, 'create_clientscope_protocolmapper',
-                                              side_effect=create_clientscope_protocolmapper) \
-                                    as mock_create_clientscope_protocolmapper:
-                                with patch.object(obj, 'delete_clientscope', side_effect=delete_clientscope) \
-                                        as mock_delete_clientscope:
-                                    yield mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, \
-                                        mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, mock_update_clientscope_protocolmappers, \
-                                        mock_create_clientscope_protocolmapper, mock_delete_clientscope
+    with patch.object(
+        obj, "get_clientscope_by_name", side_effect=get_clientscope_by_name
+    ) as mock_get_clientscope_by_name:
+        with patch.object(
+            obj, "get_clientscope_by_clientscopeid", side_effect=get_clientscope_by_clientscopeid
+        ) as mock_get_clientscope_by_clientscopeid:
+            with patch.object(obj, "create_clientscope", side_effect=create_clientscope) as mock_create_clientscope:
+                with patch.object(
+                    obj, "update_clientscope", return_value=update_clientscope
+                ) as mock_update_clientscope:
+                    with patch.object(
+                        obj,
+                        "get_clientscope_protocolmapper_by_name",
+                        side_effect=get_clientscope_protocolmapper_by_name,
+                    ) as mock_get_clientscope_protocolmapper_by_name:
+                        with patch.object(
+                            obj, "update_clientscope_protocolmappers", side_effect=update_clientscope_protocolmappers
+                        ) as mock_update_clientscope_protocolmappers:
+                            with patch.object(
+                                obj, "create_clientscope_protocolmapper", side_effect=create_clientscope_protocolmapper
+                            ) as mock_create_clientscope_protocolmapper:
+                                with patch.object(
+                                    obj, "delete_clientscope", side_effect=delete_clientscope
+                                ) as mock_delete_clientscope:
+                                    yield (
+                                        mock_get_clientscope_by_name,
+                                        mock_get_clientscope_by_clientscopeid,
+                                        mock_create_clientscope,
+                                        mock_update_clientscope,
+                                        mock_get_clientscope_protocolmapper_by_name,
+                                        mock_update_clientscope_protocolmappers,
+                                        mock_create_clientscope_protocolmapper,
+                                        mock_delete_clientscope,
+                                    )


 def get_response(object_with_future_response, method, get_id_call_count):
     if callable(object_with_future_response):
         return object_with_future_response()
     if isinstance(object_with_future_response, dict):
-        return get_response(
-            
object_with_future_response[method], method, get_id_call_count) + return get_response(object_with_future_response[method], method, get_id_call_count) if isinstance(object_with_future_response, list): call_number = next(get_id_call_count) - return get_response( - object_with_future_response[call_number], method, get_id_call_count) + return get_response(object_with_future_response[call_number], method, get_id_call_count) return object_with_future_response def build_mocked_request(get_id_user_count, response_dict): def _mocked_requests(*args, **kwargs): url = args[0] - method = kwargs['method'] + method = kwargs["method"] future_response = response_dict.get(url, None) return get_response(future_response, method, get_id_user_count) @@ -102,12 +121,14 @@ def _create_wrapper(): def mock_good_connection(): token_response = { - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper( - '{"access_token": "alongtoken"}'), } + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper( + '{"access_token": "alongtoken"}' + ), + } return patch( - 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", side_effect=build_mocked_request(count(), token_response), - autospec=True + autospec=True, ) @@ -120,21 +141,18 @@ def test_create_clientscope(self): """Add a new authentication flow from copy of an other flow""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'realm': 'realm-name', - 'state': 'present', - 'name': 'my-new-kc-clientscope' + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_username": "admin", + "auth_password": "admin", + "auth_realm": "master", + "realm": "realm-name", + "state": "present", + "name": "my-new-kc-clientscope", } return_value_get_clientscope_by_name = [ None, - { - "attributes": {}, - "id": "73fec1d2-f032-410c-8177-583104d01305", - "name": "my-new-kc-clientscope" - }] + {"attributes": {}, "id": "73fec1d2-f032-410c-8177-583104d01305", "name": "my-new-kc-clientscope"}, + ] changed = True @@ -142,11 +160,16 @@ def test_create_clientscope(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ - as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, - mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, - mock_update_clientscope_protocolmappers, - mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) as ( + mock_get_clientscope_by_name, + mock_get_clientscope_by_clientscopeid, + mock_create_clientscope, + mock_update_clientscope, + mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, + mock_delete_clientscope, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -161,25 +184,23 @@ def test_create_clientscope(self): self.assertEqual(mock_delete_clientscope.call_count, 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_create_clientscope_idempotency(self): 
"""Add a new authentication flow from copy of an other flow""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'realm': 'realm-name', - 'state': 'present', - 'name': 'my-new-kc-clientscope' + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_username": "admin", + "auth_password": "admin", + "auth_realm": "master", + "realm": "realm-name", + "state": "present", + "name": "my-new-kc-clientscope", } - return_value_get_clientscope_by_name = [{ - "attributes": {}, - "id": "73fec1d2-f032-410c-8177-583104d01305", - "name": "my-new-kc-clientscope" - }] + return_value_get_clientscope_by_name = [ + {"attributes": {}, "id": "73fec1d2-f032-410c-8177-583104d01305", "name": "my-new-kc-clientscope"} + ] changed = False @@ -187,11 +208,16 @@ def test_create_clientscope_idempotency(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ - as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, - mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, - mock_update_clientscope_protocolmappers, - mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) as ( + mock_get_clientscope_by_name, + mock_get_clientscope_by_clientscopeid, + mock_create_clientscope, + mock_update_clientscope, + mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, + mock_delete_clientscope, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -206,25 +232,23 @@ def test_create_clientscope_idempotency(self): self.assertEqual(mock_delete_clientscope.call_count, 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_clientscope(self): """Add a new authentication flow from copy of an other flow""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'realm': 'realm-name', - 'state': 'absent', - 'name': 'my-new-kc-clientscope' + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_username": "admin", + "auth_password": "admin", + "auth_realm": "master", + "realm": "realm-name", + "state": "absent", + "name": "my-new-kc-clientscope", } - return_value_get_clientscope_by_name = [{ - "attributes": {}, - "id": "73fec1d2-f032-410c-8177-583104d01305", - "name": "my-new-kc-clientscope" - }] + return_value_get_clientscope_by_name = [ + {"attributes": {}, "id": "73fec1d2-f032-410c-8177-583104d01305", "name": "my-new-kc-clientscope"} + ] changed = True @@ -232,11 +256,16 @@ def test_delete_clientscope(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ - as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, - mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, - mock_update_clientscope_protocolmappers, - mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) as ( + 
mock_get_clientscope_by_name, + mock_get_clientscope_by_clientscopeid, + mock_create_clientscope, + mock_update_clientscope, + mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, + mock_delete_clientscope, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -251,19 +280,19 @@ def test_delete_clientscope(self): self.assertEqual(mock_delete_clientscope.call_count, 1) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_clientscope_idempotency(self): """Add a new authentication flow from copy of an other flow""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'realm': 'realm-name', - 'state': 'absent', - 'name': 'my-new-kc-clientscope' + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_username": "admin", + "auth_password": "admin", + "auth_realm": "master", + "realm": "realm-name", + "state": "absent", + "name": "my-new-kc-clientscope", } return_value_get_clientscope_by_name = [None] @@ -273,11 +302,16 @@ def test_delete_clientscope_idempotency(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ - as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, - mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, - mock_update_clientscope_protocolmappers, - mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) as ( + mock_get_clientscope_by_name, + mock_get_clientscope_by_clientscopeid, + mock_create_clientscope, + mock_update_clientscope, + mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, + mock_delete_clientscope, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -292,57 +326,57 @@ def test_delete_clientscope_idempotency(self): self.assertEqual(mock_delete_clientscope.call_count, 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_create_clientscope_with_protocolmappers(self): """Add a new authentication flow from copy of an other flow""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'realm': 'realm-name', - 'state': 'present', - 'name': 'my-new-kc-clientscope', - 'protocolMappers': [ + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_username": "admin", + "auth_password": "admin", + "auth_realm": "master", + "realm": "realm-name", + "state": "present", + "name": "my-new-kc-clientscope", + "protocolMappers": [ { - 'protocol': 'openid-connect', - 'config': { - 'full.path': 'true', - 'id.token.claim': 'true', - 'access.token.claim': 'true', - 'userinfo.token.claim': 'true', - 'claim.name': 'protocol1', + "protocol": "openid-connect", + "config": { + "full.path": "true", + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true", + "claim.name": "protocol1", 
}, - 'name': 'protocol1', - 'protocolMapper': 'oidc-group-membership-mapper', + "name": "protocol1", + "protocolMapper": "oidc-group-membership-mapper", }, { - 'protocol': 'openid-connect', - 'config': { - 'full.path': 'false', - 'id.token.claim': 'false', - 'access.token.claim': 'false', - 'userinfo.token.claim': 'false', - 'claim.name': 'protocol2', + "protocol": "openid-connect", + "config": { + "full.path": "false", + "id.token.claim": "false", + "access.token.claim": "false", + "userinfo.token.claim": "false", + "claim.name": "protocol2", }, - 'name': 'protocol2', - 'protocolMapper': 'oidc-group-membership-mapper', + "name": "protocol2", + "protocolMapper": "oidc-group-membership-mapper", }, { - 'protocol': 'openid-connect', - 'config': { - 'full.path': 'true', - 'id.token.claim': 'false', - 'access.token.claim': 'true', - 'userinfo.token.claim': 'false', - 'claim.name': 'protocol3', + "protocol": "openid-connect", + "config": { + "full.path": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "userinfo.token.claim": "false", + "claim.name": "protocol3", }, - 'name': 'protocol3', - 'protocolMapper': 'oidc-group-membership-mapper', + "name": "protocol3", + "protocolMapper": "oidc-group-membership-mapper", }, - ] + ], } return_value_get_clientscope_by_name = [ None, @@ -357,13 +391,13 @@ def test_create_clientscope_with_protocolmappers(self): "claim.name": "protocol2", "full.path": "false", "id.token.claim": "false", - "userinfo.token.claim": "false" + "userinfo.token.claim": "false", }, "consentRequired": "false", "id": "a7f19adb-cc58-41b1-94ce-782dc255139b", "name": "protocol2", "protocol": "openid-connect", - "protocolMapper": "oidc-group-membership-mapper" + "protocolMapper": "oidc-group-membership-mapper", }, { "config": { @@ -371,13 +405,13 @@ def test_create_clientscope_with_protocolmappers(self): "claim.name": "protocol3", "full.path": "true", "id.token.claim": "false", - "userinfo.token.claim": "false" + "userinfo.token.claim": "false", }, "consentRequired": "false", "id": "2103a559-185a-40f4-84ae-9ab311d5b812", "name": "protocol3", "protocol": "openid-connect", - "protocolMapper": "oidc-group-membership-mapper" + "protocolMapper": "oidc-group-membership-mapper", }, { "config": { @@ -385,15 +419,17 @@ def test_create_clientscope_with_protocolmappers(self): "claim.name": "protocol1", "full.path": "true", "id.token.claim": "true", - "userinfo.token.claim": "true" + "userinfo.token.claim": "true", }, "consentRequired": "false", "id": "bbf6390f-e95f-4c20-882b-9dad328363b9", "name": "protocol1", "protocol": "openid-connect", - "protocolMapper": "oidc-group-membership-mapper" - }] - }] + "protocolMapper": "oidc-group-membership-mapper", + }, + ], + }, + ] changed = True @@ -401,11 +437,16 @@ def test_create_clientscope_with_protocolmappers(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ - as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, - mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, - mock_update_clientscope_protocolmappers, - mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) as ( + mock_get_clientscope_by_name, + mock_get_clientscope_by_clientscopeid, + mock_create_clientscope, + mock_update_clientscope, + mock_get_clientscope_protocolmapper_by_name, + 
mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, + mock_delete_clientscope, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -420,156 +461,160 @@ def test_create_clientscope_with_protocolmappers(self): self.assertEqual(mock_delete_clientscope.call_count, 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_update_clientscope_with_protocolmappers(self): """Add a new authentication flow from copy of an other flow""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'realm': 'realm-name', - 'state': 'present', - 'name': 'my-new-kc-clientscope', - 'protocolMappers': [ - { - 'protocol': 'openid-connect', - 'config': { - 'full.path': 'false', - 'id.token.claim': 'false', - 'access.token.claim': 'false', - 'userinfo.token.claim': 'false', - 'claim.name': 'protocol1_updated', - }, - 'name': 'protocol1', - 'protocolMapper': 'oidc-group-membership-mapper', - }, - { - 'protocol': 'openid-connect', - 'config': { - 'full.path': 'true', - 'id.token.claim': 'false', - 'access.token.claim': 'false', - 'userinfo.token.claim': 'false', - 'claim.name': 'protocol2_updated', - }, - 'name': 'protocol2', - 'protocolMapper': 'oidc-group-membership-mapper', - }, - { - 'protocol': 'openid-connect', - 'config': { - 'full.path': 'true', - 'id.token.claim': 'true', - 'access.token.claim': 'true', - 'userinfo.token.claim': 'true', - 'claim.name': 'protocol3_updated', - }, - 'name': 'protocol3', - 'protocolMapper': 'oidc-group-membership-mapper', - }, - ] - } - return_value_get_clientscope_by_name = [{ - "attributes": {}, - "id": "890ec72e-fe1d-4308-9f27-485ef7eaa182", + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_username": "admin", + "auth_password": "admin", + "auth_realm": "master", + "realm": "realm-name", + "state": "present", "name": "my-new-kc-clientscope", "protocolMappers": [ { + "protocol": "openid-connect", "config": { - "access.token.claim": "true", - "claim.name": "groups", - "full.path": "true", - "id.token.claim": "true", - "userinfo.token.claim": "true" + "full.path": "false", + "id.token.claim": "false", + "access.token.claim": "false", + "userinfo.token.claim": "false", + "claim.name": "protocol1_updated", }, - "consentRequired": "false", - "id": "e077007a-367a-444f-91ef-70277a1d868d", - "name": "groups", - "protocol": "saml", - "protocolMapper": "oidc-group-membership-mapper" + "name": "protocol1", + "protocolMapper": "oidc-group-membership-mapper", }, { - "config": { - "access.token.claim": "true", - "claim.name": "groups", - "full.path": "true", - "id.token.claim": "true", - "userinfo.token.claim": "true" - }, - "consentRequired": "false", - "id": "06c518aa-c627-43cc-9a82-d8467b508d34", - "name": "groups", "protocol": "openid-connect", - "protocolMapper": "oidc-group-membership-mapper" - }, - { - "config": { - "access.token.claim": "true", - "claim.name": "groups", - "full.path": "true", - "id.token.claim": "true", - "userinfo.token.claim": "true" - }, - "consentRequired": "false", - "id": "1d03c557-d97e-40f4-ac35-6cecd74ea70d", - "name": "groups", - "protocol": "wsfed", - "protocolMapper": "oidc-group-membership-mapper" - } - ] - }] - return_value_get_clientscope_by_clientscopeid = [{ - "attributes": {}, - "id": "2286032f-451e-44d5-8be6-e45aac7983a1", - "name": 
"my-new-kc-clientscope", - "protocolMappers": [ - { "config": { - "access.token.claim": "true", - "claim.name": "protocol1_updated", "full.path": "true", "id.token.claim": "false", - "userinfo.token.claim": "false" + "access.token.claim": "false", + "userinfo.token.claim": "false", + "claim.name": "protocol2_updated", }, - "consentRequired": "false", - "id": "a7f19adb-cc58-41b1-94ce-782dc255139b", "name": "protocol2", - "protocol": "openid-connect", - "protocolMapper": "oidc-group-membership-mapper" + "protocolMapper": "oidc-group-membership-mapper", }, { + "protocol": "openid-connect", "config": { - "access.token.claim": "true", - "claim.name": "protocol1_updated", "full.path": "true", - "id.token.claim": "false", - "userinfo.token.claim": "false" + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true", + "claim.name": "protocol3_updated", }, - "consentRequired": "false", - "id": "2103a559-185a-40f4-84ae-9ab311d5b812", "name": "protocol3", - "protocol": "openid-connect", - "protocolMapper": "oidc-group-membership-mapper" + "protocolMapper": "oidc-group-membership-mapper", }, - { - "config": { - "access.token.claim": "false", - "claim.name": "protocol1_updated", - "full.path": "false", - "id.token.claim": "false", - "userinfo.token.claim": "false" + ], + } + return_value_get_clientscope_by_name = [ + { + "attributes": {}, + "id": "890ec72e-fe1d-4308-9f27-485ef7eaa182", + "name": "my-new-kc-clientscope", + "protocolMappers": [ + { + "config": { + "access.token.claim": "true", + "claim.name": "groups", + "full.path": "true", + "id.token.claim": "true", + "userinfo.token.claim": "true", + }, + "consentRequired": "false", + "id": "e077007a-367a-444f-91ef-70277a1d868d", + "name": "groups", + "protocol": "saml", + "protocolMapper": "oidc-group-membership-mapper", }, - "consentRequired": "false", - "id": "bbf6390f-e95f-4c20-882b-9dad328363b9", - "name": "protocol1", - "protocol": "openid-connect", - "protocolMapper": "oidc-group-membership-mapper" - } - ] - }] + { + "config": { + "access.token.claim": "true", + "claim.name": "groups", + "full.path": "true", + "id.token.claim": "true", + "userinfo.token.claim": "true", + }, + "consentRequired": "false", + "id": "06c518aa-c627-43cc-9a82-d8467b508d34", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper", + }, + { + "config": { + "access.token.claim": "true", + "claim.name": "groups", + "full.path": "true", + "id.token.claim": "true", + "userinfo.token.claim": "true", + }, + "consentRequired": "false", + "id": "1d03c557-d97e-40f4-ac35-6cecd74ea70d", + "name": "groups", + "protocol": "wsfed", + "protocolMapper": "oidc-group-membership-mapper", + }, + ], + } + ] + return_value_get_clientscope_by_clientscopeid = [ + { + "attributes": {}, + "id": "2286032f-451e-44d5-8be6-e45aac7983a1", + "name": "my-new-kc-clientscope", + "protocolMappers": [ + { + "config": { + "access.token.claim": "true", + "claim.name": "protocol1_updated", + "full.path": "true", + "id.token.claim": "false", + "userinfo.token.claim": "false", + }, + "consentRequired": "false", + "id": "a7f19adb-cc58-41b1-94ce-782dc255139b", + "name": "protocol2", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper", + }, + { + "config": { + "access.token.claim": "true", + "claim.name": "protocol1_updated", + "full.path": "true", + "id.token.claim": "false", + "userinfo.token.claim": "false", + }, + "consentRequired": "false", + "id": "2103a559-185a-40f4-84ae-9ab311d5b812", + "name": 
"protocol3", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper", + }, + { + "config": { + "access.token.claim": "false", + "claim.name": "protocol1_updated", + "full.path": "false", + "id.token.claim": "false", + "userinfo.token.claim": "false", + }, + "consentRequired": "false", + "id": "bbf6390f-e95f-4c20-882b-9dad328363b9", + "name": "protocol1", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper", + }, + ], + } + ] changed = True @@ -577,12 +622,19 @@ def test_update_clientscope_with_protocolmappers(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name, - get_clientscope_by_clientscopeid=return_value_get_clientscope_by_clientscopeid) \ - as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, - mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, - mock_update_clientscope_protocolmappers, - mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with patch_keycloak_api( + get_clientscope_by_name=return_value_get_clientscope_by_name, + get_clientscope_by_clientscopeid=return_value_get_clientscope_by_clientscopeid, + ) as ( + mock_get_clientscope_by_name, + mock_get_clientscope_by_clientscopeid, + mock_create_clientscope, + mock_update_clientscope, + mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, + mock_delete_clientscope, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -597,8 +649,8 @@ def test_update_clientscope_with_protocolmappers(self): self.assertEqual(mock_delete_clientscope.call_count, 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_keycloak_component.py b/tests/unit/plugins/modules/test_keycloak_component.py index bec11da2e03..fd8f2e7ccd7 100644 --- a/tests/unit/plugins/modules/test_keycloak_component.py +++ b/tests/unit/plugins/modules/test_keycloak_component.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -12,25 +11,24 @@ from unittest.mock import patch from ansible_collections.community.general.plugins.modules import keycloak_realm_key -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules import keycloak_component @contextmanager def patch_keycloak_api(get_components=None, create_component=None, update_component=None, delete_component=None): - """Mock context manager for patching the methods in KeycloakAPI - """ + """Mock context manager for patching the methods in KeycloakAPI""" obj = keycloak_realm_key.KeycloakAPI - with patch.object(obj, 'get_components', side_effect=get_components) \ - as mock_get_components: - with patch.object(obj, 'create_component', side_effect=create_component) \ - as 
mock_create_component: - with patch.object(obj, 'update_component', side_effect=update_component) \ - as mock_update_component: - with patch.object(obj, 'delete_component', side_effect=delete_component) \ - as mock_delete_component: + with patch.object(obj, "get_components", side_effect=get_components) as mock_get_components: + with patch.object(obj, "create_component", side_effect=create_component) as mock_create_component: + with patch.object(obj, "update_component", side_effect=update_component) as mock_update_component: + with patch.object(obj, "delete_component", side_effect=delete_component) as mock_delete_component: yield mock_get_components, mock_create_component, mock_update_component, mock_delete_component @@ -38,19 +36,17 @@ def get_response(object_with_future_response, method, get_id_call_count): if callable(object_with_future_response): return object_with_future_response() if isinstance(object_with_future_response, dict): - return get_response( - object_with_future_response[method], method, get_id_call_count) + return get_response(object_with_future_response[method], method, get_id_call_count) if isinstance(object_with_future_response, list): call_number = next(get_id_call_count) - return get_response( - object_with_future_response[call_number], method, get_id_call_count) + return get_response(object_with_future_response[call_number], method, get_id_call_count) return object_with_future_response def build_mocked_request(get_id_user_count, response_dict): def _mocked_requests(*args, **kwargs): url = args[0] - method = kwargs['method'] + method = kwargs["method"] future_response = response_dict.get(url, None) return get_response(future_response, method, get_id_user_count) @@ -70,11 +66,14 @@ def _create_wrapper(): def mock_good_connection(): token_response = { - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper( + '{"access_token": "alongtoken"}' + ), + } return patch( - 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", side_effect=build_mocked_request(count(), token_response), - autospec=True + autospec=True, ) @@ -87,18 +86,18 @@ def test_create_when_absent(self): """Add a new realm key""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'parent_id': 'realm-name', - 'name': 'test-user-provider', - 'state': 'present', - 'provider_id': 'my-provider', - 'provider_type': 'org.keycloak.storage.UserStorageProvider', - 'config': { - 'enabled': True, - 'my_custom_config': 'foo', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_realm": "master", + "auth_username": "admin", + "auth_password": "admin", + "parent_id": "realm-name", + "name": "test-user-provider", + "state": "present", + "provider_id": "my-provider", + "provider_type": "org.keycloak.storage.UserStorageProvider", + "config": { + "enabled": True, + "my_custom_config": "foo", }, } return_value_component_create = [ @@ -111,24 +110,21 @@ def test_create_when_absent(self): "myCustomConfig": [ "foo", ], - "enabled": [ - "true" - ], - } + "enabled": ["true"], + }, } ] # get before_comp, get default_mapper, get after_mapper - return_value_components_get = [ - [], [], [] - ] + return_value_components_get = [[], [], 
[]] changed = True # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \ - as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): + with patch_keycloak_api( + get_components=return_value_components_get, create_component=return_value_component_create + ) as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -138,35 +134,38 @@ def test_create_when_absent(self): self.assertEqual(len(mock_delete_component.mock_calls), 0) # must not contain parent_id - mock_create_component.assert_called_once_with({ - 'name': 'test-user-provider', - 'providerId': 'my-provider', - 'providerType': 'org.keycloak.storage.UserStorageProvider', - 'config': { - 'enabled': ['true'], - 'myCustomConfig': ['foo'], + mock_create_component.assert_called_once_with( + { + "name": "test-user-provider", + "providerId": "my-provider", + "providerType": "org.keycloak.storage.UserStorageProvider", + "config": { + "enabled": ["true"], + "myCustomConfig": ["foo"], + }, }, - }, 'realm-name') + "realm-name", + ) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_create_when_present(self): """Update existing realm key""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'parent_id': 'realm-name', - 'name': 'test-user-provider', - 'state': 'present', - 'provider_id': 'my-provider', - 'provider_type': 'org.keycloak.storage.UserStorageProvider', - 'config': { - 'enabled': True, - 'my_custom_config': 'foo', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_realm": "master", + "auth_username": "admin", + "auth_password": "admin", + "parent_id": "realm-name", + "name": "test-user-provider", + "state": "present", + "provider_id": "my-provider", + "provider_type": "org.keycloak.storage.UserStorageProvider", + "config": { + "enabled": True, + "my_custom_config": "foo", }, } return_value_components_get = [ @@ -181,27 +180,23 @@ def test_create_when_present(self): "myCustomConfig": [ "foo", ], - "enabled": [ - "true" - ], - } + "enabled": ["true"], + }, }, ], [], - [] - ] - return_value_component_update = [ - None + [], ] + return_value_component_update = [None] changed = True # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, - update_component=return_value_component_update) \ - as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): + with patch_keycloak_api( + get_components=return_value_components_get, update_component=return_value_component_update + ) as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -211,37 +206,39 @@ def test_create_when_present(self): self.assertEqual(len(mock_delete_component.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def 
test_delete_when_absent(self): """Remove an absent realm key""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'parent_id': 'realm-name', - 'name': 'test-user-provider', - 'state': 'absent', - 'provider_id': 'my-provider', - 'provider_type': 'org.keycloak.storage.UserStorageProvider', - 'config': { - 'enabled': True, - 'my_custom_config': 'foo', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_realm": "master", + "auth_username": "admin", + "auth_password": "admin", + "parent_id": "realm-name", + "name": "test-user-provider", + "state": "absent", + "provider_id": "my-provider", + "provider_type": "org.keycloak.storage.UserStorageProvider", + "config": { + "enabled": True, + "my_custom_config": "foo", }, } - return_value_components_get = [ - [] - ] + return_value_components_get = [[]] changed = False # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get) \ - as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): + with patch_keycloak_api(get_components=return_value_components_get) as ( + mock_get_components, + mock_create_component, + mock_update_component, + mock_delete_component, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -251,24 +248,24 @@ def test_delete_when_absent(self): self.assertEqual(len(mock_delete_component.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_when_present(self): """Remove an existing realm key""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'parent_id': 'realm-name', - 'name': 'test-user-provider', - 'state': 'absent', - 'provider_id': 'my-provider', - 'provider_type': 'org.keycloak.storage.UserStorageProvider', - 'config': { - 'enabled': True, - 'my_custom_config': 'foo', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_realm": "master", + "auth_username": "admin", + "auth_password": "admin", + "parent_id": "realm-name", + "name": "test-user-provider", + "state": "absent", + "provider_id": "my-provider", + "provider_type": "org.keycloak.storage.UserStorageProvider", + "config": { + "enabled": True, + "my_custom_config": "foo", }, } @@ -284,26 +281,23 @@ def test_delete_when_present(self): "myCustomConfig": [ "foo", ], - "enabled": [ - "true" - ], - } + "enabled": ["true"], + }, }, ], [], - [] - ] - return_value_component_delete = [ - None + [], ] + return_value_component_delete = [None] changed = True # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, delete_component=return_value_component_delete) \ - as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): + with patch_keycloak_api( + get_components=return_value_components_get, delete_component=return_value_component_delete + ) as (mock_get_components, mock_create_component, mock_update_component, mock_delete_component): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -313,8 +307,8 @@ def test_delete_when_present(self): self.assertEqual(len(mock_delete_component.mock_calls), 1) 
# Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/unit/plugins/modules/test_keycloak_identity_provider.py b/tests/unit/plugins/modules/test_keycloak_identity_provider.py
index 477cb565b0e..db751f981ba 100644
--- a/tests/unit/plugins/modules/test_keycloak_identity_provider.py
+++ b/tests/unit/plugins/modules/test_keycloak_identity_provider.py
@@ -1,4 +1,3 @@
-
 # Copyright (c) 2021, Ansible Project
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
@@ -9,7 +8,11 @@
 import unittest
 from unittest.mock import patch

-from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
+    AnsibleExitJson,
+    ModuleTestCase,
+    set_module_args,
+)

 from ansible_collections.community.general.plugins.modules import keycloak_identity_provider

@@ -18,9 +21,17 @@

 @contextmanager
-def patch_keycloak_api(get_identity_provider, create_identity_provider=None, update_identity_provider=None, delete_identity_provider=None,
-                       get_identity_provider_mappers=None, create_identity_provider_mapper=None, update_identity_provider_mapper=None,
-                       delete_identity_provider_mapper=None, get_realm_by_id=None):
+def patch_keycloak_api(
+    get_identity_provider,
+    create_identity_provider=None,
+    update_identity_provider=None,
+    delete_identity_provider=None,
+    get_identity_provider_mappers=None,
+    create_identity_provider_mapper=None,
+    update_identity_provider_mapper=None,
+    delete_identity_provider_mapper=None,
+    get_realm_by_id=None,
+):
     """Mock context manager for patching the methods in KeycloakAPI that contact the Keycloak server

     Patches the identity provider and identity provider mapper methods
@@ -36,48 +47,62 @@ def patch_keycloak_api(get_identity_provider, create_identity_provider=None, upd
     """
     obj = keycloak_identity_provider.KeycloakAPI

-    with patch.object(obj, 'get_identity_provider', side_effect=get_identity_provider) \
-            as mock_get_identity_provider:
-        with patch.object(obj, 'create_identity_provider', side_effect=create_identity_provider) \
-                as mock_create_identity_provider:
-            with patch.object(obj, 'update_identity_provider', side_effect=update_identity_provider) \
-                    as mock_update_identity_provider:
-                with patch.object(obj, 'delete_identity_provider', side_effect=delete_identity_provider) \
-                        as mock_delete_identity_provider:
-                    with patch.object(obj, 'get_identity_provider_mappers', side_effect=get_identity_provider_mappers) \
-                            as mock_get_identity_provider_mappers:
-                        with patch.object(obj, 'create_identity_provider_mapper', side_effect=create_identity_provider_mapper) \
-                                as mock_create_identity_provider_mapper:
-                            with patch.object(obj, 'update_identity_provider_mapper', side_effect=update_identity_provider_mapper) \
-                                    as mock_update_identity_provider_mapper:
-                                with patch.object(obj, 'delete_identity_provider_mapper', side_effect=delete_identity_provider_mapper) \
-                                        as mock_delete_identity_provider_mapper:
-                                    with patch.object(obj, 'get_realm_by_id', side_effect=get_realm_by_id) \
-                                            as mock_get_realm_by_id:
-                                        yield mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, \
-                                            
mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, \ - mock_update_identity_provider_mapper, mock_delete_identity_provider_mapper, mock_get_realm_by_id + with patch.object(obj, "get_identity_provider", side_effect=get_identity_provider) as mock_get_identity_provider: + with patch.object( + obj, "create_identity_provider", side_effect=create_identity_provider + ) as mock_create_identity_provider: + with patch.object( + obj, "update_identity_provider", side_effect=update_identity_provider + ) as mock_update_identity_provider: + with patch.object( + obj, "delete_identity_provider", side_effect=delete_identity_provider + ) as mock_delete_identity_provider: + with patch.object( + obj, "get_identity_provider_mappers", side_effect=get_identity_provider_mappers + ) as mock_get_identity_provider_mappers: + with patch.object( + obj, "create_identity_provider_mapper", side_effect=create_identity_provider_mapper + ) as mock_create_identity_provider_mapper: + with patch.object( + obj, "update_identity_provider_mapper", side_effect=update_identity_provider_mapper + ) as mock_update_identity_provider_mapper: + with patch.object( + obj, "delete_identity_provider_mapper", side_effect=delete_identity_provider_mapper + ) as mock_delete_identity_provider_mapper: + with patch.object( + obj, "get_realm_by_id", side_effect=get_realm_by_id + ) as mock_get_realm_by_id: + yield ( + mock_get_identity_provider, + mock_create_identity_provider, + mock_update_identity_provider, + mock_delete_identity_provider, + mock_get_identity_provider_mappers, + mock_create_identity_provider_mapper, + mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper, + mock_get_realm_by_id, + ) def get_response(object_with_future_response, method, get_id_call_count): if callable(object_with_future_response): return object_with_future_response() if isinstance(object_with_future_response, dict): - return get_response( - object_with_future_response[method], method, get_id_call_count) + return get_response(object_with_future_response[method], method, get_id_call_count) if isinstance(object_with_future_response, list): call_number = next(get_id_call_count) - return get_response( - object_with_future_response[call_number], method, get_id_call_count) + return get_response(object_with_future_response[call_number], method, get_id_call_count) return object_with_future_response def build_mocked_request(get_id_user_count, response_dict): def _mocked_requests(*args, **kwargs): url = args[0] - method = kwargs['method'] + method = kwargs["method"] future_response = response_dict.get(url, None) return get_response(future_response, method, get_id_user_count) + return _mocked_requests @@ -85,18 +110,23 @@ def create_wrapper(text_as_string): """Allow to mock many times a call to one address. Without this function, the StringIO is empty for the second call. 
""" + def _create_wrapper(): return StringIO(text_as_string) + return _create_wrapper def mock_good_connection(): token_response = { - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper( + '{"access_token": "alongtoken"}' + ), + } return patch( - 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", side_effect=build_mocked_request(count(), token_response), - autospec=True + autospec=True, ) @@ -109,46 +139,49 @@ def test_create_when_absent(self): """Add a new identity provider""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_client_id': 'admin-cli', - 'validate_certs': True, - 'realm': 'realm-name', - 'alias': 'oidc-idp', - 'display_name': 'OpenID Connect IdP', - 'enabled': True, - 'provider_id': 'oidc', - 'config': { - 'issuer': 'https://idp.example.com', - 'authorizationUrl': 'https://idp.example.com/auth', - 'tokenUrl': 'https://idp.example.com/token', - 'userInfoUrl': 'https://idp.example.com/userinfo', - 'clientAuthMethod': 'client_secret_post', - 'clientId': 'my-client', - 'clientSecret': 'secret', - 'syncMode': "FORCE", + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "auth_client_id": "admin-cli", + "validate_certs": True, + "realm": "realm-name", + "alias": "oidc-idp", + "display_name": "OpenID Connect IdP", + "enabled": True, + "provider_id": "oidc", + "config": { + "issuer": "https://idp.example.com", + "authorizationUrl": "https://idp.example.com/auth", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "syncMode": "FORCE", }, - 'mappers': [{ - 'name': "first_name", - 'identityProviderAlias': "oidc-idp", - 'identityProviderMapper': "oidc-user-attribute-idp-mapper", - 'config': { - 'claim': "first_name", - 'user.attribute': "first_name", - 'syncMode': "INHERIT", - } - }, { - 'name': "last_name", - 'identityProviderAlias': "oidc-idp", - 'identityProviderMapper': "oidc-user-attribute-idp-mapper", - 'config': { - 'claim': "last_name", - 'user.attribute': "last_name", - 'syncMode': "INHERIT", - } - }] + "mappers": [ + { + "name": "first_name", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "config": { + "claim": "first_name", + "user.attribute": "first_name", + "syncMode": "INHERIT", + }, + }, + { + "name": "last_name", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "config": { + "claim": "last_name", + "user.attribute": "last_name", + "syncMode": "INHERIT", + }, + }, + ], } return_value_idp_get = [ None, @@ -164,7 +197,7 @@ def test_create_when_absent(self): "issuer": "https://idp.example.com", "syncMode": "FORCE", "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" + "userInfoUrl": "https://idp.example.com/userinfo", }, "displayName": "OpenID Connect IdP", "enabled": True, @@ -174,37 +207,32 @@ def test_create_when_absent(self): "providerId": "oidc", "storeToken": False, "trustEmail": 
False, - } + }, ] return_value_mappers_get = [ - [{ - "config": { - "claim": "first_name", - "syncMode": "INHERIT", - "user.attribute": "first_name" + [ + { + "config": {"claim": "first_name", "syncMode": "INHERIT", "user.attribute": "first_name"}, + "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "first_name", }, - "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", - "identityProviderAlias": "oidc-idp", - "identityProviderMapper": "oidc-user-attribute-idp-mapper", - "name": "first_name" - }, { - "config": { - "claim": "last_name", - "syncMode": "INHERIT", - "user.attribute": "last_name" + { + "config": {"claim": "last_name", "syncMode": "INHERIT", "user.attribute": "last_name"}, + "id": "f00c61e0-34d9-4bed-82d1-7e45acfefc09", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "last_name", }, - "id": "f00c61e0-34d9-4bed-82d1-7e45acfefc09", - "identityProviderAlias": "oidc-idp", - "identityProviderMapper": "oidc-user-attribute-idp-mapper", - "name": "last_name" - }] + ] ] return_value_realm_get = [ { - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': True, - 'identityProviders': [ + "id": "realm-name", + "realm": "realm-name", + "enabled": True, + "identityProviders": [ { "addReadTokenRoleOnCreate": False, "alias": "oidc-idp", @@ -217,7 +245,7 @@ def test_create_when_absent(self): "issuer": "https://idp.example.com", "syncMode": "FORCE", "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" + "userInfoUrl": "https://idp.example.com/userinfo", }, "displayName": "OpenID Connect IdP", "enabled": True, @@ -228,7 +256,7 @@ def test_create_when_absent(self): "storeToken": False, "trustEmail": False, } - ] + ], }, ] return_value_idp_created = [None] @@ -239,12 +267,23 @@ def test_create_when_absent(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, - create_identity_provider=return_value_idp_created, create_identity_provider_mapper=return_value_mapper_created, - get_realm_by_id=return_value_realm_get) \ - as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, - mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper, mock_get_realm_by_id): + with patch_keycloak_api( + get_identity_provider=return_value_idp_get, + get_identity_provider_mappers=return_value_mappers_get, + create_identity_provider=return_value_idp_created, + create_identity_provider_mapper=return_value_mapper_created, + get_realm_by_id=return_value_realm_get, + ) as ( + mock_get_identity_provider, + mock_create_identity_provider, + mock_update_identity_provider, + mock_delete_identity_provider, + mock_get_identity_provider_mappers, + mock_create_identity_provider_mapper, + mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper, + mock_get_realm_by_id, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -255,61 +294,65 @@ def test_create_when_absent(self): self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 2) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + 
self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_update_when_present(self): """Update existing identity provider""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_client_id': 'admin-cli', - 'validate_certs': True, - 'realm': 'realm-name', - 'alias': 'oidc-idp', - 'display_name': 'OpenID Connect IdP', - 'enabled': True, - 'provider_id': 'oidc', - 'config': { - 'issuer': 'https://idp.example.com', - 'authorizationUrl': 'https://idp.example.com/auth', - 'tokenUrl': 'https://idp.example.com/token', - 'userInfoUrl': 'https://idp.example.com/userinfo', - 'clientAuthMethod': 'client_secret_post', - 'clientId': 'my-client', - 'clientSecret': 'secret', - 'syncMode': "FORCE" + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "auth_client_id": "admin-cli", + "validate_certs": True, + "realm": "realm-name", + "alias": "oidc-idp", + "display_name": "OpenID Connect IdP", + "enabled": True, + "provider_id": "oidc", + "config": { + "issuer": "https://idp.example.com", + "authorizationUrl": "https://idp.example.com/auth", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "syncMode": "FORCE", }, - 'mappers': [{ - 'name': "username", - 'identityProviderAlias': "oidc-idp", - 'identityProviderMapper': "oidc-user-attribute-idp-mapper", - 'config': { - 'claim': "username", - 'user.attribute': "username", - 'syncMode': "INHERIT", - } - }, { - 'name': "first_name", - 'identityProviderAlias': "oidc-idp", - 'identityProviderMapper': "oidc-user-attribute-idp-mapper", - 'config': { - 'claim': "first_name", - 'user.attribute': "first_name", - 'syncMode': "INHERIT", - } - }, { - 'name': "last_name", - 'identityProviderAlias': "oidc-idp", - 'identityProviderMapper': "oidc-user-attribute-idp-mapper", - 'config': { - 'claim': "last_name", - 'user.attribute': "last_name", - 'syncMode': "INHERIT", - } - }] + "mappers": [ + { + "name": "username", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "config": { + "claim": "username", + "user.attribute": "username", + "syncMode": "INHERIT", + }, + }, + { + "name": "first_name", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "config": { + "claim": "first_name", + "user.attribute": "first_name", + "syncMode": "INHERIT", + }, + }, + { + "name": "last_name", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "config": { + "claim": "last_name", + "user.attribute": "last_name", + "syncMode": "INHERIT", + }, + }, + ], } return_value_idp_get = [ { @@ -324,7 +367,7 @@ def test_update_when_present(self): "issuer": "https://idp.example.com", "syncMode": "FORCE", "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" + "userInfoUrl": "https://idp.example.com/userinfo", }, "displayName": "OpenID Connect IdP changeme", "enabled": True, @@ -347,7 +390,7 @@ def test_update_when_present(self): "issuer": "https://idp.example.com", "syncMode": "FORCE", "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" + "userInfoUrl": "https://idp.example.com/userinfo", }, "displayName": "OpenID Connect IdP", 
"enabled": True, @@ -357,131 +400,103 @@ def test_update_when_present(self): "providerId": "oidc", "storeToken": False, "trustEmail": False, - } + }, ] return_value_mappers_get = [ - [{ - 'config': { - 'claim': "username", - 'syncMode': "INHERIT", - 'user.attribute': "username" + [ + { + "config": {"claim": "username", "syncMode": "INHERIT", "user.attribute": "username"}, + "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "username", }, - "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", - 'identityProviderAlias': "oidc-idp", - 'identityProviderMapper': "oidc-user-attribute-idp-mapper", - 'name': "username" - }, { - "config": { - "claim": "first_name_changeme", - "syncMode": "INHERIT", - "user.attribute": "first_name" + { + "config": {"claim": "first_name_changeme", "syncMode": "INHERIT", "user.attribute": "first_name"}, + "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "first_name", }, - "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", - "identityProviderAlias": "oidc-idp", - "identityProviderMapper": "oidc-user-attribute-idp-mapper", - "name": "first_name" - }], - [{ - 'config': { - 'claim': "username", - 'syncMode': "INHERIT", - 'user.attribute': "username" + ], + [ + { + "config": {"claim": "username", "syncMode": "INHERIT", "user.attribute": "username"}, + "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "username", }, - "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", - 'identityProviderAlias': "oidc-idp", - 'identityProviderMapper': "oidc-user-attribute-idp-mapper", - 'name': "username" - }, { - "config": { - "claim": "first_name_changeme", - "syncMode": "INHERIT", - "user.attribute": "first_name" + { + "config": {"claim": "first_name_changeme", "syncMode": "INHERIT", "user.attribute": "first_name"}, + "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "first_name", }, - "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", - "identityProviderAlias": "oidc-idp", - "identityProviderMapper": "oidc-user-attribute-idp-mapper", - "name": "first_name" - }], - [{ - 'config': { - 'claim': "username", - 'syncMode': "INHERIT", - 'user.attribute': "username" + ], + [ + { + "config": {"claim": "username", "syncMode": "INHERIT", "user.attribute": "username"}, + "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "username", }, - "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", - 'identityProviderAlias': "oidc-idp", - 'identityProviderMapper': "oidc-user-attribute-idp-mapper", - 'name': "username" - }, { - "config": { - "claim": "first_name_changeme", - "syncMode": "INHERIT", - "user.attribute": "first_name" + { + "config": {"claim": "first_name_changeme", "syncMode": "INHERIT", "user.attribute": "first_name"}, + "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "first_name", }, - "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", - "identityProviderAlias": "oidc-idp", - "identityProviderMapper": "oidc-user-attribute-idp-mapper", - "name": "first_name" - }], - [{ - 'config': { - 'claim': 
"username", - 'syncMode': "INHERIT", - 'user.attribute': "username" + ], + [ + { + "config": {"claim": "username", "syncMode": "INHERIT", "user.attribute": "username"}, + "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "username", }, - "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", - 'identityProviderAlias': "oidc-idp", - 'identityProviderMapper': "oidc-user-attribute-idp-mapper", - 'name': "username" - }, { - "config": { - "claim": "first_name_changeme", - "syncMode": "INHERIT", - "user.attribute": "first_name" + { + "config": {"claim": "first_name_changeme", "syncMode": "INHERIT", "user.attribute": "first_name"}, + "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "first_name", }, - "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", - "identityProviderAlias": "oidc-idp", - "identityProviderMapper": "oidc-user-attribute-idp-mapper", - "name": "first_name" - }], - [{ - 'config': { - 'claim': "username", - 'syncMode': "INHERIT", - 'user.attribute': "username" + ], + [ + { + "config": {"claim": "username", "syncMode": "INHERIT", "user.attribute": "username"}, + "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "username", }, - "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", - 'identityProviderAlias': "oidc-idp", - 'identityProviderMapper': "oidc-user-attribute-idp-mapper", - 'name': "username" - }, { - "config": { - "claim": "first_name", - "syncMode": "INHERIT", - "user.attribute": "first_name" + { + "config": {"claim": "first_name", "syncMode": "INHERIT", "user.attribute": "first_name"}, + "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "first_name", }, - "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", - "identityProviderAlias": "oidc-idp", - "identityProviderMapper": "oidc-user-attribute-idp-mapper", - "name": "first_name" - }, { - "config": { - "claim": "last_name", - "syncMode": "INHERIT", - "user.attribute": "last_name" + { + "config": {"claim": "last_name", "syncMode": "INHERIT", "user.attribute": "last_name"}, + "id": "f00c61e0-34d9-4bed-82d1-7e45acfefc09", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "last_name", }, - "id": "f00c61e0-34d9-4bed-82d1-7e45acfefc09", - "identityProviderAlias": "oidc-idp", - "identityProviderMapper": "oidc-user-attribute-idp-mapper", - "name": "last_name" - }] + ], ] return_value_realm_get = [ { - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': True, - 'identityProviders': [ + "id": "realm-name", + "realm": "realm-name", + "enabled": True, + "identityProviders": [ { "addReadTokenRoleOnCreate": False, "alias": "oidc-idp", @@ -494,7 +509,7 @@ def test_update_when_present(self): "issuer": "https://idp.example.com", "syncMode": "FORCE", "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" + "userInfoUrl": "https://idp.example.com/userinfo", }, "displayName": "OpenID Connect IdP", "enabled": True, @@ -505,13 +520,13 @@ def test_update_when_present(self): "storeToken": False, "trustEmail": False, } - ] + ], }, { - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': True, - 'identityProviders': [ + "id": "realm-name", + 
"realm": "realm-name", + "enabled": True, + "identityProviders": [ { "addReadTokenRoleOnCreate": False, "alias": "oidc-idp", @@ -524,7 +539,7 @@ def test_update_when_present(self): "issuer": "https://idp.example.com", "syncMode": "FORCE", "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" + "userInfoUrl": "https://idp.example.com/userinfo", }, "displayName": "OpenID Connect IdP", "enabled": True, @@ -535,7 +550,7 @@ def test_update_when_present(self): "storeToken": False, "trustEmail": False, } - ] + ], }, ] return_value_idp_updated = [None] @@ -547,12 +562,24 @@ def test_update_when_present(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, - update_identity_provider=return_value_idp_updated, update_identity_provider_mapper=return_value_mapper_updated, - create_identity_provider_mapper=return_value_mapper_created, get_realm_by_id=return_value_realm_get) \ - as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, - mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper, mock_get_realm_by_id): + with patch_keycloak_api( + get_identity_provider=return_value_idp_get, + get_identity_provider_mappers=return_value_mappers_get, + update_identity_provider=return_value_idp_updated, + update_identity_provider_mapper=return_value_mapper_updated, + create_identity_provider_mapper=return_value_mapper_created, + get_realm_by_id=return_value_realm_get, + ) as ( + mock_get_identity_provider, + mock_create_identity_provider, + mock_update_identity_provider, + mock_delete_identity_provider, + mock_get_identity_provider_mappers, + mock_create_identity_provider_mapper, + mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper, + mock_get_realm_by_id, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -564,17 +591,17 @@ def test_update_when_present(self): self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 1) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_no_change_when_present(self): """Update existing identity provider""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_client_id': 'admin-cli', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "auth_client_id": "admin-cli", "addReadTokenRoleOnCreate": False, "alias": "oidc-idp", "authenticateByDefault": False, @@ -586,7 +613,7 @@ def test_no_change_when_present(self): "issuer": "https://idp.example.com", "syncMode": "FORCE", "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" + "userInfoUrl": "https://idp.example.com/userinfo", }, "displayName": "OpenID Connect IdP changeme", "enabled": True, @@ -595,16 +622,18 @@ def test_no_change_when_present(self): "providerId": "oidc", "storeToken": False, "trustEmail": False, - 'mappers': [{ - 'name': "username", - 'identityProviderAlias': "oidc-idp", - 'identityProviderMapper': 
"oidc-user-attribute-idp-mapper", - 'config': { - 'claim': "username", - 'user.attribute': "username", - 'syncMode': "INHERIT", + "mappers": [ + { + "name": "username", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "config": { + "claim": "username", + "user.attribute": "username", + "syncMode": "INHERIT", + }, } - }] + ], } return_value_idp_get = [ { @@ -619,7 +648,7 @@ def test_no_change_when_present(self): "issuer": "https://idp.example.com", "syncMode": "FORCE", "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" + "userInfoUrl": "https://idp.example.com/userinfo", }, "displayName": "OpenID Connect IdP changeme", "enabled": True, @@ -632,35 +661,31 @@ def test_no_change_when_present(self): }, ] return_value_mappers_get = [ - [{ - 'config': { - 'claim': "username", - 'syncMode': "INHERIT", - 'user.attribute': "username" - }, - "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", - 'identityProviderAlias': "oidc-idp", - 'identityProviderMapper': "oidc-user-attribute-idp-mapper", - 'name': "username" - }], - [{ - 'config': { - 'claim': "username", - 'syncMode': "INHERIT", - 'user.attribute': "username" - }, - "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", - 'identityProviderAlias': "oidc-idp", - 'identityProviderMapper': "oidc-user-attribute-idp-mapper", - 'name': "username" - }] + [ + { + "config": {"claim": "username", "syncMode": "INHERIT", "user.attribute": "username"}, + "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "username", + } + ], + [ + { + "config": {"claim": "username", "syncMode": "INHERIT", "user.attribute": "username"}, + "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "username", + } + ], ] return_value_realm_get = [ { - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': True, - 'identityProviders': [ + "id": "realm-name", + "realm": "realm-name", + "enabled": True, + "identityProviders": [ { "addReadTokenRoleOnCreate": False, "alias": "oidc-idp", @@ -673,7 +698,7 @@ def test_no_change_when_present(self): "issuer": "https://idp.example.com", "syncMode": "FORCE", "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" + "userInfoUrl": "https://idp.example.com/userinfo", }, "displayName": "OpenID Connect IdP", "enabled": True, @@ -684,7 +709,7 @@ def test_no_change_when_present(self): "storeToken": False, "trustEmail": False, } - ] + ], } ] return_value_idp_updated = [None] @@ -696,12 +721,24 @@ def test_no_change_when_present(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, - update_identity_provider=return_value_idp_updated, update_identity_provider_mapper=return_value_mapper_updated, - create_identity_provider_mapper=return_value_mapper_created, get_realm_by_id=return_value_realm_get) \ - as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, - mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper, mock_get_realm_by_id): + with patch_keycloak_api( + get_identity_provider=return_value_idp_get, + 
get_identity_provider_mappers=return_value_mappers_get, + update_identity_provider=return_value_idp_updated, + update_identity_provider_mapper=return_value_mapper_updated, + create_identity_provider_mapper=return_value_mapper_created, + get_realm_by_id=return_value_realm_get, + ) as ( + mock_get_identity_provider, + mock_create_identity_provider, + mock_update_identity_provider, + mock_delete_identity_provider, + mock_get_identity_provider_mappers, + mock_create_identity_provider_mapper, + mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper, + mock_get_realm_by_id, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -713,21 +750,21 @@ def test_no_change_when_present(self): self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_when_absent(self): """Remove an absent identity provider""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_client_id': 'admin-cli', - 'validate_certs': True, - 'realm': 'realm-name', - 'alias': 'oidc-idp', - 'state': 'absent', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "auth_client_id": "admin-cli", + "validate_certs": True, + "realm": "realm-name", + "alias": "oidc-idp", + "state": "absent", } return_value_idp_get = [None] changed = False @@ -736,10 +773,17 @@ def test_delete_when_absent(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_identity_provider=return_value_idp_get) \ - as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, - mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper, mock_get_realm_by_id): + with patch_keycloak_api(get_identity_provider=return_value_idp_get) as ( + mock_get_identity_provider, + mock_create_identity_provider, + mock_update_identity_provider, + mock_delete_identity_provider, + mock_get_identity_provider_mappers, + mock_create_identity_provider_mapper, + mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper, + mock_get_realm_by_id, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -747,21 +791,21 @@ def test_delete_when_absent(self): self.assertEqual(len(mock_delete_identity_provider.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_when_present(self): """Remove an existing identity provider""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_client_id': 'admin-cli', - 'validate_certs': True, - 'realm': 'realm-name', - 'alias': 'oidc-idp', - 'state': 'absent', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "auth_client_id": "admin-cli", + "validate_certs": True, + "realm": "realm-name", + "alias": "oidc-idp", + 
"state": "absent", } return_value_idp_get = [ { @@ -776,7 +820,7 @@ def test_delete_when_present(self): "issuer": "https://idp.example.com", "syncMode": "FORCE", "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" + "userInfoUrl": "https://idp.example.com/userinfo", }, "displayName": "OpenID Connect IdP", "enabled": True, @@ -787,27 +831,25 @@ def test_delete_when_present(self): "storeToken": False, "trustEmail": False, }, - None + None, ] return_value_mappers_get = [ - [{ - "config": { - "claim": "email", - "syncMode": "INHERIT", - "user.attribute": "email" - }, - "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", - "identityProviderAlias": "oidc-idp", - "identityProviderMapper": "oidc-user-attribute-idp-mapper", - "name": "email" - }] + [ + { + "config": {"claim": "email", "syncMode": "INHERIT", "user.attribute": "email"}, + "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "email", + } + ] ] return_value_realm_get = [ { - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': True, - 'identityProviders': [ + "id": "realm-name", + "realm": "realm-name", + "enabled": True, + "identityProviders": [ { "alias": "oidc", "displayName": "", @@ -828,10 +870,10 @@ def test_delete_when_present(self): "authorizationUrl": "https://localhost:8000", "clientAuthMethod": "client_secret_post", "clientSecret": "real_secret", - "guiOrder": "0" - } + "guiOrder": "0", + }, }, - ] + ], }, ] return_value_idp_deleted = [None] @@ -841,11 +883,22 @@ def test_delete_when_present(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, - delete_identity_provider=return_value_idp_deleted, get_realm_by_id=return_value_realm_get) \ - as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, - mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, - mock_delete_identity_provider_mapper, mock_get_realm_by_id): + with patch_keycloak_api( + get_identity_provider=return_value_idp_get, + get_identity_provider_mappers=return_value_mappers_get, + delete_identity_provider=return_value_idp_deleted, + get_realm_by_id=return_value_realm_get, + ) as ( + mock_get_identity_provider, + mock_create_identity_provider, + mock_update_identity_provider, + mock_delete_identity_provider, + mock_get_identity_provider_mappers, + mock_create_identity_provider_mapper, + mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper, + mock_get_realm_by_id, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -855,8 +908,8 @@ def test_delete_when_present(self): self.assertEqual(len(mock_delete_identity_provider.mock_calls), 1) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_keycloak_realm.py b/tests/unit/plugins/modules/test_keycloak_realm.py index 82ac628bfd2..57bbc5e3601 100644 --- a/tests/unit/plugins/modules/test_keycloak_realm.py +++ b/tests/unit/plugins/modules/test_keycloak_realm.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Ansible 
Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -9,7 +8,11 @@ from contextlib import contextmanager from unittest.mock import patch -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules import keycloak_realm @@ -34,10 +37,10 @@ def patch_keycloak_api(get_realm_by_id, create_realm=None, update_realm=None, de """ obj = keycloak_realm.KeycloakAPI - with patch.object(obj, 'get_realm_by_id', side_effect=get_realm_by_id) as mock_get_realm_by_id: - with patch.object(obj, 'create_realm', side_effect=create_realm) as mock_create_realm: - with patch.object(obj, 'update_realm', side_effect=update_realm) as mock_update_realm: - with patch.object(obj, 'delete_realm', side_effect=delete_realm) as mock_delete_realm: + with patch.object(obj, "get_realm_by_id", side_effect=get_realm_by_id) as mock_get_realm_by_id: + with patch.object(obj, "create_realm", side_effect=create_realm) as mock_create_realm: + with patch.object(obj, "update_realm", side_effect=update_realm) as mock_update_realm: + with patch.object(obj, "delete_realm", side_effect=delete_realm) as mock_delete_realm: yield mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm @@ -45,21 +48,20 @@ def get_response(object_with_future_response, method, get_id_call_count): if callable(object_with_future_response): return object_with_future_response() if isinstance(object_with_future_response, dict): - return get_response( - object_with_future_response[method], method, get_id_call_count) + return get_response(object_with_future_response[method], method, get_id_call_count) if isinstance(object_with_future_response, list): call_number = next(get_id_call_count) - return get_response( - object_with_future_response[call_number], method, get_id_call_count) + return get_response(object_with_future_response[call_number], method, get_id_call_count) return object_with_future_response def build_mocked_request(get_id_user_count, response_dict): def _mocked_requests(*args, **kwargs): url = args[0] - method = kwargs['method'] + method = kwargs["method"] future_response = response_dict.get(url, None) return get_response(future_response, method, get_id_user_count) + return _mocked_requests @@ -67,18 +69,23 @@ def create_wrapper(text_as_string): """Allow to mock many times a call to one address. Without this function, the StringIO is empty for the second call. 
""" + def _create_wrapper(): return StringIO(text_as_string) + return _create_wrapper def mock_good_connection(): token_response = { - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper( + '{"access_token": "alongtoken"}' + ), + } return patch( - 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", side_effect=build_mocked_request(count(), token_response), - autospec=True + autospec=True, ) @@ -91,31 +98,30 @@ def test_create_when_absent(self): """Add a new realm""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_client_id': 'admin-cli', - 'validate_certs': True, - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': True + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "auth_client_id": "admin-cli", + "validate_certs": True, + "id": "realm-name", + "realm": "realm-name", + "enabled": True, } - return_value_absent = [None, {'id': 'realm-name', 'realm': 'realm-name', 'enabled': True}] - return_value_created = [{ - 'code': 201, - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': True - }] + return_value_absent = [None, {"id": "realm-name", "realm": "realm-name", "enabled": True}] + return_value_created = [{"code": 201, "id": "realm-name", "realm": "realm-name", "enabled": True}] changed = True # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_realm_by_id=return_value_absent, create_realm=return_value_created) \ - as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): + with patch_keycloak_api(get_realm_by_id=return_value_absent, create_realm=return_value_created) as ( + mock_get_realm_by_id, + mock_create_realm, + mock_update_realm, + mock_delete_realm, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -124,48 +130,39 @@ def test_create_when_absent(self): self.assertEqual(len(mock_update_realm.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_create_when_present_with_change(self): """Update with change a realm""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_client_id': 'admin-cli', - 'validate_certs': True, - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': False + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "auth_client_id": "admin-cli", + "validate_certs": True, + "id": "realm-name", + "realm": "realm-name", + "enabled": False, } return_value_absent = [ - { - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': True - }, - { - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': False - } + {"id": "realm-name", "realm": "realm-name", "enabled": True}, + {"id": "realm-name", "realm": "realm-name", "enabled": False}, ] - return_value_updated = [{ - 'code': 201, - 'id': 'realm-name', 
- 'realm': 'realm-name', - 'enabled': False - }] + return_value_updated = [{"code": 201, "id": "realm-name", "realm": "realm-name", "enabled": False}] changed = True # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_realm_by_id=return_value_absent, update_realm=return_value_updated) \ - as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): + with patch_keycloak_api(get_realm_by_id=return_value_absent, update_realm=return_value_updated) as ( + mock_get_realm_by_id, + mock_create_realm, + mock_update_realm, + mock_delete_realm, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -174,48 +171,39 @@ def test_create_when_present_with_change(self): self.assertEqual(len(mock_update_realm.mock_calls), 1) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_create_when_present_no_change(self): """Update without change a realm""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_client_id': 'admin-cli', - 'validate_certs': True, - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': True + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "auth_client_id": "admin-cli", + "validate_certs": True, + "id": "realm-name", + "realm": "realm-name", + "enabled": True, } return_value_absent = [ - { - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': True - }, - { - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': True - } + {"id": "realm-name", "realm": "realm-name", "enabled": True}, + {"id": "realm-name", "realm": "realm-name", "enabled": True}, ] - return_value_updated = [{ - 'code': 201, - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': True - }] + return_value_updated = [{"code": 201, "id": "realm-name", "realm": "realm-name", "enabled": True}] changed = False # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_realm_by_id=return_value_absent, update_realm=return_value_updated) \ - as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): + with patch_keycloak_api(get_realm_by_id=return_value_absent, update_realm=return_value_updated) as ( + mock_get_realm_by_id, + mock_create_realm, + mock_update_realm, + mock_delete_realm, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -224,22 +212,22 @@ def test_create_when_present_no_change(self): self.assertEqual(len(mock_update_realm.mock_calls), 1) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_when_absent(self): """Remove an absent realm""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_client_id': 'admin-cli', - 'validate_certs': True, - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': True, - 'state': 'absent' + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "auth_client_id": "admin-cli", + 
"validate_certs": True, + "id": "realm-name", + "realm": "realm-name", + "enabled": True, + "state": "absent", } return_value_absent = [None] return_value_deleted = [None] @@ -249,8 +237,12 @@ def test_delete_when_absent(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_realm_by_id=return_value_absent, delete_realm=return_value_deleted) \ - as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): + with patch_keycloak_api(get_realm_by_id=return_value_absent, delete_realm=return_value_deleted) as ( + mock_get_realm_by_id, + mock_create_realm, + mock_update_realm, + mock_delete_realm, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -258,28 +250,24 @@ def test_delete_when_absent(self): self.assertEqual(len(mock_delete_realm.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_when_present(self): """Remove a present realm""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_password': 'admin', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_client_id': 'admin-cli', - 'validate_certs': True, - 'id': 'realm-name', - 'realm': 'realm-name', - 'enabled': True, - 'state': 'absent' + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_password": "admin", + "auth_realm": "master", + "auth_username": "admin", + "auth_client_id": "admin-cli", + "validate_certs": True, + "id": "realm-name", + "realm": "realm-name", + "enabled": True, + "state": "absent", } - return_value_absent = [ - { - 'id': 'realm-name', - 'realm': 'realm-name' - }] + return_value_absent = [{"id": "realm-name", "realm": "realm-name"}] return_value_deleted = [None] changed = True @@ -287,8 +275,12 @@ def test_delete_when_present(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_realm_by_id=return_value_absent, delete_realm=return_value_deleted) \ - as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): + with patch_keycloak_api(get_realm_by_id=return_value_absent, delete_realm=return_value_deleted) as ( + mock_get_realm_by_id, + mock_create_realm, + mock_update_realm, + mock_delete_realm, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -296,8 +288,8 @@ def test_delete_when_present(self): self.assertEqual(len(mock_delete_realm.mock_calls), 1) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_keycloak_realm_info.py b/tests/unit/plugins/modules/test_keycloak_realm_info.py index 59e2bd94b92..b2fb9fdc844 100644 --- a/tests/unit/plugins/modules/test_keycloak_realm_info.py +++ b/tests/unit/plugins/modules/test_keycloak_realm_info.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -9,7 +8,11 @@ from contextlib import contextmanager from unittest.mock import patch -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, 
ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules import keycloak_realm_info @@ -34,7 +37,7 @@ def patch_keycloak_api(get_realm_info_by_id): """ obj = keycloak_realm_info.KeycloakAPI - with patch.object(obj, 'get_realm_info_by_id', side_effect=get_realm_info_by_id) as mock_get_realm_info_by_id: + with patch.object(obj, "get_realm_info_by_id", side_effect=get_realm_info_by_id) as mock_get_realm_info_by_id: yield mock_get_realm_info_by_id @@ -42,21 +45,20 @@ def get_response(object_with_future_response, method, get_id_call_count): if callable(object_with_future_response): return object_with_future_response() if isinstance(object_with_future_response, dict): - return get_response( - object_with_future_response[method], method, get_id_call_count) + return get_response(object_with_future_response[method], method, get_id_call_count) if isinstance(object_with_future_response, list): call_number = next(get_id_call_count) - return get_response( - object_with_future_response[call_number], method, get_id_call_count) + return get_response(object_with_future_response[call_number], method, get_id_call_count) return object_with_future_response def build_mocked_request(get_id_user_count, response_dict): def _mocked_requests(*args, **kwargs): url = args[0] - method = kwargs['method'] + method = kwargs["method"] future_response = response_dict.get(url, None) return get_response(future_response, method, get_id_user_count) + return _mocked_requests @@ -64,18 +66,23 @@ def create_wrapper(text_as_string): """Allow to mock many times a call to one address. Without this function, the StringIO is empty for the second call. 
""" + def _create_wrapper(): return StringIO(text_as_string) + return _create_wrapper def mock_good_connection(): token_response = { - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper( + '{"access_token": "alongtoken"}' + ), + } return patch( - 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", side_effect=build_mocked_request(count(), token_response), - autospec=True + autospec=True, ) @@ -88,8 +95,8 @@ def test_get_public_info(self): """Get realm public info""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'realm': 'my-realm', + "auth_keycloak_url": "http://keycloak.url/auth", + "realm": "my-realm", } return_value = [ None, @@ -99,20 +106,19 @@ def test_get_public_info(self): "token-service": "https://auth.mock.com/auth/realms/my-realm/protocol/openid-connect", "account-service": "https://auth.mock.com/auth/realms/my-realm/account", "tokens-not-before": 0, - } + }, ] # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_realm_info_by_id=return_value) \ - as (mock_get_realm_info_by_id): + with patch_keycloak_api(get_realm_info_by_id=return_value) as (mock_get_realm_info_by_id): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() self.assertEqual(len(mock_get_realm_info_by_id.mock_calls), 1) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_keycloak_realm_keys.py b/tests/unit/plugins/modules/test_keycloak_realm_keys.py index 9f9ff750c27..7e5e88aef06 100644 --- a/tests/unit/plugins/modules/test_keycloak_realm_keys.py +++ b/tests/unit/plugins/modules/test_keycloak_realm_keys.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -9,7 +8,11 @@ from contextlib import contextmanager from unittest.mock import patch -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules import keycloak_realm_key @@ -18,43 +21,44 @@ @contextmanager -def patch_keycloak_api(get_components=None, get_component=None, create_component=None, update_component=None, delete_component=None): - """Mock context manager for patching the methods in KeycloakAPI - """ +def patch_keycloak_api( + get_components=None, get_component=None, create_component=None, update_component=None, delete_component=None +): + """Mock context manager for patching the methods in KeycloakAPI""" obj = keycloak_realm_key.KeycloakAPI - with patch.object(obj, 'get_components', side_effect=get_components) \ - as mock_get_components: - with patch.object(obj, 'get_component', side_effect=get_component) \ - as mock_get_component: - with patch.object(obj, 'create_component', side_effect=create_component) \ - as mock_create_component: - with patch.object(obj, 'update_component', side_effect=update_component) \ - as 
mock_update_component: - with patch.object(obj, 'delete_component', side_effect=delete_component) \ - as mock_delete_component: - yield mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component + with patch.object(obj, "get_components", side_effect=get_components) as mock_get_components: + with patch.object(obj, "get_component", side_effect=get_component) as mock_get_component: + with patch.object(obj, "create_component", side_effect=create_component) as mock_create_component: + with patch.object(obj, "update_component", side_effect=update_component) as mock_update_component: + with patch.object(obj, "delete_component", side_effect=delete_component) as mock_delete_component: + yield ( + mock_get_components, + mock_get_component, + mock_create_component, + mock_update_component, + mock_delete_component, + ) def get_response(object_with_future_response, method, get_id_call_count): if callable(object_with_future_response): return object_with_future_response() if isinstance(object_with_future_response, dict): - return get_response( - object_with_future_response[method], method, get_id_call_count) + return get_response(object_with_future_response[method], method, get_id_call_count) if isinstance(object_with_future_response, list): call_number = next(get_id_call_count) - return get_response( - object_with_future_response[call_number], method, get_id_call_count) + return get_response(object_with_future_response[call_number], method, get_id_call_count) return object_with_future_response def build_mocked_request(get_id_user_count, response_dict): def _mocked_requests(*args, **kwargs): url = args[0] - method = kwargs['method'] + method = kwargs["method"] future_response = response_dict.get(url, None) return get_response(future_response, method, get_id_user_count) + return _mocked_requests @@ -62,18 +66,23 @@ def create_wrapper(text_as_string): """Allow to mock many times a call to one address. Without this function, the StringIO is empty for the second call. 
""" + def _create_wrapper(): return StringIO(text_as_string) + return _create_wrapper def mock_good_connection(): token_response = { - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper( + '{"access_token": "alongtoken"}' + ), + } return patch( - 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", side_effect=build_mocked_request(count(), token_response), - autospec=True + autospec=True, ) @@ -86,20 +95,20 @@ def test_create_when_absent(self): """Add a new realm key""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'parent_id': 'realm-name', - 'name': 'testkey', - 'state': 'present', - 'provider_id': 'rsa', - 'config': { - 'priority': 0, - 'enabled': True, - 'private_key': 'privatekey', - 'algorithm': 'RS256', - 'certificate': 'foo', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_realm": "master", + "auth_username": "admin", + "auth_password": "admin", + "parent_id": "realm-name", + "name": "testkey", + "state": "present", + "provider_id": "rsa", + "config": { + "priority": 0, + "enabled": True, + "private_key": "privatekey", + "algorithm": "RS256", + "certificate": "foo", }, } return_value_component_create = [ @@ -109,39 +118,32 @@ def test_create_when_absent(self): "providerId": "rsa", "parentId": "90c8fef9-15f8-4d5b-8b22-44e2e1cdcd09", "config": { - "privateKey": [ - "**********" - ], - "certificate": [ - "foo" - ], - "active": [ - "true" - ], - "priority": [ - "122" - ], - "enabled": [ - "true" - ], - "algorithm": [ - "RS256" - ] - } + "privateKey": ["**********"], + "certificate": ["foo"], + "active": ["true"], + "priority": ["122"], + "enabled": ["true"], + "algorithm": ["RS256"], + }, } ] # get before_comp, get default_mapper, get after_mapper - return_value_components_get = [ - [], [], [] - ] + return_value_components_get = [[], [], []] changed = True # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \ - as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with patch_keycloak_api( + get_components=return_value_components_get, create_component=return_value_component_create + ) as ( + mock_get_components, + mock_get_component, + mock_create_component, + mock_update_component, + mock_delete_component, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -152,46 +154,48 @@ def test_create_when_absent(self): self.assertEqual(len(mock_delete_component.mock_calls), 0) # must not contain parent_id - mock_create_component.assert_called_once_with({ - 'name': 'testkey', - 'providerId': 'rsa', - 'providerType': 'org.keycloak.keys.KeyProvider', - 'config': { - 'priority': ['0'], - 'enabled': ['true'], - 'privateKey': ['privatekey'], - 'algorithm': ['RS256'], - 'certificate': ['foo'], - 'active': ['true'], + mock_create_component.assert_called_once_with( + { + "name": "testkey", + "providerId": "rsa", + "providerType": "org.keycloak.keys.KeyProvider", + "config": { + "priority": ["0"], + "enabled": ["true"], + "privateKey": ["privatekey"], + 
"algorithm": ["RS256"], + "certificate": ["foo"], + "active": ["true"], + }, }, - }, 'realm-name') + "realm-name", + ) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_create_when_present(self): """Update existing realm key""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'parent_id': 'realm-name', - 'name': 'testkey', - 'state': 'present', - 'provider_id': 'rsa', - 'config': { - 'priority': 0, - 'enabled': True, - 'private_key': 'privatekey', - 'algorithm': 'RS256', - 'certificate': 'foo', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_realm": "master", + "auth_username": "admin", + "auth_password": "admin", + "parent_id": "realm-name", + "name": "testkey", + "state": "present", + "provider_id": "rsa", + "config": { + "priority": 0, + "enabled": True, + "private_key": "privatekey", + "algorithm": "RS256", + "certificate": "foo", }, } return_value_components_get = [ [ - { "id": "c1a957aa-3df0-4f70-9418-44202bf4ae1f", "name": "testkey", @@ -199,42 +203,34 @@ def test_create_when_present(self): "providerType": "org.keycloak.keys.KeyProvider", "parentId": "90c8fef9-15f8-4d5b-8b22-44e2e1cdcd09", "config": { - "privateKey": [ - "**********" - ], - "certificate": [ - "foo" - ], - "active": [ - "true" - ], - "priority": [ - "122" - ], - "enabled": [ - "true" - ], - "algorithm": [ - "RS256" - ] - } + "privateKey": ["**********"], + "certificate": ["foo"], + "active": ["true"], + "priority": ["122"], + "enabled": ["true"], + "algorithm": ["RS256"], + }, }, ], [], - [] - ] - return_value_component_update = [ - None + [], ] + return_value_component_update = [None] changed = True # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, - update_component=return_value_component_update) \ - as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with patch_keycloak_api( + get_components=return_value_components_get, update_component=return_value_component_update + ) as ( + mock_get_components, + mock_get_component, + mock_create_component, + mock_update_component, + mock_delete_component, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -245,39 +241,42 @@ def test_create_when_present(self): self.assertEqual(len(mock_delete_component.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_when_absent(self): """Remove an absent realm key""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'parent_id': 'realm-name', - 'name': 'testkey', - 'state': 'absent', - 'provider_id': 'rsa', - 'config': { - 'priority': 0, - 'enabled': True, - 'private_key': 'privatekey', - 'algorithm': 'RS256', - 'certificate': 'foo', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_realm": "master", + "auth_username": "admin", + "auth_password": "admin", + "parent_id": "realm-name", + "name": "testkey", + "state": "absent", + "provider_id": "rsa", + "config": { + "priority": 0, + "enabled": True, + 
"private_key": "privatekey", + "algorithm": "RS256", + "certificate": "foo", }, } - return_value_components_get = [ - [] - ] + return_value_components_get = [[]] changed = False # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get) \ - as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with patch_keycloak_api(get_components=return_value_components_get) as ( + mock_get_components, + mock_get_component, + mock_create_component, + mock_update_component, + mock_delete_component, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -288,32 +287,31 @@ def test_delete_when_absent(self): self.assertEqual(len(mock_delete_component.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_when_present(self): """Remove an existing realm key""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'parent_id': 'realm-name', - 'name': 'testkey', - 'state': 'absent', - 'provider_id': 'rsa', - 'config': { - 'priority': 0, - 'enabled': True, - 'private_key': 'privatekey', - 'algorithm': 'RS256', - 'certificate': 'foo', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_realm": "master", + "auth_username": "admin", + "auth_password": "admin", + "parent_id": "realm-name", + "name": "testkey", + "state": "absent", + "provider_id": "rsa", + "config": { + "priority": 0, + "enabled": True, + "private_key": "privatekey", + "algorithm": "RS256", + "certificate": "foo", }, } return_value_components_get = [ [ - { "id": "c1a957aa-3df0-4f70-9418-44202bf4ae1f", "name": "testkey", @@ -321,41 +319,34 @@ def test_delete_when_present(self): "providerType": "org.keycloak.keys.KeyProvider", "parentId": "90c8fef9-15f8-4d5b-8b22-44e2e1cdcd09", "config": { - "privateKey": [ - "**********" - ], - "certificate": [ - "foo" - ], - "active": [ - "true" - ], - "priority": [ - "122" - ], - "enabled": [ - "true" - ], - "algorithm": [ - "RS256" - ] - } + "privateKey": ["**********"], + "certificate": ["foo"], + "active": ["true"], + "priority": ["122"], + "enabled": ["true"], + "algorithm": ["RS256"], + }, }, ], [], - [] - ] - return_value_component_delete = [ - None + [], ] + return_value_component_delete = [None] changed = True # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, delete_component=return_value_component_delete) \ - as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with patch_keycloak_api( + get_components=return_value_components_get, delete_component=return_value_component_delete + ) as ( + mock_get_components, + mock_get_component, + mock_create_component, + mock_update_component, + mock_delete_component, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -366,8 +357,8 @@ def test_delete_when_present(self): self.assertEqual(len(mock_delete_component.mock_calls), 1) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) -if 
__name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_keycloak_realm_keys_metadata_info.py b/tests/unit/plugins/modules/test_keycloak_realm_keys_metadata_info.py index ac1b6980e21..48ac4a730d3 100644 --- a/tests/unit/plugins/modules/test_keycloak_realm_keys_metadata_info.py +++ b/tests/unit/plugins/modules/test_keycloak_realm_keys_metadata_info.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -11,10 +10,12 @@ from itertools import count from unittest.mock import patch -from ansible_collections.community.general.plugins.modules import \ - keycloak_realm_keys_metadata_info +from ansible_collections.community.general.plugins.modules import keycloak_realm_keys_metadata_info from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( - AnsibleExitJson, ModuleTestCase, set_module_args) + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) @contextmanager @@ -42,14 +43,10 @@ def get_response(object_with_future_response, method, get_id_call_count): if callable(object_with_future_response): return object_with_future_response() if isinstance(object_with_future_response, dict): - return get_response( - object_with_future_response[method], method, get_id_call_count - ) + return get_response(object_with_future_response[method], method, get_id_call_count) if isinstance(object_with_future_response, list): call_number = next(get_id_call_count) - return get_response( - object_with_future_response[call_number], method, get_id_call_count - ) + return get_response(object_with_future_response[call_number], method, get_id_call_count) return object_with_future_response @@ -159,17 +156,13 @@ def test_get_public_info(self): with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(side_effect=return_value) as ( - mock_get_realm_keys_metadata_by_id - ): + with patch_keycloak_api(side_effect=return_value) as (mock_get_realm_keys_metadata_by_id): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() result = exec_info.exception.args[0] self.assertIs(result["changed"], False) - self.assertEqual( - result["msg"], "Get realm keys metadata successful for ID my-realm" - ) + self.assertEqual(result["msg"], "Get realm keys metadata successful for ID my-realm") self.assertEqual(result["keys_metadata"], return_value[0]) self.assertEqual(len(mock_get_realm_keys_metadata_by_id.mock_calls), 1) diff --git a/tests/unit/plugins/modules/test_keycloak_role.py b/tests/unit/plugins/modules/test_keycloak_role.py index 6a13346caf2..dad95ba0c59 100644 --- a/tests/unit/plugins/modules/test_keycloak_role.py +++ b/tests/unit/plugins/modules/test_keycloak_role.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -9,7 +8,11 @@ from contextlib import contextmanager from unittest.mock import patch -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules import 
diff --git a/tests/unit/plugins/modules/test_keycloak_role.py b/tests/unit/plugins/modules/test_keycloak_role.py
index 6a13346caf2..dad95ba0c59 100644
--- a/tests/unit/plugins/modules/test_keycloak_role.py
+++ b/tests/unit/plugins/modules/test_keycloak_role.py
@@ -1,4 +1,3 @@
-
 # Copyright (c) 2021, Ansible Project
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
@@ -9,7 +8,11 @@
 from contextlib import contextmanager
 from unittest.mock import patch

-from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
+    AnsibleExitJson,
+    ModuleTestCase,
+    set_module_args,
+)

 from ansible_collections.community.general.plugins.modules import keycloak_role

@@ -18,9 +21,18 @@


 @contextmanager
-def patch_keycloak_api(get_realm_role=None, create_realm_role=None, update_realm_role=None, delete_realm_role=None,
-                       get_client_role=None, create_client_role=None, update_client_role=None, delete_client_role=None,
-                       get_client_by_id=None, get_role_composites=None):
+def patch_keycloak_api(
+    get_realm_role=None,
+    create_realm_role=None,
+    update_realm_role=None,
+    delete_realm_role=None,
+    get_client_role=None,
+    create_client_role=None,
+    update_client_role=None,
+    delete_client_role=None,
+    get_client_by_id=None,
+    get_role_composites=None,
+):
     """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server

     Patches the `login` and `_post_json` methods
@@ -36,40 +48,58 @@ def patch_keycloak_api(get_realm_role=None, create_realm_role=None, update_realm
     """

     obj = keycloak_role.KeycloakAPI
-    with patch.object(obj, 'get_realm_role', side_effect=get_realm_role) as mock_get_realm_role:
-        with patch.object(obj, 'create_realm_role', side_effect=create_realm_role) as mock_create_realm_role:
-            with patch.object(obj, 'update_realm_role', side_effect=update_realm_role) as mock_update_realm_role:
-                with patch.object(obj, 'delete_realm_role', side_effect=delete_realm_role) as mock_delete_realm_role:
-                    with patch.object(obj, 'get_client_role', side_effect=get_client_role) as mock_get_client_role:
-                        with patch.object(obj, 'create_client_role', side_effect=create_client_role) as mock_create_client_role:
-                            with patch.object(obj, 'update_client_role', side_effect=update_client_role) as mock_update_client_role:
-                                with patch.object(obj, 'delete_client_role', side_effect=delete_client_role) as mock_delete_client_role:
-                                    with patch.object(obj, 'get_client_by_id', side_effect=get_client_by_id) as mock_get_client_by_id:
-                                        with patch.object(obj, 'get_role_composites', side_effect=get_role_composites) as mock_get_role_composites:
-                                            yield mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role, \
-                                                mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role, \
-                                                mock_get_client_by_id, mock_get_role_composites
+    with patch.object(obj, "get_realm_role", side_effect=get_realm_role) as mock_get_realm_role:
+        with patch.object(obj, "create_realm_role", side_effect=create_realm_role) as mock_create_realm_role:
+            with patch.object(obj, "update_realm_role", side_effect=update_realm_role) as mock_update_realm_role:
+                with patch.object(obj, "delete_realm_role", side_effect=delete_realm_role) as mock_delete_realm_role:
+                    with patch.object(obj, "get_client_role", side_effect=get_client_role) as mock_get_client_role:
+                        with patch.object(
+                            obj, "create_client_role", side_effect=create_client_role
+                        ) as mock_create_client_role:
+                            with patch.object(
+                                obj, "update_client_role", side_effect=update_client_role
+                            ) as mock_update_client_role:
+                                with patch.object(
+                                    obj, "delete_client_role", side_effect=delete_client_role
+                                ) as mock_delete_client_role:
+                                    with patch.object(
+                                        obj, "get_client_by_id", side_effect=get_client_by_id
+                                    ) as mock_get_client_by_id:
+                                        with patch.object(
+                                            obj, "get_role_composites", side_effect=get_role_composites
+                                        ) as mock_get_role_composites:
+                                            yield (
+                                                mock_get_realm_role,
+                                                mock_create_realm_role,
+                                                mock_update_realm_role,
+                                                mock_delete_realm_role,
+                                                mock_get_client_role,
+                                                mock_create_client_role,
+                                                mock_update_client_role,
+                                                mock_delete_client_role,
+                                                mock_get_client_by_id,
+                                                mock_get_role_composites,
+                                            )


 def get_response(object_with_future_response, method, get_id_call_count):
     if callable(object_with_future_response):
         return object_with_future_response()
     if isinstance(object_with_future_response, dict):
-        return get_response(
-            object_with_future_response[method], method, get_id_call_count)
+        return get_response(object_with_future_response[method], method, get_id_call_count)
     if isinstance(object_with_future_response, list):
         call_number = next(get_id_call_count)
-        return get_response(
-            object_with_future_response[call_number], method, get_id_call_count)
+        return get_response(object_with_future_response[call_number], method, get_id_call_count)
     return object_with_future_response


 def build_mocked_request(get_id_user_count, response_dict):
     def _mocked_requests(*args, **kwargs):
         url = args[0]
-        method = kwargs['method']
+        method = kwargs["method"]
         future_response = response_dict.get(url, None)
         return get_response(future_response, method, get_id_user_count)
+
     return _mocked_requests

@@ -77,18 +107,23 @@ def create_wrapper(text_as_string):
     """Allow to mock many times a call to one address.
     Without this function, the StringIO is empty for the second call.
     """
+
     def _create_wrapper():
         return StringIO(text_as_string)
+
     return _create_wrapper


 def mock_good_connection():
     token_response = {
-        'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), }
+        "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper(
+            '{"access_token": "alongtoken"}'
+        ),
+    }
     return patch(
-        'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+        "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url",
         side_effect=build_mocked_request(count(), token_response),
-        autospec=True
+        autospec=True,
     )

@@ -101,15 +136,15 @@ def test_create_when_absent(self):
         """Add a new realm role"""

         module_args = {
-            'auth_keycloak_url': 'http://keycloak.url/auth',
-            'auth_password': 'admin',
-            'auth_realm': 'master',
-            'auth_username': 'admin',
-            'auth_client_id': 'admin-cli',
-            'validate_certs': True,
-            'realm': 'realm-name',
-            'name': 'role-name',
-            'description': 'role-description',
+            "auth_keycloak_url": "http://keycloak.url/auth",
+            "auth_password": "admin",
+            "auth_realm": "master",
+            "auth_username": "admin",
+            "auth_client_id": "admin-cli",
+            "validate_certs": True,
+            "realm": "realm-name",
+            "name": "role-name",
+            "description": "role-description",
         }
         return_value_absent = [
             None,
@@ -121,7 +156,7 @@ def test_create_when_absent(self):
                 "description": "role-description",
                 "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966",
                 "name": "role-name",
-            }
+            },
         ]
         return_value_created = [None]
         changed = True
@@ -130,10 +165,18 @@ def test_create_when_absent(self):

         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_realm_role=return_value_absent, create_realm_role=return_value_created) \
-                        as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role,
-                            mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role,
-                            mock_get_client_by_client_id, mock_get_role_composites):
+                with patch_keycloak_api(get_realm_role=return_value_absent, create_realm_role=return_value_created) as (
+                    mock_get_realm_role,
+                    mock_create_realm_role,
+                    mock_update_realm_role,
+                    mock_delete_realm_role,
+                    mock_get_client_role,
+                    mock_create_client_role,
+                    mock_update_client_role,
+                    mock_delete_client_role,
+                    mock_get_client_by_client_id,
+                    mock_get_role_composites,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -142,21 +185,21 @@ def test_create_when_absent(self):
         self.assertEqual(len(mock_update_realm_role.mock_calls), 0)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)

     def test_create_when_present_with_change(self):
         """Update with change a realm role"""

         module_args = {
-            'auth_keycloak_url': 'http://keycloak.url/auth',
-            'auth_password': 'admin',
-            'auth_realm': 'master',
-            'auth_username': 'admin',
-            'auth_client_id': 'admin-cli',
-            'validate_certs': True,
-            'realm': 'realm-name',
-            'name': 'role-name',
-            'description': 'new-role-description',
+            "auth_keycloak_url": "http://keycloak.url/auth",
+            "auth_password": "admin",
+            "auth_realm": "master",
+            "auth_username": "admin",
+            "auth_client_id": "admin-cli",
+            "validate_certs": True,
+            "realm": "realm-name",
+            "name": "role-name",
+            "description": "new-role-description",
         }
         return_value_present = [
             {
@@ -176,7 +219,7 @@ def test_create_when_present_with_change(self):
                 "description": "new-role-description",
                 "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966",
                 "name": "role-name",
-            }
+            },
         ]
         return_value_updated = [None]
         changed = True
@@ -185,10 +228,20 @@ def test_create_when_present_with_change(self):

         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_realm_role=return_value_present, update_realm_role=return_value_updated) \
-                        as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role,
-                            mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role,
-                            mock_get_client_by_client_id, mock_get_role_composites):
+                with patch_keycloak_api(
+                    get_realm_role=return_value_present, update_realm_role=return_value_updated
+                ) as (
+                    mock_get_realm_role,
+                    mock_create_realm_role,
+                    mock_update_realm_role,
+                    mock_delete_realm_role,
+                    mock_get_client_role,
+                    mock_create_client_role,
+                    mock_update_client_role,
+                    mock_delete_client_role,
+                    mock_get_client_by_client_id,
+                    mock_get_role_composites,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -197,21 +250,21 @@ def test_create_when_present_with_change(self):
         self.assertEqual(len(mock_update_realm_role.mock_calls), 1)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)

     def test_create_when_present_no_change(self):
         """Update without change a realm role"""

         module_args = {
-            'auth_keycloak_url': 'http://keycloak.url/auth',
-            'auth_password': 'admin',
-            'auth_realm': 'master',
-            'auth_username': 'admin',
-            'auth_client_id': 'admin-cli',
-            'validate_certs': True,
-            'realm': 'realm-name',
-            'name': 'role-name',
-            'description': 'role-description',
+            "auth_keycloak_url": "http://keycloak.url/auth",
+            "auth_password": "admin",
+            "auth_realm": "master",
+            "auth_username": "admin",
+            "auth_client_id": "admin-cli",
+            "validate_certs": True,
+            "realm": "realm-name",
+            "name": "role-name",
+            "description": "role-description",
         }
         return_value_present = [
             {
@@ -231,7 +284,7 @@ def test_create_when_present_no_change(self):
                 "description": "role-description",
                 "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966",
                 "name": "role-name",
-            }
+            },
         ]
         return_value_updated = [None]
         changed = False
@@ -240,10 +293,20 @@ def test_create_when_present_no_change(self):

         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_realm_role=return_value_present, update_realm_role=return_value_updated) \
-                        as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role,
-                            mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role,
-                            mock_get_client_by_client_id, mock_get_role_composites):
+                with patch_keycloak_api(
+                    get_realm_role=return_value_present, update_realm_role=return_value_updated
+                ) as (
+                    mock_get_realm_role,
+                    mock_create_realm_role,
+                    mock_update_realm_role,
+                    mock_delete_realm_role,
+                    mock_get_client_role,
+                    mock_create_client_role,
+                    mock_update_client_role,
+                    mock_delete_client_role,
+                    mock_get_client_by_client_id,
+                    mock_get_role_composites,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -252,32 +315,23 @@ def test_create_when_present_no_change(self):
         self.assertEqual(len(mock_update_realm_role.mock_calls), 0)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)

     def test_create_with_composites_when_present_no_change(self):
         """Update without change a realm role"""

         module_args = {
-            'auth_keycloak_url': 'http://keycloak.url/auth',
-            'auth_password': 'admin',
-            'auth_realm': 'master',
-            'auth_username': 'admin',
-            'auth_client_id': 'admin-cli',
-            'validate_certs': True,
-            'realm': 'realm-name',
-            'name': 'role-name',
-            'description': 'role-description',
-            'composite': True,
-            'composites': [
-                {
-                    'client_id': 'client_1',
-                    'name': 'client-role1'
-                },
-                {
-                    'name': 'realm-role-1'
-                }
-            ]
-
+            "auth_keycloak_url": "http://keycloak.url/auth",
+            "auth_password": "admin",
+            "auth_realm": "master",
+            "auth_username": "admin",
+            "auth_client_id": "admin-cli",
+            "validate_certs": True,
+            "realm": "realm-name",
+            "name": "role-name",
+            "description": "role-description",
+            "composite": True,
+            "composites": [{"client_id": "client_1", "name": "client-role1"}, {"name": "realm-role-1"}],
         }
         return_value_present = [
             {
@@ -297,21 +351,13 @@ def test_create_with_composites_when_present_no_change(self):
                 "description": "role-description",
                 "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966",
                 "name": "role-name",
-            }
+            },
         ]
         return_value_updated = [None]
         return_get_role_composites = [
             [
-                {
-                    'clientRole': True,
-                    'containerId': 'c4367fac-f427-11ed-8e2f-aff070d20f0e',
-                    'name': 'client-role1'
-                },
-                {
-                    'clientRole': False,
-                    'containerId': 'realm-name',
-                    'name': 'realm-role-1'
-                }
+                {"clientRole": True, "containerId": "c4367fac-f427-11ed-8e2f-aff070d20f0e", "name": "client-role1"},
+                {"clientRole": False, "containerId": "realm-name", "name": "realm-role-1"},
             ]
         ]
         return_get_client_by_client_id = [
@@ -327,9 +373,7 @@ def test_create_with_composites_when_present_no_change(self):
                 "redirectUris": [
                     "http://localhost:8080/*",
                 ],
-                "webOrigins": [
-                    "*"
-                ],
+                "webOrigins": ["*"],
                 "notBefore": 0,
                 "bearerOnly": False,
                 "consentRequired": False,
@@ -342,24 +386,13 @@ def test_create_with_composites_when_present_no_change(self):
                 "protocol": "openid-connect",
                 "attributes": {
                     "backchannel.logout.session.required": "true",
-                    "backchannel.logout.revoke.offline.tokens": "false"
+                    "backchannel.logout.revoke.offline.tokens": "false",
                 },
                 "authenticationFlowBindingOverrides": {},
                 "fullScopeAllowed": True,
                 "nodeReRegistrationTimeout": -1,
-                "defaultClientScopes": [
-                    "web-origins",
-                    "acr",
-                    "profile",
-                    "roles",
-                    "email"
-                ],
-                "optionalClientScopes": [
-                    "address",
-                    "phone",
-                    "offline_access",
-                    "microprofile-jwt"
-                ]
+                "defaultClientScopes": ["web-origins", "acr", "profile", "roles", "email"],
+                "optionalClientScopes": ["address", "phone", "offline_access", "microprofile-jwt"],
             }
         ]

@@ -369,12 +402,23 @@ def test_create_with_composites_when_present_no_change(self):

         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_realm_role=return_value_present, update_realm_role=return_value_updated,
-                                        get_client_by_id=return_get_client_by_client_id,
-                                        get_role_composites=return_get_role_composites) \
-                        as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role,
-                            mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role,
-                            mock_get_client_by_client_id, mock_get_role_composites):
+                with patch_keycloak_api(
+                    get_realm_role=return_value_present,
+                    update_realm_role=return_value_updated,
+                    get_client_by_id=return_get_client_by_client_id,
+                    get_role_composites=return_get_role_composites,
+                ) as (
+                    mock_get_realm_role,
+                    mock_create_realm_role,
+                    mock_update_realm_role,
+                    mock_delete_realm_role,
+                    mock_get_client_role,
+                    mock_create_client_role,
+                    mock_update_client_role,
+                    mock_delete_client_role,
+                    mock_get_client_by_client_id,
+                    mock_get_role_composites,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -385,21 +429,21 @@ def test_create_with_composites_when_present_no_change(self):
         self.assertEqual(len(mock_get_role_composites.mock_calls), 1)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)

     def test_delete_when_absent(self):
         """Remove an absent realm role"""

         module_args = {
-            'auth_keycloak_url': 'http://keycloak.url/auth',
-            'auth_password': 'admin',
-            'auth_realm': 'master',
-            'auth_username': 'admin',
-            'auth_client_id': 'admin-cli',
-            'validate_certs': True,
-            'realm': 'realm-name',
-            'name': 'role-name',
-            'state': 'absent'
+            "auth_keycloak_url": "http://keycloak.url/auth",
+            "auth_password": "admin",
+            "auth_realm": "master",
+            "auth_username": "admin",
+            "auth_client_id": "admin-cli",
+            "validate_certs": True,
+            "realm": "realm-name",
+            "name": "role-name",
+            "state": "absent",
         }
         return_value_absent = [None]
         return_value_deleted = [None]
@@ -409,10 +453,18 @@ def test_delete_when_absent(self):

         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_realm_role=return_value_absent, delete_realm_role=return_value_deleted) \
-                        as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role,
-                            mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role,
-                            mock_get_client_by_client_id, mock_get_role_composites):
+                with patch_keycloak_api(get_realm_role=return_value_absent, delete_realm_role=return_value_deleted) as (
+                    mock_get_realm_role,
+                    mock_create_realm_role,
+                    mock_update_realm_role,
+                    mock_delete_realm_role,
+                    mock_get_client_role,
+                    mock_create_client_role,
+                    mock_update_client_role,
+                    mock_delete_client_role,
+                    mock_get_client_by_client_id,
+                    mock_get_role_composites,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -420,21 +472,21 @@ def test_delete_when_absent(self):
         self.assertEqual(len(mock_delete_realm_role.mock_calls), 0)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)

     def test_delete_when_present(self):
         """Remove a present realm role"""

         module_args = {
-            'auth_keycloak_url': 'http://keycloak.url/auth',
-            'auth_password': 'admin',
-            'auth_realm': 'master',
-            'auth_username': 'admin',
-            'auth_client_id': 'admin-cli',
-            'validate_certs': True,
-            'realm': 'realm-name',
-            'name': 'role-name',
-            'state': 'absent'
+            "auth_keycloak_url": "http://keycloak.url/auth",
+            "auth_password": "admin",
+            "auth_realm": "master",
+            "auth_username": "admin",
+            "auth_client_id": "admin-cli",
+            "validate_certs": True,
+            "realm": "realm-name",
+            "name": "role-name",
+            "state": "absent",
         }
         return_value_absent = [
             {
@@ -454,10 +506,18 @@ def test_delete_when_present(self):

         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_realm_role=return_value_absent, delete_realm_role=return_value_deleted) \
-                        as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role,
-                            mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role,
-                            mock_get_client_by_client_id, mock_get_role_composites):
+                with patch_keycloak_api(get_realm_role=return_value_absent, delete_realm_role=return_value_deleted) as (
+                    mock_get_realm_role,
+                    mock_create_realm_role,
+                    mock_update_realm_role,
+                    mock_delete_realm_role,
+                    mock_get_client_role,
+                    mock_create_client_role,
+                    mock_update_client_role,
+                    mock_delete_client_role,
+                    mock_get_client_by_client_id,
+                    mock_get_role_composites,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -465,7 +525,7 @@ def test_delete_when_present(self):
         self.assertEqual(len(mock_delete_realm_role.mock_calls), 1)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)


 class TestKeycloakClientRole(ModuleTestCase):
@@ -477,26 +537,18 @@ def test_create_client_role_with_composites_when_absent(self):
         """Update with change a realm role"""

         module_args = {
-            'auth_keycloak_url': 'http://keycloak.url/auth',
-            'auth_password': 'admin',
-            'auth_realm': 'master',
-            'auth_username': 'admin',
-            'auth_client_id': 'admin-cli',
-            'validate_certs': True,
-            'realm': 'realm-name',
-            'client_id': 'client-name',
-            'name': 'role-name',
-            'description': 'role-description',
-            'composite': True,
-            'composites': [
-                {
-                    'client_id': 'client_1',
-                    'name': 'client-role1'
-                },
-                {
-                    'name': 'realm-role-1'
-                }
-            ]
+            "auth_keycloak_url": "http://keycloak.url/auth",
+            "auth_password": "admin",
+            "auth_realm": "master",
+            "auth_username": "admin",
+            "auth_client_id": "admin-cli",
+            "validate_certs": True,
+            "realm": "realm-name",
+            "client_id": "client-name",
+            "name": "role-name",
+            "description": "role-description",
+            "composite": True,
+            "composites": [{"client_id": "client_1", "name": "client-role1"}, {"name": "realm-role-1"}],
         }
         return_get_client_role = [
             None,
@@ -504,21 +556,12 @@ def test_create_client_role_with_composites_when_absent(self):
                 "attributes": {},
                 "clientRole": True,
                 "composite": True,
-                "composites": [
-                    {
-                        'client': {
-                            'client1': ['client-role1']
-                        }
-                    },
-                    {
-                        'realm': ['realm-role-1']
-                    }
-                ],
+                "composites": [{"client": {"client1": ["client-role1"]}}, {"realm": ["realm-role-1"]}],
                 "containerId": "9ae25ec2-f40a-11ed-9261-b3bacf720f69",
                 "description": "role-description",
                 "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966",
                 "name": "role-name",
-            }
+            },
         ]
         changed = True

@@ -526,10 +569,18 @@ def test_create_client_role_with_composites_when_absent(self):
         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_client_role=return_get_client_role) \
-                        as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role,
-                            mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role,
-                            mock_get_client_by_client_id, mock_get_role_composites):
+                with patch_keycloak_api(get_client_role=return_get_client_role) as (
+                    mock_get_realm_role,
+                    mock_create_realm_role,
+                    mock_update_realm_role,
+                    mock_delete_realm_role,
+                    mock_get_client_role,
+                    mock_create_client_role,
+                    mock_update_client_role,
+                    mock_delete_client_role,
+                    mock_get_client_by_client_id,
+                    mock_get_role_composites,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -541,32 +592,24 @@ def test_create_client_role_with_composites_when_absent(self):
         self.assertEqual(len(mock_update_client_role.mock_calls), 0)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)

     def test_create_client_role_with_composites_when_present_no_change(self):
         """Update with change a realm role"""

         module_args = {
-            'auth_keycloak_url': 'http://keycloak.url/auth',
-            'auth_password': 'admin',
-            'auth_realm': 'master',
-            'auth_username': 'admin',
-            'auth_client_id': 'admin-cli',
-            'validate_certs': True,
-            'realm': 'realm-name',
-            'client_id': 'client-name',
-            'name': 'role-name',
-            'description': 'role-description',
-            'composite': True,
-            'composites': [
-                {
-                    'client_id': 'client_1',
-                    'name': 'client-role1'
-                },
-                {
-                    'name': 'realm-role-1'
-                }
-            ]
+            "auth_keycloak_url": "http://keycloak.url/auth",
+            "auth_password": "admin",
+            "auth_realm": "master",
+            "auth_username": "admin",
+            "auth_client_id": "admin-cli",
+            "validate_certs": True,
+            "realm": "realm-name",
+            "client_id": "client-name",
+            "name": "role-name",
+            "description": "role-description",
+            "composite": True,
+            "composites": [{"client_id": "client_1", "name": "client-role1"}, {"name": "realm-role-1"}],
         }
         return_get_client_role = [
             {
@@ -581,16 +624,8 @@ def test_create_client_role_with_composites_when_present_no_change(self):
         ]
         return_get_role_composites = [
             [
-                {
-                    'clientRole': True,
-                    'containerId': 'c4367fac-f427-11ed-8e2f-aff070d20f0e',
-                    'name': 'client-role1'
-                },
-                {
-                    'clientRole': False,
-                    'containerId': 'realm-name',
-                    'name': 'realm-role-1'
-                }
+                {"clientRole": True, "containerId": "c4367fac-f427-11ed-8e2f-aff070d20f0e", "name": "client-role1"},
+                {"clientRole": False, "containerId": "realm-name", "name": "realm-role-1"},
             ]
         ]
         return_get_client_by_client_id = [
@@ -606,9 +641,7 @@ def test_create_client_role_with_composites_when_present_no_change(self):
                 "redirectUris": [
                     "http://localhost:8080/*",
                 ],
-                "webOrigins": [
-                    "*"
-                ],
+                "webOrigins": ["*"],
                 "notBefore": 0,
                 "bearerOnly": False,
                 "consentRequired": False,
@@ -621,24 +654,13 @@ def test_create_client_role_with_composites_when_present_no_change(self):
                 "protocol": "openid-connect",
                 "attributes": {
                     "backchannel.logout.session.required": "true",
-                    "backchannel.logout.revoke.offline.tokens": "false"
+                    "backchannel.logout.revoke.offline.tokens": "false",
                 },
                 "authenticationFlowBindingOverrides": {},
                 "fullScopeAllowed": True,
                 "nodeReRegistrationTimeout": -1,
-                "defaultClientScopes": [
-                    "web-origins",
-                    "acr",
-                    "profile",
-                    "roles",
-                    "email"
-                ],
-                "optionalClientScopes": [
-                    "address",
-                    "phone",
-                    "offline_access",
-                    "microprofile-jwt"
-                ]
+                "defaultClientScopes": ["web-origins", "acr", "profile", "roles", "email"],
+                "optionalClientScopes": ["address", "phone", "offline_access", "microprofile-jwt"],
             }
         ]
         changed = False

@@ -647,11 +669,22 @@ def test_create_client_role_with_composites_when_present_no_change(self):
         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_client_role=return_get_client_role, get_client_by_id=return_get_client_by_client_id,
-                                        get_role_composites=return_get_role_composites) \
-                        as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role,
-                            mock_get_client_role, mock_create_client_role, mock_update_client_role, mock_delete_client_role,
-                            mock_get_client_by_client_id, mock_get_role_composites):
+                with patch_keycloak_api(
+                    get_client_role=return_get_client_role,
+                    get_client_by_id=return_get_client_by_client_id,
+                    get_role_composites=return_get_role_composites,
+                ) as (
+                    mock_get_realm_role,
+                    mock_create_realm_role,
+                    mock_update_realm_role,
+                    mock_delete_realm_role,
+                    mock_get_client_role,
+                    mock_create_client_role,
+                    mock_update_client_role,
+                    mock_delete_client_role,
+                    mock_get_client_by_client_id,
+                    mock_get_role_composites,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -665,8 +698,8 @@ def test_create_client_role_with_composites_when_present_no_change(self):
         self.assertEqual(len(mock_get_role_composites.mock_calls), 1)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/unit/plugins/modules/test_keycloak_user.py b/tests/unit/plugins/modules/test_keycloak_user.py
index 82efe6ce48a..0ceb870b9e8 100644
--- a/tests/unit/plugins/modules/test_keycloak_user.py
+++ b/tests/unit/plugins/modules/test_keycloak_user.py
@@ -1,4 +1,3 @@
-
 # Copyright (c) 2021, Ansible Project
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
@@ -10,7 +9,11 @@
 import unittest
 from unittest.mock import patch

-from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
+    AnsibleExitJson,
+    ModuleTestCase,
+    set_module_args,
+)

 from ansible_collections.community.general.plugins.modules import keycloak_user

@@ -19,12 +22,14 @@


 @contextmanager
-def patch_keycloak_api(get_user_by_username=None,
-                       create_user=None,
-                       update_user_groups_membership=None,
-                       get_user_groups=None,
-                       delete_user=None,
-                       update_user=None):
+def patch_keycloak_api(
+    get_user_by_username=None,
+    create_user=None,
+    update_user_groups_membership=None,
+    get_user_groups=None,
+    delete_user=None,
+    update_user=None,
+):
     """Mock context manager for patching the methods in KeycloakAPI that contact the Keycloak server

     Patches the `get_user_by_username` and `create_user` methods
@@ -32,33 +37,39 @@ def patch_keycloak_api(get_user_by_username=None,
     """

     obj = keycloak_user.KeycloakAPI
-    with patch.object(obj, 'get_user_by_username', side_effect=get_user_by_username) as mock_get_user_by_username:
-        with patch.object(obj, 'create_user', side_effect=create_user) as mock_create_user:
-            with patch.object(obj, 'update_user_groups_membership', side_effect=update_user_groups_membership) as mock_update_user_groups_membership:
-                with patch.object(obj, 'get_user_groups', side_effect=get_user_groups) as mock_get_user_groups:
-                    with patch.object(obj, 'delete_user', side_effect=delete_user) as mock_delete_user:
-                        with patch.object(obj, 'update_user', side_effect=update_user) as mock_update_user:
-                            yield mock_get_user_by_username, mock_create_user, mock_update_user_groups_membership, \
-                                mock_get_user_groups, mock_delete_user, mock_update_user
+    with patch.object(obj, "get_user_by_username", side_effect=get_user_by_username) as mock_get_user_by_username:
+        with patch.object(obj, "create_user", side_effect=create_user) as mock_create_user:
+            with patch.object(
+                obj, "update_user_groups_membership", side_effect=update_user_groups_membership
+            ) as mock_update_user_groups_membership:
+                with patch.object(obj, "get_user_groups", side_effect=get_user_groups) as mock_get_user_groups:
+                    with patch.object(obj, "delete_user", side_effect=delete_user) as mock_delete_user:
+                        with patch.object(obj, "update_user", side_effect=update_user) as mock_update_user:
+                            yield (
+                                mock_get_user_by_username,
+                                mock_create_user,
+                                mock_update_user_groups_membership,
+                                mock_get_user_groups,
+                                mock_delete_user,
+                                mock_update_user,
+                            )


 def get_response(object_with_future_response, method, get_id_call_count):
     if callable(object_with_future_response):
         return object_with_future_response()
     if isinstance(object_with_future_response, dict):
-        return get_response(
-            object_with_future_response[method], method, get_id_call_count)
+        return get_response(object_with_future_response[method], method, get_id_call_count)
     if isinstance(object_with_future_response, list):
         call_number = next(get_id_call_count)
-        return get_response(
-            object_with_future_response[call_number], method, get_id_call_count)
+        return get_response(object_with_future_response[call_number], method, get_id_call_count)
     return object_with_future_response


 def build_mocked_request(get_id_user_count, response_dict):
     def _mocked_requests(*args, **kwargs):
         url = args[0]
-        method = kwargs['method']
+        method = kwargs["method"]
         future_response = response_dict.get(url, None)
         return get_response(future_response, method, get_id_user_count)

@@ -78,12 +89,14 @@ def _create_wrapper():

 def mock_good_connection():
     token_response = {
-        'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper(
-            '{"access_token": "alongtoken"}'), }
+        "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper(
+            '{"access_token": "alongtoken"}'
+        ),
+    }
     return patch(
-        'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+        "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url",
         side_effect=build_mocked_request(count(), token_response),
-        autospec=True
+        autospec=True,
     )

@@ -96,17 +109,17 @@ def test_add_new_user(self):
         """Add a new user"""

         module_args = {
-            'auth_keycloak_url': 'https: // auth.example.com / auth',
-            'token': '{{ access_token }}',
-            'state': 'present',
-            'realm': 'master',
-            'username': 'test',
-            'groups': []
+            "auth_keycloak_url": "https: // auth.example.com / auth",
+            "token": "{{ access_token }}",
+            "state": "present",
+            "realm": "master",
+            "username": "test",
+            "groups": [],
         }
         return_value_get_user_by_username = [None]
         return_value_update_user_groups_membership = [False]
         return_get_user_groups = [[]]
-        return_create_user = [{'id': '123eqwdawer24qwdqw4'}]
+        return_create_user = [{"id": "123eqwdawer24qwdqw4"}]
         return_delete_user = None
         return_update_user = None
         changed = True
@@ -115,18 +128,21 @@ def test_add_new_user(self):

         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_user_by_username=return_value_get_user_by_username,
-                                        create_user=return_create_user,
-                                        update_user_groups_membership=return_value_update_user_groups_membership,
-                                        get_user_groups=return_get_user_groups,
-                                        update_user=return_update_user,
-                                        delete_user=return_delete_user) \
-                        as (mock_get_user_by_username,
-                            mock_create_user,
-                            mock_update_user_groups_membership,
-                            mock_get_user_groups,
-                            mock_delete_user,
-                            mock_update_user):
+                with patch_keycloak_api(
+                    get_user_by_username=return_value_get_user_by_username,
+                    create_user=return_create_user,
+                    update_user_groups_membership=return_value_update_user_groups_membership,
+                    get_user_groups=return_get_user_groups,
+                    update_user=return_update_user,
+                    delete_user=return_delete_user,
+                ) as (
+                    mock_get_user_by_username,
+                    mock_create_user,
+                    mock_update_user_groups_membership,
+                    mock_get_user_groups,
+                    mock_delete_user,
+                    mock_update_user,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -138,31 +154,31 @@ def test_add_new_user(self):
         self.assertEqual(mock_delete_user.call_count, 0)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)

     def test_add_exiting_user_no_change(self):
         """Add a new user"""

         module_args = {
-            'auth_keycloak_url': 'https: // auth.example.com / auth',
-            'token': '{{ access_token }}',
-            'state': 'present',
-            'realm': 'master',
-            'username': 'test',
-            'groups': []
+            "auth_keycloak_url": "https: // auth.example.com / auth",
+            "token": "{{ access_token }}",
+            "state": "present",
+            "realm": "master",
+            "username": "test",
+            "groups": [],
         }
         return_value_get_user_by_username = [
             {
-                'id': '123eqwdawer24qwdqw4',
-                'username': 'test',
-                'groups': [],
-                'enabled': True,
-                'emailVerified': False,
-                'disableableCredentialTypes': [],
-                'requiredActions': [],
-                'credentials': [],
-                'federatedIdentities': [],
-                'clientConsents': []
+                "id": "123eqwdawer24qwdqw4",
+                "username": "test",
+                "groups": [],
+                "enabled": True,
+                "emailVerified": False,
+                "disableableCredentialTypes": [],
+                "requiredActions": [],
+                "credentials": [],
+                "federatedIdentities": [],
+                "clientConsents": [],
             }
         ]
         return_value_update_user_groups_membership = [False]
@@ -176,18 +192,21 @@ def test_add_exiting_user_no_change(self):

         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_user_by_username=return_value_get_user_by_username,
-                                        create_user=return_create_user,
-                                        update_user_groups_membership=return_value_update_user_groups_membership,
-                                        get_user_groups=return_get_user_groups,
-                                        update_user=return_update_user,
-                                        delete_user=return_delete_user) \
-                        as (mock_get_user_by_username,
-                            mock_create_user,
-                            mock_update_user_groups_membership,
-                            mock_get_user_groups,
-                            mock_delete_user,
-                            mock_update_user):
+                with patch_keycloak_api(
+                    get_user_by_username=return_value_get_user_by_username,
+                    create_user=return_create_user,
+                    update_user_groups_membership=return_value_update_user_groups_membership,
+                    get_user_groups=return_get_user_groups,
+                    update_user=return_update_user,
+                    delete_user=return_delete_user,
+                ) as (
+                    mock_get_user_by_username,
+                    mock_create_user,
+                    mock_update_user_groups_membership,
+                    mock_get_user_groups,
+                    mock_delete_user,
+                    mock_update_user,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -199,55 +218,52 @@ def test_add_exiting_user_no_change(self):
         self.assertEqual(mock_delete_user.call_count, 0)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)

     def test_update_user_with_group_changes(self):
         """Update groups for a user"""

         module_args = {
-            'auth_keycloak_url': 'https: // auth.example.com / auth',
-            'token': '{{ access_token }}',
-            'state': 'present',
-            'realm': 'master',
-            'username': 'test',
-            'first_name': 'test',
-            'last_name': 'user',
-            'groups': [{
-                'name': 'group1',
-                'state': 'present'
-            }]
+            "auth_keycloak_url": "https: // auth.example.com / auth",
+            "token": "{{ access_token }}",
+            "state": "present",
+            "realm": "master",
+            "username": "test",
+            "first_name": "test",
+            "last_name": "user",
+            "groups": [{"name": "group1", "state": "present"}],
         }
         return_value_get_user_by_username = [
             {
-                'id': '123eqwdawer24qwdqw4',
-                'username': 'test',
-                'groups': [],
-                'enabled': True,
-                'emailVerified': False,
-                'disableableCredentialTypes': [],
-                'requiredActions': [],
-                'credentials': [],
-                'federatedIdentities': [],
-                'clientConsents': []
+                "id": "123eqwdawer24qwdqw4",
+                "username": "test",
+                "groups": [],
+                "enabled": True,
+                "emailVerified": False,
+                "disableableCredentialTypes": [],
+                "requiredActions": [],
+                "credentials": [],
+                "federatedIdentities": [],
+                "clientConsents": [],
             }
         ]
         return_value_update_user_groups_membership = [True]
-        return_get_user_groups = [['group1']]
+        return_get_user_groups = [["group1"]]
         return_create_user = None
         return_delete_user = None
         return_update_user = [
             {
-                'id': '123eqwdawer24qwdqw4',
-                'username': 'test',
-                'first_name': 'test',
-                'last_name': 'user',
-                'enabled': True,
-                'emailVerified': False,
-                'disableableCredentialTypes': [],
-                'requiredActions': [],
-                'credentials': [],
-                'federatedIdentities': [],
-                'clientConsents': []
+                "id": "123eqwdawer24qwdqw4",
+                "username": "test",
+                "first_name": "test",
+                "last_name": "user",
+                "enabled": True,
+                "emailVerified": False,
+                "disableableCredentialTypes": [],
+                "requiredActions": [],
+                "credentials": [],
+                "federatedIdentities": [],
+                "clientConsents": [],
             }
         ]
         changed = True
@@ -256,18 +272,21 @@ def test_update_user_with_group_changes(self):

         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_user_by_username=return_value_get_user_by_username,
-                                        create_user=return_create_user,
-                                        update_user_groups_membership=return_value_update_user_groups_membership,
-                                        get_user_groups=return_get_user_groups,
-                                        update_user=return_update_user,
-                                        delete_user=return_delete_user) \
-                        as (mock_get_user_by_username,
-                            mock_create_user,
-                            mock_update_user_groups_membership,
-                            mock_get_user_groups,
-                            mock_delete_user,
-                            mock_update_user):
+                with patch_keycloak_api(
+                    get_user_by_username=return_value_get_user_by_username,
+                    create_user=return_create_user,
+                    update_user_groups_membership=return_value_update_user_groups_membership,
+                    get_user_groups=return_get_user_groups,
+                    update_user=return_update_user,
+                    delete_user=return_delete_user,
+                ) as (
+                    mock_get_user_by_username,
+                    mock_create_user,
+                    mock_update_user_groups_membership,
+                    mock_get_user_groups,
+                    mock_delete_user,
+                    mock_update_user,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -279,31 +298,31 @@ def test_update_user_with_group_changes(self):
         self.assertEqual(mock_delete_user.call_count, 0)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)

     def test_delete_user(self):
         """Delete a user"""

         module_args = {
-            'auth_keycloak_url': 'https: // auth.example.com / auth',
-            'token': '{{ access_token }}',
-            'state': 'absent',
-            'realm': 'master',
-            'username': 'test',
-            'groups': []
+            "auth_keycloak_url": "https: // auth.example.com / auth",
+            "token": "{{ access_token }}",
+            "state": "absent",
+            "realm": "master",
+            "username": "test",
+            "groups": [],
        }
         return_value_get_user_by_username = [
             {
-                'id': '123eqwdawer24qwdqw4',
-                'username': 'test',
-                'groups': [],
-                'enabled': True,
-                'emailVerified': False,
-                'disableableCredentialTypes': [],
-                'requiredActions': [],
-                'credentials': [],
-                'federatedIdentities': [],
-                'clientConsents': []
+                "id": "123eqwdawer24qwdqw4",
+                "username": "test",
+                "groups": [],
+                "enabled": True,
+                "emailVerified": False,
+                "disableableCredentialTypes": [],
+                "requiredActions": [],
+                "credentials": [],
+                "federatedIdentities": [],
+                "clientConsents": [],
             }
         ]
         return_value_update_user_groups_membership = None
@@ -317,18 +336,21 @@ def test_delete_user(self):

         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_user_by_username=return_value_get_user_by_username,
-                                        create_user=return_create_user,
-                                        update_user_groups_membership=return_value_update_user_groups_membership,
-                                        get_user_groups=return_get_user_groups,
-                                        update_user=return_update_user,
-                                        delete_user=return_delete_user) \
-                        as (mock_get_user_by_username,
-                            mock_create_user,
-                            mock_update_user_groups_membership,
-                            mock_get_user_groups,
-                            mock_delete_user,
-                            mock_update_user):
+                with patch_keycloak_api(
+                    get_user_by_username=return_value_get_user_by_username,
+                    create_user=return_create_user,
+                    update_user_groups_membership=return_value_update_user_groups_membership,
+                    get_user_groups=return_get_user_groups,
+                    update_user=return_update_user,
+                    delete_user=return_delete_user,
+                ) as (
+                    mock_get_user_by_username,
+                    mock_create_user,
+                    mock_update_user_groups_membership,
+                    mock_get_user_groups,
+                    mock_delete_user,
+                    mock_update_user,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -340,8 +362,8 @@ def test_delete_user(self):
         self.assertEqual(mock_delete_user.call_count, 1)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
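Every test in these files follows the same harness shape from community.internal_test_tools: set_module_args() stages the task parameters, ModuleTestCase patches the module's exit_json()/fail_json() so they raise AnsibleExitJson/AnsibleFailJson, and the test asserts on the dict carried by the exception instead of letting the module exit the process. A simplified re-implementation of just that control flow (an illustrative sketch, not the actual internal_test_tools code):

    import unittest

    class AnsibleExitJson(Exception):
        """Stand-in for the harness exception: args[0] is the result dict."""

    def fake_exit_json(**result):
        # The patched exit_json() raises instead of calling sys.exit().
        result.setdefault("changed", False)
        raise AnsibleExitJson(result)

    class ExitJsonPatternTest(unittest.TestCase):
        def test_result_dict_is_captured(self):
            with self.assertRaises(AnsibleExitJson) as exec_info:
                fake_exit_json(changed=True, msg="done")
            result = exec_info.exception.args[0]
            self.assertIs(result["changed"], True)

    if __name__ == "__main__":
        unittest.main()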
diff --git a/tests/unit/plugins/modules/test_keycloak_user_execute_actions_email.py b/tests/unit/plugins/modules/test_keycloak_user_execute_actions_email.py
index c6653c3490a..010934c5aeb 100644
--- a/tests/unit/plugins/modules/test_keycloak_user_execute_actions_email.py
+++ b/tests/unit/plugins/modules/test_keycloak_user_execute_actions_email.py
@@ -9,10 +9,15 @@
 from unittest.mock import patch

 from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
-    AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+    AnsibleExitJson,
+    AnsibleFailJson,
+    ModuleTestCase,
+    set_module_args,
 )

-from ansible_collections.community.general.plugins.modules import keycloak_user_execute_actions_email as module_under_test
+from ansible_collections.community.general.plugins.modules import (
+    keycloak_user_execute_actions_email as module_under_test,
+)

 from io import StringIO
 from itertools import count
@@ -21,36 +26,40 @@
 def _create_wrapper(text_as_string):
     def _wrapper():
         return StringIO(text_as_string)
+
     return _wrapper


 def _build_mocked_request(get_id_user_count, response_dict):
     def _mocked_requests(*args, **kwargs):
         url = args[0]
-        method = kwargs['method']
+        method = kwargs["method"]
         future_response = response_dict.get(url, None)
         if callable(future_response):
             return future_response()
         return future_response
+
     return _mocked_requests


 def _mock_good_connection():
     token_response = {
-        'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': _create_wrapper('{"access_token": "alongtoken"}')
+        "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": _create_wrapper(
+            '{"access_token": "alongtoken"}'
+        )
     }
     return patch(
-        'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+        "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url",
         side_effect=_build_mocked_request(count(), token_response),
-        autospec=True
+        autospec=True,
     )


 @contextmanager
 def patch_keycloak_api(get_user_by_username=None, send_execute_actions_email=None):
     obj = module_under_test.KeycloakAPI
-    with patch.object(obj, 'get_user_by_username', side_effect=get_user_by_username) as m_get_user:
-        with patch.object(obj, 'send_execute_actions_email', side_effect=send_execute_actions_email) as m_send:
+    with patch.object(obj, "get_user_by_username", side_effect=get_user_by_username) as m_get_user:
+        with patch.object(obj, "send_execute_actions_email", side_effect=send_execute_actions_email) as m_send:
             yield m_get_user, m_send

@@ -61,40 +70,40 @@ def setUp(self):

     def test_default_action_with_username(self):
         module_args = {
-            'auth_client_id': 'admin-cli',
-            'auth_keycloak_url': 'http://keycloak.url/auth',
-            'auth_password': 'admin',
-            'auth_realm': 'master',
-            'auth_username': 'admin',
-            'realm': 'master',
-            'username': 'jdoe'
+            "auth_client_id": "admin-cli",
+            "auth_keycloak_url": "http://keycloak.url/auth",
+            "auth_password": "admin",
+            "auth_realm": "master",
+            "auth_username": "admin",
+            "realm": "master",
+            "username": "jdoe",
         }

         with set_module_args(module_args):
             with _mock_good_connection():
                 with patch_keycloak_api(
-                    get_user_by_username=lambda **kwargs: {'id': 'uid-123', 'username': 'jdoe'},
+                    get_user_by_username=lambda **kwargs: {"id": "uid-123", "username": "jdoe"},
                     send_execute_actions_email=lambda **kwargs: None,
                 ) as (m_get_user, m_send):
                     with self.assertRaises(AnsibleExitJson) as result:
                         self.module.main()
                     data = result.exception.args[0]
-                    self.assertTrue(data['changed'])
-                    self.assertEqual(data['user_id'], 'uid-123')
-                    self.assertEqual(data['actions'], ['UPDATE_PASSWORD'])
+                    self.assertTrue(data["changed"])
+                    self.assertEqual(data["user_id"], "uid-123")
+                    self.assertEqual(data["actions"], ["UPDATE_PASSWORD"])
                     self.assertEqual(len(m_get_user.mock_calls), 1)
                     self.assertEqual(len(m_send.mock_calls), 1)

     def test_user_not_found(self):
         module_args = {
-            'auth_client_id': 'admin-cli',
-            'auth_keycloak_url': 'http://keycloak.url/auth',
-            'auth_password': 'admin',
-            'auth_realm': 'master',
-            'auth_username': 'admin',
-            'realm': 'master',
-            'username': 'missing'
+            "auth_client_id": "admin-cli",
+            "auth_keycloak_url": "http://keycloak.url/auth",
+            "auth_password": "admin",
+            "auth_realm": "master",
+            "auth_username": "admin",
+            "realm": "master",
+            "username": "missing",
         }

         with set_module_args(module_args):
@@ -106,8 +115,8 @@ def test_user_not_found(self):
                     with self.assertRaises(AnsibleFailJson) as result:
                         self.module.main()
                     data = result.exception.args[0]
-                    self.assertIn("User 'missing' not found", data['msg'])
+                    self.assertIn("User 'missing' not found", data["msg"])


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
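The mechanical changes repeated in every hunk are the formatter's doing: single quotes become double quotes, backslash continuations are replaced by parenthesized wrapping, and a trailing comma after the last element (the "magic trailing comma" convention shared by ruff format and black) pins the exploded one-item-per-line layout. A small illustration of that last rule, unrelated to any file in this patch:

    # Fits on one line and has no trailing comma, so the formatter keeps it compact:
    def patch_api_compact(get_components=None, get_component=None):
        pass

    # The trailing comma after the final parameter forces one parameter per line,
    # which is why the reformatted signatures above read this way:
    def patch_api_exploded(
        get_components=None,
        get_component=None,
    ):
        pass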
diff --git a/tests/unit/plugins/modules/test_keycloak_user_federation.py b/tests/unit/plugins/modules/test_keycloak_user_federation.py
index e79951d288d..3990c912e2c 100644
--- a/tests/unit/plugins/modules/test_keycloak_user_federation.py
+++ b/tests/unit/plugins/modules/test_keycloak_user_federation.py
@@ -1,4 +1,3 @@
-
 # Copyright (c) 2021, Ansible Project
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
@@ -9,7 +8,11 @@
 from contextlib import contextmanager
 from unittest.mock import patch

-from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
+    AnsibleExitJson,
+    ModuleTestCase,
+    set_module_args,
+)

 from ansible_collections.community.general.plugins.modules import keycloak_user_federation

@@ -18,43 +21,44 @@


 @contextmanager
-def patch_keycloak_api(get_components=None, get_component=None, create_component=None, update_component=None, delete_component=None):
-    """Mock context manager for patching the methods in KeycloakAPI
-    """
+def patch_keycloak_api(
+    get_components=None, get_component=None, create_component=None, update_component=None, delete_component=None
+):
+    """Mock context manager for patching the methods in KeycloakAPI"""

     obj = keycloak_user_federation.KeycloakAPI
-    with patch.object(obj, 'get_components', side_effect=get_components) \
-            as mock_get_components:
-        with patch.object(obj, 'get_component', side_effect=get_component) \
-                as mock_get_component:
-            with patch.object(obj, 'create_component', side_effect=create_component) \
-                    as mock_create_component:
-                with patch.object(obj, 'update_component', side_effect=update_component) \
-                        as mock_update_component:
-                    with patch.object(obj, 'delete_component', side_effect=delete_component) \
-                            as mock_delete_component:
-                        yield mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component
+    with patch.object(obj, "get_components", side_effect=get_components) as mock_get_components:
+        with patch.object(obj, "get_component", side_effect=get_component) as mock_get_component:
+            with patch.object(obj, "create_component", side_effect=create_component) as mock_create_component:
+                with patch.object(obj, "update_component", side_effect=update_component) as mock_update_component:
+                    with patch.object(obj, "delete_component", side_effect=delete_component) as mock_delete_component:
+                        yield (
+                            mock_get_components,
+                            mock_get_component,
+                            mock_create_component,
+                            mock_update_component,
+                            mock_delete_component,
+                        )


 def get_response(object_with_future_response, method, get_id_call_count):
     if callable(object_with_future_response):
         return object_with_future_response()
     if isinstance(object_with_future_response, dict):
-        return get_response(
-            object_with_future_response[method], method, get_id_call_count)
+        return get_response(object_with_future_response[method], method, get_id_call_count)
     if isinstance(object_with_future_response, list):
         call_number = next(get_id_call_count)
-        return get_response(
-            object_with_future_response[call_number], method, get_id_call_count)
+        return get_response(object_with_future_response[call_number], method, get_id_call_count)
     return object_with_future_response


 def build_mocked_request(get_id_user_count, response_dict):
     def _mocked_requests(*args, **kwargs):
         url = args[0]
-        method = kwargs['method']
+        method = kwargs["method"]
         future_response = response_dict.get(url, None)
         return get_response(future_response, method, get_id_user_count)
+
     return _mocked_requests

@@ -62,18 +66,23 @@ def create_wrapper(text_as_string):
     """Allow to mock many times a call to one address.
     Without this function, the StringIO is empty for the second call.
     """
+
     def _create_wrapper():
         return StringIO(text_as_string)
+
     return _create_wrapper


 def mock_good_connection():
     token_response = {
-        'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), }
+        "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper(
+            '{"access_token": "alongtoken"}'
+        ),
+    }
     return patch(
-        'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+        "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url",
         side_effect=build_mocked_request(count(), token_response),
-        autospec=True
+        autospec=True,
     )

@@ -86,24 +95,24 @@ def test_create_when_absent(self):
         """Add a new user federation"""

         module_args = {
-            'auth_keycloak_url': 'http://keycloak.url/auth',
-            'auth_realm': 'master',
-            'auth_username': 'admin',
-            'auth_password': 'admin',
-            'realm': 'realm-name',
-            'name': 'kerberos',
-            'state': 'present',
-            'provider_id': 'kerberos',
-            'provider_type': 'org.keycloak.storage.UserStorageProvider',
-            'config': {
-                'priority': 0,
-                'enabled': True,
-                'cachePolicy': 'DEFAULT',
-                'kerberosRealm': 'REALM',
-                'serverPrincipal': 'princ',
-                'keyTab': 'keytab',
-                'allowPasswordAuthentication': False,
-                'updateProfileFirstLogin': False,
+            "auth_keycloak_url": "http://keycloak.url/auth",
+            "auth_realm": "master",
+            "auth_username": "admin",
+            "auth_password": "admin",
+            "realm": "realm-name",
+            "name": "kerberos",
+            "state": "present",
+            "provider_id": "kerberos",
+            "provider_type": "org.keycloak.storage.UserStorageProvider",
+            "config": {
+                "priority": 0,
+                "enabled": True,
+                "cachePolicy": "DEFAULT",
+                "kerberosRealm": "REALM",
+                "serverPrincipal": "princ",
+                "keyTab": "keytab",
+                "allowPasswordAuthentication": False,
+                "updateProfileFirstLogin": False,
             },
         }
         return_value_component_create = [
@@ -114,45 +123,34 @@ def test_create_when_absent(self):
                 "providerType": "org.keycloak.storage.UserStorageProvider",
                 "parentId": "kerberos",
                 "config": {
-                    "serverPrincipal": [
-                        "princ"
-                    ],
-                    "allowPasswordAuthentication": [
-                        "false"
-                    ],
-                    "keyTab": [
-                        "keytab"
-                    ],
-                    "cachePolicy": [
-                        "DEFAULT"
-                    ],
-                    "updateProfileFirstLogin": [
-                        "false"
-                    ],
-                    "kerberosRealm": [
-                        "REALM"
-                    ],
-                    "priority": [
-                        "0"
-                    ],
-                    "enabled": [
-                        "true"
-                    ]
-                }
+                    "serverPrincipal": ["princ"],
+                    "allowPasswordAuthentication": ["false"],
+                    "keyTab": ["keytab"],
+                    "cachePolicy": ["DEFAULT"],
+                    "updateProfileFirstLogin": ["false"],
+                    "kerberosRealm": ["REALM"],
+                    "priority": ["0"],
+                    "enabled": ["true"],
+                },
             }
         ]
         # get before_comp, get default_mapper, get after_mapper
-        return_value_components_get = [
-            [], [], []
-        ]
+        return_value_components_get = [[], [], []]
        changed = True

         # Run the module
         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \
-                        as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component):
+                with patch_keycloak_api(
+                    get_components=return_value_components_get, create_component=return_value_component_create
+                ) as (
+                    mock_get_components,
+                    mock_get_component,
+                    mock_create_component,
+                    mock_update_component,
+                    mock_delete_component,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -163,30 +161,30 @@ def test_create_when_absent(self):
         self.assertEqual(len(mock_delete_component.mock_calls), 0)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)

     def test_create_when_present(self):
         """Update existing user federation"""

         module_args = {
-            'auth_keycloak_url': 'http://keycloak.url/auth',
-            'auth_realm': 'master',
-            'auth_username': 'admin',
-            'auth_password': 'admin',
-            'realm': 'realm-name',
-            'name': 'kerberos',
-            'state': 'present',
-            'provider_id': 'kerberos',
-            'provider_type': 'org.keycloak.storage.UserStorageProvider',
-            'config': {
-                'priority': 0,
-                'enabled': True,
-                'cachePolicy': 'DEFAULT',
-                'kerberosRealm': 'REALM',
-                'serverPrincipal': 'princ',
-                'keyTab': 'keytab',
-                'allowPasswordAuthentication': False,
-                'updateProfileFirstLogin': False,
+            "auth_keycloak_url": "http://keycloak.url/auth",
+            "auth_realm": "master",
+            "auth_username": "admin",
+            "auth_password": "admin",
+            "realm": "realm-name",
+            "name": "kerberos",
+            "state": "present",
+            "provider_id": "kerberos",
+            "provider_type": "org.keycloak.storage.UserStorageProvider",
+            "config": {
+                "priority": 0,
+                "enabled": True,
+                "cachePolicy": "DEFAULT",
+                "kerberosRealm": "REALM",
+                "serverPrincipal": "princ",
+                "keyTab": "keytab",
+                "allowPasswordAuthentication": False,
+                "updateProfileFirstLogin": False,
             },
         }
         return_value_components_get = [
@@ -198,35 +196,19 @@ def test_create_when_present(self):
                 "providerType": "org.keycloak.storage.UserStorageProvider",
                 "parentId": "kerberos",
                 "config": {
-                    "serverPrincipal": [
-                        "princ"
-                    ],
-                    "allowPasswordAuthentication": [
-                        "false"
-                    ],
-                    "keyTab": [
-                        "keytab"
-                    ],
-                    "cachePolicy": [
-                        "DEFAULT"
-                    ],
-                    "updateProfileFirstLogin": [
-                        "false"
-                    ],
-                    "kerberosRealm": [
-                        "REALM"
-                    ],
-                    "priority": [
-                        "0"
-                    ],
-                    "enabled": [
-                        "false"
-                    ]
-                }
+                    "serverPrincipal": ["princ"],
+                    "allowPasswordAuthentication": ["false"],
+                    "keyTab": ["keytab"],
+                    "cachePolicy": ["DEFAULT"],
+                    "updateProfileFirstLogin": ["false"],
+                    "kerberosRealm": ["REALM"],
+                    "priority": ["0"],
+                    "enabled": ["false"],
+                },
             }
         ],
         [],
-        []
+        [],
     ]
         return_value_component_get = [
             {
@@ -236,45 +218,35 @@ def test_create_when_present(self):
                 "providerType": "org.keycloak.storage.UserStorageProvider",
                 "parentId": "kerberos",
                 "config": {
-                    "serverPrincipal": [
-                        "princ"
-                    ],
-                    "allowPasswordAuthentication": [
-                        "false"
-                    ],
-                    "keyTab": [
-                        "keytab"
-                    ],
-                    "cachePolicy": [
-                        "DEFAULT"
-                    ],
-                    "updateProfileFirstLogin": [
-                        "false"
-                    ],
-                    "kerberosRealm": [
-                        "REALM"
-                    ],
-                    "priority": [
-                        "0"
-                    ],
-                    "enabled": [
-                        "true"
-                    ]
-                }
+                    "serverPrincipal": ["princ"],
+                    "allowPasswordAuthentication": ["false"],
+                    "keyTab": ["keytab"],
+                    "cachePolicy": ["DEFAULT"],
+                    "updateProfileFirstLogin": ["false"],
+                    "kerberosRealm": ["REALM"],
+                    "priority": ["0"],
+                    "enabled": ["true"],
+                },
             }
         ]
-        return_value_component_update = [
-            None
-        ]
+        return_value_component_update = [None]
         changed = True

         # Run the module
         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_components=return_value_components_get, get_component=return_value_component_get,
-                                        update_component=return_value_component_update) \
-                        as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component):
+                with patch_keycloak_api(
+                    get_components=return_value_components_get,
+                    get_component=return_value_component_get,
+                    update_component=return_value_component_update,
+                ) as (
+                    mock_get_components,
+                    mock_get_component,
+                    mock_create_component,
+                    mock_update_component,
+                    mock_delete_component,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -285,60 +257,60 @@ def test_create_when_present(self):
         self.assertEqual(len(mock_delete_component.mock_calls), 0)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)

     def test_create_with_mappers(self):
         """Add a new user federation with mappers"""

         module_args = {
-            'auth_keycloak_url': 'http://keycloak.url/auth',
-            'auth_realm': 'master',
-            'auth_username': 'admin',
-            'auth_password': 'admin',
-            'realm': 'realm-name',
-            'name': 'ldap',
-            'state': 'present',
-            'provider_id': 'ldap',
-            'provider_type': 'org.keycloak.storage.UserStorageProvider',
-            'config': {
-                'priority': 0,
-                'enabled': True,
-                'cachePolicy': 'DEFAULT',
-                'batchSizeForSync': 1000,
-                'editMode': 'READ_ONLY',
-                'importEnabled': True,
-                'syncRegistrations': False,
-                'vendor': 'other',
-                'usernameLDAPAttribute': 'uid',
-                'rdnLDAPAttribute': 'uid',
-                'uuidLDAPAttribute': 'entryUUID',
-                'userObjectClasses': 'inetOrgPerson, organizationalPerson',
-                'connectionUrl': 'ldaps://ldap.example.com:636',
-                'usersDn': 'ou=Users,dc=example,dc=com',
-                'authType': 'none',
-                'searchScope': 1,
-                'validatePasswordPolicy': False,
-                'trustEmail': False,
-                'useTruststoreSpi': 'ldapsOnly',
-                'connectionPooling': True,
-                'pagination': True,
-                'allowKerberosAuthentication': False,
-                'krbPrincipalAttribute': 'krbPrincipalName',
-                'debug': False,
-                'useKerberosForPasswordAuthentication': False,
+            "auth_keycloak_url": "http://keycloak.url/auth",
+            "auth_realm": "master",
+            "auth_username": "admin",
+            "auth_password": "admin",
+            "realm": "realm-name",
+            "name": "ldap",
+            "state": "present",
+            "provider_id": "ldap",
+            "provider_type": "org.keycloak.storage.UserStorageProvider",
+            "config": {
+                "priority": 0,
+                "enabled": True,
+                "cachePolicy": "DEFAULT",
+                "batchSizeForSync": 1000,
+                "editMode": "READ_ONLY",
+                "importEnabled": True,
+                "syncRegistrations": False,
+                "vendor": "other",
+                "usernameLDAPAttribute": "uid",
+                "rdnLDAPAttribute": "uid",
+                "uuidLDAPAttribute": "entryUUID",
+                "userObjectClasses": "inetOrgPerson, organizationalPerson",
+                "connectionUrl": "ldaps://ldap.example.com:636",
+                "usersDn": "ou=Users,dc=example,dc=com",
+                "authType": "none",
+                "searchScope": 1,
+                "validatePasswordPolicy": False,
+                "trustEmail": False,
+                "useTruststoreSpi": "ldapsOnly",
+                "connectionPooling": True,
+                "pagination": True,
+                "allowKerberosAuthentication": False,
+                "krbPrincipalAttribute": "krbPrincipalName",
+                "debug": False,
+                "useKerberosForPasswordAuthentication": False,
             },
-            'mappers': [
-                {
-                    'name': 'full name',
-                    'providerId': 'full-name-ldap-mapper',
-                    'providerType': 'org.keycloak.storage.ldap.mappers.LDAPStorageMapper',
-                    'config': {
-                        'ldap.full.name.attribute': 'cn',
-                        'read.only': True,
-                        'write.only': False,
-                    }
+            "mappers": [
+                {
+                    "name": "full name",
+                    "providerId": "full-name-ldap-mapper",
+                    "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper",
+                    "config": {
+                        "ldap.full.name.attribute": "cn",
+                        "read.only": True,
+                        "write.only": False,
+                    },
                 }
-            ]
+            ],
         }
         return_value_components_get = [
             [],
@@ -350,13 +322,13 @@ def test_create_with_mappers(self):
                     "is.mandatory.in.ldap": "false",
                     "ldap.attribute": "mail",
                     "read.only": "true",
-                    "user.model.attribute": "email"
+                    "user.model.attribute": "email",
                 },
                 "id": "77e1763f-c51a-4286-bade-75577d64803c",
                 "name": "email",
                 "parentId": "e5f48aa3-b56b-4983-a8ad-2c7b8b5e77cb",
                 "providerId": "user-attribute-ldap-mapper",
-                "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+                "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper",
             },
         ],
         [
@@ -366,23 +338,11 @@ def test_create_with_mappers(self):
                 "providerId": "full-name-ldap-mapper",
                 "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper",
                 "parentId": "eb691537-b73c-4cd8-b481-6031c26499d8",
-                "config": {
-                    "ldap.full.name.attribute": [
-                        "cn"
-                    ],
-                    "read.only": [
-                        "true"
-                    ],
-                    "write.only": [
-                        "false"
-                    ]
-                }
+                "config": {"ldap.full.name.attribute": ["cn"], "read.only": ["true"], "write.only": ["false"]},
             }
-        ]
-    ]
-    return_value_component_delete = [
-        None
+        ],
     ]
+    return_value_component_delete = [None]
     return_value_component_create = [
         {
             "id": "eb691537-b73c-4cd8-b481-6031c26499d8",
@@ -391,82 +351,32 @@ def test_create_with_mappers(self):
             "providerType": "org.keycloak.storage.UserStorageProvider",
             "parentId": "ldap",
             "config": {
-                "pagination": [
-                    "true"
-                ],
-                "connectionPooling": [
-                    "true"
-                ],
-                "usersDn": [
-                    "ou=Users,dc=example,dc=com"
-                ],
-                "cachePolicy": [
-                    "DEFAULT"
-                ],
-                "useKerberosForPasswordAuthentication": [
-                    "false"
-                ],
-                "importEnabled": [
-                    "true"
-                ],
-                "enabled": [
-                    "true"
-                ],
-                "krbPrincipalAttribute": [
-                    "krb5PrincipalName"
-                ],
-                "usernameLDAPAttribute": [
-                    "uid"
-                ],
-                "vendor": [
-                    "other"
-                ],
-                "uuidLDAPAttribute": [
-                    "entryUUID"
-                ],
-                "connectionUrl": [
-                    "ldaps://ldap.example.com:636"
-                ],
-                "allowKerberosAuthentication": [
-                    "false"
-                ],
-                "syncRegistrations": [
-                    "false"
-                ],
-                "authType": [
-                    "none"
-                ],
-                "debug": [
-                    "false"
-                ],
-                "searchScope": [
-                    "1"
-                ],
-                "useTruststoreSpi": [
-                    "ldapsOnly"
-                ],
-                "trustEmail": [
-                    "false"
-                ],
-                "priority": [
-                    "0"
-                ],
-                "userObjectClasses": [
-                    "inetOrgPerson, organizationalPerson"
-                ],
-                "rdnLDAPAttribute": [
-                    "uid"
-                ],
-                "editMode": [
-                    "READ_ONLY"
-                ],
-                "validatePasswordPolicy": [
-                    "false"
-                ],
-                "batchSizeForSync": [
-                    "1000"
-                ]
-            }
+                "pagination": ["true"],
+                "connectionPooling": ["true"],
+                "usersDn": ["ou=Users,dc=example,dc=com"],
+                "cachePolicy": ["DEFAULT"],
+                "useKerberosForPasswordAuthentication": ["false"],
+                "importEnabled": ["true"],
+                "enabled": ["true"],
+                "krbPrincipalAttribute": ["krb5PrincipalName"],
+                "usernameLDAPAttribute": ["uid"],
+                "vendor": ["other"],
+                "uuidLDAPAttribute": ["entryUUID"],
+                "connectionUrl": ["ldaps://ldap.example.com:636"],
+                "allowKerberosAuthentication": ["false"],
+                "syncRegistrations": ["false"],
+                "authType": ["none"],
+                "debug": ["false"],
+                "searchScope": ["1"],
+                "useTruststoreSpi": ["ldapsOnly"],
+                "trustEmail": ["false"],
+                "priority": ["0"],
+                "userObjectClasses": ["inetOrgPerson, organizationalPerson"],
+                "rdnLDAPAttribute": ["uid"],
+                "editMode": ["READ_ONLY"],
+                "validatePasswordPolicy": ["false"],
+                "batchSizeForSync": ["1000"],
+            },
         },
         {
             "id": "2dfadafd-8b34-495f-a98b-153e71a22311",
@@ -474,18 +384,8 @@ def test_create_with_mappers(self):
             "providerId": "full-name-ldap-mapper",
             "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper",
             "parentId": "eb691537-b73c-4cd8-b481-6031c26499d8",
-            "config": {
-                "ldap.full.name.attribute": [
-                    "cn"
-                ],
-                "read.only": [
-                    "true"
-                ],
-                "write.only": [
-                    "false"
-                ]
-            }
-        }
+            "config": {"ldap.full.name.attribute": ["cn"], "read.only": ["true"], "write.only": ["false"]},
+        },
     ]
     changed = True

@@ -493,8 +393,15 @@
         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \
-                        as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component):
+                with patch_keycloak_api(
+                    get_components=return_value_components_get, create_component=return_value_component_create
+                ) as (
+                    mock_get_components,
+                    mock_get_component,
+                    mock_create_component,
+                    mock_update_component,
+                    mock_delete_component,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -505,31 +412,34 @@ def test_create_with_mappers(self):
         self.assertEqual(len(mock_delete_component.mock_calls), 1)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+        self.assertIs(exec_info.exception.args[0]["changed"], changed)

     def test_delete_when_absent(self):
         """Remove an absent user federation"""

         module_args = {
-            'auth_keycloak_url': 'http://keycloak.url/auth',
-            'auth_realm': 'master',
-            'auth_username': 'admin',
-            'auth_password': 'admin',
-            'realm': 'realm-name',
-            'name': 'kerberos',
-            'state': 'absent',
+            "auth_keycloak_url": "http://keycloak.url/auth",
+            "auth_realm": "master",
+            "auth_username": "admin",
+            "auth_password": "admin",
+            "realm": "realm-name",
+            "name": "kerberos",
+            "state": "absent",
         }
-        return_value_components_get = [
-            []
-        ]
+        return_value_components_get = [[]]
         changed = False

         # Run the module
         with set_module_args(module_args):
             with mock_good_connection():
-                with patch_keycloak_api(get_components=return_value_components_get) \
-                        as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component):
+                with patch_keycloak_api(get_components=return_value_components_get) as (
+                    mock_get_components,
+                    mock_get_component,
+                    mock_create_component,
+                    mock_update_component,
+                    mock_delete_component,
+                ):
                     with self.assertRaises(AnsibleExitJson) as exec_info:
                         self.module.main()

@@ -540,19 +450,19 @@ def test_delete_when_absent(self):
         self.assertEqual(len(mock_delete_component.mock_calls), 0)

         # Verify that the module's changed status matches what is expected
-        self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_when_present(self): """Remove an existing user federation""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'realm': 'realm-name', - 'name': 'kerberos', - 'state': 'absent', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_realm": "master", + "auth_username": "admin", + "auth_password": "admin", + "realm": "realm-name", + "name": "kerberos", + "state": "absent", } return_value_components_get = [ [ @@ -563,46 +473,35 @@ def test_delete_when_present(self): "providerType": "org.keycloak.storage.UserStorageProvider", "parentId": "kerberos", "config": { - "serverPrincipal": [ - "princ" - ], - "allowPasswordAuthentication": [ - "false" - ], - "keyTab": [ - "keytab" - ], - "cachePolicy": [ - "DEFAULT" - ], - "updateProfileFirstLogin": [ - "false" - ], - "kerberosRealm": [ - "REALM" - ], - "priority": [ - "0" - ], - "enabled": [ - "false" - ] - } + "serverPrincipal": ["princ"], + "allowPasswordAuthentication": ["false"], + "keyTab": ["keytab"], + "cachePolicy": ["DEFAULT"], + "updateProfileFirstLogin": ["false"], + "kerberosRealm": ["REALM"], + "priority": ["0"], + "enabled": ["false"], + }, } ], - [] - ] - return_value_component_delete = [ - None + [], ] + return_value_component_delete = [None] changed = True # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_components_get, delete_component=return_value_component_delete) \ - as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with patch_keycloak_api( + get_components=return_value_components_get, delete_component=return_value_component_delete + ) as ( + mock_get_components, + mock_get_component, + mock_create_component, + mock_update_component, + mock_delete_component, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -613,8 +512,8 @@ def test_delete_when_present(self): self.assertEqual(len(mock_delete_component.mock_calls), 1) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_keycloak_userprofile.py b/tests/unit/plugins/modules/test_keycloak_userprofile.py index b6b0122f7b3..7032a2a18d3 100644 --- a/tests/unit/plugins/modules/test_keycloak_userprofile.py +++ b/tests/unit/plugins/modules/test_keycloak_userprofile.py @@ -1,4 +1,3 @@ - # Copyright (c) 2024, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -9,7 +8,11 @@ from contextlib import contextmanager from unittest.mock import patch -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules import keycloak_userprofile @@ -20,17 +23,24 @@ @contextmanager -def patch_keycloak_api(get_components=None, get_component=None, create_component=None, 
update_component=None, delete_component=None): - """Mock context manager for patching the methods in KeycloakAPI - """ +def patch_keycloak_api( + get_components=None, get_component=None, create_component=None, update_component=None, delete_component=None +): + """Mock context manager for patching the methods in KeycloakAPI""" obj = keycloak_userprofile.KeycloakAPI - with patch.object(obj, 'get_components', side_effect=get_components) as mock_get_components: - with patch.object(obj, 'get_component', side_effect=get_component) as mock_get_component: - with patch.object(obj, 'create_component', side_effect=create_component) as mock_create_component: - with patch.object(obj, 'update_component', side_effect=update_component) as mock_update_component: - with patch.object(obj, 'delete_component', side_effect=delete_component) as mock_delete_component: - yield mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component + with patch.object(obj, "get_components", side_effect=get_components) as mock_get_components: + with patch.object(obj, "get_component", side_effect=get_component) as mock_get_component: + with patch.object(obj, "create_component", side_effect=create_component) as mock_create_component: + with patch.object(obj, "update_component", side_effect=update_component) as mock_update_component: + with patch.object(obj, "delete_component", side_effect=delete_component) as mock_delete_component: + yield ( + mock_get_components, + mock_get_component, + mock_create_component, + mock_update_component, + mock_delete_component, + ) def get_response(object_with_future_response, method, get_id_call_count): @@ -47,9 +57,10 @@ def get_response(object_with_future_response, method, get_id_call_count): def build_mocked_request(get_id_user_count, response_dict): def _mocked_requests(*args, **kwargs): url = args[0] - method = kwargs['method'] + method = kwargs["method"] future_response = response_dict.get(url, None) return get_response(future_response, method, get_id_user_count) + return _mocked_requests @@ -57,19 +68,23 @@ def create_wrapper(text_as_string): """Allow to mock many times a call to one address. Without this function, the StringIO is empty for the second call. 
""" + def _create_wrapper(): return StringIO(text_as_string) + return _create_wrapper def mock_good_connection(): token_response = { - 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), + "http://keycloak.url/auth/realms/master/protocol/openid-connect/token": create_wrapper( + '{"access_token": "alongtoken"}' + ), } return patch( - 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + "ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url", side_effect=build_mocked_request(count(), token_response), - autospec=True + autospec=True, ) @@ -98,118 +113,52 @@ def test_create_when_absent(self): "displayName": "${username}", "multivalued": False, "name": "username", - "permissions": { - "edit": [ - "admin", - "user" - ], - "view": [ - "admin", - "user" - ] - }, + "permissions": {"edit": ["admin", "user"], "view": ["admin", "user"]}, "required": None, "validations": { - "length": { - "max": 255, - "min": 3 - }, + "length": {"max": 255, "min": 3}, "up_username_not_idn_homograph": {}, - "username_prohibited_characters": {} - } + "username_prohibited_characters": {}, + }, }, { "annotations": {}, "displayName": "${email}", "multivalued": False, "name": "email", - "permissions": { - "edit": [ - "admin", - "user" - ], - "view": [ - "admin", - "user" - ] - }, - "required": { - "roles": [ - "user" - ] - }, - "validations": { - "email": {}, - "length": { - "max": 255 - } - } + "permissions": {"edit": ["admin", "user"], "view": ["admin", "user"]}, + "required": {"roles": ["user"]}, + "validations": {"email": {}, "length": {"max": 255}}, }, { "annotations": {}, "displayName": "${firstName}", "multivalued": False, "name": "firstName", - "permissions": { - "edit": [ - "admin", - "user" - ], - "view": [ - "admin", - "user" - ] - }, - "required": { - "roles": [ - "user" - ] - }, - "validations": { - "length": { - "max": 255 - }, - "person_name_prohibited_characters": {} - } + "permissions": {"edit": ["admin", "user"], "view": ["admin", "user"]}, + "required": {"roles": ["user"]}, + "validations": {"length": {"max": 255}, "person_name_prohibited_characters": {}}, }, { "annotations": {}, "displayName": "${lastName}", "multivalued": False, "name": "lastName", - "permissions": { - "edit": [ - "admin", - "user" - ], - "view": [ - "admin", - "user" - ] - }, - "required": { - "roles": [ - "user" - ] - }, - "validations": { - "length": { - "max": 255 - }, - "person_name_prohibited_characters": {} - } - } + "permissions": {"edit": ["admin", "user"], "view": ["admin", "user"]}, + "required": {"roles": ["user"]}, + "validations": {"length": {"max": 255}, "person_name_prohibited_characters": {}}, + }, ], "groups": [ { "displayDescription": "Attributes, which refer to user metadata", "displayHeader": "User metadata", - "name": "user-metadata" + "name": "user-metadata", } ], } ] - } + }, } return_value_component_create = [ { @@ -225,110 +174,42 @@ def test_create_when_absent(self): "name": "username", "displayName": "${username}", "validations": { - "length": { - "min": 3, - "max": 255 - }, + "length": {"min": 3, "max": 255}, "username-prohibited-characters": {}, - "up-username-not-idn-homograph": {} - }, - "permissions": { - "view": [ - "admin", - "user" - ], - "edit": [ - "admin", - "user" - ] + "up-username-not-idn-homograph": {}, }, + "permissions": {"view": ["admin", "user"], "edit": ["admin", "user"]}, "multivalued": False, "annotations": {}, - "required": None + 
"required": None, }, { "name": "email", "displayName": "${email}", - "validations": { - "email": {}, - "length": { - "max": 255 - } - }, - "required": { - "roles": [ - "user" - ] - }, - "permissions": { - "view": [ - "admin", - "user" - ], - "edit": [ - "admin", - "user" - ] - }, + "validations": {"email": {}, "length": {"max": 255}}, + "required": {"roles": ["user"]}, + "permissions": {"view": ["admin", "user"], "edit": ["admin", "user"]}, "multivalued": False, - "annotations": {} + "annotations": {}, }, { "name": "firstName", "displayName": "${firstName}", - "validations": { - "length": { - "max": 255 - }, - "person-name-prohibited-characters": {} - }, - "required": { - "roles": [ - "user" - ] - }, - "permissions": { - "view": [ - "admin", - "user" - ], - "edit": [ - "admin", - "user" - ] - }, + "validations": {"length": {"max": 255}, "person-name-prohibited-characters": {}}, + "required": {"roles": ["user"]}, + "permissions": {"view": ["admin", "user"], "edit": ["admin", "user"]}, "multivalued": False, - "annotations": {} + "annotations": {}, }, { "name": "lastName", "displayName": "${lastName}", - "validations": { - "length": { - "max": 255 - }, - "person-name-prohibited-characters": {} - }, - "required": { - - - "roles": [ - "user" - ] - }, - "permissions": { - "view": [ - "admin", - "user" - ], - "edit": [ - "admin", - "user" - ] - }, + "validations": {"length": {"max": 255}, "person-name-prohibited-characters": {}}, + "required": {"roles": ["user"]}, + "permissions": {"view": ["admin", "user"], "edit": ["admin", "user"]}, "multivalued": False, - "annotations": {} - } + "annotations": {}, + }, ], "groups": [ { @@ -339,20 +220,25 @@ def test_create_when_absent(self): ], } ] - } + }, } ] - return_value_get_components_get = [ - [], [] - ] + return_value_get_components_get = [[], []] changed = True # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_get_components_get, create_component=return_value_component_create) as ( - mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with patch_keycloak_api( + get_components=return_value_get_components_get, create_component=return_value_component_create + ) as ( + mock_get_components, + mock_get_component, + mock_create_component, + mock_update_component, + mock_delete_component, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -363,7 +249,7 @@ def test_create_when_absent(self): self.assertEqual(len(mock_delete_component.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_create_when_present(self): """Update existing userprofile""" @@ -385,118 +271,52 @@ def test_create_when_present(self): "displayName": "${username}", "multivalued": False, "name": "username", - "permissions": { - "edit": [ - "admin", - "user" - ], - "view": [ - "admin", - "user" - ] - }, + "permissions": {"edit": ["admin", "user"], "view": ["admin", "user"]}, "required": None, "validations": { - "length": { - "max": 255, - "min": 3 - }, + "length": {"max": 255, "min": 3}, "up_username_not_idn_homograph": {}, - "username_prohibited_characters": {} - } + "username_prohibited_characters": {}, + }, }, { "annotations": {}, "displayName": "${email}", "multivalued": False, "name": "email", - "permissions": { - "edit": [ - "admin", - "user" - ], - 
"view": [ - "admin", - "user" - ] - }, - "required": { - "roles": [ - "user" - ] - }, - "validations": { - "email": {}, - "length": { - "max": 255 - } - } + "permissions": {"edit": ["admin", "user"], "view": ["admin", "user"]}, + "required": {"roles": ["user"]}, + "validations": {"email": {}, "length": {"max": 255}}, }, { "annotations": {}, "displayName": "${firstName}", "multivalued": False, "name": "firstName", - "permissions": { - "edit": [ - "admin", - "user" - ], - "view": [ - "admin", - "user" - ] - }, - "required": { - "roles": [ - "user" - ] - }, - "validations": { - "length": { - "max": 255 - }, - "person_name_prohibited_characters": {} - } + "permissions": {"edit": ["admin", "user"], "view": ["admin", "user"]}, + "required": {"roles": ["user"]}, + "validations": {"length": {"max": 255}, "person_name_prohibited_characters": {}}, }, { "annotations": {}, "displayName": "${lastName}", "multivalued": False, "name": "lastName", - "permissions": { - "edit": [ - "admin", - "user" - ], - "view": [ - "admin", - "user" - ] - }, - "required": { - "roles": [ - "user" - ] - }, - "validations": { - "length": { - "max": 255 - }, - "person_name_prohibited_characters": {} - } - } + "permissions": {"edit": ["admin", "user"], "view": ["admin", "user"]}, + "required": {"roles": ["user"]}, + "validations": {"length": {"max": 255}, "person_name_prohibited_characters": {}}, + }, ], "groups": [ { "displayDescription": "Attributes, which refer to user metadata", "displayHeader": "User metadata", - "name": "user-metadata" + "name": "user-metadata", } ], } ] - } + }, } return_value_get_components_get = [ [ @@ -507,141 +327,87 @@ def test_create_when_present(self): "providerType": "org.keycloak.userprofile.UserProfileProvider", "config": { "kc.user.profile.config": [ - dumps({ - "attributes": [ - { - "name": "username", - "displayName": "${username}", - "validations": { - "length": { - "min": 3, - "max": 255 + dumps( + { + "attributes": [ + { + "name": "username", + "displayName": "${username}", + "validations": { + "length": {"min": 3, "max": 255}, + "username-prohibited-characters": {}, + "up-username-not-idn-homograph": {}, }, - "username-prohibited-characters": {}, - "up-username-not-idn-homograph": {} - }, - "permissions": { - "view": [ - "admin", - "user" - ], - "edit": [ - "admin", - "user" - ] - }, - "multivalued": False, - "annotations": {}, - "required": None - }, - { - "name": "email", - "displayName": "${email}", - "validations": { - "email": {}, - "length": { - "max": 255 - } + "permissions": {"view": ["admin", "user"], "edit": ["admin", "user"]}, + "multivalued": False, + "annotations": {}, + "required": None, }, - "required": { - "roles": [ - "user" - ] + { + "name": "email", + "displayName": "${email}", + "validations": {"email": {}, "length": {"max": 255}}, + "required": {"roles": ["user"]}, + "permissions": {"view": ["admin", "user"], "edit": ["admin", "user"]}, + "multivalued": False, + "annotations": {}, }, - "permissions": { - "view": [ - "admin", - "user" - ], - "edit": [ - "admin", - "user" - ] - }, - "multivalued": False, - "annotations": {} - }, - { - "name": "firstName", - "displayName": "${firstName}", - "validations": { - "length": { - "max": 255 + { + "name": "firstName", + "displayName": "${firstName}", + "validations": { + "length": {"max": 255}, + "person-name-prohibited-characters": {}, }, - "person-name-prohibited-characters": {} - }, - "required": { - "roles": [ - "user" - ] - }, - "permissions": { - "view": [ - "admin", - "user" - ], - "edit": [ - "admin", - "user" - ] + 
"required": {"roles": ["user"]}, + "permissions": {"view": ["admin", "user"], "edit": ["admin", "user"]}, + "multivalued": False, + "annotations": {}, }, - "multivalued": False, - "annotations": {} - }, - { - "name": "lastName", - "displayName": "${lastName}", - "validations": { - "length": { - "max": 255 + { + "name": "lastName", + "displayName": "${lastName}", + "validations": { + "length": {"max": 255}, + "person-name-prohibited-characters": {}, }, - "person-name-prohibited-characters": {} - }, - "required": { - "roles": [ - "user" - ] + "required": {"roles": ["user"]}, + "permissions": {"view": ["admin", "user"], "edit": ["admin", "user"]}, + "multivalued": False, + "annotations": {}, }, - "permissions": { - "view": [ - "admin", - "user" - ], - "edit": [ - "admin", - "user" - ] - }, - "multivalued": False, - "annotations": {} - } - ], - "groups": [ - { - "name": "user-metadata", - "displayHeader": "User metadata", - "displayDescription": "Attributes, which refer to user metadata", - } - ], - }) + ], + "groups": [ + { + "name": "user-metadata", + "displayHeader": "User metadata", + "displayDescription": "Attributes, which refer to user metadata", + } + ], + } + ) ] - } + }, } ], - [] - ] - return_value_component_update = [ - None + [], ] + return_value_component_update = [None] changed = True # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_get_components_get, - update_component=return_value_component_update) as ( - mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with patch_keycloak_api( + get_components=return_value_get_components_get, update_component=return_value_component_update + ) as ( + mock_get_components, + mock_get_component, + mock_create_component, + mock_update_component, + mock_delete_component, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -652,23 +418,21 @@ def test_create_when_present(self): self.assertEqual(len(mock_delete_component.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_when_absent(self): """Remove an absent userprofile""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'parent_id': 'realm-name', - 'provider_id': 'declarative-user-profile', - 'state': 'absent', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_realm": "master", + "auth_username": "admin", + "auth_password": "admin", + "parent_id": "realm-name", + "provider_id": "declarative-user-profile", + "state": "absent", } - return_value_get_components_get = [ - [] - ] + return_value_get_components_get = [[]] changed = False # Run the module @@ -676,7 +440,12 @@ def test_delete_when_absent(self): with set_module_args(module_args): with mock_good_connection(): with patch_keycloak_api(get_components=return_value_get_components_get) as ( - mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + mock_get_components, + mock_get_component, + mock_create_component, + mock_update_component, + mock_delete_component, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -687,19 +456,19 @@ def test_delete_when_absent(self): 
self.assertEqual(len(mock_delete_component.mock_calls), 0) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) def test_delete_when_present(self): """Remove an existing userprofile""" module_args = { - 'auth_keycloak_url': 'http://keycloak.url/auth', - 'auth_realm': 'master', - 'auth_username': 'admin', - 'auth_password': 'admin', - 'parent_id': 'realm-name', - 'provider_id': 'declarative-user-profile', - 'state': 'absent', + "auth_keycloak_url": "http://keycloak.url/auth", + "auth_realm": "master", + "auth_username": "admin", + "auth_password": "admin", + "parent_id": "realm-name", + "provider_id": "declarative-user-profile", + "state": "absent", } return_value_get_components_get = [ [ @@ -710,140 +479,87 @@ def test_delete_when_present(self): "providerType": "org.keycloak.userprofile.UserProfileProvider", "config": { "kc.user.profile.config": [ - dumps({ - "attributes": [ - { - "name": "username", - "displayName": "${username}", - "validations": { - "length": { - "min": 3, - "max": 255 + dumps( + { + "attributes": [ + { + "name": "username", + "displayName": "${username}", + "validations": { + "length": {"min": 3, "max": 255}, + "username-prohibited-characters": {}, + "up-username-not-idn-homograph": {}, }, - "username-prohibited-characters": {}, - "up-username-not-idn-homograph": {} + "permissions": {"view": ["admin", "user"], "edit": ["admin", "user"]}, + "multivalued": False, + "annotations": {}, + "required": None, }, - "permissions": { - "view": [ - "admin", - "user" - ], - "edit": [ - "admin", - "user" - ] + { + "name": "email", + "displayName": "${email}", + "validations": {"email": {}, "length": {"max": 255}}, + "required": {"roles": ["user"]}, + "permissions": {"view": ["admin", "user"], "edit": ["admin", "user"]}, + "multivalued": False, + "annotations": {}, }, - "multivalued": False, - "annotations": {}, - "required": None - }, - { - "name": "email", - "displayName": "${email}", - "validations": { - "email": {}, - "length": { - "max": 255 - } - }, - "required": { - "roles": [ - "user" - ] - }, - "permissions": { - "view": [ - "admin", - "user" - ], - "edit": [ - "admin", - "user" - ] - }, - "multivalued": False, - "annotations": {} - }, - { - "name": "firstName", - "displayName": "${firstName}", - "validations": { - "length": { - "max": 255 + { + "name": "firstName", + "displayName": "${firstName}", + "validations": { + "length": {"max": 255}, + "person-name-prohibited-characters": {}, }, - "person-name-prohibited-characters": {} - }, - "required": { - "roles": [ - "user" - ] + "required": {"roles": ["user"]}, + "permissions": {"view": ["admin", "user"], "edit": ["admin", "user"]}, + "multivalued": False, + "annotations": {}, }, - "permissions": { - "view": [ - "admin", - "user" - ], - "edit": [ - "admin", - "user" - ] - }, - "multivalued": False, - "annotations": {} - }, - { - "name": "lastName", - "displayName": "${lastName}", - "validations": { - "length": { - "max": 255 + { + "name": "lastName", + "displayName": "${lastName}", + "validations": { + "length": {"max": 255}, + "person-name-prohibited-characters": {}, }, - "person-name-prohibited-characters": {} + "required": {"roles": ["user"]}, + "permissions": {"view": ["admin", "user"], "edit": ["admin", "user"]}, + "multivalued": False, + "annotations": {}, }, - "required": { - "roles": [ - "user" - ] - }, - "permissions": { - "view": [ - "admin", - "user" - ], - "edit": [ - 
"admin", - "user" - ] - }, - "multivalued": False, - "annotations": {} - } - ], - "groups": [ - { - "name": "user-metadata", - "displayHeader": "User metadata", - "displayDescription": "Attributes, which refer to user metadata", - } - ], - }) + ], + "groups": [ + { + "name": "user-metadata", + "displayHeader": "User metadata", + "displayDescription": "Attributes, which refer to user metadata", + } + ], + } + ) ] - } + }, } ], - [] - ] - return_value_component_delete = [ - None + [], ] + return_value_component_delete = [None] changed = True # Run the module with set_module_args(module_args): with mock_good_connection(): - with patch_keycloak_api(get_components=return_value_get_components_get, delete_component=return_value_component_delete) as ( - mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component): + with patch_keycloak_api( + get_components=return_value_get_components_get, delete_component=return_value_component_delete + ) as ( + mock_get_components, + mock_get_component, + mock_create_component, + mock_update_component, + mock_delete_component, + ): with self.assertRaises(AnsibleExitJson) as exec_info: self.module.main() @@ -854,8 +570,8 @@ def test_delete_when_present(self): self.assertEqual(len(mock_delete_component.mock_calls), 1) # Verify that the module's changed status matches what is expected - self.assertIs(exec_info.exception.args[0]['changed'], changed) + self.assertIs(exec_info.exception.args[0]["changed"], changed) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_linode.py b/tests/unit/plugins/modules/test_linode.py index 2b53d752b02..3c911a1992f 100644 --- a/tests/unit/plugins/modules/test_linode.py +++ b/tests/unit/plugins/modules/test_linode.py @@ -12,7 +12,7 @@ from .linode_conftest import api_key, auth # noqa: F401, pylint: disable=unused-import if not linode.HAS_LINODE: - pytestmark = pytest.mark.skip('test_linode.py requires the `linode-python` module') + pytestmark = pytest.mark.skip("test_linode.py requires the `linode-python` module") def test_name_is_a_required_parameter(api_key, auth): diff --git a/tests/unit/plugins/modules/test_linode_v4.py b/tests/unit/plugins/modules/test_linode_v4.py index 6f0601814f2..a1fe56cbf21 100644 --- a/tests/unit/plugins/modules/test_linode_v4.py +++ b/tests/unit/plugins/modules/test_linode_v4.py @@ -10,7 +10,7 @@ import pytest -linode_apiv4 = pytest.importorskip('linode_api4') +linode_apiv4 = pytest.importorskip("linode_api4") from linode_api4.errors import ApiError as LinodeApiError from linode_api4 import LinodeClient @@ -24,31 +24,29 @@ def test_mandatory_state_is_validated(capfd): with pytest.raises(SystemExit): - with set_module_args({'label': 'foo'}): + with set_module_args({"label": "foo"}): linode_v4.initialise_module() out, err = capfd.readouterr() results = json.loads(out) - assert all(txt in results['msg'] for txt in ('state', 'required')) - assert results['failed'] is True + assert all(txt in results["msg"] for txt in ("state", "required")) + assert results["failed"] is True def test_mandatory_label_is_validated(capfd): with pytest.raises(SystemExit): - with set_module_args({'state': 'present'}): + with set_module_args({"state": "present"}): linode_v4.initialise_module() out, err = capfd.readouterr() results = json.loads(out) - assert all(txt in results['msg'] for txt in ('label', 'required')) - assert results['failed'] is True + assert all(txt in results["msg"] for txt in ("label", "required")) + 
assert results["failed"] is True -def test_mandatory_access_token_is_validated(default_args, - no_access_token_in_env, - capfd): +def test_mandatory_access_token_is_validated(default_args, no_access_token_in_env, capfd): with pytest.raises(SystemExit): with set_module_args(default_args): linode_v4.initialise_module() @@ -56,65 +54,62 @@ def test_mandatory_access_token_is_validated(default_args, out, err = capfd.readouterr() results = json.loads(out) - assert results['failed'] is True - assert all(txt in results['msg'] for txt in ( - 'missing', - 'required', - 'access_token', - )) + assert results["failed"] is True + assert all( + txt in results["msg"] + for txt in ( + "missing", + "required", + "access_token", + ) + ) -def test_mandatory_access_token_passed_in_env(default_args, - access_token): +def test_mandatory_access_token_passed_in_env(default_args, access_token): with set_module_args(default_args): - try: module = linode_v4.initialise_module() except SystemExit: pytest.fail("'access_token' is passed in environment") - now_set_token = module.params['access_token'] - assert now_set_token == os.environ['LINODE_ACCESS_TOKEN'] + now_set_token = module.params["access_token"] + assert now_set_token == os.environ["LINODE_ACCESS_TOKEN"] -def test_mandatory_access_token_passed_in_as_parameter(default_args, - no_access_token_in_env): - default_args.update({'access_token': 'foo'}) +def test_mandatory_access_token_passed_in_as_parameter(default_args, no_access_token_in_env): + default_args.update({"access_token": "foo"}) with set_module_args(default_args): - try: module = linode_v4.initialise_module() except SystemExit: pytest.fail("'access_token' is passed in as parameter") - assert module.params['access_token'] == 'foo' + assert module.params["access_token"] == "foo" -def test_instance_by_label_cannot_authenticate(capfd, access_token, - default_args): +def test_instance_by_label_cannot_authenticate(capfd, access_token, default_args): with set_module_args(default_args): module = linode_v4.initialise_module() - client = LinodeClient(module.params['access_token']) + client = LinodeClient(module.params["access_token"]) - target = 'linode_api4.linode_client.LinodeGroup.instances' - with mock.patch(target, side_effect=LinodeApiError('foo')): + target = "linode_api4.linode_client.LinodeGroup.instances" + with mock.patch(target, side_effect=LinodeApiError("foo")): with pytest.raises(SystemExit): linode_v4.maybe_instance_from_label(module, client) out, err = capfd.readouterr() results = json.loads(out) - assert results['failed'] is True - assert 'Unable to query the Linode API' in results['msg'] + assert results["failed"] is True + assert "Unable to query the Linode API" in results["msg"] -def test_no_instances_found_with_label_gives_none(default_args, - access_token): +def test_no_instances_found_with_label_gives_none(default_args, access_token): with set_module_args(default_args): module = linode_v4.initialise_module() - client = LinodeClient(module.params['access_token']) + client = LinodeClient(module.params["access_token"]) - target = 'linode_api4.linode_client.LinodeGroup.instances' + target = "linode_api4.linode_client.LinodeGroup.instances" with mock.patch(target, return_value=[]): result = linode_v4.maybe_instance_from_label(module, client) @@ -122,120 +117,93 @@ def test_no_instances_found_with_label_gives_none(default_args, def test_optional_region_is_validated(default_args, capfd, access_token): - default_args.update({'type': 'foo', 'image': 'bar'}) + default_args.update({"type": "foo", 
"image": "bar"}) with set_module_args(default_args): - with pytest.raises(SystemExit): linode_v4.initialise_module() out, err = capfd.readouterr() results = json.loads(out) - assert results['failed'] is True - assert all(txt in results['msg'] for txt in ( - 'required', - 'together', - 'region' - )) + assert results["failed"] is True + assert all(txt in results["msg"] for txt in ("required", "together", "region")) def test_optional_type_is_validated(default_args, capfd, access_token): - default_args.update({'region': 'foo', 'image': 'bar'}) + default_args.update({"region": "foo", "image": "bar"}) with set_module_args(default_args): - with pytest.raises(SystemExit): linode_v4.initialise_module() out, err = capfd.readouterr() results = json.loads(out) - assert results['failed'] is True - assert all(txt in results['msg'] for txt in ( - 'required', - 'together', - 'type' - )) + assert results["failed"] is True + assert all(txt in results["msg"] for txt in ("required", "together", "type")) def test_optional_image_is_validated(default_args, capfd, access_token): - default_args.update({'type': 'foo', 'region': 'bar'}) + default_args.update({"type": "foo", "region": "bar"}) with set_module_args(default_args): - with pytest.raises(SystemExit): linode_v4.initialise_module() out, err = capfd.readouterr() results = json.loads(out) - assert results['failed'] is True - assert all(txt in results['msg'] for txt in ( - 'required', - 'together', - 'image' - )) + assert results["failed"] is True + assert all(txt in results["msg"] for txt in ("required", "together", "image")) -@pytest.mark.parametrize('value', [True, False]) +@pytest.mark.parametrize("value", [True, False]) def test_private_ip_valid_values(default_args, access_token, value): - default_args.update({'private_ip': value}) + default_args.update({"private_ip": value}) with set_module_args(default_args): - module = linode_v4.initialise_module() - assert module.params['private_ip'] is value + assert module.params["private_ip"] is value -@pytest.mark.parametrize('value', ['not-a-bool', 42]) +@pytest.mark.parametrize("value", ["not-a-bool", 42]) def test_private_ip_invalid_values(default_args, capfd, access_token, value): - default_args.update({'private_ip': value}) + default_args.update({"private_ip": value}) with set_module_args(default_args): - with pytest.raises(SystemExit): linode_v4.initialise_module() out, err = capfd.readouterr() results = json.loads(out) - assert results['failed'] is True - assert 'not a valid boolean' in results['msg'] + assert results["failed"] is True + assert "not a valid boolean" in results["msg"] def test_private_ip_default_value(default_args, access_token): - default_args.pop('private_ip', None) + default_args.pop("private_ip", None) with set_module_args(default_args): - module = linode_v4.initialise_module() - assert module.params['private_ip'] is False + assert module.params["private_ip"] is False def test_private_ip_is_forwarded_to_linode(default_args, mock_linode, access_token): - default_args.update({'private_ip': True}) + default_args.update({"private_ip": True}) with set_module_args(default_args): - - target = 'linode_api4.linode_client.LinodeGroup.instances' + target = "linode_api4.linode_client.LinodeGroup.instances" with mock.patch(target, return_value=[]): with pytest.raises(SystemExit): - target = 'linode_api4.linode_client.LinodeGroup.instance_create' - with mock.patch(target, return_value=(mock_linode, 'passw0rd')) as instance_create_mock: + target = "linode_api4.linode_client.LinodeGroup.instance_create" 
+ with mock.patch(target, return_value=(mock_linode, "passw0rd")) as instance_create_mock: linode_v4.main() args, kwargs = instance_create_mock.call_args - assert kwargs['private_ip'] is True - - -def test_instance_already_created(default_args, - mock_linode, - capfd, - access_token): - default_args.update({ - 'type': 'foo', - 'region': 'bar', - 'image': 'baz' - }) - with set_module_args(default_args): + assert kwargs["private_ip"] is True - target = 'linode_api4.linode_client.LinodeGroup.instances' + +def test_instance_already_created(default_args, mock_linode, capfd, access_token): + default_args.update({"type": "foo", "region": "bar", "image": "baz"}) + with set_module_args(default_args): + target = "linode_api4.linode_client.LinodeGroup.instances" with mock.patch(target, return_value=[mock_linode]): with pytest.raises(SystemExit) as sys_exit_exc: linode_v4.main() @@ -245,30 +213,19 @@ def test_instance_already_created(default_args, out, err = capfd.readouterr() results = json.loads(out) - assert results['changed'] is False - assert 'root_password' not in results['instance'] - assert ( - results['instance']['label'] == - mock_linode._raw_json['label'] - ) + assert results["changed"] is False + assert "root_password" not in results["instance"] + assert results["instance"]["label"] == mock_linode._raw_json["label"] -def test_instance_to_be_created_without_root_pass(default_args, - mock_linode, - capfd, - access_token): - default_args.update({ - 'type': 'foo', - 'region': 'bar', - 'image': 'baz' - }) +def test_instance_to_be_created_without_root_pass(default_args, mock_linode, capfd, access_token): + default_args.update({"type": "foo", "region": "bar", "image": "baz"}) with set_module_args(default_args): - - target = 'linode_api4.linode_client.LinodeGroup.instances' + target = "linode_api4.linode_client.LinodeGroup.instances" with mock.patch(target, return_value=[]): with pytest.raises(SystemExit) as sys_exit_exc: - target = 'linode_api4.linode_client.LinodeGroup.instance_create' - with mock.patch(target, return_value=(mock_linode, 'passw0rd')): + target = "linode_api4.linode_client.LinodeGroup.instance_create" + with mock.patch(target, return_value=(mock_linode, "passw0rd")): linode_v4.main() assert sys_exit_exc.value.code == 0 @@ -276,30 +233,25 @@ def test_instance_to_be_created_without_root_pass(default_args, out, err = capfd.readouterr() results = json.loads(out) - assert results['changed'] is True - assert ( - results['instance']['label'] == - mock_linode._raw_json['label'] + assert results["changed"] is True + assert results["instance"]["label"] == mock_linode._raw_json["label"] + assert results["instance"]["root_pass"] == "passw0rd" + + +def test_instance_to_be_created_with_root_pass(default_args, mock_linode, capfd, access_token): + default_args.update( + { + "type": "foo", + "region": "bar", + "image": "baz", + "root_pass": "passw0rd", + } ) - assert results['instance']['root_pass'] == 'passw0rd' - - -def test_instance_to_be_created_with_root_pass(default_args, - mock_linode, - capfd, - access_token): - default_args.update({ - 'type': 'foo', - 'region': 'bar', - 'image': 'baz', - 'root_pass': 'passw0rd', - }) with set_module_args(default_args): - - target = 'linode_api4.linode_client.LinodeGroup.instances' + target = "linode_api4.linode_client.LinodeGroup.instances" with mock.patch(target, return_value=[]): with pytest.raises(SystemExit) as sys_exit_exc: - target = 'linode_api4.linode_client.LinodeGroup.instance_create' + target = 
"linode_api4.linode_client.LinodeGroup.instance_create" with mock.patch(target, return_value=mock_linode): linode_v4.main() @@ -308,22 +260,15 @@ def test_instance_to_be_created_with_root_pass(default_args, out, err = capfd.readouterr() results = json.loads(out) - assert results['changed'] is True - assert ( - results['instance']['label'] == - mock_linode._raw_json['label'] - ) - assert 'root_pass' not in results['instance'] + assert results["changed"] is True + assert results["instance"]["label"] == mock_linode._raw_json["label"] + assert "root_pass" not in results["instance"] -def test_instance_to_be_deleted(default_args, - mock_linode, - capfd, - access_token): - default_args.update({'state': 'absent'}) +def test_instance_to_be_deleted(default_args, mock_linode, capfd, access_token): + default_args.update({"state": "absent"}) with set_module_args(default_args): - - target = 'linode_api4.linode_client.LinodeGroup.instances' + target = "linode_api4.linode_client.LinodeGroup.instances" with mock.patch(target, return_value=[mock_linode]): with pytest.raises(SystemExit) as sys_exit_exc: linode_v4.main() @@ -333,21 +278,14 @@ def test_instance_to_be_deleted(default_args, out, err = capfd.readouterr() results = json.loads(out) - assert results['changed'] is True - assert ( - results['instance']['label'] == - mock_linode._raw_json['label'] - ) + assert results["changed"] is True + assert results["instance"]["label"] == mock_linode._raw_json["label"] -def test_instance_already_deleted_no_change(default_args, - mock_linode, - capfd, - access_token): - default_args.update({'state': 'absent'}) +def test_instance_already_deleted_no_change(default_args, mock_linode, capfd, access_token): + default_args.update({"state": "absent"}) with set_module_args(default_args): - - target = 'linode_api4.linode_client.LinodeGroup.instances' + target = "linode_api4.linode_client.LinodeGroup.instances" with mock.patch(target, return_value=[]): with pytest.raises(SystemExit) as sys_exit_exc: linode_v4.main() @@ -357,17 +295,15 @@ def test_instance_already_deleted_no_change(default_args, out, err = capfd.readouterr() results = json.loads(out) - assert results['changed'] is False - assert results['instance'] == {} + assert results["changed"] is False + assert results["instance"] == {} def test_user_agent_created_properly(): try: - from ansible.module_utils.ansible_release import ( - __version__ as ansible_version - ) + from ansible.module_utils.ansible_release import __version__ as ansible_version except ImportError: - ansible_version = 'unknown' + ansible_version = "unknown" - expected_user_agent = f'Ansible-linode_v4_module/{ansible_version}' - assert expected_user_agent == get_user_agent('linode_v4_module') + expected_user_agent = f"Ansible-linode_v4_module/{ansible_version}" + assert expected_user_agent == get_user_agent("linode_v4_module") diff --git a/tests/unit/plugins/modules/test_lvg_rename.py b/tests/unit/plugins/modules/test_lvg_rename.py index ff4a7d6df12..53a47f78fe2 100644 --- a/tests/unit/plugins/modules/test_lvg_rename.py +++ b/tests/unit/plugins/modules/test_lvg_rename.py @@ -7,19 +7,24 @@ from unittest.mock import patch from ansible_collections.community.general.plugins.modules import lvg_rename from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( - AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args) + AnsibleFailJson, + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) -VGS_OUTPUT = '''\ +VGS_OUTPUT = """\ 
vg_data_testhost1;XKZ5gn-YhWY-NlrT-QCFN-qmMG-VGT9-7uOmex vg_sys_testhost2;xgy2SJ-YlYd-fde2-e3oG-zdXL-0xGf-ihqG2H -''' +""" class TestLvgRename(ModuleTestCase): """Tests for lvg_rename internals""" + module = lvg_rename - module_path = 'ansible_collections.community.general.plugins.modules.lvg_rename' + module_path = "ansible_collections.community.general.plugins.modules.lvg_rename" def setUp(self): """Prepare mocks for module testing""" @@ -27,132 +32,128 @@ def setUp(self): self.mock_run_responses = {} - patched_module_get_bin_path = patch(f'{self.module_path}.AnsibleModule.get_bin_path') + patched_module_get_bin_path = patch(f"{self.module_path}.AnsibleModule.get_bin_path") self.mock_module_get_bin_path = patched_module_get_bin_path.start() - self.mock_module_get_bin_path.return_value = '/mocpath' + self.mock_module_get_bin_path.return_value = "/mocpath" self.addCleanup(patched_module_get_bin_path.stop) - patched_module_run_command = patch(f'{self.module_path}.AnsibleModule.run_command') + patched_module_run_command = patch(f"{self.module_path}.AnsibleModule.run_command") self.mock_module_run_command = patched_module_run_command.start() self.addCleanup(patched_module_run_command.stop) def test_vg_not_found_by_name(self): """When the VG by the specified by vg name not found, the module should exit with error""" failed = True - self.mock_module_run_command.side_effect = [(0, VGS_OUTPUT, '')] - expected_msg = 'Both current (vg_missing) and new (vg_data_testhost2) VG are missing.' + self.mock_module_run_command.side_effect = [(0, VGS_OUTPUT, "")] + expected_msg = "Both current (vg_missing) and new (vg_data_testhost2) VG are missing." module_args = { - 'vg': 'vg_missing', - 'vg_new': 'vg_data_testhost2', + "vg": "vg_missing", + "vg_new": "vg_data_testhost2", } with set_module_args(args=module_args): - with self.assertRaises(AnsibleFailJson) as result: self.module.main() self.assertEqual(len(self.mock_module_run_command.mock_calls), 1) - self.assertIs(result.exception.args[0]['failed'], failed) - self.assertEqual(result.exception.args[0]['msg'], expected_msg) + self.assertIs(result.exception.args[0]["failed"], failed) + self.assertEqual(result.exception.args[0]["msg"], expected_msg) def test_vg_not_found_by_uuid(self): """When the VG by the specified vg UUID not found, the module should exit with error""" failed = True - self.mock_module_run_command.side_effect = [(0, VGS_OUTPUT, '')] - expected_msg = 'Both current (Yfj4YG-c8nI-z7w5-B7Fw-i2eM-HqlF-ApFVp0) and new (vg_data_testhost2) VG are missing.' + self.mock_module_run_command.side_effect = [(0, VGS_OUTPUT, "")] + expected_msg = ( + "Both current (Yfj4YG-c8nI-z7w5-B7Fw-i2eM-HqlF-ApFVp0) and new (vg_data_testhost2) VG are missing." 
+ ) module_args = { - 'vg': 'Yfj4YG-c8nI-z7w5-B7Fw-i2eM-HqlF-ApFVp0', - 'vg_new': 'vg_data_testhost2', + "vg": "Yfj4YG-c8nI-z7w5-B7Fw-i2eM-HqlF-ApFVp0", + "vg_new": "vg_data_testhost2", } with set_module_args(args=module_args): - with self.assertRaises(AnsibleFailJson) as result: self.module.main() self.assertEqual(len(self.mock_module_run_command.mock_calls), 1) - self.assertIs(result.exception.args[0]['failed'], failed) - self.assertEqual(result.exception.args[0]['msg'], expected_msg) + self.assertIs(result.exception.args[0]["failed"], failed) + self.assertEqual(result.exception.args[0]["msg"], expected_msg) def test_vg_and_vg_new_both_exists(self): """When a VG found for both vg and vg_new options, the module should exit with error""" failed = True - self.mock_module_run_command.side_effect = [(0, VGS_OUTPUT, '')] - expected_msg = 'The new VG name (vg_sys_testhost2) is already in use.' + self.mock_module_run_command.side_effect = [(0, VGS_OUTPUT, "")] + expected_msg = "The new VG name (vg_sys_testhost2) is already in use." module_args = { - 'vg': 'vg_data_testhost1', - 'vg_new': 'vg_sys_testhost2', + "vg": "vg_data_testhost1", + "vg_new": "vg_sys_testhost2", } with set_module_args(args=module_args): - with self.assertRaises(AnsibleFailJson) as result: self.module.main() self.assertEqual(len(self.mock_module_run_command.mock_calls), 1) - self.assertIs(result.exception.args[0]['failed'], failed) - self.assertEqual(result.exception.args[0]['msg'], expected_msg) + self.assertIs(result.exception.args[0]["failed"], failed) + self.assertEqual(result.exception.args[0]["msg"], expected_msg) def test_vg_needs_renaming(self): """When the VG found for vg option and there is no VG for vg_new option, - the module should call vgrename""" + the module should call vgrename""" changed = True self.mock_module_run_command.side_effect = [ - (0, VGS_OUTPUT, ''), - (0, ' Volume group "vg_data_testhost1" successfully renamed to "vg_data_testhost2"', '') + (0, VGS_OUTPUT, ""), + (0, ' Volume group "vg_data_testhost1" successfully renamed to "vg_data_testhost2"', ""), ] expected_msg = ' Volume group "vg_data_testhost1" successfully renamed to "vg_data_testhost2"' module_args = { - 'vg': '/dev/vg_data_testhost1', - 'vg_new': 'vg_data_testhost2', + "vg": "/dev/vg_data_testhost1", + "vg_new": "vg_data_testhost2", } with set_module_args(args=module_args): - with self.assertRaises(AnsibleExitJson) as result: self.module.main() self.assertEqual(len(self.mock_module_run_command.mock_calls), 2) - self.assertIs(result.exception.args[0]['changed'], changed) - self.assertEqual(result.exception.args[0]['msg'], expected_msg) + self.assertIs(result.exception.args[0]["changed"], changed) + self.assertEqual(result.exception.args[0]["msg"], expected_msg) def test_vg_needs_renaming_in_check_mode(self): """When running in check mode and the VG found for vg option and there is no VG for vg_new option, - the module should not call vgrename""" + the module should not call vgrename""" changed = True - self.mock_module_run_command.side_effect = [(0, VGS_OUTPUT, '')] - expected_msg = 'Running in check mode. The module would rename VG /dev/vg_data_testhost1 to vg_data_testhost2.' + self.mock_module_run_command.side_effect = [(0, VGS_OUTPUT, "")] + expected_msg = "Running in check mode. The module would rename VG /dev/vg_data_testhost1 to vg_data_testhost2." 
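
As the lvg_rename setup above shows, `AnsibleModule.run_command` is mocked with a `side_effect` list so that each expected invocation consumes one `(rc, stdout, stderr)` tuple in order. A small sketch of that convention in isolation (command names and output abbreviated for illustration):

from unittest import mock

# side_effect with a list makes the mock return the next item per call,
# mirroring run_command's (rc, stdout, stderr) contract.
run_command = mock.Mock(side_effect=[
    (0, "vg_data_testhost1;XKZ5gn-...", ""),  # first call: vgs listing
    (0, 'Volume group "vg_data_testhost1" successfully renamed', ""),
])

rc, out, err = run_command("vgs")
assert rc == 0 and "vg_data_testhost1" in out
rc, out, err = run_command("vgrename")
assert "renamed" in out
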
module_args = { - 'vg': '/dev/vg_data_testhost1', - 'vg_new': 'vg_data_testhost2', - '_ansible_check_mode': True, + "vg": "/dev/vg_data_testhost1", + "vg_new": "vg_data_testhost2", + "_ansible_check_mode": True, } with set_module_args(args=module_args): - with self.assertRaises(AnsibleExitJson) as result: self.module.main() self.assertEqual(len(self.mock_module_run_command.mock_calls), 1) - self.assertIs(result.exception.args[0]['changed'], changed) - self.assertEqual(result.exception.args[0]['msg'], expected_msg) + self.assertIs(result.exception.args[0]["changed"], changed) + self.assertEqual(result.exception.args[0]["msg"], expected_msg) def test_vg_needs_no_renaming(self): """When the VG not found for vg option and the VG found for vg_new option, - the module should not call vgrename""" + the module should not call vgrename""" changed = False - self.mock_module_run_command.side_effect = [(0, VGS_OUTPUT, '')] - expected_msg = 'The new VG (vg_data_testhost1) already exists, nothing to do.' + self.mock_module_run_command.side_effect = [(0, VGS_OUTPUT, "")] + expected_msg = "The new VG (vg_data_testhost1) already exists, nothing to do." module_args = { - 'vg': 'vg_data_testhostX', - 'vg_new': 'vg_data_testhost1', + "vg": "vg_data_testhostX", + "vg_new": "vg_data_testhost1", } with set_module_args(args=module_args): - with self.assertRaises(AnsibleExitJson) as result: self.module.main() self.assertEqual(len(self.mock_module_run_command.mock_calls), 1) - self.assertIs(result.exception.args[0]['changed'], changed) - self.assertEqual(result.exception.args[0]['msg'], expected_msg) + self.assertIs(result.exception.args[0]["changed"], changed) + self.assertEqual(result.exception.args[0]["msg"], expected_msg) diff --git a/tests/unit/plugins/modules/test_lxca_cmms.py b/tests/unit/plugins/modules/test_lxca_cmms.py index 0dbf39c17f1..0b59df5b785 100644 --- a/tests/unit/plugins/modules/test_lxca_cmms.py +++ b/tests/unit/plugins/modules/test_lxca_cmms.py @@ -11,35 +11,41 @@ from ansible_collections.community.general.plugins.modules import lxca_cmms -@pytest.fixture(scope='module') -@mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.close_conn', autospec=True) +@pytest.fixture(scope="module") +@mock.patch( + "ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.close_conn", autospec=True +) def setup_module(close_conn): close_conn.return_value = True -class TestMyModule(): - @pytest.mark.parametrize('patch_ansible_module', - [ - {}, - { - "auth_url": "https://10.240.14.195", - "login_user": "USERID", - }, - { - "auth_url": "https://10.240.14.195", - "login_password": "Password", - }, - { - "login_user": "USERID", - "login_password": "Password", - }, - ], - indirect=['patch_ansible_module']) - @pytest.mark.usefixtures('patch_ansible_module') - @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True) - @mock.patch('ansible_collections.community.general.plugins.modules.lxca_cmms.execute_module', autospec=True) - def test_without_required_parameters(self, _setup_conn, _execute_module, - mocker, capfd, setup_module): +class TestMyModule: + @pytest.mark.parametrize( + "patch_ansible_module", + [ + {}, + { + "auth_url": "https://10.240.14.195", + "login_user": "USERID", + }, + { + "auth_url": "https://10.240.14.195", + "login_password": "Password", + }, + { + "login_user": "USERID", + "login_password": "Password", + }, + ], + 
indirect=["patch_ansible_module"], + ) + @pytest.mark.usefixtures("patch_ansible_module") + @mock.patch( + "ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn", + autospec=True, + ) + @mock.patch("ansible_collections.community.general.plugins.modules.lxca_cmms.execute_module", autospec=True) + def test_without_required_parameters(self, _setup_conn, _execute_module, mocker, capfd, setup_module): """Failure must occurs when all parameters are missing""" with pytest.raises(SystemExit): _setup_conn.return_value = "Fake connection" @@ -47,18 +53,20 @@ def test_without_required_parameters(self, _setup_conn, _execute_module, lxca_cmms.main() out, err = capfd.readouterr() results = json.loads(out) - assert results['failed'] - assert 'missing required arguments' in results['msg'] + assert results["failed"] + assert "missing required arguments" in results["msg"] - @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True) - @mock.patch('ansible_collections.community.general.plugins.modules.lxca_cmms.execute_module', autospec=True) - @mock.patch('ansible_collections.community.general.plugins.modules.lxca_cmms.AnsibleModule', autospec=True) + @mock.patch( + "ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn", + autospec=True, + ) + @mock.patch("ansible_collections.community.general.plugins.modules.lxca_cmms.execute_module", autospec=True) + @mock.patch("ansible_collections.community.general.plugins.modules.lxca_cmms.AnsibleModule", autospec=True) def test__argument_spec(self, ansible_mod_cls, _execute_module, _setup_conn, setup_module): expected_arguments_spec = dict( login_user=dict(required=True), login_password=dict(required=True, no_log=True), - command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid', - 'cmms_by_chassis_uuid']), + command_options=dict(default="cmms", choices=["cmms", "cmms_by_uuid", "cmms_by_chassis_uuid"]), auth_url=dict(required=True), uuid=dict(), chassis=dict(), @@ -74,14 +82,14 @@ def test__argument_spec(self, ansible_mod_cls, _execute_module, _setup_conn, set } mod_obj.params = args lxca_cmms.main() - assert mock.call(argument_spec=expected_arguments_spec, - supports_check_mode=False) == ansible_mod_cls.call_args + assert mock.call(argument_spec=expected_arguments_spec, supports_check_mode=False) == ansible_mod_cls.call_args - @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True) - @mock.patch('ansible_collections.community.general.plugins.modules.lxca_cmms._cmms_by_uuid', - autospec=True) - @mock.patch('ansible_collections.community.general.plugins.modules.lxca_cmms.AnsibleModule', - autospec=True) + @mock.patch( + "ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn", + autospec=True, + ) + @mock.patch("ansible_collections.community.general.plugins.modules.lxca_cmms._cmms_by_uuid", autospec=True) + @mock.patch("ansible_collections.community.general.plugins.modules.lxca_cmms.AnsibleModule", autospec=True) def test__cmms_empty_list(self, ansible_mod_cls, _get_cmms, _setup_conn, setup_module): mod_obj = ansible_mod_cls.return_value args = { diff --git a/tests/unit/plugins/modules/test_lxca_nodes.py b/tests/unit/plugins/modules/test_lxca_nodes.py index 42538862047..41a0c48569e 100644 --- a/tests/unit/plugins/modules/test_lxca_nodes.py +++ 
diff --git a/tests/unit/plugins/modules/test_lxca_nodes.py b/tests/unit/plugins/modules/test_lxca_nodes.py
index 42538862047..41a0c48569e 100644
--- a/tests/unit/plugins/modules/test_lxca_nodes.py
+++ b/tests/unit/plugins/modules/test_lxca_nodes.py
@@ -11,35 +11,41 @@
 from ansible_collections.community.general.plugins.modules import lxca_nodes


-@pytest.fixture(scope='module')
-@mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.close_conn', autospec=True)
+@pytest.fixture(scope="module")
+@mock.patch(
+    "ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.close_conn", autospec=True
+)
 def setup_module(close_conn):
     close_conn.return_value = True


-class TestMyModule():
-    @pytest.mark.parametrize('patch_ansible_module',
-                             [
-                                 {},
-                                 {
-                                     "auth_url": "https://10.240.14.195",
-                                     "login_user": "USERID",
-                                 },
-                                 {
-                                     "auth_url": "https://10.240.14.195",
-                                     "login_password": "Password",
-                                 },
-                                 {
-                                     "login_user": "USERID",
-                                     "login_password": "Password",
-                                 },
-                             ],
-                             indirect=['patch_ansible_module'])
-    @pytest.mark.usefixtures('patch_ansible_module')
-    @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
-    @mock.patch('ansible_collections.community.general.plugins.modules.lxca_nodes.execute_module', autospec=True)
-    def test_without_required_parameters(self, _setup_conn, _execute_module,
-                                         mocker, capfd, setup_module):
+class TestMyModule:
+    @pytest.mark.parametrize(
+        "patch_ansible_module",
+        [
+            {},
+            {
+                "auth_url": "https://10.240.14.195",
+                "login_user": "USERID",
+            },
+            {
+                "auth_url": "https://10.240.14.195",
+                "login_password": "Password",
+            },
+            {
+                "login_user": "USERID",
+                "login_password": "Password",
+            },
+        ],
+        indirect=["patch_ansible_module"],
+    )
+    @pytest.mark.usefixtures("patch_ansible_module")
+    @mock.patch(
+        "ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn",
+        autospec=True,
+    )
+    @mock.patch("ansible_collections.community.general.plugins.modules.lxca_nodes.execute_module", autospec=True)
+    def test_without_required_parameters(self, _setup_conn, _execute_module, mocker, capfd, setup_module):
         """Failure must occurs when all parameters are missing"""
         with pytest.raises(SystemExit):
             _setup_conn.return_value = "Fake connection"
@@ -47,20 +53,29 @@ def test_without_required_parameters(self, _setup_conn, _execute_module,
             lxca_nodes.main()
         out, err = capfd.readouterr()
         results = json.loads(out)
-        assert results['failed']
-        assert 'missing required arguments' in results['msg']
+        assert results["failed"]
+        assert "missing required arguments" in results["msg"]

-    @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
-    @mock.patch('ansible_collections.community.general.plugins.modules.lxca_nodes.execute_module', autospec=True)
-    @mock.patch('ansible_collections.community.general.plugins.modules.lxca_nodes.AnsibleModule', autospec=True)
+    @mock.patch(
+        "ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn",
+        autospec=True,
+    )
+    @mock.patch("ansible_collections.community.general.plugins.modules.lxca_nodes.execute_module", autospec=True)
+    @mock.patch("ansible_collections.community.general.plugins.modules.lxca_nodes.AnsibleModule", autospec=True)
     def test__argument_spec(self, ansible_mod_cls, _execute_module, _setup_conn, setup_module):
         expected_arguments_spec = dict(
             login_user=dict(required=True),
             login_password=dict(required=True, no_log=True),
-            command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid',
-                                                           'nodes_by_chassis_uuid',
-                                                           'nodes_status_managed',
-                                                           'nodes_status_unmanaged']),
+            command_options=dict(
+                default="nodes",
+                choices=[
+                    "nodes",
+                    "nodes_by_uuid",
+                    "nodes_by_chassis_uuid",
+                    "nodes_status_managed",
+                    "nodes_status_unmanaged",
+                ],
+            ),
             auth_url=dict(required=True),
             uuid=dict(),
             chassis=dict(),
@@ -76,14 +91,14 @@ def test__argument_spec(self, ansible_mod_cls, _execute_module, _setup_conn, set
         }
         mod_obj.params = args
         lxca_nodes.main()
-        assert mock.call(argument_spec=expected_arguments_spec,
-                         supports_check_mode=False) == ansible_mod_cls.call_args
+        assert mock.call(argument_spec=expected_arguments_spec, supports_check_mode=False) == ansible_mod_cls.call_args

-    @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
-    @mock.patch('ansible_collections.community.general.plugins.modules.lxca_nodes._nodes_by_uuid',
-                autospec=True)
-    @mock.patch('ansible_collections.community.general.plugins.modules.lxca_nodes.AnsibleModule',
-                autospec=True)
+    @mock.patch(
+        "ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn",
+        autospec=True,
+    )
+    @mock.patch("ansible_collections.community.general.plugins.modules.lxca_nodes._nodes_by_uuid", autospec=True)
+    @mock.patch("ansible_collections.community.general.plugins.modules.lxca_nodes.AnsibleModule", autospec=True)
     def test__nodes_empty_list(self, ansible_mod_cls, _get_nodes, _setup_conn, setup_module):
         mod_obj = ansible_mod_cls.return_value
         args = {
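Both lxca test files rely on indirect=["patch_ansible_module"], which delivers each parametrized value to the fixture of that name as request.param instead of passing it to the test function directly. A minimal sketch under that assumption; patched_args is a made-up fixture name, not the collection's real patch_ansible_module fixture:

import pytest


@pytest.fixture
def patched_args(request):
    # A real fixture like patch_ansible_module would serialize request.param
    # into the module's input; here we simply pass the dict through.
    return dict(request.param)


@pytest.mark.parametrize(
    "patched_args",
    [{"auth_url": "https://10.240.14.195", "login_user": "USERID"}],
    indirect=["patched_args"],
)
def test_param_reaches_fixture(patched_args):
    assert patched_args["login_user"] == "USERID"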
diff --git a/tests/unit/plugins/modules/test_macports.py b/tests/unit/plugins/modules/test_macports.py
index 2c4acb447ea..84fb5c86fec 100644
--- a/tests/unit/plugins/modules/test_macports.py
+++ b/tests/unit/plugins/modules/test_macports.py
@@ -11,15 +11,35 @@
 TESTED_MODULE = macports.__name__

 QUERY_PORT_TEST_CASES = [
-    pytest.param('', False, False, id='Not installed'),
-    pytest.param(' git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28', True, False, id='Installed but not active'),
-    pytest.param(' git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28 (active)', True, True, id='Installed and active'),
-    pytest.param(''' git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28
+    pytest.param("", False, False, id="Not installed"),
+    pytest.param(
+        " git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28",
+        True,
+        False,
+        id="Installed but not active",
+    ),
+    pytest.param(
+        " git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28 (active)",
+        True,
+        True,
+        id="Installed and active",
+    ),
+    pytest.param(
+        """ git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28
   git @2.28.1_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28
-''', True, False, id='2 versions installed, neither active'),
-    pytest.param(''' git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28 (active)
+""",
+        True,
+        False,
+        id="2 versions installed, neither active",
+    ),
+    pytest.param(
+        """ git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28 (active)
   git @2.28.1_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28
-''', True, True, id='2 versions installed, one active'),
+""",
+        True,
+        True,
+        id="2 versions installed, one active",
+    ),
 ]


@@ -27,8 +47,8 @@ def test_macports_query_port(mocker, run_cmd_return_val, present_expected, active_expected):
     module = mocker.Mock()
     run_command = mocker.Mock()
-    run_command.return_value = (0, run_cmd_return_val, '')
+    run_command.return_value = (0, run_cmd_return_val, "")
     module.run_command = run_command

-    assert macports.query_port(module, 'port', 'git', state="present") == present_expected
-    assert macports.query_port(module, 'port', 'git', state="active") == active_expected
+    assert macports.query_port(module, "port", "git", state="present") == present_expected
+    assert macports.query_port(module, "port", "git", state="active") == active_expected
diff --git a/tests/unit/plugins/modules/test_maven_artifact.py b/tests/unit/plugins/modules/test_maven_artifact.py
index 39a8d110904..1bdaccde3e1 100644
--- a/tests/unit/plugins/modules/test_maven_artifact.py
+++ b/tests/unit/plugins/modules/test_maven_artifact.py
@@ -9,7 +9,7 @@
 from ansible.module_utils import basic

-pytestmark = pytest.mark.usefixtures('patch_ansible_module')
+pytestmark = pytest.mark.usefixtures("patch_ansible_module")

 maven_metadata_example = b"""
@@ -52,16 +52,21 @@
 """

-@pytest.mark.parametrize('patch_ansible_module, version_by_spec, version_choosed', [
-    (None, "(,3.9]", "3.8.2"),
-    (None, "3.0", "3.8.2"),
-    (None, "[3.7]", "3.7"),
-    (None, "[4.10, 4.12]", "4.12"),
-    (None, "[4.10, 4.12)", "4.11"),
-    (None, "[2.0,)", "4.13-beta-2"),
-])
+@pytest.mark.parametrize(
+    "patch_ansible_module, version_by_spec, version_choosed",
+    [
+        (None, "(,3.9]", "3.8.2"),
+        (None, "3.0", "3.8.2"),
+        (None, "[3.7]", "3.7"),
+        (None, "[4.10, 4.12]", "4.12"),
+        (None, "[4.10, 4.12)", "4.11"),
+        (None, "[2.0,)", "4.13-beta-2"),
+    ],
+)
 def test_find_version_by_spec(mocker, version_by_spec, version_choosed):
-    _getContent = mocker.patch('ansible_collections.community.general.plugins.modules.maven_artifact.MavenDownloader._getContent')
+    _getContent = mocker.patch(
+        "ansible_collections.community.general.plugins.modules.maven_artifact.MavenDownloader._getContent"
+    )
     _getContent.return_value = maven_metadata_example

     artifact = maven_artifact.Artifact("junit", "junit", None, version_by_spec, "jar")
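The maven_artifact test above works by patching a single method, MavenDownloader._getContent, so that version resolution runs against canned maven-metadata.xml bytes instead of a live repository. A minimal sketch of the same patching pattern with stand-in names; Downloader and latest_version are hypothetical, not the module's real API:

from unittest import mock


class Downloader:
    def _get_content(self, url):
        raise RuntimeError("network access disabled in tests")


def latest_version(downloader):
    # Stand-in for real metadata parsing: treat the payload as the version.
    return downloader._get_content("maven-metadata.xml").decode().strip()


with mock.patch.object(Downloader, "_get_content", return_value=b"4.13-beta-2\n"):
    assert latest_version(Downloader()) == "4.13-beta-2"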
name="test", + state="present", + ) + ): module = build_module() - self.get_bin_path.side_effect = ['modprobe'] + self.get_bin_path.side_effect = ["modprobe"] self.module_loaded.side_effect = [True] - self.run_command.side_effect = [(0, '', '')] + self.run_command.side_effect = [(0, "", "")] modprobe = Modprobe(module) modprobe.load_module() assert modprobe.result == { - 'changed': True, - 'name': 'test', - 'params': '', - 'state': 'present', + "changed": True, + "name": "test", + "params": "", + "state": "present", } def test_load_module_unchanged(self): - with set_module_args(dict( - name='test', - state='present', - )): + with set_module_args( + dict( + name="test", + state="present", + ) + ): module = build_module() module.warn = Mock() - self.get_bin_path.side_effect = ['modprobe'] + self.get_bin_path.side_effect = ["modprobe"] self.module_loaded.side_effect = [False] - self.run_command.side_effect = [(0, '', ''), (1, '', '')] + self.run_command.side_effect = [(0, "", ""), (1, "", "")] modprobe = Modprobe(module) modprobe.load_module() - module.warn.assert_called_once_with('') + module.warn.assert_called_once_with("") class TestUnloadModule(ModuleTestCase): @@ -76,10 +83,10 @@ def setUp(self): super().setUp() self.mock_module_loaded = patch( - 'ansible_collections.community.general.plugins.modules.modprobe.Modprobe.module_loaded' + "ansible_collections.community.general.plugins.modules.modprobe.Modprobe.module_loaded" ) - self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command') - self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + self.mock_run_command = patch("ansible.module_utils.basic.AnsibleModule.run_command") + self.mock_get_bin_path = patch("ansible.module_utils.basic.AnsibleModule.get_bin_path") self.module_loaded = self.mock_module_loaded.start() self.run_command = self.mock_run_command.start() @@ -93,52 +100,54 @@ def tearDown(self): self.mock_get_bin_path.stop() def test_unload_module_success(self): - with set_module_args(dict( - name='test', - state='absent', - )): + with set_module_args( + dict( + name="test", + state="absent", + ) + ): module = build_module() - self.get_bin_path.side_effect = ['modprobe'] + self.get_bin_path.side_effect = ["modprobe"] self.module_loaded.side_effect = [False] - self.run_command.side_effect = [(0, '', '')] + self.run_command.side_effect = [(0, "", "")] modprobe = Modprobe(module) modprobe.unload_module() assert modprobe.result == { - 'changed': True, - 'name': 'test', - 'params': '', - 'state': 'absent', + "changed": True, + "name": "test", + "params": "", + "state": "absent", } def test_unload_module_failure(self): - with set_module_args(dict( - name='test', - state='absent', - )): + with set_module_args( + dict( + name="test", + state="absent", + ) + ): module = build_module() module.fail_json = Mock() - self.get_bin_path.side_effect = ['modprobe'] + self.get_bin_path.side_effect = ["modprobe"] self.module_loaded.side_effect = [True] - self.run_command.side_effect = [(1, '', '')] + self.run_command.side_effect = [(1, "", "")] modprobe = Modprobe(module) modprobe.unload_module() dummy_result = { - 'changed': False, - 'name': 'test', - 'state': 'absent', - 'params': '', + "changed": False, + "name": "test", + "state": "absent", + "params": "", } - module.fail_json.assert_called_once_with( - msg='', rc=1, stdout='', stderr='', **dummy_result - ) + module.fail_json.assert_called_once_with(msg="", rc=1, stdout="", stderr="", **dummy_result) class 
TestModuleIsLoadedPersistently(ModuleTestCase): @@ -148,7 +157,7 @@ def setUp(self): super().setUp() - self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + self.mock_get_bin_path = patch("ansible.module_utils.basic.AnsibleModule.get_bin_path") self.get_bin_path = self.mock_get_bin_path.start() @@ -159,58 +168,47 @@ def tearDown(self): self.mock_get_bin_path.stop() def test_module_is_loaded(self): - - with set_module_args(dict( - name='dummy', - state='present', - persistent='present' - )): + with set_module_args(dict(name="dummy", state="present", persistent="present")): module = build_module() - self.get_bin_path.side_effect = ['modprobe'] + self.get_bin_path.side_effect = ["modprobe"] modprobe = Modprobe(module) - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='dummy')) as mocked_file: - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'): - modprobe.modules_files = ['/etc/modules-load.d/dummy.conf'] + with patch( + "ansible_collections.community.general.plugins.modules.modprobe.open", mock_open(read_data="dummy") + ) as mocked_file: + with patch("ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files"): + modprobe.modules_files = ["/etc/modules-load.d/dummy.conf"] assert modprobe.module_is_loaded_persistently - mocked_file.assert_called_once_with('/etc/modules-load.d/dummy.conf') + mocked_file.assert_called_once_with("/etc/modules-load.d/dummy.conf") def test_module_is_not_loaded_empty_file(self): - - with set_module_args(dict( - name='dummy', - state='present', - persistent='present' - )): + with set_module_args(dict(name="dummy", state="present", persistent="present")): module = build_module() - self.get_bin_path.side_effect = ['modprobe'] + self.get_bin_path.side_effect = ["modprobe"] modprobe = Modprobe(module) - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='')) as mocked_file: - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'): - modprobe.modules_files = ['/etc/modules-load.d/dummy.conf'] + with patch( + "ansible_collections.community.general.plugins.modules.modprobe.open", mock_open(read_data="") + ) as mocked_file: + with patch("ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files"): + modprobe.modules_files = ["/etc/modules-load.d/dummy.conf"] assert not modprobe.module_is_loaded_persistently - mocked_file.assert_called_once_with('/etc/modules-load.d/dummy.conf') + mocked_file.assert_called_once_with("/etc/modules-load.d/dummy.conf") def test_module_is_not_loaded_no_files(self): - - with set_module_args(dict( - name='dummy', - state='present', - persistent='present' - )): + with set_module_args(dict(name="dummy", state="present", persistent="present")): module = build_module() - self.get_bin_path.side_effect = ['modprobe'] + self.get_bin_path.side_effect = ["modprobe"] modprobe = Modprobe(module) - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'): + with patch("ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files"): modprobe.modules_files = [] assert not modprobe.module_is_loaded_persistently @@ -222,7 +220,7 @@ def setUp(self): self.skipTest("open_mock doesn't support readline in earlier python versions") super().setUp() - self.mock_get_bin_path = 
patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + self.mock_get_bin_path = patch("ansible.module_utils.basic.AnsibleModule.get_bin_path") self.get_bin_path = self.mock_get_bin_path.start() @@ -233,54 +231,47 @@ def tearDown(self): self.mock_get_bin_path.stop() def test_module_permanent_params_exist(self): - files_content = [ - 'options dummy numdummies=4\noptions dummy dummy_parameter1=6', - 'options dummy dummy_parameter2=5 #Comment\noptions notdummy notdummy_param=5' + "options dummy numdummies=4\noptions dummy dummy_parameter1=6", + "options dummy dummy_parameter2=5 #Comment\noptions notdummy notdummy_param=5", ] mock_files_content = [mock_open(read_data=content).return_value for content in files_content] - with set_module_args(dict( - name='dummy', - state='present', - persistent='present' - )): + with set_module_args(dict(name="dummy", state="present", persistent="present")): module = build_module() - self.get_bin_path.side_effect = ['modprobe'] + self.get_bin_path.side_effect = ["modprobe"] modprobe = Modprobe(module) - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open()) as mocked_file: + with patch( + "ansible_collections.community.general.plugins.modules.modprobe.open", mock_open() + ) as mocked_file: mocked_file.side_effect = mock_files_content - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'): - modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf', '/etc/modprobe.d/dummy2.conf'] + with patch("ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files"): + modprobe.modprobe_files = ["/etc/modprobe.d/dummy1.conf", "/etc/modprobe.d/dummy2.conf"] - assert modprobe.permanent_params == set(['numdummies=4', 'dummy_parameter1=6', 'dummy_parameter2=5']) + assert modprobe.permanent_params == set( + ["numdummies=4", "dummy_parameter1=6", "dummy_parameter2=5"] + ) def test_module_permanent_params_empty(self): - - files_content = [ - '', - '' - ] + files_content = ["", ""] mock_files_content = [mock_open(read_data=content).return_value for content in files_content] - with set_module_args(dict( - name='dummy', - state='present', - persistent='present' - )): + with set_module_args(dict(name="dummy", state="present", persistent="present")): module = build_module() - self.get_bin_path.side_effect = ['modprobe'] + self.get_bin_path.side_effect = ["modprobe"] modprobe = Modprobe(module) - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='')) as mocked_file: + with patch( + "ansible_collections.community.general.plugins.modules.modprobe.open", mock_open(read_data="") + ) as mocked_file: mocked_file.side_effect = mock_files_content - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'): - modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf', '/etc/modprobe.d/dummy2.conf'] + with patch("ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files"): + modprobe.modprobe_files = ["/etc/modprobe.d/dummy1.conf", "/etc/modprobe.d/dummy2.conf"] assert modprobe.permanent_params == set() @@ -289,7 +280,7 @@ class TestCreateModuleFIle(ModuleTestCase): def setUp(self): super().setUp() - self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + self.mock_get_bin_path = patch("ansible.module_utils.basic.AnsibleModule.get_bin_path") self.get_bin_path = self.mock_get_bin_path.start() @@ -300,29 +291,26 @@ def 
tearDown(self): self.mock_get_bin_path.stop() def test_create_file(self): - - with set_module_args(dict( - name='dummy', - state='present', - persistent='present' - )): + with set_module_args(dict(name="dummy", state="present", persistent="present")): module = build_module() - self.get_bin_path.side_effect = ['modprobe'] + self.get_bin_path.side_effect = ["modprobe"] modprobe = Modprobe(module) - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open()) as mocked_file: + with patch( + "ansible_collections.community.general.plugins.modules.modprobe.open", mock_open() + ) as mocked_file: modprobe.create_module_file() - mocked_file.assert_called_once_with('/etc/modules-load.d/dummy.conf', 'w') - mocked_file().write.assert_called_once_with('dummy\n') + mocked_file.assert_called_once_with("/etc/modules-load.d/dummy.conf", "w") + mocked_file().write.assert_called_once_with("dummy\n") class TestCreateModuleOptionsFIle(ModuleTestCase): def setUp(self): super().setUp() - self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + self.mock_get_bin_path = patch("ansible.module_utils.basic.AnsibleModule.get_bin_path") self.get_bin_path = self.mock_get_bin_path.start() @@ -333,30 +321,26 @@ def tearDown(self): self.mock_get_bin_path.stop() def test_create_file(self): - - with set_module_args(dict( - name='dummy', - state='present', - params='numdummies=4', - persistent='present' - )): + with set_module_args(dict(name="dummy", state="present", params="numdummies=4", persistent="present")): module = build_module() - self.get_bin_path.side_effect = ['modprobe'] + self.get_bin_path.side_effect = ["modprobe"] modprobe = Modprobe(module) - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open()) as mocked_file: + with patch( + "ansible_collections.community.general.plugins.modules.modprobe.open", mock_open() + ) as mocked_file: modprobe.create_module_options_file() - mocked_file.assert_called_once_with('/etc/modprobe.d/dummy.conf', 'w') - mocked_file().write.assert_called_once_with('options dummy numdummies=4\n') + mocked_file.assert_called_once_with("/etc/modprobe.d/dummy.conf", "w") + mocked_file().write.assert_called_once_with("options dummy numdummies=4\n") class TestDisableOldParams(ModuleTestCase): def setUp(self): super().setUp() - self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + self.mock_get_bin_path = patch("ansible.module_utils.basic.AnsibleModule.get_bin_path") self.get_bin_path = self.mock_get_bin_path.start() @@ -367,54 +351,48 @@ def tearDown(self): self.mock_get_bin_path.stop() def test_disable_old_params_file_changed(self): - mock_data = 'options dummy numdummies=4' - - with set_module_args(dict( - name='dummy', - state='present', - params='numdummies=4', - persistent='present' - )): + mock_data = "options dummy numdummies=4" + + with set_module_args(dict(name="dummy", state="present", params="numdummies=4", persistent="present")): module = build_module() - self.get_bin_path.side_effect = ['modprobe'] + self.get_bin_path.side_effect = ["modprobe"] modprobe = Modprobe(module) - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data=mock_data)) as mocked_file: - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'): - modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf'] + with patch( + 
"ansible_collections.community.general.plugins.modules.modprobe.open", mock_open(read_data=mock_data) + ) as mocked_file: + with patch("ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files"): + modprobe.modprobe_files = ["/etc/modprobe.d/dummy1.conf"] modprobe.disable_old_params() - mocked_file.assert_called_with('/etc/modprobe.d/dummy1.conf', 'w') - mocked_file().write.assert_called_once_with('#options dummy numdummies=4') + mocked_file.assert_called_with("/etc/modprobe.d/dummy1.conf", "w") + mocked_file().write.assert_called_once_with("#options dummy numdummies=4") def test_disable_old_params_file_unchanged(self): - mock_data = 'options notdummy numdummies=4' - - with set_module_args(dict( - name='dummy', - state='present', - params='numdummies=4', - persistent='present' - )): + mock_data = "options notdummy numdummies=4" + + with set_module_args(dict(name="dummy", state="present", params="numdummies=4", persistent="present")): module = build_module() - self.get_bin_path.side_effect = ['modprobe'] + self.get_bin_path.side_effect = ["modprobe"] modprobe = Modprobe(module) - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data=mock_data)) as mocked_file: - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'): - modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf'] + with patch( + "ansible_collections.community.general.plugins.modules.modprobe.open", mock_open(read_data=mock_data) + ) as mocked_file: + with patch("ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files"): + modprobe.modprobe_files = ["/etc/modprobe.d/dummy1.conf"] modprobe.disable_old_params() - mocked_file.assert_called_once_with('/etc/modprobe.d/dummy1.conf') + mocked_file.assert_called_once_with("/etc/modprobe.d/dummy1.conf") class TestDisableModulePermanent(ModuleTestCase): def setUp(self): super().setUp() - self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + self.mock_get_bin_path = patch("ansible.module_utils.basic.AnsibleModule.get_bin_path") self.get_bin_path = self.mock_get_bin_path.start() @@ -425,42 +403,34 @@ def tearDown(self): self.mock_get_bin_path.stop() def test_disable_module_permanent_file_changed(self): - - with set_module_args(dict( - name='dummy', - state='present', - params='numdummies=4', - persistent='present' - )): + with set_module_args(dict(name="dummy", state="present", params="numdummies=4", persistent="present")): module = build_module() - self.get_bin_path.side_effect = ['modprobe'] + self.get_bin_path.side_effect = ["modprobe"] modprobe = Modprobe(module) - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='dummy')) as mocked_file: - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'): - modprobe.modules_files = ['/etc/modules-load.d/dummy.conf'] + with patch( + "ansible_collections.community.general.plugins.modules.modprobe.open", mock_open(read_data="dummy") + ) as mocked_file: + with patch("ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files"): + modprobe.modules_files = ["/etc/modules-load.d/dummy.conf"] modprobe.disable_module_permanent() - mocked_file.assert_called_with('/etc/modules-load.d/dummy.conf', 'w') - mocked_file().write.assert_called_once_with('#dummy') + mocked_file.assert_called_with("/etc/modules-load.d/dummy.conf", "w") + 
mocked_file().write.assert_called_once_with("#dummy") def test_disable_module_permanent_file_unchanged(self): - - with set_module_args(dict( - name='dummy', - state='present', - params='numdummies=4', - persistent='present' - )): + with set_module_args(dict(name="dummy", state="present", params="numdummies=4", persistent="present")): module = build_module() - self.get_bin_path.side_effect = ['modprobe'] + self.get_bin_path.side_effect = ["modprobe"] modprobe = Modprobe(module) - with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='notdummy')) as mocked_file: - with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'): - modprobe.modules_files = ['/etc/modules-load.d/dummy.conf'] + with patch( + "ansible_collections.community.general.plugins.modules.modprobe.open", mock_open(read_data="notdummy") + ) as mocked_file: + with patch("ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files"): + modprobe.modules_files = ["/etc/modules-load.d/dummy.conf"] modprobe.disable_module_permanent() - mocked_file.assert_called_once_with('/etc/modules-load.d/dummy.conf') + mocked_file.assert_called_once_with("/etc/modules-load.d/dummy.conf") diff --git a/tests/unit/plugins/modules/test_monit.py b/tests/unit/plugins/modules/test_monit.py index 8a03c2a0987..8dc50050b3f 100644 --- a/tests/unit/plugins/modules/test_monit.py +++ b/tests/unit/plugins/modules/test_monit.py @@ -10,7 +10,10 @@ import pytest from ansible_collections.community.general.plugins.modules import monit -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, +) TEST_OUTPUT = """ @@ -26,23 +29,23 @@ def setUp(self): self.module = mock.MagicMock() self.module.exit_json.side_effect = AnsibleExitJson self.module.fail_json.side_effect = AnsibleFailJson - self.monit = monit.Monit(self.module, 'monit', 'processX', 1) + self.monit = monit.Monit(self.module, "monit", "processX", 1) self.monit._status_change_retry_count = 1 - mock_sleep = mock.patch('time.sleep') + mock_sleep = mock.patch("time.sleep") mock_sleep.start() self.addCleanup(mock_sleep.stop) def patch_status(self, side_effect): if not isinstance(side_effect, list): side_effect = [side_effect] - return mock.patch.object(self.monit, 'get_status', side_effect=side_effect) + return mock.patch.object(self.monit, "get_status", side_effect=side_effect) def test_change_state_success(self): with self.patch_status([monit.Status.OK, monit.Status.NOT_MONITORED]): with self.assertRaises(AnsibleExitJson): self.monit.stop() self.module.fail_json.assert_not_called() - self.module.run_command.assert_called_with(['monit', 'stop', 'processX'], check_rc=True) + self.module.run_command.assert_called_with(["monit", "stop", "processX"], check_rc=True) def test_change_state_fail(self): with self.patch_status([monit.Status.OK] * 3): @@ -50,12 +53,12 @@ def test_change_state_fail(self): self.monit.stop() def test_reload_fail(self): - self.module.run_command.return_value = (1, 'stdout', 'stderr') + self.module.run_command.return_value = (1, "stdout", "stderr") with self.assertRaises(AnsibleFailJson): self.monit.reload() def test_reload(self): - self.module.run_command.return_value = (0, '', '') + self.module.run_command.return_value = (0, "", "") with self.patch_status(monit.Status.OK): with 
diff --git a/tests/unit/plugins/modules/test_monit.py b/tests/unit/plugins/modules/test_monit.py
index 8a03c2a0987..8dc50050b3f 100644
--- a/tests/unit/plugins/modules/test_monit.py
+++ b/tests/unit/plugins/modules/test_monit.py
@@ -10,7 +10,10 @@
 import pytest

 from ansible_collections.community.general.plugins.modules import monit
-from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson
+from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
+    AnsibleExitJson,
+    AnsibleFailJson,
+)


 TEST_OUTPUT = """
@@ -26,23 +29,23 @@ def setUp(self):
         self.module = mock.MagicMock()
         self.module.exit_json.side_effect = AnsibleExitJson
         self.module.fail_json.side_effect = AnsibleFailJson
-        self.monit = monit.Monit(self.module, 'monit', 'processX', 1)
+        self.monit = monit.Monit(self.module, "monit", "processX", 1)
         self.monit._status_change_retry_count = 1
-        mock_sleep = mock.patch('time.sleep')
+        mock_sleep = mock.patch("time.sleep")
         mock_sleep.start()
         self.addCleanup(mock_sleep.stop)

     def patch_status(self, side_effect):
         if not isinstance(side_effect, list):
             side_effect = [side_effect]
-        return mock.patch.object(self.monit, 'get_status', side_effect=side_effect)
+        return mock.patch.object(self.monit, "get_status", side_effect=side_effect)

     def test_change_state_success(self):
         with self.patch_status([monit.Status.OK, monit.Status.NOT_MONITORED]):
             with self.assertRaises(AnsibleExitJson):
                 self.monit.stop()
         self.module.fail_json.assert_not_called()
-        self.module.run_command.assert_called_with(['monit', 'stop', 'processX'], check_rc=True)
+        self.module.run_command.assert_called_with(["monit", "stop", "processX"], check_rc=True)

     def test_change_state_fail(self):
         with self.patch_status([monit.Status.OK] * 3):
@@ -50,12 +53,12 @@ def test_change_state_fail(self):
             self.monit.stop()

     def test_reload_fail(self):
-        self.module.run_command.return_value = (1, 'stdout', 'stderr')
+        self.module.run_command.return_value = (1, "stdout", "stderr")
         with self.assertRaises(AnsibleFailJson):
             self.monit.reload()

     def test_reload(self):
-        self.module.run_command.return_value = (0, '', '')
+        self.module.run_command.return_value = (0, "", "")
         with self.patch_status(monit.Status.OK):
             with self.assertRaises(AnsibleExitJson):
                 self.monit.reload()

@@ -66,7 +69,7 @@ def test_wait_for_status_to_stop_pending(self):
             monit.Status.DOES_NOT_EXIST,
             monit.Status.INITIALIZING,
             monit.Status.OK.pending(),
-            monit.Status.OK
+            monit.Status.OK,
         ]
         with self.patch_status(status) as get_status:
             self.monit.wait_for_monit_to_stop_pending()
@@ -99,63 +102,74 @@ def test_timeout(self):
             self.monit.wait_for_monit_to_stop_pending()


-@pytest.mark.parametrize('status_name', monit.StatusValue.ALL_STATUS)
+@pytest.mark.parametrize("status_name", monit.StatusValue.ALL_STATUS)
 def test_status_value(status_name):
     value = getattr(monit.StatusValue, status_name.upper())
     status = monit.StatusValue(value)
-    assert getattr(status, f'is_{status_name}')
-    assert not all(getattr(status, f'is_{name}') for name in monit.StatusValue.ALL_STATUS if name != status_name)
+    assert getattr(status, f"is_{status_name}")
+    assert not all(getattr(status, f"is_{name}") for name in monit.StatusValue.ALL_STATUS if name != status_name)


 BASIC_OUTPUT_CASES = [
-    (TEST_OUTPUT % ('Process', 'processX', name), getattr(monit.Status, name.upper()))
+    (TEST_OUTPUT % ("Process", "processX", name), getattr(monit.Status, name.upper()))
     for name in monit.StatusValue.ALL_STATUS
 ]


-@pytest.mark.parametrize('output, expected', BASIC_OUTPUT_CASES + [
-    ('', monit.Status.MISSING),
-    (TEST_OUTPUT % ('Process', 'processY', 'OK'), monit.Status.MISSING),
-    (TEST_OUTPUT % ('Process', 'processX', 'Not Monitored - start pending'), monit.Status.OK),
-    (TEST_OUTPUT % ('Process', 'processX', 'Monitored - stop pending'), monit.Status.NOT_MONITORED),
-    (TEST_OUTPUT % ('Process', 'processX', 'Monitored - restart pending'), monit.Status.OK),
-    (TEST_OUTPUT % ('Process', 'processX', 'Not Monitored - monitor pending'), monit.Status.OK),
-    (TEST_OUTPUT % ('Process', 'processX', 'Does not exist'), monit.Status.DOES_NOT_EXIST),
-    (TEST_OUTPUT % ('Process', 'processX', 'Not monitored'), monit.Status.NOT_MONITORED),
-    (TEST_OUTPUT % ('Process', 'processX', 'Running'), monit.Status.OK),
-    (TEST_OUTPUT % ('Process', 'processX', 'Execution failed | Does not exist'), monit.Status.EXECUTION_FAILED),
-    (TEST_OUTPUT % ('Process', 'processX', 'Some Unknown Status'), monit.Status.EXECUTION_FAILED),
-])
+@pytest.mark.parametrize(
+    "output, expected",
+    BASIC_OUTPUT_CASES
+    + [
+        ("", monit.Status.MISSING),
+        (TEST_OUTPUT % ("Process", "processY", "OK"), monit.Status.MISSING),
+        (TEST_OUTPUT % ("Process", "processX", "Not Monitored - start pending"), monit.Status.OK),
+        (TEST_OUTPUT % ("Process", "processX", "Monitored - stop pending"), monit.Status.NOT_MONITORED),
+        (TEST_OUTPUT % ("Process", "processX", "Monitored - restart pending"), monit.Status.OK),
+        (TEST_OUTPUT % ("Process", "processX", "Not Monitored - monitor pending"), monit.Status.OK),
+        (TEST_OUTPUT % ("Process", "processX", "Does not exist"), monit.Status.DOES_NOT_EXIST),
+        (TEST_OUTPUT % ("Process", "processX", "Not monitored"), monit.Status.NOT_MONITORED),
+        (TEST_OUTPUT % ("Process", "processX", "Running"), monit.Status.OK),
+        (TEST_OUTPUT % ("Process", "processX", "Execution failed | Does not exist"), monit.Status.EXECUTION_FAILED),
+        (TEST_OUTPUT % ("Process", "processX", "Some Unknown Status"), monit.Status.EXECUTION_FAILED),
+    ],
+)
 def test_parse_status(output, expected):
     module = mock.MagicMock()
-    status = monit.Monit(module, '', 'processX', 0)._parse_status(output, '')
+    status = monit.Monit(module, "", "processX", 0)._parse_status(output, "")
     assert status == expected


-@pytest.mark.parametrize('output, expected', BASIC_OUTPUT_CASES + [
-    (TEST_OUTPUT % ('Process', 'processX', 'OK'), monit.Status.OK),
-    (TEST_OUTPUT % ('File', 'processX', 'OK'), monit.Status.OK),
-    (TEST_OUTPUT % ('Fifo', 'processX', 'OK'), monit.Status.OK),
-    (TEST_OUTPUT % ('Filesystem', 'processX', 'OK'), monit.Status.OK),
-    (TEST_OUTPUT % ('Directory', 'processX', 'OK'), monit.Status.OK),
-    (TEST_OUTPUT % ('Remote host', 'processX', 'OK'), monit.Status.OK),
-    (TEST_OUTPUT % ('System', 'processX', 'OK'), monit.Status.OK),
-    (TEST_OUTPUT % ('Program', 'processX', 'OK'), monit.Status.OK),
-    (TEST_OUTPUT % ('Network', 'processX', 'OK'), monit.Status.OK),
-    (TEST_OUTPUT % ('Unsupported', 'processX', 'OK'), monit.Status.MISSING),
-])
+@pytest.mark.parametrize(
+    "output, expected",
+    BASIC_OUTPUT_CASES
+    + [
+        (TEST_OUTPUT % ("Process", "processX", "OK"), monit.Status.OK),
+        (TEST_OUTPUT % ("File", "processX", "OK"), monit.Status.OK),
+        (TEST_OUTPUT % ("Fifo", "processX", "OK"), monit.Status.OK),
+        (TEST_OUTPUT % ("Filesystem", "processX", "OK"), monit.Status.OK),
+        (TEST_OUTPUT % ("Directory", "processX", "OK"), monit.Status.OK),
+        (TEST_OUTPUT % ("Remote host", "processX", "OK"), monit.Status.OK),
+        (TEST_OUTPUT % ("System", "processX", "OK"), monit.Status.OK),
+        (TEST_OUTPUT % ("Program", "processX", "OK"), monit.Status.OK),
+        (TEST_OUTPUT % ("Network", "processX", "OK"), monit.Status.OK),
+        (TEST_OUTPUT % ("Unsupported", "processX", "OK"), monit.Status.MISSING),
+    ],
+)
 def test_parse_status_supports_all_services(output, expected):
-    status = monit.Monit(None, '', 'processX', 0)._parse_status(output, '')
+    status = monit.Monit(None, "", "processX", 0)._parse_status(output, "")
     assert status == expected


-@pytest.mark.parametrize('output, expected', [
-    ('This is monit version 5.18.1', '5.18.1'),
-    ('This is monit version 12.18', '12.18'),
-    ('This is monit version 5.1.12', '5.1.12'),
-])
+@pytest.mark.parametrize(
+    "output, expected",
+    [
+        ("This is monit version 5.18.1", "5.18.1"),
+        ("This is monit version 12.18", "12.18"),
+        ("This is monit version 5.1.12", "5.1.12"),
+    ],
+)
 def test_parse_version(output, expected):
     module = mock.MagicMock()
-    module.run_command.return_value = (0, output, '')
-    raw_version, version_tuple = monit.Monit(module, '', 'processX', 0)._get_monit_version()
+    module.run_command.return_value = (0, output, "")
+    raw_version, version_tuple = monit.Monit(module, "", "processX", 0)._get_monit_version()
     assert raw_version == expected
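BASIC_OUTPUT_CASES above is generated from a %-style template and then extended with hand-written edge cases inside parametrize. A minimal sketch of the same generate-then-extend pattern; the template and parser here are toys, not monit's real output format:

import pytest

TEMPLATE = "%s '%s'\n  status    %s"
GENERATED = [(TEMPLATE % ("Process", "p1", s), s) for s in ("Running", "Not monitored")]


@pytest.mark.parametrize("output, expected", GENERATED + [("", None)])
def test_last_status_word(output, expected):
    # Toy parser: take whatever follows the final "status" token, if anything.
    assert (output.rsplit("status", 1)[-1].strip() or None) == expected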
diff --git a/tests/unit/plugins/modules/test_nmcli.py b/tests/unit/plugins/modules/test_nmcli.py
index ed840b397c8..6023763a3ed 100644
--- a/tests/unit/plugins/modules/test_nmcli.py
+++ b/tests/unit/plugins/modules/test_nmcli.py
@@ -12,128 +12,128 @@
 from ansible_collections.community.general.plugins.modules import nmcli
 from ansible.module_utils.basic import AnsibleModule

-pytestmark = pytest.mark.usefixtures('patch_ansible_module')
+pytestmark = pytest.mark.usefixtures("patch_ansible_module")

 TESTCASE_CONNECTION = [
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'generic',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "generic",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
    },
     {
-        'type': 'team',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "team",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'bond',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "bond",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'bond-slave',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "bond-slave",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'bridge',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "bridge",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'vlan',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "vlan",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'vxlan',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "vxlan",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'gre',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "gre",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'ipip',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "ipip",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'sit',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "sit",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'dummy',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "dummy",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'gsm',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "gsm",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'wireguard',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "wireguard",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'vpn',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "vpn",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'infiniband',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "infiniband",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'macvlan',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "macvlan",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
     {
-        'type': 'loopback',
-        'conn_name': 'non_existent_nw_device',
-        'state': 'absent',
-        '_ansible_check_mode': True,
+        "type": "loopback",
+        "conn_name": "non_existent_nw_device",
+        "state": "absent",
+        "_ansible_check_mode": True,
     },
 ]

 TESTCASE_GENERIC = [
     {
-        'type': 'generic',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'generic_non_existant',
-        'ip4': '10.10.10.10/24',
-        'gw4': '10.10.10.1',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "generic",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "generic_non_existant",
+        "ip4": "10.10.10.10/24",
+        "gw4": "10.10.10.1",
+        "state": "present",
+        "_ansible_check_mode": False,
     },
 ]
@@ -156,27 +156,27 @@
 TESTCASE_GENERIC_DIFF_CHECK = [
     {
-        'type': 'generic',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'generic_non_existant',
-        'ip4': '10.10.10.10/24',
-        'gw4': '10.10.10.2',
-        'route_metric4': -1,
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "generic",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "generic_non_existant",
+        "ip4": "10.10.10.10/24",
+        "gw4": "10.10.10.2",
+        "route_metric4": -1,
+        "state": "present",
+        "_ansible_check_mode": False,
     },
 ]

 TESTCASE_GENERIC_MODIFY_ROUTING_RULES = [
     {
-        'type': 'generic',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'generic_non_existant',
-        'ip4': '10.10.10.10/24',
-        'gw4': '10.10.10.1',
-        'routing_rules4': ['priority 5 from 10.0.0.0/24 table 5000', 'priority 10 from 10.0.1.0/24 table 5001'],
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "generic",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "generic_non_existant",
+        "ip4": "10.10.10.10/24",
+        "gw4": "10.10.10.1",
+        "routing_rules4": ["priority 5 from 10.0.0.0/24 table 5000", "priority 10 from 10.0.1.0/24 table 5001"],
+        "state": "present",
+        "_ansible_check_mode": False,
     },
 ]
@@ -199,25 +199,24 @@
 TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE = [
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'ip6': '2001:beef:cafe:10::1/64',
-        'routes6': ['fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2'],
-        'method6': 'manual',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ethernet_non_existant",
+        "ip6": "2001:beef:cafe:10::1/64",
+        "routes6": ["fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2"],
+        "method6": "manual",
+        "state": "present",
+        "_ansible_check_mode": False,
     },
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'ip6': '2001:beef:cafe:10::1/64',
-        'routes6_extended': [{'ip': 'fd2e:446f:d85d:5::/64',
-                              'next_hop': '2001:beef:cafe:10::2'}],
-        'method6': 'manual',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ethernet_non_existant",
+        "ip6": "2001:beef:cafe:10::1/64",
+        "routes6_extended": [{"ip": "fd2e:446f:d85d:5::/64", "next_hop": "2001:beef:cafe:10::2"}],
+        "method6": "manual",
+        "state": "present",
+        "_ansible_check_mode": False,
     },
 ]
@@ -240,20 +239,20 @@
 TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC = [
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'routes4': ['192.168.200.0/24 192.168.1.1'],
-        'route_metric4': 10,
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "routes4": ["192.168.200.0/24 192.168.1.1"],
+        "route_metric4": 10,
+        "state": "present",
+        "_ansible_check_mode": False,
     },
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'routes4_extended': [{'ip': '192.168.200.0/24', 'next_hop': '192.168.1.1'}],
-        'route_metric4': 10,
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "routes4_extended": [{"ip": "192.168.200.0/24", "next_hop": "192.168.1.1"}],
+        "route_metric4": 10,
+        "state": "present",
+        "_ansible_check_mode": False,
     },
 ]
@@ -269,39 +268,39 @@
 TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC_CLEAR = [
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'routes4': [],
-        'state': 'present',
-        '_ansible_check_mode': False,
-        '_ansible_diff': True,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "routes4": [],
+        "state": "present",
+        "_ansible_check_mode": False,
+        "_ansible_diff": True,
     },
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'routes4_extended': [],
-        'state': 'present',
-        '_ansible_check_mode': False,
-        '_ansible_diff': True,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "routes4_extended": [],
+        "state": "present",
+        "_ansible_check_mode": False,
+        "_ansible_diff": True,
     },
 ]

 TESTCASE_ETHERNET_MOD_IPV6_INT_WITH_ROUTE_AND_METRIC = [
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'routes6': ['fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2'],
-        'route_metric6': 10,
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "routes6": ["fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2"],
+        "route_metric6": 10,
+        "state": "present",
+        "_ansible_check_mode": False,
     },
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'routes6_extended': [{'ip': 'fd2e:446f:d85d:5::/64', 'next_hop': '2001:beef:cafe:10::2'}],
-        'route_metric6': 10,
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "routes6_extended": [{"ip": "fd2e:446f:d85d:5::/64", "next_hop": "2001:beef:cafe:10::2"}],
+        "route_metric6": 10,
+        "state": "present",
+        "_ansible_check_mode": False,
     },
 ]
@@ -317,25 +316,27 @@
 TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES = [
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'ip6': '2001:beef:cafe:10::1/64',
-        'routes6': ['fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2', 'fd2e:8890:abcd:25::/64 2001:beef:cafe:10::5'],
-        'method6': 'manual',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ethernet_non_existant",
+        "ip6": "2001:beef:cafe:10::1/64",
+        "routes6": ["fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2", "fd2e:8890:abcd:25::/64 2001:beef:cafe:10::5"],
+        "method6": "manual",
+        "state": "present",
+        "_ansible_check_mode": False,
     },
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'ip6': '2001:beef:cafe:10::1/64',
-        'routes6_extended': [{'ip': 'fd2e:446f:d85d:5::/64', 'next_hop': '2001:beef:cafe:10::2'},
-                             {'ip': 'fd2e:8890:abcd:25::/64', 'next_hop': '2001:beef:cafe:10::5'}],
-        'method6': 'manual',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ethernet_non_existant",
+        "ip6": "2001:beef:cafe:10::1/64",
+        "routes6_extended": [
+            {"ip": "fd2e:446f:d85d:5::/64", "next_hop": "2001:beef:cafe:10::2"},
+            {"ip": "fd2e:8890:abcd:25::/64", "next_hop": "2001:beef:cafe:10::5"},
+        ],
+        "method6": "manual",
+        "state": "present",
+        "_ansible_check_mode": False,
     },
 ]
@@ -358,15 +359,15 @@
 TESTCASE_ETHERNET_ADD_SRIOV_VFS = [
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'sriov': {
-            'total-vfs': 16,
-            'vfs': '0 spoof-check=true vlans=100',
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ethernet_non_existant",
+        "sriov": {
+            "total-vfs": 16,
+            "vfs": "0 spoof-check=true vlans=100",
         },
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]
@@ -380,28 +381,28 @@
 TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE_AND_METRIC = [
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'method4': 'disabled',
-        'ip6': '2001:beef:cafe:10::1/64',
-        'routes6': ['fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2'],
-        'route_metric6': 5,
-        'method6': 'manual',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ethernet_non_existant",
+        "method4": "disabled",
+        "ip6": "2001:beef:cafe:10::1/64",
+        "routes6": ["fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2"],
+        "route_metric6": 5,
+        "method6": "manual",
+        "state": "present",
+        "_ansible_check_mode": False,
     },
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'method4': 'disabled',
-        'ip6': '2001:beef:cafe:10::1/64',
-        'routes6_extended': [{'ip': 'fd2e:446f:d85d:5::/64', 'next_hop': '2001:beef:cafe:10::2'}],
-        'route_metric6': 5,
-        'method6': 'manual',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ethernet_non_existant",
+        "method4": "disabled",
+        "ip6": "2001:beef:cafe:10::1/64",
+        "routes6_extended": [{"ip": "fd2e:446f:d85d:5::/64", "next_hop": "2001:beef:cafe:10::2"}],
+        "route_metric6": 5,
+        "method6": "manual",
+        "state": "present",
+        "_ansible_check_mode": False,
     },
 ]
@@ -424,29 +425,31 @@
 TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES_AND_METRIC = [
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'method4': 'disabled',
-        'ip6': '2001:beef:cafe:10::1/64',
-        'routes6': ['fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2', 'fd2e:8890:abcd:25::/64 2001:beef:cafe:10::5'],
-        'route_metric6': 5,
-        'method6': 'manual',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ethernet_non_existant",
+        "method4": "disabled",
+        "ip6": "2001:beef:cafe:10::1/64",
+        "routes6": ["fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2", "fd2e:8890:abcd:25::/64 2001:beef:cafe:10::5"],
+        "route_metric6": 5,
+        "method6": "manual",
+        "state": "present",
+        "_ansible_check_mode": False,
     },
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'method4': 'disabled',
-        'ip6': '2001:beef:cafe:10::1/64',
-        'routes6_extended': [{'ip': 'fd2e:446f:d85d:5::/64', 'next_hop': '2001:beef:cafe:10::2'},
-                             {'ip': 'fd2e:8890:abcd:25::/64', 'next_hop': '2001:beef:cafe:10::5'}],
-        'route_metric6': 5,
-        'method6': 'manual',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ethernet_non_existant",
+        "method4": "disabled",
+        "ip6": "2001:beef:cafe:10::1/64",
+        "routes6_extended": [
+            {"ip": "fd2e:446f:d85d:5::/64", "next_hop": "2001:beef:cafe:10::2"},
+            {"ip": "fd2e:8890:abcd:25::/64", "next_hop": "2001:beef:cafe:10::5"},
+        ],
+        "route_metric6": 5,
+        "method6": "manual",
+        "state": "present",
+        "_ansible_check_mode": False,
     },
 ]
@@ -469,15 +472,15 @@
 TESTCASE_GENERIC_DNS4_SEARCH = [
     {
-        'type': 'generic',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'generic_non_existant',
-        'ip4': '10.10.10.10/24',
-        'gw4': '10.10.10.1',
-        'state': 'present',
-        'dns4_search': 'search.redhat.com',
-        'dns6_search': 'search6.redhat.com',
-        '_ansible_check_mode': False,
+        "type": "generic",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "generic_non_existant",
+        "ip4": "10.10.10.10/24",
+        "gw4": "10.10.10.1",
+        "state": "present",
+        "dns4_search": "search.redhat.com",
+        "dns6_search": "search6.redhat.com",
+        "_ansible_check_mode": False,
     }
 ]
@@ -501,15 +504,15 @@
 TESTCASE_GENERIC_DNS4_OPTIONS = [
     {
-        'type': 'generic',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'generic_non_existant',
-        'ip4': '10.10.10.10/24',
-        'gw4': '10.10.10.1',
-        'state': 'present',
-        'dns4_options': [],
-        'dns6_options': [],
-        '_ansible_check_mode': False,
+        "type": "generic",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "generic_non_existant",
+        "ip4": "10.10.10.10/24",
+        "gw4": "10.10.10.1",
+        "state": "present",
+        "dns4_options": [],
+        "dns6_options": [],
+        "_ansible_check_mode": False,
     }
 ]
@@ -533,14 +536,14 @@
 TESTCASE_GENERIC_ZONE = [
     {
-        'type': 'generic',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'generic_non_existant',
-        'ip4': '10.10.10.10/24',
-        'gw4': '10.10.10.1',
-        'state': 'present',
-        'zone': 'external',
-        '_ansible_check_mode': False,
+        "type": "generic",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "generic_non_existant",
+        "ip4": "10.10.10.10/24",
+        "gw4": "10.10.10.1",
+        "state": "present",
+        "zone": "external",
+        "_ansible_check_mode": False,
     }
 ]
@@ -563,27 +566,27 @@
 TESTCASE_GENERIC_ZONE_ONLY = [
     {
-        'type': 'generic',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'generic_non_existant',
-        'state': 'present',
-        'zone': 'public',
-        '_ansible_check_mode': False,
+        "type": "generic",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "generic_non_existant",
+        "state": "present",
+        "zone": "public",
+        "_ansible_check_mode": False,
     }
 ]

 TESTCASE_BOND = [
     {
-        'type': 'bond',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'bond_non_existant',
-        'mode': 'active-backup',
-        'xmit_hash_policy': 'layer3+4',
-        'ip4': '10.10.10.10/24',
-        'gw4': '10.10.10.1',
-        'state': 'present',
-        'primary': 'non_existent_primary',
-        '_ansible_check_mode': False,
+        "type": "bond",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "bond_non_existant",
+        "mode": "active-backup",
+        "xmit_hash_policy": "layer3+4",
+        "ip4": "10.10.10.10/24",
+        "gw4": "10.10.10.1",
+        "state": "present",
+        "primary": "non_existent_primary",
+        "_ansible_check_mode": False,
     }
 ]
@@ -606,16 +609,16 @@
 TESTCASE_BRIDGE = [
     {
-        'type': 'bridge',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'br0_non_existant',
-        'ip4': '10.10.10.10/24',
-        'gw4': '10.10.10.1',
-        'mac': '52:54:00:ab:cd:ef',
-        'maxage': 100,
-        'stp': True,
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "bridge",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "br0_non_existant",
+        "ip4": "10.10.10.10/24",
+        "gw4": "10.10.10.1",
+        "mac": "52:54:00:ab:cd:ef",
+        "maxage": 100,
+        "stp": True,
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]
@@ -644,13 +647,13 @@
 TESTCASE_BRIDGE_SLAVE = [
     {
-        'type': 'bridge-slave',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'br0_non_existant',
-        'hairpin': True,
-        'path_cost': 100,
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "bridge-slave",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "br0_non_existant",
+        "hairpin": True,
+        "path_cost": 100,
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]
@@ -667,11 +670,11 @@
 TESTCASE_TEAM = [
     {
-        'type': 'team',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'team0_non_existant',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "team",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "team0_non_existant",
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]
@@ -693,62 +696,62 @@
 TESTCASE_TEAM_HWADDR_POLICY_FAILS = [
     {
-        'type': 'team',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'team0_non_existant',
-        'runner_hwaddr_policy': 'by_active',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "team",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "team0_non_existant",
+        "runner_hwaddr_policy": "by_active",
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]

 TESTCASE_TEAM_RUNNER_FAST_RATE = [
     {
-        'type': 'team',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'team0_non_existant',
-        'runner': 'lacp',
-        'runner_fast_rate': True,
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "team",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "team0_non_existant",
+        "runner": "lacp",
+        "runner_fast_rate": True,
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]

 TESTCASE_TEAM_RUNNER_FAST_RATE_FAILS = [
     {
-        'type': 'team',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'team0_non_existant',
-        'runner_fast_rate': True,
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "team",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "team0_non_existant",
+        "runner_fast_rate": True,
+        "state": "present",
+        "_ansible_check_mode": False,
     },
     {
-        'type': 'team',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'team0_non_existant',
-        'state': 'present',
-        'runner_fast_rate': False,
-        '_ansible_check_mode': False,
+        "type": "team",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "team0_non_existant",
+        "state": "present",
+        "runner_fast_rate": False,
+        "_ansible_check_mode": False,
     },
     {
-        'type': 'team',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'team0_non_existant',
-        'state': 'present',
-        'runner': 'activebackup',
-        'runner_fast_rate': False,
-        '_ansible_check_mode': False,
+        "type": "team",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "team0_non_existant",
+        "state": "present",
+        "runner": "activebackup",
+        "runner_fast_rate": False,
+        "_ansible_check_mode": False,
     },
     {
-        'type': 'team',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'team0_non_existant',
-        'state': 'present',
-        'runner': 'activebackup',
-        'runner_fast_rate': True,
-        '_ansible_check_mode': False,
-    }
+        "type": "team",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "team0_non_existant",
+        "state": "present",
+        "runner": "activebackup",
+        "runner_fast_rate": True,
+        "_ansible_check_mode": False,
+    },
 ]

 TESTCASE_TEAM_RUNNER_FAST_RATE_SHOW_OUTPUT = """\
@@ -769,12 +772,12 @@
 TESTCASE_TEAM_SLAVE = [
     {
-        'type': 'team-slave',
-        'conn_name': 'non_existent_nw_slaved_device',
-        'ifname': 'generic_slaved_non_existant',
-        'master': 'team0_non_existant',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "team-slave",
+        "conn_name": "non_existent_nw_slaved_device",
+        "ifname": "generic_slaved_non_existant",
+        "master": "team0_non_existant",
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]
@@ -789,14 +792,14 @@
 TESTCASE_VLAN = [
     {
-        'type': 'vlan',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'vlan_not_exists',
-        'ip4': '10.10.10.10/24',
-        'gw4': '10.10.10.1',
-        'vlanid': 10,
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "vlan",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "vlan_not_exists",
+        "ip4": "10.10.10.10/24",
+        "gw4": "10.10.10.1",
+        "vlanid": 10,
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]
@@ -820,14 +823,14 @@
 TESTCASE_VXLAN = [
     {
-        'type': 'vxlan',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'vxlan-existent_nw_device',
-        'vxlan_id': 11,
-        'vxlan_local': '192.168.225.5',
-        'vxlan_remote': '192.168.225.6',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "vxlan",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "vxlan-existent_nw_device",
+        "vxlan_id": 11,
+        "vxlan_local": "192.168.225.5",
+        "vxlan_remote": "192.168.225.6",
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]
@@ -842,16 +845,16 @@
 TESTCASE_GRE = [
     {
-        'type': 'gre',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'gre-existent_nw_device',
-        'ip_tunnel_dev': 'non_existent_gre_device',
-        'ip_tunnel_local': '192.168.225.5',
-        'ip_tunnel_remote': '192.168.225.6',
-        'ip_tunnel_input_key': '1',
-        'ip_tunnel_output_key': '2',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "gre",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "gre-existent_nw_device",
+        "ip_tunnel_dev": "non_existent_gre_device",
+        "ip_tunnel_local": "192.168.225.5",
+        "ip_tunnel_remote": "192.168.225.6",
+        "ip_tunnel_input_key": "1",
+        "ip_tunnel_output_key": "2",
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]
@@ -875,14 +878,14 @@
 TESTCASE_IPIP = [
     {
-        'type': 'ipip',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ipip-existent_nw_device',
-        'ip_tunnel_dev': 'non_existent_ipip_device',
-        'ip_tunnel_local': '192.168.225.5',
-        'ip_tunnel_remote': '192.168.225.6',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ipip",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ipip-existent_nw_device",
+        "ip_tunnel_dev": "non_existent_ipip_device",
+        "ip_tunnel_local": "192.168.225.5",
+        "ip_tunnel_remote": "192.168.225.6",
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]
@@ -904,14 +907,14 @@
 TESTCASE_SIT = [
     {
-        'type': 'sit',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'sit-existent_nw_device',
-        'ip_tunnel_dev': 'non_existent_sit_device',
-        'ip_tunnel_local': '192.168.225.5',
-        'ip_tunnel_remote': '192.168.225.6',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "sit",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "sit-existent_nw_device",
+        "ip_tunnel_dev": "non_existent_sit_device",
+        "ip_tunnel_local": "192.168.225.5",
+        "ip_tunnel_remote": "192.168.225.6",
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]
@@ -933,12 +936,12 @@
 TESTCASE_ETHERNET_DHCP = [
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'dhcp_client_id': '00:11:22:AA:BB:CC:DD',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ethernet_non_existant",
+        "dhcp_client_id": "00:11:22:AA:BB:CC:DD",
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]
@@ -960,36 +963,36 @@
 TESTCASE_ETHERNET_STATIC = [
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'ip4': '10.10.10.10/24',
-        'gw4': '10.10.10.1',
-        'dns4': ['1.1.1.1', '8.8.8.8'],
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ethernet_non_existant",
+        "ip4": "10.10.10.10/24",
+        "gw4": "10.10.10.1",
+        "dns4": ["1.1.1.1", "8.8.8.8"],
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]

 TESTCASE_LOOPBACK = [
     {
-        'type': 'loopback',
-        'conn_name': 'lo',
-        'ifname': 'lo',
-        'ip4': '127.0.0.1/8',
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "loopback",
+        "conn_name": "lo",
+        "ifname": "lo",
+        "ip4": "127.0.0.1/8",
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]

 TESTCASE_LOOPBACK_MODIFY = [
     {
-        'type': 'loopback',
-        'conn_name': 'lo',
-        'ifname': 'lo',
-        'ip4': ['127.0.0.1/8', '127.0.0.2/8'],
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "loopback",
+        "conn_name": "lo",
+        "ifname": "lo",
+        "ip4": ["127.0.0.1/8", "127.0.0.2/8"],
+        "state": "present",
+        "_ansible_check_mode": False,
     }
 ]
@@ -1028,63 +1031,63 @@
 TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES = [
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'ip4': ['10.10.10.10/32', '10.10.20.10/32'],
-        'gw4': '10.10.10.1',
-        'dns4': ['1.1.1.1', '8.8.8.8'],
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ethernet_non_existant",
+        "ip4": ["10.10.10.10/32", "10.10.20.10/32"],
+        "gw4": "10.10.10.1",
+        "dns4": ["1.1.1.1", "8.8.8.8"],
+        "state": "present",
+        "_ansible_check_mode": False,
     },
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'ip4': ['10.10.10.10', '10.10.20.10'],
-        'gw4': '10.10.10.1',
-        'dns4': ['1.1.1.1', '8.8.8.8'],
-        'state': 'present',
-        '_ansible_check_mode': False,
-    }
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ethernet_non_existant",
+        "ip4": ["10.10.10.10", "10.10.20.10"],
+        "gw4": "10.10.10.1",
+        "dns4": ["1.1.1.1", "8.8.8.8"],
+        "state": "present",
+        "_ansible_check_mode": False,
+    },
 ]

 TESTCASE_ETHERNET_STATIC_IP6_PRIVACY_AND_ADDR_GEN_MODE = [
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'ip6': '2001:db8::cafe/128',
-        'gw6': '2001:db8::cafa',
-        'dns6': ['2001:4860:4860::8888'],
-        'state': 'present',
-        'ip_privacy6': 'prefer-public-addr',
-        'addr_gen_mode6': 'eui64',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ethernet_non_existant",
+        "ip6": "2001:db8::cafe/128",
+        "gw6": "2001:db8::cafa",
+        "dns6": ["2001:4860:4860::8888"],
+        "state": "present",
+        "ip_privacy6": "prefer-public-addr",
+        "addr_gen_mode6": "eui64",
+        "_ansible_check_mode": False,
     }
 ]

 TESTCASE_ETHERNET_STATIC_MULTIPLE_IP6_ADDRESSES = [
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'ip6': ['2001:db8::cafe/128', '2002:db8::cafe/128'],
-        'gw6': '2001:db8::cafa',
-        'dns6': ['2001:4860:4860::8888', '2001:4860:4860::8844'],
-        'state': 'present',
-        '_ansible_check_mode': False,
+        "type": "ethernet",
+        "conn_name": "non_existent_nw_device",
+        "ifname": "ethernet_non_existant",
+        "ip6": ["2001:db8::cafe/128", "2002:db8::cafe/128"],
+        "gw6": "2001:db8::cafa",
+        "dns6": ["2001:4860:4860::8888", "2001:4860:4860::8844"],
+        "state": "present",
+        "_ansible_check_mode": False,
    },
     {
-        'type': 'ethernet',
-        'conn_name': 'non_existent_nw_device',
-        'ifname': 'ethernet_non_existant',
-        'ip6': ['2001:db8::cafe', '2002:db8::cafe'],
-        'gw6': 
'2001:db8::cafa', - 'dns6': ['2001:4860:4860::8888', '2001:4860:4860::8844'], - 'state': 'present', - '_ansible_check_mode': False, - } + "type": "ethernet", + "conn_name": "non_existent_nw_device", + "ifname": "ethernet_non_existant", + "ip6": ["2001:db8::cafe", "2002:db8::cafe"], + "gw6": "2001:db8::cafa", + "dns6": ["2001:4860:4860::8888", "2001:4860:4860::8844"], + "state": "present", + "_ansible_check_mode": False, + }, ] TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES_SHOW_OUTPUT = """\ @@ -1148,33 +1151,33 @@ TESTCASE_WIRELESS = [ { - 'type': 'wifi', - 'conn_name': 'non_existent_nw_device', - 'ifname': 'wireless_non_existant', - 'ip4': '10.10.10.10/24', - 'ssid': 'Brittany', - 'wifi': { - 'hidden': True, - 'mode': 'ap', + "type": "wifi", + "conn_name": "non_existent_nw_device", + "ifname": "wireless_non_existant", + "ip4": "10.10.10.10/24", + "ssid": "Brittany", + "wifi": { + "hidden": True, + "mode": "ap", }, - 'state': 'present', - '_ansible_check_mode': False, + "state": "present", + "_ansible_check_mode": False, } ] TESTCASE_SECURE_WIRELESS = [ { - 'type': 'wifi', - 'conn_name': 'non_existent_nw_device', - 'ifname': 'wireless_non_existant', - 'ip4': '10.10.10.10/24', - 'ssid': 'Brittany', - 'wifi_sec': { - 'key-mgmt': 'wpa-psk', - 'psk': 'VERY_SECURE_PASSWORD', + "type": "wifi", + "conn_name": "non_existent_nw_device", + "ifname": "wireless_non_existant", + "ip4": "10.10.10.10/24", + "ssid": "Brittany", + "wifi_sec": { + "key-mgmt": "wpa-psk", + "psk": "VERY_SECURE_PASSWORD", }, - 'state': 'present', - '_ansible_check_mode': False, + "state": "present", + "_ansible_check_mode": False, } ] @@ -1199,8 +1202,9 @@ 802-11-wireless.ap-isolation: -1 (default) """ -TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT = \ - TESTCASE_DEFAULT_WIRELESS_SHOW_OUTPUT + """\ +TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT = ( + TESTCASE_DEFAULT_WIRELESS_SHOW_OUTPUT + + """\ 802-11-wireless-security.key-mgmt: -- 802-11-wireless-security.wep-tx-keyidx: 0 802-11-wireless-security.auth-alg: -- @@ -1222,19 +1226,20 @@ 802-11-wireless-security.wps-method: 0x0 (default) 802-11-wireless-security.fils: 0 (default) """ +) TESTCASE_DUMMY_STATIC = [ { - 'type': 'dummy', - 'conn_name': 'non_existent_nw_device', - 'ifname': 'dummy_non_existant', - 'ip4': '10.10.10.10/24', - 'gw4': '10.10.10.1', - 'dns4': ['1.1.1.1', '8.8.8.8'], - 'ip6': '2001:db8::1/128', - 'state': 'present', - '_ansible_check_mode': False, + "type": "dummy", + "conn_name": "non_existent_nw_device", + "ifname": "dummy_non_existant", + "ip4": "10.10.10.10/24", + "gw4": "10.10.10.1", + "dns4": ["1.1.1.1", "8.8.8.8"], + "ip6": "2001:db8::1/128", + "state": "present", + "_ansible_check_mode": False, } ] @@ -1319,18 +1324,18 @@ TESTCASE_GSM = [ { - 'type': 'gsm', - 'conn_name': 'non_existent_nw_device', - 'ifname': 'gsm_non_existant', - 'gsm': { - 'apn': 'internet.telekom', - 'username': 't-mobile', - 'password': 'tm', - 'pin': '1234', + "type": "gsm", + "conn_name": "non_existent_nw_device", + "ifname": "gsm_non_existant", + "gsm": { + "apn": "internet.telekom", + "username": "t-mobile", + "password": "tm", + "pin": "1234", }, - 'method4': 'auto', - 'state': 'present', - '_ansible_check_mode': False, + "method4": "auto", + "state": "present", + "_ansible_check_mode": False, } ] @@ -1365,19 +1370,19 @@ TESTCASE_WIREGUARD = [ { - 'type': 'wireguard', - 'conn_name': 'non_existent_nw_device', - 'ifname': 'wg_non_existant', - 'wireguard': { - 'listen-port': '51820', - 'private-key': '', + "type": "wireguard", + "conn_name": "non_existent_nw_device", + 
"ifname": "wg_non_existant", + "wireguard": { + "listen-port": "51820", + "private-key": "", }, - 'method4': 'manual', - 'ip4': '10.10.10.10/24', - 'method6': 'manual', - 'ip6': '2001:db8::1/128', - 'state': 'present', - '_ansible_check_mode': False, + "method4": "manual", + "ip4": "10.10.10.10/24", + "method6": "manual", + "ip6": "2001:db8::1/128", + "state": "present", + "_ansible_check_mode": False, } ] @@ -1408,22 +1413,22 @@ TESTCASE_VPN_L2TP = [ { - 'type': 'vpn', - 'conn_name': 'vpn_l2tp', - 'vpn': { - 'permissions': 'brittany', - 'service-type': 'org.freedesktop.NetworkManager.l2tp', - 'gateway': 'vpn.example.com', - 'password-flags': '2', - 'user': 'brittany', - 'ipsec-enabled': 'true', - 'ipsec-psk': 'QnJpdHRhbnkxMjM=', + "type": "vpn", + "conn_name": "vpn_l2tp", + "vpn": { + "permissions": "brittany", + "service-type": "org.freedesktop.NetworkManager.l2tp", + "gateway": "vpn.example.com", + "password-flags": "2", + "user": "brittany", + "ipsec-enabled": "true", + "ipsec-psk": "QnJpdHRhbnkxMjM=", }, - 'gw4_ignore_auto': True, - 'routes4': ['192.168.200.0/24'], - 'autoconnect': 'false', - 'state': 'present', - '_ansible_check_mode': False, + "gw4_ignore_auto": True, + "routes4": ["192.168.200.0/24"], + "autoconnect": "false", + "state": "present", + "_ansible_check_mode": False, }, ] @@ -1450,18 +1455,18 @@ TESTCASE_VPN_PPTP = [ { - 'type': 'vpn', - 'conn_name': 'vpn_pptp', - 'vpn': { - 'permissions': 'brittany', - 'service-type': 'org.freedesktop.NetworkManager.pptp', - 'gateway': 'vpn.example.com', - 'password-flags': '2', - 'user': 'brittany', + "type": "vpn", + "conn_name": "vpn_pptp", + "vpn": { + "permissions": "brittany", + "service-type": "org.freedesktop.NetworkManager.pptp", + "gateway": "vpn.example.com", + "password-flags": "2", + "user": "brittany", }, - 'autoconnect': 'false', - 'state': 'present', - '_ansible_check_mode': False, + "autoconnect": "false", + "state": "present", + "_ansible_check_mode": False, }, ] @@ -1484,13 +1489,13 @@ TESTCASE_INFINIBAND_STATIC = [ { - 'type': 'infiniband', - 'conn_name': 'non_existent_nw_device', - 'ifname': 'infiniband_non_existant', - 'ip4': '10.10.10.10/24', - 'gw4': '10.10.10.1', - 'state': 'present', - '_ansible_check_mode': False, + "type": "infiniband", + "conn_name": "non_existent_nw_device", + "ifname": "infiniband_non_existant", + "ip4": "10.10.10.10/24", + "gw4": "10.10.10.1", + "state": "present", + "_ansible_check_mode": False, } ] @@ -1515,12 +1520,11 @@ TESTCASE_INFINIBAND_STATIC_MODIFY_TRANSPORT_MODE = [ { - - 'type': 'infiniband', - 'conn_name': 'non_existent_nw_device', - 'transport_mode': 'connected', - 'state': 'present', - '_ansible_check_mode': False, + "type": "infiniband", + "conn_name": "non_existent_nw_device", + "transport_mode": "connected", + "state": "present", + "_ansible_check_mode": False, }, ] @@ -1532,19 +1536,19 @@ TESTCASE_MACVLAN = [ { - 'type': 'macvlan', - 'conn_name': 'non_existent_nw_device', - 'ifname': 'macvlan_non_existant', - 'macvlan': { - 'mode': '2', - 'parent': 'non_existent_parent', + "type": "macvlan", + "conn_name": "non_existent_nw_device", + "ifname": "macvlan_non_existant", + "macvlan": { + "mode": "2", + "parent": "non_existent_parent", }, - 'method4': 'manual', - 'ip4': '10.10.10.10/24', - 'method6': 'manual', - 'ip6': '2001:db8::1/128', - 'state': 'present', - '_ansible_check_mode': False, + "method4": "manual", + "ip4": "10.10.10.10/24", + "method6": "manual", + "ip6": "2001:db8::1/128", + "state": "present", + "_ansible_check_mode": False, } ] @@ -1571,14 +1575,14 @@ 
TESTCASE_VRF = [ { - 'type': 'vrf', - 'conn_name': 'non_existent_nw_device', - 'ifname': 'vrf_not_exists', - 'ip4': '10.10.10.10/24', - 'gw4': '10.10.10.1', - 'table': 10, - 'state': 'present', - '_ansible_check_mode': False, + "type": "vrf", + "conn_name": "non_existent_nw_device", + "ifname": "vrf_not_exists", + "ip4": "10.10.10.10/24", + "gw4": "10.10.10.1", + "table": 10, + "state": "present", + "_ansible_check_mode": False, } ] @@ -1601,25 +1605,23 @@ """ -def mocker_set(mocker, - connection_exists=False, - execute_return=(0, "", ""), - execute_side_effect=None, - changed_return=None): +def mocker_set( + mocker, connection_exists=False, execute_return=(0, "", ""), execute_side_effect=None, changed_return=None +): """ Common mocker object """ - get_bin_path = mocker.patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') - get_bin_path.return_value = '/usr/bin/nmcli' - connection = mocker.patch.object(nmcli.Nmcli, 'connection_exists') + get_bin_path = mocker.patch("ansible.module_utils.basic.AnsibleModule.get_bin_path") + get_bin_path.return_value = "/usr/bin/nmcli" + connection = mocker.patch.object(nmcli.Nmcli, "connection_exists") connection.return_value = connection_exists - execute_command = mocker.patch.object(nmcli.Nmcli, 'execute_command') + execute_command = mocker.patch.object(nmcli.Nmcli, "execute_command") if execute_return: execute_command.return_value = execute_return if execute_side_effect: execute_command.side_effect = execute_side_effect if changed_return: - is_connection_changed = mocker.patch.object(nmcli.Nmcli, 'is_connection_changed') + is_connection_changed = mocker.patch.object(nmcli.Nmcli, "is_connection_changed") is_connection_changed.return_value = changed_return @@ -1635,9 +1637,7 @@ def mocked_connection_exists(mocker): @pytest.fixture def mocked_generic_connection_modify(mocker): - mocker_set(mocker, - connection_exists=True, - changed_return=(True, dict())) + mocker_set(mocker, connection_exists=True, changed_return=(True, dict())) # TODO: overridden below! 
@@ -1650,430 +1650,415 @@ def mocked_generic_connection_modify(mocker): @pytest.fixture def mocked_generic_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_GENERIC_MODIFY_ROUTING_RULES_SHOW_OUTPUT, "")) + mocker_set( + mocker, connection_exists=True, execute_return=(0, TESTCASE_GENERIC_MODIFY_ROUTING_RULES_SHOW_OUTPUT, "") + ) @pytest.fixture def mocked_generic_connection_dns_search_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_GENERIC_DNS4_SEARCH_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_GENERIC_DNS4_SEARCH_SHOW_OUTPUT, "")) @pytest.fixture def mocked_generic_connection_dns_options_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_GENERIC_DNS4_OPTIONS_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_GENERIC_DNS4_OPTIONS_SHOW_OUTPUT, "")) @pytest.fixture def mocked_generic_connection_zone_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_GENERIC_ZONE_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_GENERIC_ZONE_SHOW_OUTPUT, "")) @pytest.fixture def mocked_bond_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_BOND_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_BOND_SHOW_OUTPUT, "")) @pytest.fixture def mocked_bridge_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_BRIDGE_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_BRIDGE_SHOW_OUTPUT, "")) @pytest.fixture def mocked_bridge_slave_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_BRIDGE_SLAVE_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_BRIDGE_SLAVE_SHOW_OUTPUT, "")) @pytest.fixture def mocked_team_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_TEAM_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_TEAM_SHOW_OUTPUT, "")) @pytest.fixture def mocked_team_runner_fast_rate_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_TEAM_RUNNER_FAST_RATE_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_TEAM_RUNNER_FAST_RATE_SHOW_OUTPUT, "")) @pytest.fixture def mocked_team_slave_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_TEAM_SLAVE_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_TEAM_SLAVE_SHOW_OUTPUT, "")) @pytest.fixture def mocked_vlan_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_VLAN_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_VLAN_SHOW_OUTPUT, "")) @pytest.fixture def mocked_vxlan_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_VXLAN_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_VXLAN_SHOW_OUTPUT, "")) @pytest.fixture def mocked_gre_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - 
execute_return=(0, TESTCASE_GRE_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_GRE_SHOW_OUTPUT, "")) @pytest.fixture def mocked_ipip_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_IPIP_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_IPIP_SHOW_OUTPUT, "")) @pytest.fixture def mocked_sit_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_SIT_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_SIT_SHOW_OUTPUT, "")) @pytest.fixture def mocked_ethernet_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_ETHERNET_DHCP, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_ETHERNET_DHCP, "")) @pytest.fixture def mocked_ethernet_connection_dhcp_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_ETHERNET_DHCP_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_ETHERNET_DHCP_SHOW_OUTPUT, "")) @pytest.fixture def mocked_ethernet_connection_static_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_ETHERNET_STATIC_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_ETHERNET_STATIC_SHOW_OUTPUT, "")) @pytest.fixture def mocked_ethernet_connection_static_multiple_ip4_addresses_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES_SHOW_OUTPUT, "")) + mocker_set( + mocker, + connection_exists=True, + execute_return=(0, TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES_SHOW_OUTPUT, ""), + ) @pytest.fixture def mocked_ethernet_connection_static_ip6_privacy_and_addr_gen_mode_unchange(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_ETHERNET_STATIC_IP6_PRIVACY_AND_ADDR_GEN_MODE_UNCHANGED_OUTPUT, "")) + mocker_set( + mocker, + connection_exists=True, + execute_return=(0, TESTCASE_ETHERNET_STATIC_IP6_PRIVACY_AND_ADDR_GEN_MODE_UNCHANGED_OUTPUT, ""), + ) @pytest.fixture def mocked_ethernet_connection_static_multiple_ip6_addresses_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_ETHERNET_STATIC_MULTIPLE_IP6_ADDRESSES_SHOW_OUTPUT, "")) + mocker_set( + mocker, + connection_exists=True, + execute_return=(0, TESTCASE_ETHERNET_STATIC_MULTIPLE_IP6_ADDRESSES_SHOW_OUTPUT, ""), + ) @pytest.fixture def mocked_ethernet_connection_static_modify(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_ETHERNET_STATIC_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + connection_exists=True, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_ETHERNET_STATIC_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) @pytest.fixture def mocked_ethernet_connection_with_ipv6_static_address_static_route_create(mocker): - mocker_set(mocker, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) @pytest.fixture def 
mocked_ethernet_connection_with_ipv4_static_address_static_route_metric_modify(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + connection_exists=True, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) @pytest.fixture def mocked_ethernet_connection_with_ipv4_static_address_static_route_metric_clear(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + connection_exists=True, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) @pytest.fixture def mocked_ethernet_connection_with_ipv6_static_address_static_route_metric_modify(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_ETHERNET_MOD_IPV6_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + connection_exists=True, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_ETHERNET_MOD_IPV6_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) @pytest.fixture def mocked_ethernet_connection_with_ipv6_static_address_multiple_static_routes_create(mocker): - mocker_set(mocker, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) @pytest.fixture def mocked_ethernet_connection_with_sriov_vfs_create(mocker): - mocker_set(mocker, - execute_return=(0, TESTCASE_ETHERNET_ADD_SRIOV_VFS_SHOW_OUTPUT, "")) + mocker_set(mocker, execute_return=(0, TESTCASE_ETHERNET_ADD_SRIOV_VFS_SHOW_OUTPUT, "")) @pytest.fixture def mocked_ethernet_connection_with_ipv6_static_address_static_route_with_metric_create(mocker): - mocker_set(mocker, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) @pytest.fixture def mocked_ethernet_connection_with_ipv6_static_address_multiple_static_routes_with_metric_create(mocker): - mocker_set(mocker, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES_AND_METRIC_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES_AND_METRIC_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) @pytest.fixture def mocked_ethernet_connection_with_ipv6_address_static_modify(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_ETHERNET_STATIC_IP6_ADDRESS_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + connection_exists=True, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_ETHERNET_STATIC_IP6_ADDRESS_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) 
@pytest.fixture def mocked_ethernet_connection_dhcp_to_static(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_ETHERNET_DHCP_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + connection_exists=True, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_ETHERNET_DHCP_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) @pytest.fixture def mocked_wireless_create(mocker): - mocker_set(mocker, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_DEFAULT_WIRELESS_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_DEFAULT_WIRELESS_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) @pytest.fixture def mocked_secure_wireless_create(mocker): - mocker_set(mocker, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), - (0, "", ""), - (0, "", ""), - )) + mocker_set( + mocker, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), + (0, "", ""), + (0, "", ""), + ), + ) @pytest.fixture def mocked_secure_wireless_create_failure(mocker): - mocker_set(mocker, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), - (1, "", ""), - )) + mocker_set( + mocker, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), + (1, "", ""), + ), + ) @pytest.fixture def mocked_secure_wireless_modify(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), - (0, "", ""), - (0, "", ""), - (0, "", ""), - )) + mocker_set( + mocker, + connection_exists=True, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), + (0, "", ""), + (0, "", ""), + (0, "", ""), + ), + ) @pytest.fixture def mocked_secure_wireless_modify_failure(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), - (0, "", ""), - (1, "", ""), - )) + mocker_set( + mocker, + connection_exists=True, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), + (0, "", ""), + (1, "", ""), + ), + ) @pytest.fixture def mocked_dummy_connection_static_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_DUMMY_STATIC_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_DUMMY_STATIC_SHOW_OUTPUT, "")) @pytest.fixture def mocked_dummy_connection_static_without_mtu_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_DUMMY_STATIC_WITHOUT_MTU_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_DUMMY_STATIC_WITHOUT_MTU_SHOW_OUTPUT, "")) @pytest.fixture def mocked_dummy_connection_static_with_custom_mtu_modify(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_DUMMY_STATIC_WITH_CUSTOM_MTU_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + connection_exists=True, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_DUMMY_STATIC_WITH_CUSTOM_MTU_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) @pytest.fixture def mocked_gsm_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - 
execute_return=(0, TESTCASE_GSM_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_GSM_SHOW_OUTPUT, "")) @pytest.fixture def mocked_wireguard_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_WIREGUARD_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_WIREGUARD_SHOW_OUTPUT, "")) @pytest.fixture def mocked_vpn_l2tp_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_VPN_L2TP_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_VPN_L2TP_SHOW_OUTPUT, "")) @pytest.fixture def mocked_vpn_pptp_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_VPN_PPTP_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_VPN_PPTP_SHOW_OUTPUT, "")) @pytest.fixture def mocked_infiniband_connection_static_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_INFINIBAND_STATIC_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_INFINIBAND_STATIC_SHOW_OUTPUT, "")) @pytest.fixture def mocked_infiniband_connection_static_transport_mode_connected_modify(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_INFINIBAND_STATIC_MODIFY_TRANSPORT_MODE_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + connection_exists=True, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_INFINIBAND_STATIC_MODIFY_TRANSPORT_MODE_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) @pytest.fixture def mocked_macvlan_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_MACVLAN_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_MACVLAN_SHOW_OUTPUT, "")) @pytest.fixture def mocked_generic_connection_diff_check(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_GENERIC_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_GENERIC_SHOW_OUTPUT, "")) @pytest.fixture def mocked_loopback_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_LOOPBACK_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_LOOPBACK_SHOW_OUTPUT, "")) @pytest.fixture def mocked_loopback_connection_modify(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_LOOPBACK_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + connection_exists=True, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_LOOPBACK_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) @pytest.fixture def mocked_vrf_connection_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_VRF_SHOW_OUTPUT, "")) + mocker_set(mocker, connection_exists=True, execute_return=(0, TESTCASE_VRF_SHOW_OUTPUT, "")) -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BOND, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_BOND, indirect=["patch_ansible_module"]) def test_bond_connection_create(mocked_generic_connection_create, capfd): """ Test : Bond connection created @@ -2085,28 +2070,36 @@ def 
test_bond_connection_create(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'bond' - assert args[0][5] == 'con-name' - assert args[0][6] == 'non_existent_nw_device' - - for param in ['ipv4.gateway', 'primary', 'connection.autoconnect', - 'connection.interface-name', 'bond_non_existant', - 'mode', 'active-backup', 'ipv4.addresses', - '+bond.options', 'xmit_hash_policy=layer3+4']: + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "bond" + assert args[0][5] == "con-name" + assert args[0][6] == "non_existent_nw_device" + + for param in [ + "ipv4.gateway", + "primary", + "connection.autoconnect", + "connection.interface-name", + "bond_non_existant", + "mode", + "active-backup", + "ipv4.addresses", + "+bond.options", + "xmit_hash_policy=layer3+4", + ]: assert param in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] @pytest.mark.skip(reason="Currently broken") # TODO: fix me! -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BOND, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_BOND, indirect=["patch_ansible_module"]) def test_bond_connection_unchanged(mocked_bond_connection_unchanged, capfd): """ Test : Bond connection unchanged @@ -2116,11 +2109,11 @@ def test_bond_connection_unchanged(mocked_bond_connection_unchanged, capfd): out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GENERIC, indirect=["patch_ansible_module"]) def test_generic_connection_create(mocked_generic_connection_create, capfd): """ Test : Generic connection created @@ -2132,24 +2125,24 @@ def test_generic_connection_create(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'generic' - assert args[0][5] == 'con-name' - assert args[0][6] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "generic" + assert args[0][5] == "con-name" + assert args[0][6] == "non_existent_nw_device" - for param in ['connection.autoconnect', 'ipv4.gateway', 'ipv4.addresses']: + for param in ["connection.autoconnect", "ipv4.gateway", "ipv4.addresses"]: assert param in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GENERIC, indirect=["patch_ansible_module"]) def 
test_generic_connection_modify(mocked_generic_connection_modify, capfd): """ Test : Generic connection modify @@ -2161,21 +2154,21 @@ def test_generic_connection_modify(mocked_generic_connection_modify, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "modify" + assert args[0][3] == "non_existent_nw_device" - for param in ['ipv4.gateway', 'ipv4.addresses']: + for param in ["ipv4.gateway", "ipv4.addresses"]: assert param in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GENERIC, indirect=["patch_ansible_module"]) def test_generic_connection_unchanged(mocked_generic_connection_unchanged, capfd): """ Test : Generic connection unchanged @@ -2185,11 +2178,13 @@ def test_generic_connection_unchanged(mocked_generic_connection_unchanged, capfd out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_MODIFY_ROUTING_RULES, indirect=['patch_ansible_module']) +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_GENERIC_MODIFY_ROUTING_RULES, indirect=["patch_ansible_module"] +) def test_generic_connection_modify_routing_rules4(mocked_generic_connection_create, capfd): """ Test : Generic connection modified with routing-rules4 @@ -2201,15 +2196,15 @@ def test_generic_connection_modify_routing_rules4(mocked_generic_connection_crea arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert 'ipv4.routing-rules' in args[0] + assert "ipv4.routing-rules" in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DNS4_SEARCH, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GENERIC_DNS4_SEARCH, indirect=["patch_ansible_module"]) def test_generic_connection_create_dns_search(mocked_generic_connection_create, capfd): """ Test : Generic connection created with dns search @@ -2221,16 +2216,16 @@ def test_generic_connection_create_dns_search(mocked_generic_connection_create, arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert 'ipv4.dns-search' in args[0] - assert 'ipv6.dns-search' in args[0] + assert "ipv4.dns-search" in args[0] + assert "ipv6.dns-search" in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DNS4_SEARCH, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GENERIC_DNS4_SEARCH, indirect=["patch_ansible_module"]) def 
test_generic_connection_modify_dns_search(mocked_generic_connection_create, capfd): """ Test : Generic connection modified with dns search @@ -2242,16 +2237,16 @@ def test_generic_connection_modify_dns_search(mocked_generic_connection_create, arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert 'ipv4.dns-search' in args[0] - assert 'ipv6.dns-search' in args[0] + assert "ipv4.dns-search" in args[0] + assert "ipv6.dns-search" in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DNS4_SEARCH, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GENERIC_DNS4_SEARCH, indirect=["patch_ansible_module"]) def test_generic_connection_dns_search_unchanged(mocked_generic_connection_dns_search_unchanged, capfd): """ Test : Generic connection with dns search unchanged @@ -2261,11 +2256,11 @@ def test_generic_connection_dns_search_unchanged(mocked_generic_connection_dns_s out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DNS4_OPTIONS, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GENERIC_DNS4_OPTIONS, indirect=["patch_ansible_module"]) def test_generic_connection_create_dns_options(mocked_generic_connection_create, capfd): """ Test : Generic connection created with dns options @@ -2277,16 +2272,16 @@ def test_generic_connection_create_dns_options(mocked_generic_connection_create, arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert 'ipv4.dns-options' in args[0] - assert 'ipv6.dns-options' in args[0] + assert "ipv4.dns-options" in args[0] + assert "ipv6.dns-options" in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DNS4_OPTIONS, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GENERIC_DNS4_OPTIONS, indirect=["patch_ansible_module"]) def test_generic_connection_modify_dns_options(mocked_generic_connection_create, capfd): """ Test : Generic connection modified with dns options @@ -2298,16 +2293,16 @@ def test_generic_connection_modify_dns_options(mocked_generic_connection_create, arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert 'ipv4.dns-options' in args[0] - assert 'ipv6.dns-options' in args[0] + assert "ipv4.dns-options" in args[0] + assert "ipv6.dns-options" in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DNS4_OPTIONS, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GENERIC_DNS4_OPTIONS, indirect=["patch_ansible_module"]) def test_generic_connection_dns_options_unchanged(mocked_generic_connection_dns_options_unchanged, capfd): """ Test : Generic connection with dns options unchanged @@ -2317,11 
+2312,11 @@ def test_generic_connection_dns_options_unchanged(mocked_generic_connection_dns_ out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_CONNECTION, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_CONNECTION, indirect=["patch_ansible_module"]) def test_dns4_none(mocked_connection_exists, capfd): """ Test if DNS4 param is None @@ -2331,11 +2326,11 @@ def test_dns4_none(mocked_connection_exists, capfd): out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_ZONE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GENERIC_ZONE, indirect=["patch_ansible_module"]) def test_generic_connection_create_zone(mocked_generic_connection_create, capfd): """ Test : Generic connection created with zone @@ -2347,15 +2342,15 @@ def test_generic_connection_create_zone(mocked_generic_connection_create, capfd) arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert 'connection.zone' in args[0] + assert "connection.zone" in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_ZONE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GENERIC_ZONE, indirect=["patch_ansible_module"]) def test_generic_connection_modify_zone(mocked_generic_connection_create, capfd): """ Test : Generic connection modified with zone @@ -2367,15 +2362,15 @@ def test_generic_connection_modify_zone(mocked_generic_connection_create, capfd) arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert 'connection.zone' in args[0] + assert "connection.zone" in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_ZONE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GENERIC_ZONE, indirect=["patch_ansible_module"]) def test_generic_connection_zone_unchanged(mocked_generic_connection_zone_unchanged, capfd): """ Test : Generic connection with zone unchanged @@ -2385,11 +2380,11 @@ def test_generic_connection_zone_unchanged(mocked_generic_connection_zone_unchan out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_ZONE_ONLY, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GENERIC_ZONE_ONLY, indirect=["patch_ansible_module"]) def test_generic_connection_modify_zone_only(mocked_generic_connection_modify, capfd): """ Test : Generic connection modified with zone only @@ -2401,19 +2396,19 @@ def test_generic_connection_modify_zone_only(mocked_generic_connection_modify, c arg_list 
= nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert 'connection.zone' in args[0] - assert 'ipv4.addresses' not in args[0] - assert 'ipv4.gateway' not in args[0] - assert 'ipv6.addresses' not in args[0] - assert 'ipv6.gateway' not in args[0] + assert "connection.zone" in args[0] + assert "ipv4.addresses" not in args[0] + assert "ipv4.gateway" not in args[0] + assert "ipv6.addresses" not in args[0] + assert "ipv6.gateway" not in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_CONNECTION, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_CONNECTION, indirect=["patch_ansible_module"]) def test_zone_none(mocked_connection_exists, capfd): """ Test if zone param is None @@ -2423,11 +2418,11 @@ def test_zone_none(mocked_connection_exists, capfd): out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_BRIDGE, indirect=["patch_ansible_module"]) def test_create_bridge(mocked_generic_connection_create, capfd): """ Test if Bridge created @@ -2439,25 +2434,34 @@ def test_create_bridge(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'bridge' - assert args[0][5] == 'con-name' - assert args[0][6] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "bridge" + assert args[0][5] == "con-name" + assert args[0][6] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', '10.10.10.1', 'bridge.max-age', '100', 'bridge.stp', 'yes']: + for param in [ + "ipv4.addresses", + "10.10.10.10/24", + "ipv4.gateway", + "10.10.10.1", + "bridge.max-age", + "100", + "bridge.stp", + "yes", + ]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_BRIDGE, indirect=["patch_ansible_module"]) def test_mod_bridge(mocked_generic_connection_modify, capfd): """ Test if Bridge modified @@ -2470,22 +2474,31 @@ def test_mod_bridge(mocked_generic_connection_modify, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "modify" + assert args[0][3] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', 
'10.10.10.1', 'bridge.max-age', '100', 'bridge.stp', 'yes']: + for param in [ + "ipv4.addresses", + "10.10.10.10/24", + "ipv4.gateway", + "10.10.10.1", + "bridge.max-age", + "100", + "bridge.stp", + "yes", + ]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_BRIDGE, indirect=["patch_ansible_module"]) def test_bridge_connection_unchanged(mocked_bridge_connection_unchanged, capfd): """ Test : Bridge connection unchanged @@ -2495,11 +2508,11 @@ def test_bridge_connection_unchanged(mocked_bridge_connection_unchanged, capfd): out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE_SLAVE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_BRIDGE_SLAVE, indirect=["patch_ansible_module"]) def test_create_bridge_slave(mocked_generic_connection_create, capfd): """ Test if Bridge_slave created @@ -2512,25 +2525,25 @@ def test_create_bridge_slave(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'bridge-slave' - assert args[0][5] == 'con-name' - assert args[0][6] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "bridge-slave" + assert args[0][5] == "con-name" + assert args[0][6] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['bridge-port.path-cost', '100']: + for param in ["bridge-port.path-cost", "100"]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE_SLAVE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_BRIDGE_SLAVE, indirect=["patch_ansible_module"]) def test_mod_bridge_slave(mocked_generic_connection_modify, capfd): """ Test if Bridge_slave modified @@ -2543,22 +2556,22 @@ def test_mod_bridge_slave(mocked_generic_connection_modify, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "modify" + assert args[0][3] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['bridge-port.path-cost', '100']: + for param in ["bridge-port.path-cost", "100"]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] 
-@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE_SLAVE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_BRIDGE_SLAVE, indirect=["patch_ansible_module"]) def test_bridge_slave_unchanged(mocked_bridge_slave_unchanged, capfd): """ Test : Bridge-slave connection unchanged @@ -2568,11 +2581,11 @@ def test_bridge_slave_unchanged(mocked_bridge_slave_unchanged, capfd): out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_TEAM, indirect=["patch_ansible_module"]) def test_team_connection_create(mocked_generic_connection_create, capfd): """ Test : Team connection created @@ -2584,24 +2597,24 @@ def test_team_connection_create(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'team' - assert args[0][5] == 'con-name' - assert args[0][6] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "team" + assert args[0][5] == "con-name" + assert args[0][6] == "non_existent_nw_device" - for param in ['connection.autoconnect', 'connection.interface-name', 'team0_non_existant']: + for param in ["connection.autoconnect", "connection.interface-name", "team0_non_existant"]: assert param in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_TEAM, indirect=["patch_ansible_module"]) def test_team_connection_unchanged(mocked_team_connection_unchanged, capfd): """ Test : Team connection unchanged @@ -2611,11 +2624,11 @@ def test_team_connection_unchanged(mocked_team_connection_unchanged, capfd): out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_HWADDR_POLICY_FAILS, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_TEAM_HWADDR_POLICY_FAILS, indirect=["patch_ansible_module"]) def test_team_connection_create_hwaddr_policy_fails(mocked_generic_connection_create, capfd): """ Test : Team connection created @@ -2625,11 +2638,11 @@ def test_team_connection_create_hwaddr_policy_fails(mocked_generic_connection_cr out, err = capfd.readouterr() results = json.loads(out) - assert results.get('failed') - assert results['msg'] == "Runner-hwaddr-policy is only allowed for runner activebackup" + assert results.get("failed") + assert results["msg"] == "Runner-hwaddr-policy is only allowed for runner activebackup" -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_RUNNER_FAST_RATE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_TEAM_RUNNER_FAST_RATE, 
indirect=["patch_ansible_module"]) def test_team_runner_fast_rate_connection_create(mocked_generic_connection_create, capfd): """ Test : Team connection created with runner_fast_rate parameter @@ -2641,24 +2654,32 @@ def test_team_runner_fast_rate_connection_create(mocked_generic_connection_creat arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'team' - assert args[0][5] == 'con-name' - assert args[0][6] == 'non_existent_nw_device' - - for param in ['connection.autoconnect', 'connection.interface-name', 'team0_non_existant', 'team.runner', 'lacp', 'team.runner-fast-rate', 'yes']: + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "team" + assert args[0][5] == "con-name" + assert args[0][6] == "non_existent_nw_device" + + for param in [ + "connection.autoconnect", + "connection.interface-name", + "team0_non_existant", + "team.runner", + "lacp", + "team.runner-fast-rate", + "yes", + ]: assert param in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_RUNNER_FAST_RATE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_TEAM_RUNNER_FAST_RATE, indirect=["patch_ansible_module"]) def test_team_runner_fast_rate_connection_unchanged(mocked_team_runner_fast_rate_connection_unchanged, capfd): """ Test : Team connection unchanged with runner_fast_rate parameter @@ -2668,11 +2689,13 @@ def test_team_runner_fast_rate_connection_unchanged(mocked_team_runner_fast_rate out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_RUNNER_FAST_RATE_FAILS, indirect=['patch_ansible_module']) +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_TEAM_RUNNER_FAST_RATE_FAILS, indirect=["patch_ansible_module"] +) def test_team_connection_create_runner_fast_rate_fails(mocked_generic_connection_create, capfd): """ Test : Team connection with runner_fast_rate enabled @@ -2682,11 +2705,11 @@ def test_team_connection_create_runner_fast_rate_fails(mocked_generic_connection out, err = capfd.readouterr() results = json.loads(out) - assert results.get('failed') - assert results['msg'] == "runner-fast-rate is only allowed for runner lacp" + assert results.get("failed") + assert results["msg"] == "runner-fast-rate is only allowed for runner lacp" -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_SLAVE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_TEAM_SLAVE, indirect=["patch_ansible_module"]) def test_create_team_slave(mocked_generic_connection_create, capfd): """ Test if Team_slave created @@ -2699,24 +2722,30 @@ def test_create_team_slave(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'team-slave' - assert args[0][5] == 
'con-name' - assert args[0][6] == 'non_existent_nw_slaved_device' - - for param in ['connection.autoconnect', 'connection.interface-name', 'connection.master', 'team0_non_existant', 'connection.slave-type']: + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "team-slave" + assert args[0][5] == "con-name" + assert args[0][6] == "non_existent_nw_slaved_device" + + for param in [ + "connection.autoconnect", + "connection.interface-name", + "connection.master", + "team0_non_existant", + "connection.slave-type", + ]: assert param in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_SLAVE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_TEAM_SLAVE, indirect=["patch_ansible_module"]) def test_team_slave_connection_unchanged(mocked_team_slave_connection_unchanged, capfd): """ Test : Team slave connection unchanged @@ -2726,11 +2755,11 @@ def test_team_slave_connection_unchanged(mocked_team_slave_connection_unchanged, out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VLAN, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_VLAN, indirect=["patch_ansible_module"]) def test_create_vlan_con(mocked_generic_connection_create, capfd): """ Test if VLAN created @@ -2743,25 +2772,25 @@ def test_create_vlan_con(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'vlan' - assert args[0][5] == 'con-name' - assert args[0][6] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "vlan" + assert args[0][5] == "con-name" + assert args[0][6] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', '10.10.10.1', 'vlan.id', '10']: + for param in ["ipv4.addresses", "10.10.10.10/24", "ipv4.gateway", "10.10.10.1", "vlan.id", "10"]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VLAN, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_VLAN, indirect=["patch_ansible_module"]) def test_mod_vlan_conn(mocked_generic_connection_modify, capfd): """ Test if VLAN modified @@ -2774,22 +2803,22 @@ def test_mod_vlan_conn(mocked_generic_connection_modify, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] 
== "modify" + assert args[0][3] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', '10.10.10.1', 'vlan.id', '10']: + for param in ["ipv4.addresses", "10.10.10.10/24", "ipv4.gateway", "10.10.10.1", "vlan.id", "10"]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VLAN, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_VLAN, indirect=["patch_ansible_module"]) def test_vlan_connection_unchanged(mocked_vlan_connection_unchanged, capfd): """ Test : VLAN connection unchanged @@ -2799,11 +2828,11 @@ def test_vlan_connection_unchanged(mocked_vlan_connection_unchanged, capfd): out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VXLAN, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_VXLAN, indirect=["patch_ansible_module"]) def test_create_vxlan(mocked_generic_connection_create, capfd): """ Test if vxlan created @@ -2815,26 +2844,34 @@ def test_create_vxlan(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'vxlan' - assert args[0][5] == 'con-name' - assert args[0][6] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "vxlan" + assert args[0][5] == "con-name" + assert args[0][6] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['connection.interface-name', 'vxlan-existent_nw_device', - 'vxlan.local', '192.168.225.5', 'vxlan.remote', '192.168.225.6', 'vxlan.id', '11']: + for param in [ + "connection.interface-name", + "vxlan-existent_nw_device", + "vxlan.local", + "192.168.225.5", + "vxlan.remote", + "192.168.225.6", + "vxlan.id", + "11", + ]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VXLAN, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_VXLAN, indirect=["patch_ansible_module"]) def test_vxlan_mod(mocked_generic_connection_modify, capfd): """ Test if vxlan modified @@ -2846,22 +2883,22 @@ def test_vxlan_mod(mocked_generic_connection_modify, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "modify" + assert args[0][3] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['vxlan.local', '192.168.225.5', 'vxlan.remote', '192.168.225.6', 'vxlan.id', '11']: + for param in 
["vxlan.local", "192.168.225.5", "vxlan.remote", "192.168.225.6", "vxlan.id", "11"]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VXLAN, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_VXLAN, indirect=["patch_ansible_module"]) def test_vxlan_connection_unchanged(mocked_vxlan_connection_unchanged, capfd): """ Test : VxLAN connection unchanged @@ -2871,11 +2908,11 @@ def test_vxlan_connection_unchanged(mocked_vxlan_connection_unchanged, capfd): out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_IPIP, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_IPIP, indirect=["patch_ansible_module"]) def test_create_ipip(mocked_generic_connection_create, capfd): """ Test if ipip created @@ -2887,29 +2924,36 @@ def test_create_ipip(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'ip-tunnel' - assert args[0][5] == 'con-name' - assert args[0][6] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "ip-tunnel" + assert args[0][5] == "con-name" + assert args[0][6] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['connection.interface-name', 'ipip-existent_nw_device', - 'ip-tunnel.local', '192.168.225.5', - 'ip-tunnel.mode', 'ipip', - 'ip-tunnel.parent', 'non_existent_ipip_device', - 'ip-tunnel.remote', '192.168.225.6']: + for param in [ + "connection.interface-name", + "ipip-existent_nw_device", + "ip-tunnel.local", + "192.168.225.5", + "ip-tunnel.mode", + "ipip", + "ip-tunnel.parent", + "non_existent_ipip_device", + "ip-tunnel.remote", + "192.168.225.6", + ]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_IPIP, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_IPIP, indirect=["patch_ansible_module"]) def test_ipip_mod(mocked_generic_connection_modify, capfd): """ Test if ipip modified @@ -2921,22 +2965,22 @@ def test_ipip_mod(mocked_generic_connection_modify, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "modify" + assert args[0][3] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['ip-tunnel.local', '192.168.225.5', 'ip-tunnel.remote', '192.168.225.6']: + for param in ["ip-tunnel.local", "192.168.225.5", "ip-tunnel.remote", "192.168.225.6"]: assert param 
in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_IPIP, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_IPIP, indirect=["patch_ansible_module"]) def test_ipip_connection_unchanged(mocked_ipip_connection_unchanged, capfd): """ Test : IPIP connection unchanged @@ -2946,11 +2990,11 @@ def test_ipip_connection_unchanged(mocked_ipip_connection_unchanged, capfd): out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SIT, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_SIT, indirect=["patch_ansible_module"]) def test_create_sit(mocked_generic_connection_create, capfd): """ Test if sit created @@ -2962,29 +3006,36 @@ def test_create_sit(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'ip-tunnel' - assert args[0][5] == 'con-name' - assert args[0][6] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "ip-tunnel" + assert args[0][5] == "con-name" + assert args[0][6] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['connection.interface-name', 'sit-existent_nw_device', - 'ip-tunnel.local', '192.168.225.5', - 'ip-tunnel.mode', 'sit', - 'ip-tunnel.parent', 'non_existent_sit_device', - 'ip-tunnel.remote', '192.168.225.6']: + for param in [ + "connection.interface-name", + "sit-existent_nw_device", + "ip-tunnel.local", + "192.168.225.5", + "ip-tunnel.mode", + "sit", + "ip-tunnel.parent", + "non_existent_sit_device", + "ip-tunnel.remote", + "192.168.225.6", + ]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SIT, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_SIT, indirect=["patch_ansible_module"]) def test_sit_mod(mocked_generic_connection_modify, capfd): """ Test if sit modified @@ -2996,22 +3047,22 @@ def test_sit_mod(mocked_generic_connection_modify, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "modify" + assert args[0][3] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['ip-tunnel.local', '192.168.225.5', 'ip-tunnel.remote', '192.168.225.6']: + for param in ["ip-tunnel.local", "192.168.225.5", "ip-tunnel.remote", "192.168.225.6"]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert 
results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SIT, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_SIT, indirect=["patch_ansible_module"]) def test_sit_connection_unchanged(mocked_sit_connection_unchanged, capfd): """ Test : SIT connection unchanged @@ -3021,11 +3072,11 @@ def test_sit_connection_unchanged(mocked_sit_connection_unchanged, capfd): out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_DHCP, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_ETHERNET_DHCP, indirect=["patch_ansible_module"]) def test_eth_dhcp_client_id_con_create(mocked_generic_connection_create, capfd): """ Test : Ethernet connection created with DHCP_CLIENT_ID @@ -3037,15 +3088,15 @@ def test_eth_dhcp_client_id_con_create(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert 'ipv4.dhcp-client-id' in args[0] + assert "ipv4.dhcp-client-id" in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GRE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GRE, indirect=["patch_ansible_module"]) def test_create_gre(mocked_generic_connection_create, capfd): """ Test if gre created @@ -3057,31 +3108,40 @@ def test_create_gre(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'ip-tunnel' - assert args[0][5] == 'con-name' - assert args[0][6] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "ip-tunnel" + assert args[0][5] == "con-name" + assert args[0][6] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['connection.interface-name', 'gre-existent_nw_device', - 'ip-tunnel.local', '192.168.225.5', - 'ip-tunnel.mode', 'gre', - 'ip-tunnel.parent', 'non_existent_gre_device', - 'ip-tunnel.remote', '192.168.225.6', - 'ip-tunnel.input-key', '1', - 'ip-tunnel.output-key', '2']: + for param in [ + "connection.interface-name", + "gre-existent_nw_device", + "ip-tunnel.local", + "192.168.225.5", + "ip-tunnel.mode", + "gre", + "ip-tunnel.parent", + "non_existent_gre_device", + "ip-tunnel.remote", + "192.168.225.6", + "ip-tunnel.input-key", + "1", + "ip-tunnel.output-key", + "2", + ]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GRE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GRE, indirect=["patch_ansible_module"]) def test_gre_mod(mocked_generic_connection_modify, capfd): """ Test if gre modified 
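Every hunk in this file applies the same mechanical transformation: string literals switch from single to double quotes, and any sequence literal the formatter splits across lines is exploded to one element per line with a trailing comma. A minimal sketch of that rule, assuming a Black-style formatter (such as ruff format) with defaults; `expected` is an illustrative name, not taken from this patch:

    # Before: a long inline list with single-quoted strings.
    expected = ['connection.interface-name', 'gre-existent_nw_device', 'ip-tunnel.local', '192.168.225.5']

    # After: double quotes throughout; once the formatter splits the list,
    # the added ("magic") trailing comma keeps it exploded on reformatting,
    # one element per line.
    expected = [
        "connection.interface-name",
        "gre-existent_nw_device",
        "ip-tunnel.local",
        "192.168.225.5",
    ]

Lists that still fit within the configured line length are only requoted in place, which is why the shorter `for param in [...]` loops below stay on a single line.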
@@ -3093,22 +3153,22 @@ def test_gre_mod(mocked_generic_connection_modify, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "modify" + assert args[0][3] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['ip-tunnel.local', '192.168.225.5', 'ip-tunnel.remote', '192.168.225.6']: + for param in ["ip-tunnel.local", "192.168.225.5", "ip-tunnel.remote", "192.168.225.6"]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GRE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GRE, indirect=["patch_ansible_module"]) def test_gre_connection_unchanged(mocked_gre_connection_unchanged, capfd): """ Test : GRE connection unchanged @@ -3118,11 +3178,11 @@ def test_gre_connection_unchanged(mocked_gre_connection_unchanged, capfd): out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_DHCP, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_ETHERNET_DHCP, indirect=["patch_ansible_module"]) def test_ethernet_connection_dhcp_unchanged(mocked_ethernet_connection_dhcp_unchanged, capfd): """ Test : Ethernet connection with DHCP_CLIENT_ID unchanged @@ -3132,11 +3192,11 @@ def test_ethernet_connection_dhcp_unchanged(mocked_ethernet_connection_dhcp_unch out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_ETHERNET_STATIC, indirect=["patch_ansible_module"]) def test_modify_ethernet_dhcp_to_static(mocked_ethernet_connection_dhcp_to_static, capfd): """ Test : Modify ethernet connection from DHCP to static @@ -3148,21 +3208,21 @@ def test_modify_ethernet_dhcp_to_static(mocked_ethernet_connection_dhcp_to_stati arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[1] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "modify" + assert args[0][3] == "non_existent_nw_device" - for param in ['ipv4.method', 'ipv4.gateway', 'ipv4.addresses']: + for param in ["ipv4.method", "ipv4.gateway", "ipv4.addresses"]: assert param in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_ETHERNET_STATIC, 
indirect=["patch_ansible_module"]) def test_create_ethernet_static(mocked_generic_connection_create, capfd): """ Test : Create ethernet connection with static IP configuration @@ -3175,34 +3235,40 @@ def test_create_ethernet_static(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[0] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'ethernet' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "ethernet" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'ethernet_non_existant', - 'ipv4.addresses', '10.10.10.10/24', - 'ipv4.gateway', '10.10.10.1', - 'ipv4.dns', '1.1.1.1,8.8.8.8']: + for param in [ + "connection.interface-name", + "ethernet_non_existant", + "ipv4.addresses", + "10.10.10.10/24", + "ipv4.gateway", + "10.10.10.1", + "ipv4.dns", + "1.1.1.1,8.8.8.8", + ]: assert param in add_args_text up_args, up_kw = arg_list[1] - assert up_args[0][0] == '/usr/bin/nmcli' - assert up_args[0][1] == 'con' - assert up_args[0][2] == 'up' - assert up_args[0][3] == 'non_existent_nw_device' + assert up_args[0][0] == "/usr/bin/nmcli" + assert up_args[0][1] == "con" + assert up_args[0][2] == "up" + assert up_args[0][3] == "non_existent_nw_device" out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_ETHERNET_STATIC, indirect=["patch_ansible_module"]) def test_ethernet_connection_static_unchanged(mocked_ethernet_connection_static_unchanged, capfd): """ Test : Ethernet connection with static IP configuration unchanged @@ -3212,13 +3278,16 @@ def test_ethernet_connection_static_unchanged(mocked_ethernet_connection_static_ out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC, indirect=['patch_ansible_module']) +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC, indirect=["patch_ansible_module"] +) def test_ethernet_connection_static_ipv4_address_static_route_with_metric_modify( - mocked_ethernet_connection_with_ipv4_static_address_static_route_metric_modify, capfd): + mocked_ethernet_connection_with_ipv4_static_address_static_route_metric_modify, capfd +): """ Test : Modify ethernet connection with static IPv4 address and static route """ @@ -3228,27 +3297,31 @@ def test_ethernet_connection_static_ipv4_address_static_route_with_metric_modify arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[1] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'modify' - assert add_args[0][3] == 'non_existent_nw_device' + assert 
add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "modify" + assert add_args[0][3] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['ipv4.routes', '192.168.200.0/24 192.168.1.1', - 'ipv4.route-metric', '10']: + for param in ["ipv4.routes", "192.168.200.0/24 192.168.1.1", "ipv4.route-metric", "10"]: assert param in add_args_text out, err = capfd.readouterr() results = json.loads(out) - assert results.get('changed') is True - assert not results.get('failed') + assert results.get("changed") is True + assert not results.get("failed") -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC_CLEAR, indirect=['patch_ansible_module']) +@pytest.mark.parametrize( + "patch_ansible_module", + TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC_CLEAR, + indirect=["patch_ansible_module"], +) def test_ethernet_connection_static_ipv4_address_static_route_with_metric_clear( - mocked_ethernet_connection_with_ipv4_static_address_static_route_metric_clear, capfd): + mocked_ethernet_connection_with_ipv4_static_address_static_route_metric_clear, capfd +): """ Test : Modify ethernet connection with static IPv4 address and static route """ @@ -3258,28 +3331,32 @@ def test_ethernet_connection_static_ipv4_address_static_route_with_metric_clear( arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[1] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'modify' - assert add_args[0][3] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "modify" + assert add_args[0][3] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['ipv4.routes', '']: + for param in ["ipv4.routes", ""]: assert param in add_args_text out, err = capfd.readouterr() results = json.loads(out) - assert 'ipv4.routes' in results['diff']['before'] - assert 'ipv4.routes' in results['diff']['after'] + assert "ipv4.routes" in results["diff"]["before"] + assert "ipv4.routes" in results["diff"]["after"] - assert results.get('changed') is True - assert not results.get('failed') + assert results.get("changed") is True + assert not results.get("failed") -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE, indirect=['patch_ansible_module']) -def test_ethernet_connection_static_ipv6_address_static_route_create(mocked_ethernet_connection_with_ipv6_static_address_static_route_create, capfd): +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE, indirect=["patch_ansible_module"] +) +def test_ethernet_connection_static_ipv6_address_static_route_create( + mocked_ethernet_connection_with_ipv6_static_address_static_route_create, capfd +): """ Test : Create ethernet connection with static IPv6 address and static route """ @@ -3290,32 +3367,42 @@ def test_ethernet_connection_static_ipv6_address_static_route_create(mocked_ethe arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[0] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'ethernet' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert 
add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "ethernet" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'ethernet_non_existant', - 'con-name', 'non_existent_nw_device', - 'ipv6.addresses', '2001:beef:cafe:10::1/64', - 'ipv6.method', 'manual', - 'ipv6.routes', 'fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2']: + for param in [ + "connection.interface-name", + "ethernet_non_existant", + "con-name", + "non_existent_nw_device", + "ipv6.addresses", + "2001:beef:cafe:10::1/64", + "ipv6.method", + "manual", + "ipv6.routes", + "fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2", + ]: assert param in add_args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_MOD_IPV6_INT_WITH_ROUTE_AND_METRIC, indirect=['patch_ansible_module']) +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_ETHERNET_MOD_IPV6_INT_WITH_ROUTE_AND_METRIC, indirect=["patch_ansible_module"] +) def test_ethernet_connection_static_ipv6_address_static_route_metric_modify( - mocked_ethernet_connection_with_ipv6_static_address_static_route_metric_modify, capfd): + mocked_ethernet_connection_with_ipv6_static_address_static_route_metric_modify, capfd +): """ Test : Modify ethernet connection with static IPv6 address and static route """ @@ -3325,27 +3412,29 @@ def test_ethernet_connection_static_ipv6_address_static_route_metric_modify( arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[1] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'modify' - assert add_args[0][3] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "modify" + assert add_args[0][3] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['ipv6.routes', 'fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2', - 'ipv6.route-metric', '10']: + for param in ["ipv6.routes", "fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2", "ipv6.route-metric", "10"]: assert param in add_args_text out, err = capfd.readouterr() results = json.loads(out) - assert results.get('changed') is True - assert not results.get('failed') + assert results.get("changed") is True + assert not results.get("failed") -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES, indirect=['patch_ansible_module']) +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES, indirect=["patch_ansible_module"] +) def test_ethernet_connection_static_ipv6_address_multiple_static_routes_with_metric_create( - mocked_ethernet_connection_with_ipv6_static_address_multiple_static_routes_with_metric_create, capfd): + mocked_ethernet_connection_with_ipv6_static_address_multiple_static_routes_with_metric_create, capfd +): """ Test : Create ethernet connection with static IPv6 address and multiple static routes """ @@ -3356,32 +3445,38 @@ def test_ethernet_connection_static_ipv6_address_multiple_static_routes_with_met arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[0] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert 
add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'ethernet' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "ethernet" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'ethernet_non_existant', - 'con-name', 'non_existent_nw_device', - 'ipv6.addresses', '2001:beef:cafe:10::1/64', - 'ipv6.method', 'manual', - 'ipv6.routes', 'fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2,fd2e:8890:abcd:25::/64 2001:beef:cafe:10::5']: + for param in [ + "connection.interface-name", + "ethernet_non_existant", + "con-name", + "non_existent_nw_device", + "ipv6.addresses", + "2001:beef:cafe:10::1/64", + "ipv6.method", + "manual", + "ipv6.routes", + "fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2,fd2e:8890:abcd:25::/64 2001:beef:cafe:10::5", + ]: assert param in add_args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_ADD_SRIOV_VFS, indirect=['patch_ansible_module']) -def test_ethernet_connection_sriov_vfs_create( - mocked_ethernet_connection_with_sriov_vfs_create, capfd): +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_ETHERNET_ADD_SRIOV_VFS, indirect=["patch_ansible_module"]) +def test_ethernet_connection_sriov_vfs_create(mocked_ethernet_connection_with_sriov_vfs_create, capfd): """ Test : Create ethernet connection with SR-IOV VFs """ @@ -3392,31 +3487,40 @@ def test_ethernet_connection_sriov_vfs_create( arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[0] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'ethernet' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "ethernet" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'ethernet_non_existant', - 'con-name', 'non_existent_nw_device', - 'sriov.total-vfs', '16', - 'sriov.vfs', '0 spoof-check=true vlans=100']: + for param in [ + "connection.interface-name", + "ethernet_non_existant", + "con-name", + "non_existent_nw_device", + "sriov.total-vfs", + "16", + "sriov.vfs", + "0 spoof-check=true vlans=100", + ]: assert param in add_args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE_AND_METRIC, indirect=['patch_ansible_module']) +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE_AND_METRIC, indirect=["patch_ansible_module"] +) def test_ethernet_connection_static_ipv6_address_static_route_with_metric_create( - 
mocked_ethernet_connection_with_ipv6_static_address_static_route_with_metric_create, capfd): + mocked_ethernet_connection_with_ipv6_static_address_static_route_with_metric_create, capfd +): """ Test : Create ethernet connection with static IPv6 address and static route with metric """ @@ -3427,32 +3531,46 @@ def test_ethernet_connection_static_ipv6_address_static_route_with_metric_create arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[0] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'ethernet' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "ethernet" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'ethernet_non_existant', - 'con-name', 'non_existent_nw_device', - 'ipv6.addresses', '2001:beef:cafe:10::1/64', - 'ipv6.method', 'manual', - 'ipv6.routes', 'fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2', - 'ipv6.route-metric', '5']: + for param in [ + "connection.interface-name", + "ethernet_non_existant", + "con-name", + "non_existent_nw_device", + "ipv6.addresses", + "2001:beef:cafe:10::1/64", + "ipv6.method", + "manual", + "ipv6.routes", + "fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2", + "ipv6.route-metric", + "5", + ]: assert param in add_args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES_AND_METRIC, indirect=['patch_ansible_module']) -def test_ethernet_connection_static_ipv6_address_static_route_create_2(mocked_ethernet_connection_with_ipv6_static_address_static_route_create, capfd): +@pytest.mark.parametrize( + "patch_ansible_module", + TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES_AND_METRIC, + indirect=["patch_ansible_module"], +) +def test_ethernet_connection_static_ipv6_address_static_route_create_2( + mocked_ethernet_connection_with_ipv6_static_address_static_route_create, capfd +): """ Test : Create ethernet connection with static IPv6 address and multiple static routes with metric """ @@ -3463,31 +3581,39 @@ def test_ethernet_connection_static_ipv6_address_static_route_create_2(mocked_et arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[0] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'ethernet' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "ethernet" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'ethernet_non_existant', - 'con-name', 'non_existent_nw_device', - 'ipv6.addresses', '2001:beef:cafe:10::1/64', - 'ipv6.method', 'manual', - 
'ipv6.routes', 'fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2,fd2e:8890:abcd:25::/64 2001:beef:cafe:10::5', - 'ipv6.route-metric', '5']: + for param in [ + "connection.interface-name", + "ethernet_non_existant", + "con-name", + "non_existent_nw_device", + "ipv6.addresses", + "2001:beef:cafe:10::1/64", + "ipv6.method", + "manual", + "ipv6.routes", + "fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2,fd2e:8890:abcd:25::/64 2001:beef:cafe:10::5", + "ipv6.route-metric", + "5", + ]: assert param in add_args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_WIRELESS, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_WIRELESS, indirect=["patch_ansible_module"]) def test_create_wireless(mocked_wireless_create, capfd): """ Test : Create wireless connection @@ -3500,41 +3626,47 @@ def test_create_wireless(mocked_wireless_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list get_available_options_args, get_available_options_kw = arg_list[0] - assert get_available_options_args[0][0] == '/usr/bin/nmcli' - assert get_available_options_args[0][1] == 'con' - assert get_available_options_args[0][2] == 'edit' - assert get_available_options_args[0][3] == 'type' - assert get_available_options_args[0][4] == 'wifi' - - get_available_options_data = get_available_options_kw['data'].split() - for param in ['print', '802-11-wireless', - 'quit', 'yes']: + assert get_available_options_args[0][0] == "/usr/bin/nmcli" + assert get_available_options_args[0][1] == "con" + assert get_available_options_args[0][2] == "edit" + assert get_available_options_args[0][3] == "type" + assert get_available_options_args[0][4] == "wifi" + + get_available_options_data = get_available_options_kw["data"].split() + for param in ["print", "802-11-wireless", "quit", "yes"]: assert param in get_available_options_data add_args, add_kw = arg_list[1] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'wifi' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "wifi" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'wireless_non_existant', - 'ipv4.addresses', '10.10.10.10/24', - '802-11-wireless.ssid', 'Brittany', - '802-11-wireless.mode', 'ap', - '802-11-wireless.hidden', 'yes']: + for param in [ + "connection.interface-name", + "wireless_non_existant", + "ipv4.addresses", + "10.10.10.10/24", + "802-11-wireless.ssid", + "Brittany", + "802-11-wireless.mode", + "ap", + "802-11-wireless.hidden", + "yes", + ]: assert param in add_args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_SECURE_WIRELESS, indirect=["patch_ansible_module"]) def 
test_create_secure_wireless(mocked_secure_wireless_create, capfd): """ Test : Create secure wireless connection @@ -3547,52 +3679,55 @@ def test_create_secure_wireless(mocked_secure_wireless_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list get_available_options_args, get_available_options_kw = arg_list[0] - assert get_available_options_args[0][0] == '/usr/bin/nmcli' - assert get_available_options_args[0][1] == 'con' - assert get_available_options_args[0][2] == 'edit' - assert get_available_options_args[0][3] == 'type' - assert get_available_options_args[0][4] == 'wifi' - - get_available_options_data = get_available_options_kw['data'].split() - for param in ['print', '802-11-wireless-security', - 'quit', 'yes']: + assert get_available_options_args[0][0] == "/usr/bin/nmcli" + assert get_available_options_args[0][1] == "con" + assert get_available_options_args[0][2] == "edit" + assert get_available_options_args[0][3] == "type" + assert get_available_options_args[0][4] == "wifi" + + get_available_options_data = get_available_options_kw["data"].split() + for param in ["print", "802-11-wireless-security", "quit", "yes"]: assert param in get_available_options_data add_args, add_kw = arg_list[1] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'wifi' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "wifi" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'wireless_non_existant', - 'ipv4.addresses', '10.10.10.10/24', - '802-11-wireless.ssid', 'Brittany', - '802-11-wireless-security.key-mgmt', 'wpa-psk']: + for param in [ + "connection.interface-name", + "wireless_non_existant", + "ipv4.addresses", + "10.10.10.10/24", + "802-11-wireless.ssid", + "Brittany", + "802-11-wireless-security.key-mgmt", + "wpa-psk", + ]: assert param in add_args_text edit_args, edit_kw = arg_list[2] - assert edit_args[0][0] == '/usr/bin/nmcli' - assert edit_args[0][1] == 'con' - assert edit_args[0][2] == 'edit' - assert edit_args[0][3] == 'non_existent_nw_device' - - edit_kw_data = edit_kw['data'].split() - for param in ['802-11-wireless-security.psk', 'VERY_SECURE_PASSWORD', - 'save', - 'quit']: + assert edit_args[0][0] == "/usr/bin/nmcli" + assert edit_args[0][1] == "con" + assert edit_args[0][2] == "edit" + assert edit_args[0][3] == "non_existent_nw_device" + + edit_kw_data = edit_kw["data"].split() + for param in ["802-11-wireless-security.psk", "VERY_SECURE_PASSWORD", "save", "quit"]: assert param in edit_kw_data out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_SECURE_WIRELESS, indirect=["patch_ansible_module"]) def test_create_secure_wireless_failure(mocked_secure_wireless_create_failure, capfd): """ Test : Create secure wireless connection w/failure @@ -3605,40 +3740,45 @@ def 
test_create_secure_wireless_failure(mocked_secure_wireless_create_failure, c arg_list = nmcli.Nmcli.execute_command.call_args_list get_available_options_args, get_available_options_kw = arg_list[0] - assert get_available_options_args[0][0] == '/usr/bin/nmcli' - assert get_available_options_args[0][1] == 'con' - assert get_available_options_args[0][2] == 'edit' - assert get_available_options_args[0][3] == 'type' - assert get_available_options_args[0][4] == 'wifi' - - get_available_options_data = get_available_options_kw['data'].split() - for param in ['print', '802-11-wireless-security', - 'quit', 'yes']: + assert get_available_options_args[0][0] == "/usr/bin/nmcli" + assert get_available_options_args[0][1] == "con" + assert get_available_options_args[0][2] == "edit" + assert get_available_options_args[0][3] == "type" + assert get_available_options_args[0][4] == "wifi" + + get_available_options_data = get_available_options_kw["data"].split() + for param in ["print", "802-11-wireless-security", "quit", "yes"]: assert param in get_available_options_data add_args, add_kw = arg_list[1] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'wifi' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "wifi" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'wireless_non_existant', - 'ipv4.addresses', '10.10.10.10/24', - '802-11-wireless.ssid', 'Brittany', - '802-11-wireless-security.key-mgmt', 'wpa-psk']: + for param in [ + "connection.interface-name", + "wireless_non_existant", + "ipv4.addresses", + "10.10.10.10/24", + "802-11-wireless.ssid", + "Brittany", + "802-11-wireless-security.key-mgmt", + "wpa-psk", + ]: assert param in add_args_text out, err = capfd.readouterr() results = json.loads(out) - assert results.get('failed') - assert 'changed' not in results + assert results.get("failed") + assert "changed" not in results -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_SECURE_WIRELESS, indirect=["patch_ansible_module"]) def test_modify_secure_wireless(mocked_secure_wireless_modify, capfd): """ Test : Modify secure wireless connection @@ -3650,56 +3790,59 @@ def test_modify_secure_wireless(mocked_secure_wireless_modify, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list get_available_options_args, get_available_options_kw = arg_list[0] - assert get_available_options_args[0][0] == '/usr/bin/nmcli' - assert get_available_options_args[0][1] == 'con' - assert get_available_options_args[0][2] == 'edit' - assert get_available_options_args[0][3] == 'type' - assert get_available_options_args[0][4] == 'wifi' - - get_available_options_data = get_available_options_kw['data'].split() - for param in ['print', '802-11-wireless-security', - 'quit', 'yes']: + assert get_available_options_args[0][0] == "/usr/bin/nmcli" + assert get_available_options_args[0][1] == "con" + assert get_available_options_args[0][2] == "edit" + assert get_available_options_args[0][3] == "type" + assert get_available_options_args[0][4] == "wifi" + + 
get_available_options_data = get_available_options_kw["data"].split() + for param in ["print", "802-11-wireless-security", "quit", "yes"]: assert param in get_available_options_data show_args, show_kw = arg_list[1] - assert show_args[0][0] == '/usr/bin/nmcli' - assert show_args[0][1] == '--show-secrets' - assert show_args[0][2] == 'con' - assert show_args[0][3] == 'show' - assert show_args[0][4] == 'non_existent_nw_device' + assert show_args[0][0] == "/usr/bin/nmcli" + assert show_args[0][1] == "--show-secrets" + assert show_args[0][2] == "con" + assert show_args[0][3] == "show" + assert show_args[0][4] == "non_existent_nw_device" add_args, add_kw = arg_list[2] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'modify' - assert add_args[0][3] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "modify" + assert add_args[0][3] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'wireless_non_existant', - 'ipv4.addresses', '10.10.10.10/24', - '802-11-wireless.ssid', 'Brittany', - '802-11-wireless-security.key-mgmt', 'wpa-psk']: + for param in [ + "connection.interface-name", + "wireless_non_existant", + "ipv4.addresses", + "10.10.10.10/24", + "802-11-wireless.ssid", + "Brittany", + "802-11-wireless-security.key-mgmt", + "wpa-psk", + ]: assert param in add_args_text edit_args, edit_kw = arg_list[3] - assert edit_args[0][0] == '/usr/bin/nmcli' - assert edit_args[0][1] == 'con' - assert edit_args[0][2] == 'edit' - assert edit_args[0][3] == 'non_existent_nw_device' - - edit_kw_data = edit_kw['data'].split() - for param in ['802-11-wireless-security.psk', 'VERY_SECURE_PASSWORD', - 'save', - 'quit']: + assert edit_args[0][0] == "/usr/bin/nmcli" + assert edit_args[0][1] == "con" + assert edit_args[0][2] == "edit" + assert edit_args[0][3] == "non_existent_nw_device" + + edit_kw_data = edit_kw["data"].split() + for param in ["802-11-wireless-security.psk", "VERY_SECURE_PASSWORD", "save", "quit"]: assert param in edit_kw_data out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_SECURE_WIRELESS, indirect=["patch_ansible_module"]) def test_modify_secure_wireless_failure(mocked_secure_wireless_modify_failure, capfd): """ Test : Modify secure wireless connection w/failure @@ -3712,44 +3855,49 @@ def test_modify_secure_wireless_failure(mocked_secure_wireless_modify_failure, c arg_list = nmcli.Nmcli.execute_command.call_args_list get_available_options_args, get_available_options_kw = arg_list[0] - assert get_available_options_args[0][0] == '/usr/bin/nmcli' - assert get_available_options_args[0][1] == 'con' - assert get_available_options_args[0][2] == 'edit' - assert get_available_options_args[0][3] == 'type' - assert get_available_options_args[0][4] == 'wifi' - - get_available_options_data = get_available_options_kw['data'].split() - for param in ['print', '802-11-wireless-security', - 'quit', 'yes']: + assert get_available_options_args[0][0] == "/usr/bin/nmcli" + assert get_available_options_args[0][1] == "con" + assert get_available_options_args[0][2] == "edit" + assert get_available_options_args[0][3] == "type" 
+ assert get_available_options_args[0][4] == "wifi" + + get_available_options_data = get_available_options_kw["data"].split() + for param in ["print", "802-11-wireless-security", "quit", "yes"]: assert param in get_available_options_data show_args, show_kw = arg_list[1] - assert show_args[0][0] == '/usr/bin/nmcli' - assert show_args[0][1] == '--show-secrets' - assert show_args[0][2] == 'con' - assert show_args[0][3] == 'show' - assert show_args[0][4] == 'non_existent_nw_device' + assert show_args[0][0] == "/usr/bin/nmcli" + assert show_args[0][1] == "--show-secrets" + assert show_args[0][2] == "con" + assert show_args[0][3] == "show" + assert show_args[0][4] == "non_existent_nw_device" add_args, add_kw = arg_list[2] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'modify' - assert add_args[0][3] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "modify" + assert add_args[0][3] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'wireless_non_existant', - 'ipv4.addresses', '10.10.10.10/24', - '802-11-wireless.ssid', 'Brittany', - '802-11-wireless-security.key-mgmt', 'wpa-psk']: + for param in [ + "connection.interface-name", + "wireless_non_existant", + "ipv4.addresses", + "10.10.10.10/24", + "802-11-wireless.ssid", + "Brittany", + "802-11-wireless-security.key-mgmt", + "wpa-psk", + ]: assert param in add_args_text out, err = capfd.readouterr() results = json.loads(out) - assert results.get('failed') - assert 'changed' not in results + assert results.get("failed") + assert "changed" not in results -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_DUMMY_STATIC, indirect=["patch_ansible_module"]) def test_create_dummy_static(mocked_generic_connection_create, capfd): """ Test : Create dummy connection with static IP configuration @@ -3762,35 +3910,42 @@ def test_create_dummy_static(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[0] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'dummy' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "dummy" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'dummy_non_existant', - 'ipv4.addresses', '10.10.10.10/24', - 'ipv4.gateway', '10.10.10.1', - 'ipv4.dns', '1.1.1.1,8.8.8.8', - 'ipv6.addresses', '2001:db8::1/128']: + for param in [ + "connection.interface-name", + "dummy_non_existant", + "ipv4.addresses", + "10.10.10.10/24", + "ipv4.gateway", + "10.10.10.1", + "ipv4.dns", + "1.1.1.1,8.8.8.8", + "ipv6.addresses", + "2001:db8::1/128", + ]: assert param in add_args_text up_args, up_kw = arg_list[1] - assert up_args[0][0] == '/usr/bin/nmcli' - assert up_args[0][1] == 'con' - assert up_args[0][2] == 'up' - assert up_args[0][3] == 'non_existent_nw_device' + assert up_args[0][0] == "/usr/bin/nmcli" + assert 
up_args[0][1] == "con" + assert up_args[0][2] == "up" + assert up_args[0][3] == "non_existent_nw_device" out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_DUMMY_STATIC, indirect=["patch_ansible_module"]) def test_dummy_connection_static_unchanged(mocked_dummy_connection_static_unchanged, capfd): """ Test : Dummy connection with static IP configuration unchanged @@ -3800,11 +3955,11 @@ def test_dummy_connection_static_unchanged(mocked_dummy_connection_static_unchan out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_DUMMY_STATIC, indirect=["patch_ansible_module"]) def test_dummy_connection_static_without_mtu_unchanged(mocked_dummy_connection_static_without_mtu_unchanged, capfd): """ Test : Dummy connection with static IP configuration and no mtu set unchanged @@ -3814,11 +3969,11 @@ def test_dummy_connection_static_without_mtu_unchanged(mocked_dummy_connection_s out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_DUMMY_STATIC, indirect=["patch_ansible_module"]) def test_dummy_connection_static_with_custom_mtu_modify(mocked_dummy_connection_static_with_custom_mtu_modify, capfd): """ Test : Dummy connection with static IP configuration and no mtu set modify @@ -3831,22 +3986,22 @@ def test_dummy_connection_static_with_custom_mtu_modify(mocked_dummy_connection_ arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[1] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "modify" + assert args[0][3] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['802-3-ethernet.mtu', '0']: + for param in ["802-3-ethernet.mtu", "0"]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GSM, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GSM, indirect=["patch_ansible_module"]) def test_create_gsm(mocked_generic_connection_create, capfd): """ Test if gsm created @@ -3858,29 +4013,36 @@ def test_create_gsm(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'gsm' - assert args[0][5] == 'con-name' - assert 
args[0][6] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "gsm" + assert args[0][5] == "con-name" + assert args[0][6] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['connection.interface-name', 'gsm_non_existant', - 'gsm.apn', 'internet.telekom', - 'gsm.username', 't-mobile', - 'gsm.password', 'tm', - 'gsm.pin', '1234']: + for param in [ + "connection.interface-name", + "gsm_non_existant", + "gsm.apn", + "internet.telekom", + "gsm.username", + "t-mobile", + "gsm.password", + "tm", + "gsm.pin", + "1234", + ]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GSM, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GSM, indirect=["patch_ansible_module"]) def test_gsm_mod(mocked_generic_connection_modify, capfd): """ Test if gsm modified @@ -3892,23 +4054,22 @@ def test_gsm_mod(mocked_generic_connection_modify, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "modify" + assert args[0][3] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['gsm.username', 't-mobile', - 'gsm.password', 'tm']: + for param in ["gsm.username", "t-mobile", "gsm.password", "tm"]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GSM, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GSM, indirect=["patch_ansible_module"]) def test_gsm_connection_unchanged(mocked_gsm_connection_unchanged, capfd): """ Test if gsm connection unchanged @@ -3918,11 +4079,13 @@ def test_gsm_connection_unchanged(mocked_gsm_connection_unchanged, capfd): out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES, indirect=['patch_ansible_module']) +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES, indirect=["patch_ansible_module"] +) def test_create_ethernet_with_multiple_ip4_addresses_static(mocked_generic_connection_create, capfd): """ Test : Create ethernet connection with static IP configuration @@ -3935,34 +4098,42 @@ def test_create_ethernet_with_multiple_ip4_addresses_static(mocked_generic_conne arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[0] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'ethernet' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'non_existent_nw_device' + assert add_args[0][0] == 
"/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "ethernet" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'ethernet_non_existant', - 'ipv4.addresses', '10.10.10.10/32,10.10.20.10/32', - 'ipv4.gateway', '10.10.10.1', - 'ipv4.dns', '1.1.1.1,8.8.8.8']: + for param in [ + "connection.interface-name", + "ethernet_non_existant", + "ipv4.addresses", + "10.10.10.10/32,10.10.20.10/32", + "ipv4.gateway", + "10.10.10.1", + "ipv4.dns", + "1.1.1.1,8.8.8.8", + ]: assert param in add_args_text up_args, up_kw = arg_list[1] - assert up_args[0][0] == '/usr/bin/nmcli' - assert up_args[0][1] == 'con' - assert up_args[0][2] == 'up' - assert up_args[0][3] == 'non_existent_nw_device' + assert up_args[0][0] == "/usr/bin/nmcli" + assert up_args[0][1] == "con" + assert up_args[0][2] == "up" + assert up_args[0][3] == "non_existent_nw_device" out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC_MULTIPLE_IP6_ADDRESSES, indirect=['patch_ansible_module']) +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_ETHERNET_STATIC_MULTIPLE_IP6_ADDRESSES, indirect=["patch_ansible_module"] +) def test_create_ethernet_with_multiple_ip6_addresses_static(mocked_generic_connection_create, capfd): """ Test : Create ethernet connection with multiple IPv6 addresses configuration @@ -3975,35 +4146,45 @@ def test_create_ethernet_with_multiple_ip6_addresses_static(mocked_generic_conne arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[0] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'ethernet' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "ethernet" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'ethernet_non_existant', - 'ipv6.addresses', '2001:db8::cafe/128,2002:db8::cafe/128', - 'ipv6.gateway', '2001:db8::cafa', - 'ipv6.dns', '2001:4860:4860::8888,2001:4860:4860::8844']: + for param in [ + "connection.interface-name", + "ethernet_non_existant", + "ipv6.addresses", + "2001:db8::cafe/128,2002:db8::cafe/128", + "ipv6.gateway", + "2001:db8::cafa", + "ipv6.dns", + "2001:4860:4860::8888,2001:4860:4860::8844", + ]: assert param in add_args_text up_args, up_kw = arg_list[1] - assert up_args[0][0] == '/usr/bin/nmcli' - assert up_args[0][1] == 'con' - assert up_args[0][2] == 'up' - assert up_args[0][3] == 'non_existent_nw_device' + assert up_args[0][0] == "/usr/bin/nmcli" + assert up_args[0][1] == "con" + assert up_args[0][2] == "up" + assert up_args[0][3] == "non_existent_nw_device" out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', 
TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES, indirect=['patch_ansible_module']) -def test_ethernet_connection_static_with_multiple_ip4_addresses_unchanged(mocked_ethernet_connection_static_multiple_ip4_addresses_unchanged, capfd): +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES, indirect=["patch_ansible_module"] +) +def test_ethernet_connection_static_with_multiple_ip4_addresses_unchanged( + mocked_ethernet_connection_static_multiple_ip4_addresses_unchanged, capfd +): """ Test : Ethernet connection with static IP configuration unchanged """ @@ -4012,12 +4193,16 @@ def test_ethernet_connection_static_with_multiple_ip4_addresses_unchanged(mocked out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC_MULTIPLE_IP6_ADDRESSES, indirect=['patch_ansible_module']) -def test_ethernet_connection_static_with_multiple_ip6_addresses_unchanged(mocked_ethernet_connection_static_multiple_ip6_addresses_unchanged, capfd): +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_ETHERNET_STATIC_MULTIPLE_IP6_ADDRESSES, indirect=["patch_ansible_module"] +) +def test_ethernet_connection_static_with_multiple_ip6_addresses_unchanged( + mocked_ethernet_connection_static_multiple_ip6_addresses_unchanged, capfd +): """ Test : Ethernet connection with multiple IPv6 addresses configuration unchanged """ @@ -4026,11 +4211,13 @@ def test_ethernet_connection_static_with_multiple_ip6_addresses_unchanged(mocked out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES, indirect=['patch_ansible_module']) +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES, indirect=["patch_ansible_module"] +) def test_add_second_ip4_address_to_ethernet_connection(mocked_ethernet_connection_static_modify, capfd): """ Test : Modify ethernet connection from DHCP to static @@ -4042,21 +4229,23 @@ def test_add_second_ip4_address_to_ethernet_connection(mocked_ethernet_connectio arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[1] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "modify" + assert args[0][3] == "non_existent_nw_device" - for param in ['ipv4.addresses', '10.10.10.10/32,10.10.20.10/32']: + for param in ["ipv4.addresses", "10.10.10.10/32,10.10.20.10/32"]: assert param in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC_IP6_PRIVACY_AND_ADDR_GEN_MODE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_ETHERNET_STATIC_IP6_PRIVACY_AND_ADDR_GEN_MODE, indirect=["patch_ansible_module"] +) def test_create_ethernet_addr_gen_mode_and_ip6_privacy_static(mocked_generic_connection_create, capfd): """ Test : Create ethernet connection 
with static IP configuration @@ -4069,37 +4258,49 @@ def test_create_ethernet_addr_gen_mode_and_ip6_privacy_static(mocked_generic_con arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[0] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'ethernet' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "ethernet" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'ethernet_non_existant', - 'ipv6.addresses', '2001:db8::cafe/128', - 'ipv6.gateway', '2001:db8::cafa', - 'ipv6.dns', '2001:4860:4860::8888', - 'ipv6.ip6-privacy', 'prefer-public-addr', - 'ipv6.addr-gen-mode', 'eui64']: + for param in [ + "connection.interface-name", + "ethernet_non_existant", + "ipv6.addresses", + "2001:db8::cafe/128", + "ipv6.gateway", + "2001:db8::cafa", + "ipv6.dns", + "2001:4860:4860::8888", + "ipv6.ip6-privacy", + "prefer-public-addr", + "ipv6.addr-gen-mode", + "eui64", + ]: assert param in add_args_text up_args, up_kw = arg_list[1] - assert up_args[0][0] == '/usr/bin/nmcli' - assert up_args[0][1] == 'con' - assert up_args[0][2] == 'up' - assert up_args[0][3] == 'non_existent_nw_device' + assert up_args[0][0] == "/usr/bin/nmcli" + assert up_args[0][1] == "con" + assert up_args[0][2] == "up" + assert up_args[0][3] == "non_existent_nw_device" out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC_IP6_PRIVACY_AND_ADDR_GEN_MODE, indirect=['patch_ansible_module']) -def test_ethernet_connection_static_with_multiple_ip4_addresses_unchanged_2(mocked_ethernet_connection_static_ip6_privacy_and_addr_gen_mode_unchange, capfd): +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_ETHERNET_STATIC_IP6_PRIVACY_AND_ADDR_GEN_MODE, indirect=["patch_ansible_module"] +) +def test_ethernet_connection_static_with_multiple_ip4_addresses_unchanged_2( + mocked_ethernet_connection_static_ip6_privacy_and_addr_gen_mode_unchange, capfd +): """ Test : Ethernet connection with static IP configuration unchanged """ @@ -4108,11 +4309,11 @@ def test_ethernet_connection_static_with_multiple_ip4_addresses_unchanged_2(mock out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_WIREGUARD, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_WIREGUARD, indirect=["patch_ansible_module"]) def test_create_wireguard(mocked_generic_connection_create, capfd): """ Test : Create wireguard connection with static IP configuration @@ -4125,31 +4326,40 @@ def test_create_wireguard(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[0] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 
'type' - assert add_args[0][4] == 'wireguard' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "wireguard" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'wg_non_existant', - 'ipv4.method', 'manual', - 'ipv4.addresses', '10.10.10.10/24', - 'ipv6.method', 'manual', - 'ipv6.addresses', '2001:db8::1/128', - 'wireguard.listen-port', '51820', - 'wireguard.private-key', '']: + for param in [ + "connection.interface-name", + "wg_non_existant", + "ipv4.method", + "manual", + "ipv4.addresses", + "10.10.10.10/24", + "ipv6.method", + "manual", + "ipv6.addresses", + "2001:db8::1/128", + "wireguard.listen-port", + "51820", + "wireguard.private-key", + "", + ]: assert param in add_args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_WIREGUARD, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_WIREGUARD, indirect=["patch_ansible_module"]) def test_wireguard_connection_unchanged(mocked_wireguard_connection_unchanged, capfd): """ Test : Wireguard connection with static IP configuration unchanged @@ -4159,11 +4369,11 @@ def test_wireguard_connection_unchanged(mocked_wireguard_connection_unchanged, c out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_WIREGUARD, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_WIREGUARD, indirect=["patch_ansible_module"]) def test_wireguard_mod(mocked_generic_connection_modify, capfd): """ Test : Modify wireguard connection @@ -4175,22 +4385,22 @@ def test_wireguard_mod(mocked_generic_connection_modify, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "modify" + assert args[0][3] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['wireguard.listen-port', '51820']: + for param in ["wireguard.listen-port", "51820"]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VPN_L2TP, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_VPN_L2TP, indirect=["patch_ansible_module"]) def test_vpn_l2tp_connection_unchanged(mocked_vpn_l2tp_connection_unchanged, capfd): """ Test : L2TP VPN connection unchanged @@ -4200,11 +4410,11 @@ def test_vpn_l2tp_connection_unchanged(mocked_vpn_l2tp_connection_unchanged, cap out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not 
results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VPN_PPTP, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_VPN_PPTP, indirect=["patch_ansible_module"]) def test_vpn_pptp_connection_unchanged(mocked_vpn_pptp_connection_unchanged, capfd): """ Test : PPTP VPN connection unchanged @@ -4214,11 +4424,11 @@ def test_vpn_pptp_connection_unchanged(mocked_vpn_pptp_connection_unchanged, cap out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VPN_L2TP, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_VPN_L2TP, indirect=["patch_ansible_module"]) def test_create_vpn_l2tp(mocked_generic_connection_create, capfd): """ Test : Create L2TP VPN connection @@ -4231,34 +4441,45 @@ def test_create_vpn_l2tp(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[0] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'vpn' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'vpn_l2tp' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "vpn" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "vpn_l2tp" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.autoconnect', 'no', - 'connection.permissions', 'brittany', - 'vpn.data', 'vpn.service-type', 'org.freedesktop.NetworkManager.l2tp', - ]: + for param in [ + "connection.autoconnect", + "no", + "connection.permissions", + "brittany", + "vpn.data", + "vpn.service-type", + "org.freedesktop.NetworkManager.l2tp", + ]: assert param in add_args_text - vpn_data_index = add_args_text.index('vpn.data') + 1 + vpn_data_index = add_args_text.index("vpn.data") + 1 args_vpn_data = add_args_text[vpn_data_index] - for vpn_data in ['gateway=vpn.example.com', 'password-flags=2', 'user=brittany', 'ipsec-enabled=true', 'ipsec-psk=QnJpdHRhbnkxMjM=']: + for vpn_data in [ + "gateway=vpn.example.com", + "password-flags=2", + "user=brittany", + "ipsec-enabled=true", + "ipsec-psk=QnJpdHRhbnkxMjM=", + ]: assert vpn_data in args_vpn_data out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VPN_PPTP, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_VPN_PPTP, indirect=["patch_ansible_module"]) def test_create_vpn_pptp(mocked_generic_connection_create, capfd): """ Test : Create PPTP VPN connection @@ -4271,34 +4492,39 @@ def test_create_vpn_pptp(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[0] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'vpn' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'vpn_pptp' + assert add_args[0][0] 
== "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "vpn" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "vpn_pptp" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.autoconnect', 'no', - 'connection.permissions', 'brittany', - 'vpn.data', 'vpn.service-type', 'org.freedesktop.NetworkManager.pptp', - ]: + for param in [ + "connection.autoconnect", + "no", + "connection.permissions", + "brittany", + "vpn.data", + "vpn.service-type", + "org.freedesktop.NetworkManager.pptp", + ]: assert param in add_args_text - vpn_data_index = add_args_text.index('vpn.data') + 1 + vpn_data_index = add_args_text.index("vpn.data") + 1 args_vpn_data = add_args_text[vpn_data_index] - for vpn_data in ['password-flags=2', 'gateway=vpn.example.com', 'user=brittany']: + for vpn_data in ["password-flags=2", "gateway=vpn.example.com", "user=brittany"]: assert vpn_data in args_vpn_data out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_INFINIBAND_STATIC, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_INFINIBAND_STATIC, indirect=["patch_ansible_module"]) def test_infiniband_connection_static_unchanged(mocked_infiniband_connection_static_unchanged, capfd): """ Test : Infiniband connection unchanged @@ -4308,13 +4534,16 @@ def test_infiniband_connection_static_unchanged(mocked_infiniband_connection_sta out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_INFINIBAND_STATIC_MODIFY_TRANSPORT_MODE, indirect=['patch_ansible_module']) +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_INFINIBAND_STATIC_MODIFY_TRANSPORT_MODE, indirect=["patch_ansible_module"] +) def test_infiniband_connection_static_transport_mode_connected( - mocked_infiniband_connection_static_transport_mode_connected_modify, capfd): + mocked_infiniband_connection_static_transport_mode_connected_modify, capfd +): """ Test : Modify Infiniband connection to use connected as transport_mode """ @@ -4324,24 +4553,24 @@ def test_infiniband_connection_static_transport_mode_connected( arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[1] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'modify' - assert add_args[0][3] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "modify" + assert add_args[0][3] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['infiniband.transport-mode', 'connected']: + for param in ["infiniband.transport-mode", "connected"]: assert param in add_args_text out, err = capfd.readouterr() results = json.loads(out) - assert results.get('changed') is True - assert not results.get('failed') + assert results.get("changed") is True + assert not results.get("failed") -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DIFF_CHECK, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_GENERIC_DIFF_CHECK, 
indirect=["patch_ansible_module"]) def test_bond_connection_unchanged_2(mocked_generic_connection_diff_check, capfd): """ Test : Bond connection unchanged @@ -4349,157 +4578,179 @@ def test_bond_connection_unchanged_2(mocked_generic_connection_diff_check, capfd module = AnsibleModule( argument_spec=dict( - ignore_unsupported_suboptions=dict(type='bool', default=False), - autoconnect=dict(type='bool', default=True), - autoconnect_priority=dict(type='int'), - autoconnect_retries=dict(type='int'), - state=dict(type='str', required=True, choices=['absent', 'present']), - conn_name=dict(type='str', required=True), - conn_reload=dict(type='bool', required=False, default=False), - master=dict(type='str'), - slave_type=dict(type=str, choices=['bond', 'bridge', 'team']), - ifname=dict(type='str'), - type=dict(type='str', - choices=[ - 'bond', - 'bond-slave', - 'bridge', - 'bridge-slave', - 'dummy', - 'ethernet', - 'generic', - 'gre', - 'infiniband', - 'ipip', - 'sit', - 'team', - 'team-slave', - 'vlan', - 'vxlan', - 'wifi', - 'gsm', - 'macvlan', - 'wireguard', - 'vpn', - ]), - ip4=dict(type='list', elements='str'), - gw4=dict(type='str'), - gw4_ignore_auto=dict(type='bool', default=False), - routes4=dict(type='list', elements='str'), - routes4_extended=dict(type='list', - elements='dict', - options=dict( - ip=dict(type='str', required=True), - next_hop=dict(type='str'), - metric=dict(type='int'), - table=dict(type='int'), - tos=dict(type='int'), - cwnd=dict(type='int'), - mtu=dict(type='int'), - onlink=dict(type='bool') - )), - route_metric4=dict(type='int'), - routing_rules4=dict(type='list', elements='str'), - never_default4=dict(type='bool', default=False), - dns4=dict(type='list', elements='str'), - dns4_search=dict(type='list', elements='str'), - dns4_options=dict(type='list', elements='str'), - dns4_ignore_auto=dict(type='bool', default=False), - method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']), - may_fail4=dict(type='bool', default=True), - dhcp_client_id=dict(type='str'), - ip6=dict(type='list', elements='str'), - gw6=dict(type='str'), - gw6_ignore_auto=dict(type='bool', default=False), - dns6=dict(type='list', elements='str'), - dns6_search=dict(type='list', elements='str'), - dns6_options=dict(type='list', elements='str'), - dns6_ignore_auto=dict(type='bool', default=False), - routes6=dict(type='list', elements='str'), - routes6_extended=dict(type='list', - elements='dict', - options=dict( - ip=dict(type='str', required=True), - next_hop=dict(type='str'), - metric=dict(type='int'), - table=dict(type='int'), - cwnd=dict(type='int'), - mtu=dict(type='int'), - onlink=dict(type='bool') - )), - route_metric6=dict(type='int'), - method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared', 'disabled']), - ip_privacy6=dict(type='str', choices=['disabled', 'prefer-public-addr', 'prefer-temp-addr', 'unknown']), - addr_gen_mode6=dict(type='str', choices=['default', 'default-or-eui64', 'eui64', 'stable-privacy']), + ignore_unsupported_suboptions=dict(type="bool", default=False), + autoconnect=dict(type="bool", default=True), + autoconnect_priority=dict(type="int"), + autoconnect_retries=dict(type="int"), + state=dict(type="str", required=True, choices=["absent", "present"]), + conn_name=dict(type="str", required=True), + conn_reload=dict(type="bool", required=False, default=False), + master=dict(type="str"), + slave_type=dict(type=str, choices=["bond", "bridge", "team"]), + ifname=dict(type="str"), + type=dict( + type="str", + 
choices=[ + "bond", + "bond-slave", + "bridge", + "bridge-slave", + "dummy", + "ethernet", + "generic", + "gre", + "infiniband", + "ipip", + "sit", + "team", + "team-slave", + "vlan", + "vxlan", + "wifi", + "gsm", + "macvlan", + "wireguard", + "vpn", + ], + ), + ip4=dict(type="list", elements="str"), + gw4=dict(type="str"), + gw4_ignore_auto=dict(type="bool", default=False), + routes4=dict(type="list", elements="str"), + routes4_extended=dict( + type="list", + elements="dict", + options=dict( + ip=dict(type="str", required=True), + next_hop=dict(type="str"), + metric=dict(type="int"), + table=dict(type="int"), + tos=dict(type="int"), + cwnd=dict(type="int"), + mtu=dict(type="int"), + onlink=dict(type="bool"), + ), + ), + route_metric4=dict(type="int"), + routing_rules4=dict(type="list", elements="str"), + never_default4=dict(type="bool", default=False), + dns4=dict(type="list", elements="str"), + dns4_search=dict(type="list", elements="str"), + dns4_options=dict(type="list", elements="str"), + dns4_ignore_auto=dict(type="bool", default=False), + method4=dict(type="str", choices=["auto", "link-local", "manual", "shared", "disabled"]), + may_fail4=dict(type="bool", default=True), + dhcp_client_id=dict(type="str"), + ip6=dict(type="list", elements="str"), + gw6=dict(type="str"), + gw6_ignore_auto=dict(type="bool", default=False), + dns6=dict(type="list", elements="str"), + dns6_search=dict(type="list", elements="str"), + dns6_options=dict(type="list", elements="str"), + dns6_ignore_auto=dict(type="bool", default=False), + routes6=dict(type="list", elements="str"), + routes6_extended=dict( + type="list", + elements="dict", + options=dict( + ip=dict(type="str", required=True), + next_hop=dict(type="str"), + metric=dict(type="int"), + table=dict(type="int"), + cwnd=dict(type="int"), + mtu=dict(type="int"), + onlink=dict(type="bool"), + ), + ), + route_metric6=dict(type="int"), + method6=dict(type="str", choices=["ignore", "auto", "dhcp", "link-local", "manual", "shared", "disabled"]), + ip_privacy6=dict(type="str", choices=["disabled", "prefer-public-addr", "prefer-temp-addr", "unknown"]), + addr_gen_mode6=dict(type="str", choices=["default", "default-or-eui64", "eui64", "stable-privacy"]), # Bond Specific vars - mode=dict(type='str', default='balance-rr', - choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']), - miimon=dict(type='int'), - downdelay=dict(type='int'), - updelay=dict(type='int'), - xmit_hash_policy=dict(type='str'), - fail_over_mac=dict(type='str', choices=['none', 'active', 'follow']), - arp_interval=dict(type='int'), - arp_ip_target=dict(type='str'), - primary=dict(type='str'), + mode=dict( + type="str", + default="balance-rr", + choices=[ + "802.3ad", + "active-backup", + "balance-alb", + "balance-rr", + "balance-tlb", + "balance-xor", + "broadcast", + ], + ), + miimon=dict(type="int"), + downdelay=dict(type="int"), + updelay=dict(type="int"), + xmit_hash_policy=dict(type="str"), + fail_over_mac=dict(type="str", choices=["none", "active", "follow"]), + arp_interval=dict(type="int"), + arp_ip_target=dict(type="str"), + primary=dict(type="str"), # general usage - mtu=dict(type='int'), - mac=dict(type='str'), - zone=dict(type='str'), + mtu=dict(type="int"), + mac=dict(type="str"), + zone=dict(type="str"), # bridge specific vars - stp=dict(type='bool', default=True), - priority=dict(type='int', default=128), - slavepriority=dict(type='int', default=32), - forwarddelay=dict(type='int', default=15), - hellotime=dict(type='int', 
default=2), - maxage=dict(type='int', default=20), - ageingtime=dict(type='int', default=300), - hairpin=dict(type='bool'), - path_cost=dict(type='int', default=100), + stp=dict(type="bool", default=True), + priority=dict(type="int", default=128), + slavepriority=dict(type="int", default=32), + forwarddelay=dict(type="int", default=15), + hellotime=dict(type="int", default=2), + maxage=dict(type="int", default=20), + ageingtime=dict(type="int", default=300), + hairpin=dict(type="bool"), + path_cost=dict(type="int", default=100), # team specific vars - runner=dict(type='str', default='roundrobin', - choices=['broadcast', 'roundrobin', 'activebackup', 'loadbalance', 'lacp']), + runner=dict( + type="str", + default="roundrobin", + choices=["broadcast", "roundrobin", "activebackup", "loadbalance", "lacp"], + ), # team active-backup runner specific options - runner_hwaddr_policy=dict(type='str', choices=['same_all', 'by_active', 'only_active']), + runner_hwaddr_policy=dict(type="str", choices=["same_all", "by_active", "only_active"]), # team lacp runner specific options - runner_fast_rate=dict(type='bool'), + runner_fast_rate=dict(type="bool"), # vlan specific vars - vlanid=dict(type='int'), - vlandev=dict(type='str'), - flags=dict(type='str'), - ingress=dict(type='str'), - egress=dict(type='str'), + vlanid=dict(type="int"), + vlandev=dict(type="str"), + flags=dict(type="str"), + ingress=dict(type="str"), + egress=dict(type="str"), # vxlan specific vars - vxlan_id=dict(type='int'), - vxlan_local=dict(type='str'), - vxlan_remote=dict(type='str'), + vxlan_id=dict(type="int"), + vxlan_local=dict(type="str"), + vxlan_remote=dict(type="str"), # ip-tunnel specific vars - ip_tunnel_dev=dict(type='str'), - ip_tunnel_local=dict(type='str'), - ip_tunnel_remote=dict(type='str'), + ip_tunnel_dev=dict(type="str"), + ip_tunnel_local=dict(type="str"), + ip_tunnel_remote=dict(type="str"), # ip-tunnel type gre specific vars - ip_tunnel_input_key=dict(type='str', no_log=True), - ip_tunnel_output_key=dict(type='str', no_log=True), + ip_tunnel_input_key=dict(type="str", no_log=True), + ip_tunnel_output_key=dict(type="str", no_log=True), # 802-11-wireless* specific vars - ssid=dict(type='str'), - wifi=dict(type='dict'), - wifi_sec=dict(type='dict', no_log=True), - gsm=dict(type='dict'), - macvlan=dict(type='dict'), - wireguard=dict(type='dict'), - vpn=dict(type='dict'), - sriov=dict(type='dict'), + ssid=dict(type="str"), + wifi=dict(type="dict"), + wifi_sec=dict(type="dict", no_log=True), + gsm=dict(type="dict"), + macvlan=dict(type="dict"), + wireguard=dict(type="dict"), + vpn=dict(type="dict"), + sriov=dict(type="dict"), # infiniband specific vars - transport_mode=dict(type='str', choices=['datagram', 'connected']), - infiniband_mac=dict(type='str'), + transport_mode=dict(type="str", choices=["datagram", "connected"]), + infiniband_mac=dict(type="str"), ), - mutually_exclusive=[['never_default4', 'gw4'], - ['routes4_extended', 'routes4'], - ['routes6_extended', 'routes6']], + mutually_exclusive=[ + ["never_default4", "gw4"], + ["routes4_extended", "routes4"], + ["routes6_extended", "routes6"], + ], required_if=[("type", "wifi", [("ssid")])], supports_check_mode=True, ) - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C") nmcli_module = nmcli.Nmcli(module) @@ -4508,14 +4759,14 @@ def test_bond_connection_unchanged_2(mocked_generic_connection_diff_check, capfd assert changed 
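    # (Editorial aside, not part of the patch.) The loop below counts how many
    # parameters differ between diff["before"] and diff["after"]. A standalone
    # equivalent — hypothetical helper, not collection code — would be:
    #
    #     def count_changed_params(diff):
    #         before, after = diff["before"], diff["after"]
    #         return sum(1 for key, value in before.items() if after[key] != value)
    #
    #     diff = {"before": {"mode": "balance-rr", "miimon": 100},
    #             "after": {"mode": "802.3ad", "miimon": 100}}
    #     assert count_changed_params(diff) == 1  # only 'mode' differs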
num_of_diff_params = 0 - for parameter, value in diff.get('before').items(): - if value != diff['after'][parameter]: + for parameter, value in diff.get("before").items(): + if value != diff["after"][parameter]: num_of_diff_params += 1 assert num_of_diff_params == 1 -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_MACVLAN, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_MACVLAN, indirect=["patch_ansible_module"]) def test_create_macvlan(mocked_generic_connection_create, capfd): """ Test : Create macvlan connection with static IP configuration @@ -4528,31 +4779,40 @@ def test_create_macvlan(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[0] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'macvlan' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'non_existent_nw_device' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "macvlan" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "non_existent_nw_device" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'macvlan_non_existant', - 'ipv4.method', 'manual', - 'ipv4.addresses', '10.10.10.10/24', - 'ipv6.method', 'manual', - 'ipv6.addresses', '2001:db8::1/128', - 'macvlan.mode', '2', - 'macvlan.parent', 'non_existent_parent']: + for param in [ + "connection.interface-name", + "macvlan_non_existant", + "ipv4.method", + "manual", + "ipv4.addresses", + "10.10.10.10/24", + "ipv6.method", + "manual", + "ipv6.addresses", + "2001:db8::1/128", + "macvlan.mode", + "2", + "macvlan.parent", + "non_existent_parent", + ]: assert param in add_args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_MACVLAN, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_MACVLAN, indirect=["patch_ansible_module"]) def test_macvlan_connection_unchanged(mocked_macvlan_connection_unchanged, capfd): """ Test : Macvlan connection with static IP configuration unchanged @@ -4562,11 +4822,11 @@ def test_macvlan_connection_unchanged(mocked_macvlan_connection_unchanged, capfd out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_MACVLAN, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_MACVLAN, indirect=["patch_ansible_module"]) def test_macvlan_mod(mocked_generic_connection_modify, capfd): """ Test : Modify macvlan connection @@ -4578,30 +4838,30 @@ def test_macvlan_mod(mocked_generic_connection_modify, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "modify" + assert args[0][3] == 
"non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['macvlan.mode', '2']: + for param in ["macvlan.mode", "2"]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] TESTCASE_SLAVE_TYPE_BRIDGE_CONNECTION = [ { - 'type': 'ethernet', - 'conn_name': 'fake_conn', - 'ifname': 'fake_eth0', - 'state': 'present', - 'slave_type': 'bridge', - 'master': 'fake_br0', - '_ansible_check_mode': False, + "type": "ethernet", + "conn_name": "fake_conn", + "ifname": "fake_eth0", + "state": "present", + "slave_type": "bridge", + "master": "fake_br0", + "_ansible_check_mode": False, } ] @@ -4630,15 +4890,19 @@ def test_macvlan_mod(mocked_generic_connection_modify, capfd): @pytest.fixture def mocked_slave_type_bridge_create(mocker): - mocker_set(mocker, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_SLAVE_TYPE_BRIDGE_CONNECTION_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_SLAVE_TYPE_BRIDGE_CONNECTION_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SLAVE_TYPE_BRIDGE_CONNECTION, indirect=['patch_ansible_module']) +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_SLAVE_TYPE_BRIDGE_CONNECTION, indirect=["patch_ansible_module"] +) def test_create_slave_type_bridge(mocked_slave_type_bridge_create, capfd): """ Test : slave for bridge created @@ -4651,32 +4915,36 @@ def test_create_slave_type_bridge(mocked_slave_type_bridge_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'ethernet' - assert args[0][5] == 'con-name' - assert args[0][6] == 'fake_conn' - con_master_index = args[0].index('connection.master') - slave_type_index = args[0].index('connection.slave-type') - assert args[0][con_master_index + 1] == 'fake_br0' - assert args[0][slave_type_index + 1] == 'bridge' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "ethernet" + assert args[0][5] == "con-name" + assert args[0][6] == "fake_conn" + con_master_index = args[0].index("connection.master") + slave_type_index = args[0].index("connection.slave-type") + assert args[0][con_master_index + 1] == "fake_br0" + assert args[0][slave_type_index + 1] == "bridge" out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] @pytest.fixture def mocked_create_slave_type_bridge_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_SLAVE_TYPE_BRIDGE_CONNECTION_UNCHANGED_SHOW_OUTPUT, "")) + mocker_set( + mocker, + connection_exists=True, + execute_return=(0, TESTCASE_SLAVE_TYPE_BRIDGE_CONNECTION_UNCHANGED_SHOW_OUTPUT, ""), + ) -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SLAVE_TYPE_BRIDGE_CONNECTION, indirect=['patch_ansible_module']) +@pytest.mark.parametrize( + "patch_ansible_module", TESTCASE_SLAVE_TYPE_BRIDGE_CONNECTION, indirect=["patch_ansible_module"] +) def test_slave_type_bridge_unchanged(mocked_create_slave_type_bridge_unchanged, capfd): """ Test : Existent slave for 
bridge unchanged @@ -4686,19 +4954,19 @@ def test_slave_type_bridge_unchanged(mocked_create_slave_type_bridge_unchanged, out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] TESTCASE_SLAVE_TYPE_BOND_CONNECTION = [ { - 'type': 'ethernet', - 'conn_name': 'fake_conn', - 'ifname': 'fake_eth0', - 'state': 'present', - 'slave_type': 'bond', - 'master': 'fake_bond0', - '_ansible_check_mode': False, + "type": "ethernet", + "conn_name": "fake_conn", + "ifname": "fake_eth0", + "state": "present", + "slave_type": "bond", + "master": "fake_bond0", + "_ansible_check_mode": False, } ] @@ -4727,15 +4995,17 @@ def test_slave_type_bridge_unchanged(mocked_create_slave_type_bridge_unchanged, @pytest.fixture def mocked_slave_type_bond_create(mocker): - mocker_set(mocker, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_SLAVE_TYPE_BOND_CONNECTION_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_SLAVE_TYPE_BOND_CONNECTION_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SLAVE_TYPE_BOND_CONNECTION, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_SLAVE_TYPE_BOND_CONNECTION, indirect=["patch_ansible_module"]) def test_create_slave_type_bond(mocked_slave_type_bond_create, capfd): """ Test : slave for bond created @@ -4748,32 +5018,34 @@ def test_create_slave_type_bond(mocked_slave_type_bond_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'ethernet' - assert args[0][5] == 'con-name' - assert args[0][6] == 'fake_conn' - con_master_index = args[0].index('connection.master') - slave_type_index = args[0].index('connection.slave-type') - assert args[0][con_master_index + 1] == 'fake_bond0' - assert args[0][slave_type_index + 1] == 'bond' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "ethernet" + assert args[0][5] == "con-name" + assert args[0][6] == "fake_conn" + con_master_index = args[0].index("connection.master") + slave_type_index = args[0].index("connection.slave-type") + assert args[0][con_master_index + 1] == "fake_bond0" + assert args[0][slave_type_index + 1] == "bond" out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] @pytest.fixture def mocked_create_slave_type_bond_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_SLAVE_TYPE_BOND_CONNECTION_UNCHANGED_SHOW_OUTPUT, "")) + mocker_set( + mocker, + connection_exists=True, + execute_return=(0, TESTCASE_SLAVE_TYPE_BOND_CONNECTION_UNCHANGED_SHOW_OUTPUT, ""), + ) -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SLAVE_TYPE_BOND_CONNECTION, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_SLAVE_TYPE_BOND_CONNECTION, indirect=["patch_ansible_module"]) def test_slave_type_bond_unchanged(mocked_create_slave_type_bond_unchanged, capfd): """ Test : Existent slave for bridge unchanged @@ -4783,19 +5055,19 @@ def 
test_slave_type_bond_unchanged(mocked_create_slave_type_bond_unchanged, capf out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] TESTCASE_SLAVE_TYPE_TEAM_CONNECTION = [ { - 'type': 'ethernet', - 'conn_name': 'fake_conn', - 'ifname': 'fake_eth0', - 'state': 'present', - 'slave_type': 'team', - 'master': 'fake_team0', - '_ansible_check_mode': False, + "type": "ethernet", + "conn_name": "fake_conn", + "ifname": "fake_eth0", + "state": "present", + "slave_type": "team", + "master": "fake_team0", + "_ansible_check_mode": False, } ] @@ -4824,15 +5096,17 @@ def test_slave_type_bond_unchanged(mocked_create_slave_type_bond_unchanged, capf @pytest.fixture def mocked_slave_type_team_create(mocker): - mocker_set(mocker, - execute_return=None, - execute_side_effect=( - (0, TESTCASE_SLAVE_TYPE_TEAM_CONNECTION_SHOW_OUTPUT, ""), - (0, "", ""), - )) + mocker_set( + mocker, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_SLAVE_TYPE_TEAM_CONNECTION_SHOW_OUTPUT, ""), + (0, "", ""), + ), + ) -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SLAVE_TYPE_TEAM_CONNECTION, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_SLAVE_TYPE_TEAM_CONNECTION, indirect=["patch_ansible_module"]) def test_create_slave_type_team(mocked_slave_type_team_create, capfd): """ Test : slave for bond created @@ -4845,32 +5119,34 @@ def test_create_slave_type_team(mocked_slave_type_team_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'ethernet' - assert args[0][5] == 'con-name' - assert args[0][6] == 'fake_conn' - con_master_index = args[0].index('connection.master') - slave_type_index = args[0].index('connection.slave-type') - assert args[0][con_master_index + 1] == 'fake_team0' - assert args[0][slave_type_index + 1] == 'team' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "ethernet" + assert args[0][5] == "con-name" + assert args[0][6] == "fake_conn" + con_master_index = args[0].index("connection.master") + slave_type_index = args[0].index("connection.slave-type") + assert args[0][con_master_index + 1] == "fake_team0" + assert args[0][slave_type_index + 1] == "team" out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] @pytest.fixture def mocked_create_slave_type_team_unchanged(mocker): - mocker_set(mocker, - connection_exists=True, - execute_return=(0, TESTCASE_SLAVE_TYPE_TEAM_CONNECTION_UNCHANGED_SHOW_OUTPUT, "")) + mocker_set( + mocker, + connection_exists=True, + execute_return=(0, TESTCASE_SLAVE_TYPE_TEAM_CONNECTION_UNCHANGED_SHOW_OUTPUT, ""), + ) -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SLAVE_TYPE_TEAM_CONNECTION, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_SLAVE_TYPE_TEAM_CONNECTION, indirect=["patch_ansible_module"]) def test_slave_type_team_unchanged(mocked_create_slave_type_team_unchanged, capfd): """ Test : Existent slave for bridge unchanged @@ -4880,11 +5156,11 @@ def 
test_slave_type_team_unchanged(mocked_create_slave_type_team_unchanged, capf out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_LOOPBACK, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_LOOPBACK, indirect=["patch_ansible_module"]) def test_create_loopback(mocked_generic_connection_create, capfd): """ Test : Create loopback connection @@ -4897,26 +5173,25 @@ def test_create_loopback(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list add_args, add_kw = arg_list[0] - assert add_args[0][0] == '/usr/bin/nmcli' - assert add_args[0][1] == 'con' - assert add_args[0][2] == 'add' - assert add_args[0][3] == 'type' - assert add_args[0][4] == 'loopback' - assert add_args[0][5] == 'con-name' - assert add_args[0][6] == 'lo' + assert add_args[0][0] == "/usr/bin/nmcli" + assert add_args[0][1] == "con" + assert add_args[0][2] == "add" + assert add_args[0][3] == "type" + assert add_args[0][4] == "loopback" + assert add_args[0][5] == "con-name" + assert add_args[0][6] == "lo" add_args_text = list(map(to_text, add_args[0])) - for param in ['connection.interface-name', 'lo', - 'ipv4.addresses', '127.0.0.1/8']: + for param in ["connection.interface-name", "lo", "ipv4.addresses", "127.0.0.1/8"]: assert param in add_args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_LOOPBACK, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_LOOPBACK, indirect=["patch_ansible_module"]) def test_unchanged_loopback(mocked_loopback_connection_unchanged, capfd): """ Test : loopback connection unchanged @@ -4926,11 +5201,11 @@ def test_unchanged_loopback(mocked_loopback_connection_unchanged, capfd): out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_LOOPBACK_MODIFY, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_LOOPBACK_MODIFY, indirect=["patch_ansible_module"]) def test_add_second_ip4_address_to_loopback_connection(mocked_loopback_connection_modify, capfd): """ Test : Modify loopback connection @@ -4942,21 +5217,21 @@ def test_add_second_ip4_address_to_loopback_connection(mocked_loopback_connectio arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[1] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'lo' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "modify" + assert args[0][3] == "lo" - for param in ['ipv4.addresses', '127.0.0.1/8,127.0.0.2/8']: + for param in ["ipv4.addresses", "127.0.0.1/8,127.0.0.2/8"]: assert param in args[0] out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VRF, indirect=['patch_ansible_module']) 
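(An aside, not part of the patch.) The mocked_slave_type_*_create fixtures above all drive mocker_set with an execute_side_effect sequence: the first (rc, stdout, stderr) tuple answers the initial `con show` lookup, the second the `con add` call. mocker_set itself is a collection test helper, so the same side_effect mechanism is sketched here with a bare unittest.mock.Mock (all values illustrative):

from unittest import mock

execute_command = mock.Mock(
    side_effect=[
        (0, "connection.master: fake_team0", ""),  # first call: `con show` output
        (0, "", ""),  # second call: `con add` succeeds
    ]
)

rc, out, err = execute_command("/usr/bin/nmcli con show fake_conn")
assert "fake_team0" in out
rc, out, err = execute_command("/usr/bin/nmcli con add type ethernet")
assert rc == 0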
+@pytest.mark.parametrize("patch_ansible_module", TESTCASE_VRF, indirect=["patch_ansible_module"]) def test_create_vrf_con(mocked_generic_connection_create, capfd): """ Test if VRF created @@ -4969,25 +5244,25 @@ def test_create_vrf_con(mocked_generic_connection_create, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'add' - assert args[0][3] == 'type' - assert args[0][4] == 'vrf' - assert args[0][5] == 'con-name' - assert args[0][6] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "add" + assert args[0][3] == "type" + assert args[0][4] == "vrf" + assert args[0][5] == "con-name" + assert args[0][6] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', '10.10.10.1', 'table', '10']: + for param in ["ipv4.addresses", "10.10.10.10/24", "ipv4.gateway", "10.10.10.1", "table", "10"]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VRF, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_VRF, indirect=["patch_ansible_module"]) def test_mod_vrf_conn(mocked_generic_connection_modify, capfd): """ Test if VRF modified @@ -5000,22 +5275,22 @@ def test_mod_vrf_conn(mocked_generic_connection_modify, capfd): arg_list = nmcli.Nmcli.execute_command.call_args_list args, kwargs = arg_list[0] - assert args[0][0] == '/usr/bin/nmcli' - assert args[0][1] == 'con' - assert args[0][2] == 'modify' - assert args[0][3] == 'non_existent_nw_device' + assert args[0][0] == "/usr/bin/nmcli" + assert args[0][1] == "con" + assert args[0][2] == "modify" + assert args[0][3] == "non_existent_nw_device" args_text = list(map(to_text, args[0])) - for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', '10.10.10.1', 'table', '10']: + for param in ["ipv4.addresses", "10.10.10.10/24", "ipv4.gateway", "10.10.10.1", "table", "10"]: assert param in args_text out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert results['changed'] + assert not results.get("failed") + assert results["changed"] -@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VRF, indirect=['patch_ansible_module']) +@pytest.mark.parametrize("patch_ansible_module", TESTCASE_VRF, indirect=["patch_ansible_module"]) def test_vrf_connection_unchanged(mocked_vrf_connection_unchanged, capfd): """ Test : VRF connection unchanged @@ -5025,5 +5300,5 @@ def test_vrf_connection_unchanged(mocked_vrf_connection_unchanged, capfd): out, err = capfd.readouterr() results = json.loads(out) - assert not results.get('failed') - assert not results['changed'] + assert not results.get("failed") + assert not results["changed"] diff --git a/tests/unit/plugins/modules/test_nomad_token.py b/tests/unit/plugins/modules/test_nomad_token.py index 158bdad2940..8986a02923b 100644 --- a/tests/unit/plugins/modules/test_nomad_token.py +++ b/tests/unit/plugins/modules/test_nomad_token.py @@ -1,4 +1,3 @@ - # Copyright (c) 2021, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -9,7 
+8,12 @@ import nomad from ansible_collections.community.general.plugins.modules import nomad_token -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) def mock_acl_get_tokens(empty_list=False): @@ -18,27 +22,31 @@ def mock_acl_get_tokens(empty_list=False): if not empty_list: response_object = [ { - 'AccessorID': 'bac2b162-2a63-efa2-4e68-55d79dcb7721', - 'Name': 'Bootstrap Token', 'Type': 'management', - 'Policies': None, 'Roles': None, 'Global': True, - 'Hash': 'BUJ3BerTfrqFVm1P+vZr1gz9ubOkd+JAvYjNAJyaU9Y=', - 'CreateTime': '2023-11-12T18:44:39.740562185Z', - 'ExpirationTime': None, - 'CreateIndex': 9, - 'ModifyIndex': 9 + "AccessorID": "bac2b162-2a63-efa2-4e68-55d79dcb7721", + "Name": "Bootstrap Token", + "Type": "management", + "Policies": None, + "Roles": None, + "Global": True, + "Hash": "BUJ3BerTfrqFVm1P+vZr1gz9ubOkd+JAvYjNAJyaU9Y=", + "CreateTime": "2023-11-12T18:44:39.740562185Z", + "ExpirationTime": None, + "CreateIndex": 9, + "ModifyIndex": 9, }, { - 'AccessorID': '0d01c55f-8d63-f832-04ff-1866d4eb594e', - 'Name': 'devs', - 'Type': 'client', 'Policies': ['readonly'], - 'Roles': None, - 'Global': True, - 'Hash': 'eSn8H8RVqh8As8WQNnC2vlBRqXy6DECogc5umzX0P30=', - 'CreateTime': '2023-11-12T18:48:34.248857001Z', - 'ExpirationTime': None, - 'CreateIndex': 14, - 'ModifyIndex': 836 - } + "AccessorID": "0d01c55f-8d63-f832-04ff-1866d4eb594e", + "Name": "devs", + "Type": "client", + "Policies": ["readonly"], + "Roles": None, + "Global": True, + "Hash": "eSn8H8RVqh8As8WQNnC2vlBRqXy6DECogc5umzX0P30=", + "CreateTime": "2023-11-12T18:48:34.248857001Z", + "ExpirationTime": None, + "CreateIndex": 14, + "ModifyIndex": 836, + }, ] return response_object @@ -46,38 +54,38 @@ def mock_acl_get_tokens(empty_list=False): def mock_acl_generate_bootstrap(): response_object = { - 'AccessorID': '0d01c55f-8d63-f832-04ff-1866d4eb594e', - 'Name': 'Bootstrap Token', - 'Type': 'management', - 'Policies': None, - 'Roles': None, - 'Global': True, - 'Hash': 'BUJ3BerTfrqFVm1P+vZr1gz9ubOkd+JAvYjNAJyaU9Y=', - 'CreateTime': '2023-11-12T18:48:34.248857001Z', - 'ExpirationTime': None, - 'ExpirationTTL': '', - 'CreateIndex': 14, - 'ModifyIndex': 836, - 'SecretID': 'd539a03d-337a-8504-6d12-000f861337bc' + "AccessorID": "0d01c55f-8d63-f832-04ff-1866d4eb594e", + "Name": "Bootstrap Token", + "Type": "management", + "Policies": None, + "Roles": None, + "Global": True, + "Hash": "BUJ3BerTfrqFVm1P+vZr1gz9ubOkd+JAvYjNAJyaU9Y=", + "CreateTime": "2023-11-12T18:48:34.248857001Z", + "ExpirationTime": None, + "ExpirationTTL": "", + "CreateIndex": 14, + "ModifyIndex": 836, + "SecretID": "d539a03d-337a-8504-6d12-000f861337bc", } return response_object def mock_acl_create_update_token(): response_object = { - 'AccessorID': '0d01c55f-8d63-f832-04ff-1866d4eb594e', - 'Name': 'dev', - 'Type': 'client', - 'Policies': ['readonly'], - 'Roles': None, - 'Global': True, - 'Hash': 'eSn8H8RVqh8As8WQNnC2vlBRqXy6DECogc5umzX0P30=', - 'CreateTime': '2023-11-12T18:48:34.248857001Z', - 'ExpirationTime': None, - 'ExpirationTTL': '', - 'CreateIndex': 14, - 'ModifyIndex': 836, - 'SecretID': 'd539a03d-337a-8504-6d12-000f861337bc' + "AccessorID": "0d01c55f-8d63-f832-04ff-1866d4eb594e", + "Name": "dev", + "Type": "client", + "Policies": ["readonly"], + "Roles": None, + "Global": 
True, + "Hash": "eSn8H8RVqh8As8WQNnC2vlBRqXy6DECogc5umzX0P30=", + "CreateTime": "2023-11-12T18:48:34.248857001Z", + "ExpirationTime": None, + "ExpirationTTL": "", + "CreateIndex": 14, + "ModifyIndex": 836, + "SecretID": "d539a03d-337a-8504-6d12-000f861337bc", } return response_object @@ -88,7 +96,6 @@ def mock_acl_delete_token(): class TestNomadTokenModule(ModuleTestCase): - def setUp(self): super().setUp() self.module = nomad_token @@ -102,17 +109,13 @@ def test_should_fail_without_parameters(self): self.module.main() def test_should_create_token_type_client(self): - module_args = { - 'host': 'localhost', - 'name': 'Dev token', - 'token_type': 'client', - 'state': 'present' - } + module_args = {"host": "localhost", "name": "Dev token", "token_type": "client", "state": "present"} with set_module_args(module_args): - with patch.object(nomad.api.acl.Acl, 'get_tokens', return_value=mock_acl_get_tokens()) as mock_get_tokens: - with patch.object(nomad.api.acl.Acl, 'create_token', return_value=mock_acl_create_update_token()) as \ - mock_create_update_token: + with patch.object(nomad.api.acl.Acl, "get_tokens", return_value=mock_acl_get_tokens()) as mock_get_tokens: + with patch.object( + nomad.api.acl.Acl, "create_token", return_value=mock_acl_create_update_token() + ) as mock_create_update_token: with self.assertRaises(AnsibleExitJson): self.module.main() @@ -120,16 +123,11 @@ def test_should_create_token_type_client(self): self.assertIs(mock_create_update_token.call_count, 1) def test_should_create_token_type_bootstrap(self): - module_args = { - 'host': 'localhost', - 'token_type': 'bootstrap', - 'state': 'present' - } + module_args = {"host": "localhost", "token_type": "bootstrap", "state": "present"} with set_module_args(module_args): - - with patch.object(nomad.api.acl.Acl, 'get_tokens') as mock_get_tokens: - with patch.object(nomad.api.Acl, 'generate_bootstrap') as mock_generate_bootstrap: + with patch.object(nomad.api.acl.Acl, "get_tokens") as mock_get_tokens: + with patch.object(nomad.api.Acl, "generate_bootstrap") as mock_generate_bootstrap: mock_get_tokens.return_value = mock_acl_get_tokens(empty_list=True) mock_generate_bootstrap.return_value = mock_acl_generate_bootstrap() @@ -140,14 +138,11 @@ def test_should_create_token_type_bootstrap(self): self.assertIs(mock_generate_bootstrap.call_count, 1) def test_should_fail_delete_without_name_parameter(self): - module_args = { - 'host': 'localhost', - 'state': 'absent' - } + module_args = {"host": "localhost", "state": "absent"} with set_module_args(module_args): - with patch.object(nomad.api.acl.Acl, 'get_tokens') as mock_get_tokens: - with patch.object(nomad.api.acl.Acl, 'delete_token') as mock_delete_token: + with patch.object(nomad.api.acl.Acl, "get_tokens") as mock_get_tokens: + with patch.object(nomad.api.acl.Acl, "delete_token") as mock_delete_token: mock_get_tokens.return_value = mock_acl_get_tokens() mock_delete_token.return_value = mock_acl_delete_token() @@ -155,40 +150,25 @@ def test_should_fail_delete_without_name_parameter(self): self.module.main() def test_should_fail_delete_bootstrap_token(self): - module_args = { - 'host': 'localhost', - 'token_type': 'boostrap', - 'state': 'absent' - } + module_args = {"host": "localhost", "token_type": "boostrap", "state": "absent"} with set_module_args(module_args): - with self.assertRaises(AnsibleFailJson): self.module.main() def test_should_fail_delete_boostrap_token_by_name(self): - module_args = { - 'host': 'localhost', - 'name': 'Bootstrap Token', - 'state': 'absent' - } + 
module_args = {"host": "localhost", "name": "Bootstrap Token", "state": "absent"} with set_module_args(module_args): - with self.assertRaises(AnsibleFailJson): self.module.main() def test_should_delete_client_token(self): - module_args = { - 'host': 'localhost', - 'name': 'devs', - 'state': 'absent' - } + module_args = {"host": "localhost", "name": "devs", "state": "absent"} with set_module_args(module_args): - - with patch.object(nomad.api.acl.Acl, 'get_tokens') as mock_get_tokens: - with patch.object(nomad.api.acl.Acl, 'delete_token') as mock_delete_token: + with patch.object(nomad.api.acl.Acl, "get_tokens") as mock_get_tokens: + with patch.object(nomad.api.acl.Acl, "delete_token") as mock_delete_token: mock_get_tokens.return_value = mock_acl_get_tokens() mock_delete_token.return_value = mock_acl_delete_token() @@ -198,17 +178,11 @@ def test_should_delete_client_token(self): self.assertIs(mock_delete_token.call_count, 1) def test_should_update_client_token(self): - module_args = { - 'host': 'localhost', - 'name': 'devs', - 'token_type': 'client', - 'state': 'present' - } + module_args = {"host": "localhost", "name": "devs", "token_type": "client", "state": "present"} with set_module_args(module_args): - - with patch.object(nomad.api.acl.Acl, 'get_tokens') as mock_get_tokens: - with patch.object(nomad.api.acl.Acl, 'update_token') as mock_create_update_token: + with patch.object(nomad.api.acl.Acl, "get_tokens") as mock_get_tokens: + with patch.object(nomad.api.acl.Acl, "update_token") as mock_create_update_token: mock_get_tokens.return_value = mock_acl_get_tokens() mock_create_update_token.return_value = mock_acl_create_update_token() diff --git a/tests/unit/plugins/modules/test_npm.py b/tests/unit/plugins/modules/test_npm.py index fb16abca6d6..0716c5a14b2 100644 --- a/tests/unit/plugins/modules/test_npm.py +++ b/tests/unit/plugins/modules/test_npm.py @@ -8,7 +8,11 @@ from unittest.mock import call, patch from ansible_collections.community.general.plugins.modules import npm -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + ModuleTestCase, + set_module_args, +) class NPMModuleTestCase(ModuleTestCase): @@ -17,11 +21,11 @@ class NPMModuleTestCase(ModuleTestCase): def setUp(self): super().setUp() ansible_module_path = "ansible_collections.community.general.plugins.modules.npm.AnsibleModule" - self.mock_run_command = patch(f'{ansible_module_path}.run_command') + self.mock_run_command = patch(f"{ansible_module_path}.run_command") self.module_main_command = self.mock_run_command.start() - self.mock_get_bin_path = patch(f'{ansible_module_path}.get_bin_path') + self.mock_get_bin_path = patch(f"{ansible_module_path}.get_bin_path") self.get_bin_path = self.mock_get_bin_path.start() - self.get_bin_path.return_value = '/testbin/npm' + self.get_bin_path.return_value = "/testbin/npm" def tearDown(self): self.mock_run_command.stop() @@ -34,229 +38,307 @@ def module_main(self, exit_exc): return exc.exception.args[0] def test_present(self): - with set_module_args({ - 'name': 'coffee-script', - 'global': 'true', - 'state': 'present' - }): + with set_module_args({"name": "coffee-script", "global": "true", "state": "present"}): self.module_main_command.side_effect = [ - (0, '{}', ''), - (0, '{}', ''), + (0, "{}", ""), + (0, "{}", ""), ] result = self.module_main(AnsibleExitJson) - 
self.assertTrue(result['changed']) - self.module_main_command.assert_has_calls([ - call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - call(['/testbin/npm', 'install', '--global', 'coffee-script'], check_rc=True, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - ]) + self.assertTrue(result["changed"]) + self.module_main_command.assert_has_calls( + [ + call( + ["/testbin/npm", "list", "--json", "--long", "--global"], + check_rc=False, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + call( + ["/testbin/npm", "install", "--global", "coffee-script"], + check_rc=True, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + ] + ) def test_present_missing(self): - with set_module_args({ - 'name': 'coffee-script', - 'global': 'true', - 'state': 'present', - }): + with set_module_args( + { + "name": "coffee-script", + "global": "true", + "state": "present", + } + ): self.module_main_command.side_effect = [ - (0, '{"dependencies": {"coffee-script": {"missing" : true}}}', ''), - (0, '{}', ''), + (0, '{"dependencies": {"coffee-script": {"missing" : true}}}', ""), + (0, "{}", ""), ] result = self.module_main(AnsibleExitJson) - self.assertTrue(result['changed']) - self.module_main_command.assert_has_calls([ - call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - call(['/testbin/npm', 'install', '--global', 'coffee-script'], check_rc=True, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - ]) + self.assertTrue(result["changed"]) + self.module_main_command.assert_has_calls( + [ + call( + ["/testbin/npm", "list", "--json", "--long", "--global"], + check_rc=False, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + call( + ["/testbin/npm", "install", "--global", "coffee-script"], + check_rc=True, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + ] + ) def test_present_version(self): - with set_module_args({ - 'name': 'coffee-script', - 'global': 'true', - 'state': 'present', - 'version': '2.5.1' - }): + with set_module_args({"name": "coffee-script", "global": "true", "state": "present", "version": "2.5.1"}): self.module_main_command.side_effect = [ - (0, '{}', ''), - (0, '{}', ''), + (0, "{}", ""), + (0, "{}", ""), ] result = self.module_main(AnsibleExitJson) - self.assertTrue(result['changed']) - self.module_main_command.assert_has_calls([ - call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - call(['/testbin/npm', 'install', '--global', 'coffee-script@2.5.1'], check_rc=True, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - ]) + self.assertTrue(result["changed"]) + self.module_main_command.assert_has_calls( + [ + call( + ["/testbin/npm", "list", "--json", "--long", "--global"], + check_rc=False, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + call( + ["/testbin/npm", "install", "--global", "coffee-script@2.5.1"], + check_rc=True, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + ] + ) def test_present_version_update(self): - with set_module_args({ - 'name': 'coffee-script', - 'global': 'true', - 'state': 'present', - 'version': '2.5.1' - }): + with set_module_args({"name": "coffee-script", "global": "true", "state": "present", "version": "2.5.1"}): self.module_main_command.side_effect = [ - (0, 
'{"dependencies": {"coffee-script": {"version" : "2.5.0"}}}', ''), - (0, '{}', ''), + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.0"}}}', ""), + (0, "{}", ""), ] result = self.module_main(AnsibleExitJson) - self.assertTrue(result['changed']) - self.module_main_command.assert_has_calls([ - call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - call(['/testbin/npm', 'install', '--global', 'coffee-script@2.5.1'], check_rc=True, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - ]) + self.assertTrue(result["changed"]) + self.module_main_command.assert_has_calls( + [ + call( + ["/testbin/npm", "list", "--json", "--long", "--global"], + check_rc=False, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + call( + ["/testbin/npm", "install", "--global", "coffee-script@2.5.1"], + check_rc=True, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + ] + ) def test_present_version_exists(self): - with set_module_args({ - 'name': 'coffee-script', - 'global': 'true', - 'state': 'present', - 'version': '2.5.1' - }): + with set_module_args({"name": "coffee-script", "global": "true", "state": "present", "version": "2.5.1"}): self.module_main_command.side_effect = [ - (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''), - (0, '{}', ''), + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ""), + (0, "{}", ""), ] result = self.module_main(AnsibleExitJson) - self.assertFalse(result['changed']) - self.module_main_command.assert_has_calls([ - call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - ]) + self.assertFalse(result["changed"]) + self.module_main_command.assert_has_calls( + [ + call( + ["/testbin/npm", "list", "--json", "--long", "--global"], + check_rc=False, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + ] + ) def test_absent(self): - with set_module_args({ - 'name': 'coffee-script', - 'global': 'true', - 'state': 'absent' - }): + with set_module_args({"name": "coffee-script", "global": "true", "state": "absent"}): self.module_main_command.side_effect = [ - (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''), - (0, '{}', ''), + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ""), + (0, "{}", ""), ] result = self.module_main(AnsibleExitJson) - self.assertTrue(result['changed']) - self.module_main_command.assert_has_calls([ - call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - call(['/testbin/npm', 'uninstall', '--global', 'coffee-script'], check_rc=True, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - ]) + self.assertTrue(result["changed"]) + self.module_main_command.assert_has_calls( + [ + call( + ["/testbin/npm", "list", "--json", "--long", "--global"], + check_rc=False, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + call( + ["/testbin/npm", "uninstall", "--global", "coffee-script"], + check_rc=True, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + ] + ) def test_absent_version(self): - with set_module_args({ - 'name': 'coffee-script', - 'global': 'true', - 'state': 'absent', - 'version': '2.5.1' - }): + with set_module_args({"name": "coffee-script", "global": "true", "state": "absent", "version": "2.5.1"}): 
self.module_main_command.side_effect = [ - (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''), - (0, '{}', ''), + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ""), + (0, "{}", ""), ] result = self.module_main(AnsibleExitJson) - self.assertTrue(result['changed']) - self.module_main_command.assert_has_calls([ - call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - call(['/testbin/npm', 'uninstall', '--global', 'coffee-script'], check_rc=True, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - ]) + self.assertTrue(result["changed"]) + self.module_main_command.assert_has_calls( + [ + call( + ["/testbin/npm", "list", "--json", "--long", "--global"], + check_rc=False, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + call( + ["/testbin/npm", "uninstall", "--global", "coffee-script"], + check_rc=True, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + ] + ) def test_absent_version_different(self): - with set_module_args({ - 'name': 'coffee-script', - 'global': 'true', - 'state': 'absent', - 'version': '2.5.1' - }): + with set_module_args({"name": "coffee-script", "global": "true", "state": "absent", "version": "2.5.1"}): self.module_main_command.side_effect = [ - (0, '{"dependencies": {"coffee-script": {"version" : "2.5.0"}}}', ''), - (0, '{}', ''), + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.0"}}}', ""), + (0, "{}", ""), ] result = self.module_main(AnsibleExitJson) - self.assertTrue(result['changed']) - self.module_main_command.assert_has_calls([ - call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - call(['/testbin/npm', 'uninstall', '--global', 'coffee-script'], check_rc=True, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - ]) + self.assertTrue(result["changed"]) + self.module_main_command.assert_has_calls( + [ + call( + ["/testbin/npm", "list", "--json", "--long", "--global"], + check_rc=False, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + call( + ["/testbin/npm", "uninstall", "--global", "coffee-script"], + check_rc=True, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + ] + ) def test_present_package_json(self): - with set_module_args({ - 'global': 'true', - 'state': 'present' - }): + with set_module_args({"global": "true", "state": "present"}): self.module_main_command.side_effect = [ - (0, '{}', ''), - (0, '{}', ''), + (0, "{}", ""), + (0, "{}", ""), ] result = self.module_main(AnsibleExitJson) - self.assertTrue(result['changed']) - self.module_main_command.assert_has_calls([ - call(['/testbin/npm', 'install', '--global'], check_rc=True, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - ]) + self.assertTrue(result["changed"]) + self.module_main_command.assert_has_calls( + [ + call( + ["/testbin/npm", "install", "--global"], + check_rc=True, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + ] + ) def test_present_package_json_production(self): - with set_module_args({ - 'production': 'true', - 'global': 'true', - 'state': 'present', - }): + with set_module_args( + { + "production": "true", + "global": "true", + "state": "present", + } + ): self.module_main_command.side_effect = [ - (0, '{}', ''), - (0, '{}', ''), + (0, "{}", ""), + (0, "{}", ""), ] result = self.module_main(AnsibleExitJson) - 
self.assertTrue(result['changed']) - self.module_main_command.assert_has_calls([ - call(['/testbin/npm', 'install', '--global', '--production'], check_rc=True, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - ]) + self.assertTrue(result["changed"]) + self.module_main_command.assert_has_calls( + [ + call( + ["/testbin/npm", "install", "--global", "--production"], + check_rc=True, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + ] + ) def test_present_package_json_ci(self): - with set_module_args({ - 'ci': 'true', - 'global': 'true', - 'state': 'present' - }): + with set_module_args({"ci": "true", "global": "true", "state": "present"}): self.module_main_command.side_effect = [ - (0, '{}', ''), - (0, '{}', ''), + (0, "{}", ""), + (0, "{}", ""), ] result = self.module_main(AnsibleExitJson) - self.assertTrue(result['changed']) - self.module_main_command.assert_has_calls([ - call(['/testbin/npm', 'ci', '--global'], check_rc=True, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - ]) + self.assertTrue(result["changed"]) + self.module_main_command.assert_has_calls( + [ + call( + ["/testbin/npm", "ci", "--global"], + check_rc=True, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + ] + ) def test_present_package_json_ci_production(self): - with set_module_args({ - 'ci': 'true', - 'production': 'true', - 'global': 'true', - 'state': 'present' - }): + with set_module_args({"ci": "true", "production": "true", "global": "true", "state": "present"}): self.module_main_command.side_effect = [ - (0, '{}', ''), - (0, '{}', ''), + (0, "{}", ""), + (0, "{}", ""), ] result = self.module_main(AnsibleExitJson) - self.assertTrue(result['changed']) - self.module_main_command.assert_has_calls([ - call(['/testbin/npm', 'ci', '--global', '--production'], check_rc=True, cwd=None, environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'}), - ]) + self.assertTrue(result["changed"]) + self.module_main_command.assert_has_calls( + [ + call( + ["/testbin/npm", "ci", "--global", "--production"], + check_rc=True, + cwd=None, + environ_update={"LANGUAGE": "C", "LC_ALL": "C"}, + ), + ] + ) diff --git a/tests/unit/plugins/modules/test_ocapi_command.py b/tests/unit/plugins/modules/test_ocapi_command.py index 91f32b8bf21..b51fabb1af0 100644 --- a/tests/unit/plugins/modules/test_ocapi_command.py +++ b/tests/unit/plugins/modules/test_ocapi_command.py @@ -13,8 +13,15 @@ from ansible.module_utils import basic import ansible_collections.community.general.plugins.modules.ocapi_command as module -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, +) +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + set_module_args, + exit_json, + fail_json, +) MOCK_BASE_URI = "mockBaseUri/" @@ -27,60 +34,21 @@ MOCK_SUCCESSFUL_HTTP_RESPONSE_LED_INDICATOR_OFF_WITH_ETAG = { "ret": True, - "data": { - "IndicatorLED": { - "ID": 4, - "Name": "Off" - }, - "PowerState": { - "ID": 2, - "Name": "On" - } - }, - "headers": {"etag": "MockETag"} + "data": {"IndicatorLED": {"ID": 4, "Name": "Off"}, "PowerState": {"ID": 2, "Name": "On"}}, + "headers": {"etag": "MockETag"}, } -MOCK_SUCCESSFUL_HTTP_RESPONSE = { - "ret": True, - 
"data": {} -} +MOCK_SUCCESSFUL_HTTP_RESPONSE = {"ret": True, "data": {}} -MOCK_404_RESPONSE = { - "ret": False, - "status": 404 -} +MOCK_404_RESPONSE = {"ret": False, "status": 404} -MOCK_SUCCESSFUL_HTTP_RESPONSE_WITH_LOCATION_HEADER = { - "ret": True, - "data": {}, - "headers": {"location": "mock_location"} -} +MOCK_SUCCESSFUL_HTTP_RESPONSE_WITH_LOCATION_HEADER = {"ret": True, "data": {}, "headers": {"location": "mock_location"}} -MOCK_HTTP_RESPONSE_CONFLICT = { - "ret": False, - "msg": "Conflict", - "status": 409 -} +MOCK_HTTP_RESPONSE_CONFLICT = {"ret": False, "msg": "Conflict", "status": 409} -MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS = { - "ret": True, - "data": { - "PercentComplete": 99 - }, - "headers": { - "etag": "12345" - } -} +MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS = {"ret": True, "data": {"PercentComplete": 99}, "headers": {"etag": "12345"}} -MOCK_HTTP_RESPONSE_JOB_COMPLETE = { - "ret": True, - "data": { - "PercentComplete": 100 - }, - "headers": { - "etag": "12345" - } -} +MOCK_HTTP_RESPONSE_JOB_COMPLETE = {"ret": True, "data": {"PercentComplete": 100}, "headers": {"etag": "12345"}} def get_bin_path(self, arg, required=False): @@ -171,12 +139,10 @@ def mock_invalid_http_request(*args, **kwargs): class TestOcapiCommand(unittest.TestCase): - def setUp(self): - self.mock_module_helper = patch.multiple(basic.AnsibleModule, - exit_json=exit_json, - fail_json=fail_json, - get_bin_path=get_bin_path) + self.mock_module_helper = patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json, get_bin_path=get_bin_path + ) self.mock_module_helper.start() self.addCleanup(self.mock_module_helper.stop) self.tempdir = tempfile.mkdtemp() @@ -192,237 +158,291 @@ def test_module_fail_when_required_args_missing(self): def test_module_fail_when_unknown_category(self): with self.assertRaises(AnsibleFailJson) as ansible_fail_json: - with set_module_args({ - 'category': 'unknown', - 'command': 'IndicatorLedOn', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'baseuri': MOCK_BASE_URI - }): + with set_module_args( + { + "category": "unknown", + "command": "IndicatorLedOn", + "username": "USERID", + "password": "PASSW0RD=21", + "baseuri": MOCK_BASE_URI, + } + ): module.main() self.assertIn("Invalid Category 'unknown", get_exception_message(ansible_fail_json)) def test_set_power_mode(self): """Test that we can set chassis power mode""" - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_put_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_put_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Chassis', - 'command': 'PowerModeLow', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Chassis", + "command": "PowerModeLow", + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_set_chassis_led_indicator(self): """Test that we can set chassis LED indicator.""" - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_put_request): + with 
patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_put_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Chassis', - 'command': 'IndicatorLedOn', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Chassis", + "command": "IndicatorLedOn", + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_set_power_mode_already_set(self): """Test that if we set Power Mode to normal when it's already normal, we get changed=False.""" - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Chassis', - 'command': 'PowerModeNormal', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Chassis", + "command": "PowerModeNormal", + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json)) self.assertFalse(is_changed(ansible_exit_json)) def test_set_power_mode_check_mode(self): """Test check mode when setting chassis Power Mode.""" - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Chassis', - 'command': 'IndicatorLedOn', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21', - '_ansible_check_mode': True - }): + with set_module_args( + { + "category": "Chassis", + "command": "IndicatorLedOn", + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + "_ansible_check_mode": True, + } + ): module.main() self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_set_chassis_led_indicator_check_mode(self): """Test check mode when setting chassis LED indicator""" - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Chassis', - 'command': 'IndicatorLedOn', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 
'PASSWORD=21', - '_ansible_check_mode': True - }): + with set_module_args( + { + "category": "Chassis", + "command": "IndicatorLedOn", + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + "_ansible_check_mode": True, + } + ): module.main() self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_set_chassis_led_indicator_already_set(self): """Test that if we set LED Indicator to off when it's already off, we get changed=False.""" - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Chassis', - 'command': 'IndicatorLedOff', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Chassis", + "command": "IndicatorLedOff", + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json)) self.assertFalse(is_changed(ansible_exit_json)) def test_set_chassis_led_indicator_already_set_check_mode(self): """Test that if we set LED Indicator to off when it's already off, we get changed=False even in check mode.""" - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Chassis', - 'command': 'IndicatorLedOff', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21', - "_ansible_check_mode": True - }): + with set_module_args( + { + "category": "Chassis", + "command": "IndicatorLedOff", + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + "_ansible_check_mode": True, + } + ): module.main() self.assertEqual(NO_ACTION_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json)) self.assertFalse(is_changed(ansible_exit_json)) def test_set_chassis_invalid_indicator_command(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_put_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_put_request, + ): with self.assertRaises(AnsibleFailJson) as ansible_fail_json: - with set_module_args({ - 'category': 'Chassis', - 'command': 'IndicatorLedBright', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Chassis", + "command": "IndicatorLedBright", + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertIn("Invalid Command", get_exception_message(ansible_fail_json)) def 
test_reset_enclosure(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_put_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_put_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Systems', - 'command': 'PowerGracefulRestart', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Systems", + "command": "PowerGracefulRestart", + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_reset_enclosure_check_mode(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Systems', - 'command': 'PowerGracefulRestart', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21', - "_ansible_check_mode": True - }): + with set_module_args( + { + "category": "Systems", + "command": "PowerGracefulRestart", + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + "_ansible_check_mode": True, + } + ): module.main() self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_firmware_upload_missing_update_image_path(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_put_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_put_request, + ): with self.assertRaises(AnsibleFailJson) as ansible_fail_json: - with set_module_args({ - 'category': 'Update', - 'command': 'FWUpload', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Update", + "command": "FWUpload", + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual("Missing update_image_path.", get_exception_message(ansible_fail_json)) def test_firmware_upload_file_not_found(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleFailJson) as ansible_fail_json: - with set_module_args({ - 'category': 'Update', - 'command': 'FWUpload', - 'update_image_path': 'nonexistentfile.bin', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 
'PASSWORD=21' - }): + with set_module_args( + { + "category": "Update", + "command": "FWUpload", + "update_image_path": "nonexistentfile.bin", + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual("File does not exist.", get_exception_message(ansible_fail_json)) def test_firmware_upload(self): filename = "fake_firmware.bin" filepath = os.path.join(self.tempdir, filename) - file_contents = b'\x00\x01\x02\x03\x04' - with open(filepath, 'wb+') as f: + file_contents = b"\x00\x01\x02\x03\x04" + with open(filepath, "wb+") as f: f.write(file_contents) - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_put_request, - post_request=mock_post_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_put_request, + post_request=mock_post_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Update', - 'command': 'FWUpload', - 'update_image_path': filepath, - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Update", + "command": "FWUpload", + "update_image_path": filepath, + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) @@ -430,208 +450,252 @@ def test_firmware_upload(self): def test_firmware_upload_check_mode(self): filename = "fake_firmware.bin" filepath = os.path.join(self.tempdir, filename) - file_contents = b'\x00\x01\x02\x03\x04' - with open(filepath, 'wb+') as f: + file_contents = b"\x00\x01\x02\x03\x04" + with open(filepath, "wb+") as f: f.write(file_contents) - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_put_request, - post_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_put_request, + post_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Update', - 'command': 'FWUpload', - 'update_image_path': filepath, - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21', - "_ansible_check_mode": True - }): + with set_module_args( + { + "category": "Update", + "command": "FWUpload", + "update_image_path": filepath, + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + "_ansible_check_mode": True, + } + ): module.main() self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_firmware_update(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_put_request, - post_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_put_request, + 
post_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Update', - 'command': 'FWUpdate', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Update", + "command": "FWUpdate", + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_firmware_update_check_mode(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_invalid_http_request, - post_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_invalid_http_request, + post_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Update', - 'command': 'FWUpdate', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21', - "_ansible_check_mode": True - }): + with set_module_args( + { + "category": "Update", + "command": "FWUpdate", + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + "_ansible_check_mode": True, + } + ): module.main() self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_firmware_activate(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_put_request, - post_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_put_request, + post_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Update', - 'command': 'FWActivate', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Update", + "command": "FWActivate", + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_firmware_activate_check_mode(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_invalid_http_request, - post_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_invalid_http_request, + post_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Update', - 'command': 'FWActivate', - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21', - "_ansible_check_mode": True - }): + with set_module_args( + { + "category": "Update", + "command": "FWActivate", + "baseuri": MOCK_BASE_URI, + 
"username": "USERID", + "password": "PASSWORD=21", + "_ansible_check_mode": True, + } + ): module.main() self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_delete_job(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request_job_complete, - delete_request=mock_delete_request, - put_request=mock_invalid_http_request, - post_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request_job_complete, + delete_request=mock_delete_request, + put_request=mock_invalid_http_request, + post_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Jobs', - 'command': 'DeleteJob', - 'baseuri': MOCK_BASE_URI, - 'job_name': MOCK_JOB_NAME, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Jobs", + "command": "DeleteJob", + "baseuri": MOCK_BASE_URI, + "job_name": MOCK_JOB_NAME, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_delete_job_in_progress(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request_job_in_progress, - delete_request=mock_invalid_http_request, - put_request=mock_invalid_http_request, - post_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request_job_in_progress, + delete_request=mock_invalid_http_request, + put_request=mock_invalid_http_request, + post_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleFailJson) as ansible_fail_json: - with set_module_args({ - 'category': 'Jobs', - 'command': 'DeleteJob', - 'baseuri': MOCK_BASE_URI, - 'job_name': MOCK_JOB_NAME, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Jobs", + "command": "DeleteJob", + "baseuri": MOCK_BASE_URI, + "job_name": MOCK_JOB_NAME, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual("Cannot delete job because it is in progress.", get_exception_message(ansible_fail_json)) def test_delete_job_in_progress_only_on_delete(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request_job_complete, - delete_request=mock_http_request_conflict, - put_request=mock_invalid_http_request, - post_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request_job_complete, + delete_request=mock_http_request_conflict, + put_request=mock_invalid_http_request, + post_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleFailJson) as ansible_fail_json: - with set_module_args({ - 'category': 'Jobs', - 'command': 'DeleteJob', - 'baseuri': MOCK_BASE_URI, - 'job_name': MOCK_JOB_NAME, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Jobs", + "command": "DeleteJob", + 
"baseuri": MOCK_BASE_URI, + "job_name": MOCK_JOB_NAME, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual("Cannot delete job because it is in progress.", get_exception_message(ansible_fail_json)) def test_delete_job_check_mode(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request_job_complete, - delete_request=mock_delete_request, - put_request=mock_invalid_http_request, - post_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request_job_complete, + delete_request=mock_delete_request, + put_request=mock_invalid_http_request, + post_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Jobs', - 'command': 'DeleteJob', - 'baseuri': MOCK_BASE_URI, - 'job_name': MOCK_JOB_NAME, - 'username': 'USERID', - 'password': 'PASSWORD=21', - '_ansible_check_mode': True - }): + with set_module_args( + { + "category": "Jobs", + "command": "DeleteJob", + "baseuri": MOCK_BASE_URI, + "job_name": MOCK_JOB_NAME, + "username": "USERID", + "password": "PASSWORD=21", + "_ansible_check_mode": True, + } + ): module.main() self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_delete_job_check_mode_job_not_found(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request_job_does_not_exist, - delete_request=mock_delete_request, - put_request=mock_invalid_http_request, - post_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request_job_does_not_exist, + delete_request=mock_delete_request, + put_request=mock_invalid_http_request, + post_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Jobs', - 'command': 'DeleteJob', - 'baseuri': MOCK_BASE_URI, - 'job_name': MOCK_JOB_NAME, - 'username': 'USERID', - 'password': 'PASSWORD=21', - '_ansible_check_mode': True - }): + with set_module_args( + { + "category": "Jobs", + "command": "DeleteJob", + "baseuri": MOCK_BASE_URI, + "job_name": MOCK_JOB_NAME, + "username": "USERID", + "password": "PASSWORD=21", + "_ansible_check_mode": True, + } + ): module.main() self.assertEqual("Job already deleted.", get_exception_message(ansible_exit_json)) self.assertFalse(is_changed(ansible_exit_json)) def test_delete_job_check_mode_job_in_progress(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request_job_in_progress, - delete_request=mock_delete_request, - put_request=mock_invalid_http_request, - post_request=mock_invalid_http_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request_job_in_progress, + delete_request=mock_delete_request, + put_request=mock_invalid_http_request, + post_request=mock_invalid_http_request, + ): with self.assertRaises(AnsibleFailJson) as ansible_fail_json: - with set_module_args({ - 'category': 'Jobs', - 'command': 'DeleteJob', - 'baseuri': MOCK_BASE_URI, - 
'job_name': MOCK_JOB_NAME, - 'username': 'USERID', - 'password': 'PASSWORD=21', - '_ansible_check_mode': True - }): + with set_module_args( + { + "category": "Jobs", + "command": "DeleteJob", + "baseuri": MOCK_BASE_URI, + "job_name": MOCK_JOB_NAME, + "username": "USERID", + "password": "PASSWORD=21", + "_ansible_check_mode": True, + } + ): module.main() self.assertEqual("Cannot delete job because it is in progress.", get_exception_message(ansible_fail_json)) diff --git a/tests/unit/plugins/modules/test_ocapi_info.py b/tests/unit/plugins/modules/test_ocapi_info.py index eb5ce7dfcb2..eb3cf283d61 100644 --- a/tests/unit/plugins/modules/test_ocapi_info.py +++ b/tests/unit/plugins/modules/test_ocapi_info.py @@ -9,8 +9,15 @@ from ansible.module_utils import basic import ansible_collections.community.general.plugins.modules.ocapi_info as module -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, +) +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + set_module_args, + exit_json, + fail_json, +) MOCK_BASE_URI = "mockBaseUri" MOCK_JOB_NAME_IN_PROGRESS = "MockJobInProgress" @@ -19,15 +26,9 @@ ACTION_WAS_SUCCESSFUL = "Action was successful." -MOCK_SUCCESSFUL_HTTP_RESPONSE = { - "ret": True, - "data": {} -} +MOCK_SUCCESSFUL_HTTP_RESPONSE = {"ret": True, "data": {}} -MOCK_404_RESPONSE = { - "ret": False, - "status": 404 -} +MOCK_404_RESPONSE = {"ret": False, "status": 404} MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS = { "ret": True, @@ -35,19 +36,8 @@ "Self": "https://openflex-data24-usalp02120qo0012-iomb:443/Storage/Devices/openflex-data24-usalp02120qo0012/Jobs/FirmwareUpdate/", "ID": MOCK_JOB_NAME_IN_PROGRESS, "PercentComplete": 10, - "Status": { - "State": { - "ID": 16, - "Name": "In service" - }, - "Health": [ - { - "ID": 5, - "Name": "OK" - } - ] - } - } + "Status": {"State": {"ID": 16, "Name": "In service"}, "Health": [{"ID": 5, "Name": "OK"}]}, + }, } MOCK_HTTP_RESPONSE_JOB_COMPLETE = { @@ -57,21 +47,11 @@ "ID": MOCK_JOB_NAME_COMPLETE, "PercentComplete": 100, "Status": { - "State": { - "ID": 65540, - "Name": "Activate needed" - }, - "Health": [ - { - "ID": 5, - "Name": "OK" - } - ], - "Details": [ - "Completed." 
- ] - } - } + "State": {"ID": 65540, "Name": "Activate needed"}, + "Health": [{"ID": 5, "Name": "OK"}], + "Details": ["Completed."], + }, + }, } @@ -117,10 +97,9 @@ def mock_post_request(*args, **kwargs): class TestOcapiInfo(unittest.TestCase): def setUp(self): - self.mock_module_helper = patch.multiple(basic.AnsibleModule, - exit_json=exit_json, - fail_json=fail_json, - get_bin_path=get_bin_path) + self.mock_module_helper = patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json, get_bin_path=get_bin_path + ) self.mock_module_helper.start() self.addCleanup(self.mock_module_helper.stop) @@ -132,99 +111,137 @@ def test_module_fail_when_required_args_missing(self): def test_module_fail_when_unknown_category(self): with self.assertRaises(AnsibleFailJson) as ansible_fail_json: - with set_module_args({ - 'category': 'unknown', - 'command': 'JobStatus', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'baseuri': MOCK_BASE_URI - }): + with set_module_args( + { + "category": "unknown", + "command": "JobStatus", + "username": "USERID", + "password": "PASSW0RD=21", + "baseuri": MOCK_BASE_URI, + } + ): module.main() self.assertIn("Invalid Category 'unknown", get_exception_message(ansible_fail_json)) def test_module_fail_when_unknown_command(self): with self.assertRaises(AnsibleFailJson) as ansible_fail_json: - with set_module_args({ - 'category': 'Jobs', - 'command': 'unknown', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'baseuri': MOCK_BASE_URI - }): + with set_module_args( + { + "category": "Jobs", + "command": "unknown", + "username": "USERID", + "password": "PASSW0RD=21", + "baseuri": MOCK_BASE_URI, + } + ): module.main() self.assertIn("Invalid Command 'unknown", get_exception_message(ansible_fail_json)) def test_job_status_in_progress(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_put_request, - delete_request=mock_delete_request, - post_request=mock_post_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_put_request, + delete_request=mock_delete_request, + post_request=mock_post_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Jobs', - 'command': 'JobStatus', - 'job_name': MOCK_JOB_NAME_IN_PROGRESS, - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Jobs", + "command": "JobStatus", + "job_name": MOCK_JOB_NAME_IN_PROGRESS, + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json)) response_data = ansible_exit_json.exception.args[0] - self.assertEqual(MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["PercentComplete"], response_data["percentComplete"]) - self.assertEqual(MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["Status"]["State"]["ID"], response_data["operationStatusId"]) - self.assertEqual(MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["Status"]["State"]["Name"], response_data["operationStatus"]) - self.assertEqual(MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["Status"]["Health"][0]["Name"], response_data["operationHealth"]) - self.assertEqual(MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["Status"]["Health"][0]["ID"], response_data["operationHealthId"]) + 
self.assertEqual( + MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["PercentComplete"], response_data["percentComplete"] + ) + self.assertEqual( + MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["Status"]["State"]["ID"], response_data["operationStatusId"] + ) + self.assertEqual( + MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["Status"]["State"]["Name"], response_data["operationStatus"] + ) + self.assertEqual( + MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["Status"]["Health"][0]["Name"], + response_data["operationHealth"], + ) + self.assertEqual( + MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["Status"]["Health"][0]["ID"], + response_data["operationHealthId"], + ) self.assertTrue(response_data["jobExists"]) self.assertFalse(response_data["changed"]) self.assertEqual(ACTION_WAS_SUCCESSFUL, response_data["msg"]) self.assertIsNone(response_data["details"]) def test_job_status_complete(self): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_put_request, - delete_request=mock_delete_request, - post_request=mock_post_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_put_request, + delete_request=mock_delete_request, + post_request=mock_post_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Jobs', - 'command': 'JobStatus', - 'job_name': MOCK_JOB_NAME_COMPLETE, - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Jobs", + "command": "JobStatus", + "job_name": MOCK_JOB_NAME_COMPLETE, + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json)) response_data = ansible_exit_json.exception.args[0] - self.assertEqual(MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["PercentComplete"], response_data["percentComplete"]) - self.assertEqual(MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["Status"]["State"]["ID"], response_data["operationStatusId"]) - self.assertEqual(MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["Status"]["State"]["Name"], response_data["operationStatus"]) - self.assertEqual(MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["Status"]["Health"][0]["Name"], response_data["operationHealth"]) - self.assertEqual(MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["Status"]["Health"][0]["ID"], response_data["operationHealthId"]) + self.assertEqual( + MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["PercentComplete"], response_data["percentComplete"] + ) + self.assertEqual( + MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["Status"]["State"]["ID"], response_data["operationStatusId"] + ) + self.assertEqual( + MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["Status"]["State"]["Name"], response_data["operationStatus"] + ) + self.assertEqual( + MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["Status"]["Health"][0]["Name"], response_data["operationHealth"] + ) + self.assertEqual( + MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["Status"]["Health"][0]["ID"], response_data["operationHealthId"] + ) self.assertTrue(response_data["jobExists"]) self.assertFalse(response_data["changed"]) self.assertEqual(ACTION_WAS_SUCCESSFUL, response_data["msg"]) self.assertEqual(["Completed."], response_data["details"]) def test_job_status_not_found(self): - with 
patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", - get_request=mock_get_request, - put_request=mock_put_request, - delete_request=mock_delete_request, - post_request=mock_post_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils", + get_request=mock_get_request, + put_request=mock_put_request, + delete_request=mock_delete_request, + post_request=mock_post_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: - with set_module_args({ - 'category': 'Jobs', - 'command': 'JobStatus', - 'job_name': MOCK_JOB_NAME_DOES_NOT_EXIST, - 'baseuri': MOCK_BASE_URI, - 'username': 'USERID', - 'password': 'PASSWORD=21' - }): + with set_module_args( + { + "category": "Jobs", + "command": "JobStatus", + "job_name": MOCK_JOB_NAME_DOES_NOT_EXIST, + "baseuri": MOCK_BASE_URI, + "username": "USERID", + "password": "PASSWORD=21", + } + ): module.main() self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json)) response_data = ansible_exit_json.exception.args[0] diff --git a/tests/unit/plugins/modules/test_one_vm.py b/tests/unit/plugins/modules/test_one_vm.py index 18bc544dd12..40c4fab00c5 100644 --- a/tests/unit/plugins/modules/test_one_vm.py +++ b/tests/unit/plugins/modules/test_one_vm.py @@ -17,7 +17,7 @@ }, { "OS": {"ARCH": 2}, - } + }, ), ( { @@ -25,14 +25,13 @@ }, { "OS": {"ARCH": 1}, - } + }, ), ( { "OS": {"ASD": 1}, # "ASD" is an invalid attribute, we ignore it }, - { - } + {}, ), ( { @@ -47,12 +46,12 @@ "PASSWORD": 2, "SSH_PUBLIC_KEY": 3, }, - } + }, ), ] -@pytest.mark.parametrize('vm_template,expected_result', PARSE_UPDATECONF_VALID) +@pytest.mark.parametrize("vm_template,expected_result", PARSE_UPDATECONF_VALID) def test_parse_updateconf(vm_template, expected_result): result = parse_updateconf(vm_template) assert result == expected_result, repr(result) diff --git a/tests/unit/plugins/modules/test_oneview_datacenter_info.py b/tests/unit/plugins/modules/test_oneview_datacenter_info.py index 5bd83c2a13e..7614470dea7 100644 --- a/tests/unit/plugins/modules/test_oneview_datacenter_info.py +++ b/tests/unit/plugins/modules/test_oneview_datacenter_info.py @@ -11,11 +11,7 @@ from ansible_collections.community.general.plugins.modules.oneview_datacenter_info import DatacenterInfoModule -PARAMS_GET_CONNECTED = dict( - config='config.json', - name="MyDatacenter", - options=['visualContent'] -) +PARAMS_GET_CONNECTED = dict(config="config.json", name="MyDatacenter", options=["visualContent"]) class TestDatacenterInfoModule(FactsParamsTest): @@ -28,32 +24,29 @@ def setUp(self, mock_ansible_module, mock_ov_client): def test_should_get_all_datacenters(self): self.resource.get_all.return_value = {"name": "Data Center Name"} - self.mock_ansible_module.params = dict(config='config.json') + self.mock_ansible_module.params = dict(config="config.json") DatacenterInfoModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - datacenters=({"name": "Data Center Name"}) + changed=False, datacenters=({"name": "Data Center Name"}) ) def test_should_get_datacenter_by_name(self): self.resource.get_by.return_value = [{"name": "Data Center Name"}] - self.mock_ansible_module.params = dict(config='config.json', name="MyDatacenter") + self.mock_ansible_module.params = dict(config="config.json", name="MyDatacenter") DatacenterInfoModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - datacenters=([{"name": "Data 
Center Name"}]) + changed=False, datacenters=([{"name": "Data Center Name"}]) ) def test_should_get_datacenter_visual_content(self): self.resource.get_by.return_value = [{"name": "Data Center Name", "uri": "/rest/datacenter/id"}] - self.resource.get_visual_content.return_value = { - "name": "Visual Content"} + self.resource.get_visual_content.return_value = {"name": "Visual Content"} self.mock_ansible_module.params = PARAMS_GET_CONNECTED @@ -61,8 +54,8 @@ def test_should_get_datacenter_visual_content(self): self.mock_ansible_module.exit_json.assert_called_once_with( changed=False, - datacenter_visual_content={'name': 'Visual Content'}, - datacenters=[{'name': 'Data Center Name', 'uri': '/rest/datacenter/id'}] + datacenter_visual_content={"name": "Visual Content"}, + datacenters=[{"name": "Data Center Name", "uri": "/rest/datacenter/id"}], ) def test_should_get_none_datacenter_visual_content(self): @@ -73,7 +66,5 @@ def test_should_get_none_datacenter_visual_content(self): DatacenterInfoModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - datacenter_visual_content=None, - datacenters=[] + changed=False, datacenter_visual_content=None, datacenters=[] ) diff --git a/tests/unit/plugins/modules/test_oneview_enclosure_info.py b/tests/unit/plugins/modules/test_oneview_enclosure_info.py index 6cfa3023253..844c985f536 100644 --- a/tests/unit/plugins/modules/test_oneview_enclosure_info.py +++ b/tests/unit/plugins/modules/test_oneview_enclosure_info.py @@ -10,53 +10,41 @@ from ansible_collections.community.general.plugins.modules.oneview_enclosure_info import EnclosureInfoModule -ERROR_MSG = 'Fake message error' +ERROR_MSG = "Fake message error" -PARAMS_GET_ALL = dict( - config='config.json', - name=None -) +PARAMS_GET_ALL = dict(config="config.json", name=None) -PARAMS_GET_BY_NAME = dict( - config='config.json', - name="Test-Enclosure", - options=[] -) +PARAMS_GET_BY_NAME = dict(config="config.json", name="Test-Enclosure", options=[]) PARAMS_GET_BY_NAME_WITH_OPTIONS = dict( - config='config.json', - name="Test-Enclosure", - options=['utilization', 'environmentalConfiguration', 'script'] + config="config.json", name="Test-Enclosure", options=["utilization", "environmentalConfiguration", "script"] ) PARAMS_GET_UTILIZATION_WITH_PARAMS = dict( - config='config.json', + config="config.json", name="Test-Enclosure", - options=[dict(utilization=dict(fields='AveragePower', - filter=['startDate=2016-06-30T03:29:42.000Z', - 'endDate=2016-07-01T03:29:42.000Z'], - view='day', - refresh=True))] + options=[ + dict( + utilization=dict( + fields="AveragePower", + filter=["startDate=2016-06-30T03:29:42.000Z", "endDate=2016-07-01T03:29:42.000Z"], + view="day", + refresh=True, + ) + ) + ], ) -PRESENT_ENCLOSURES = [{ - "name": "Test-Enclosure", - "uri": "/rest/enclosures/c6bf9af9-48e7-4236-b08a-77684dc258a5" -}] +PRESENT_ENCLOSURES = [{"name": "Test-Enclosure", "uri": "/rest/enclosures/c6bf9af9-48e7-4236-b08a-77684dc258a5"}] -ENCLOSURE_SCRIPT = '# script content' +ENCLOSURE_SCRIPT = "# script content" -ENCLOSURE_UTILIZATION = { - "isFresh": "True" -} +ENCLOSURE_UTILIZATION = {"isFresh": "True"} -ENCLOSURE_ENVIRONMENTAL_CONFIG = { - "calibratedMaxPower": "2500" -} +ENCLOSURE_ENVIRONMENTAL_CONFIG = {"calibratedMaxPower": "2500"} -class EnclosureInfoSpec(unittest.TestCase, - FactsParamsTestCase): +class EnclosureInfoSpec(unittest.TestCase, FactsParamsTestCase): def setUp(self): self.configure_mocks(self, EnclosureInfoModule) self.enclosures = self.mock_ov_client.enclosures @@ -68,10 
+56,7 @@ def test_should_get_all_enclosures(self): EnclosureInfoModule().run() - self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - enclosures=(PRESENT_ENCLOSURES) - ) + self.mock_ansible_module.exit_json.assert_called_once_with(changed=False, enclosures=(PRESENT_ENCLOSURES)) def test_should_get_enclosure_by_name(self): self.enclosures.get_by.return_value = PRESENT_ENCLOSURES @@ -79,11 +64,7 @@ def test_should_get_enclosure_by_name(self): EnclosureInfoModule().run() - self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - enclosures=(PRESENT_ENCLOSURES) - - ) + self.mock_ansible_module.exit_json.assert_called_once_with(changed=False, enclosures=(PRESENT_ENCLOSURES)) def test_should_get_enclosure_by_name_with_options(self): self.enclosures.get_by.return_value = PRESENT_ENCLOSURES @@ -100,7 +81,7 @@ def test_should_get_enclosure_by_name_with_options(self): enclosures=PRESENT_ENCLOSURES, enclosure_script=ENCLOSURE_SCRIPT, enclosure_environmental_configuration=ENCLOSURE_ENVIRONMENTAL_CONFIG, - enclosure_utilization=ENCLOSURE_UTILIZATION + enclosure_utilization=ENCLOSURE_UTILIZATION, ) def test_should_get_all_utilization_data(self): @@ -113,8 +94,9 @@ def test_should_get_all_utilization_data(self): EnclosureInfoModule().run() - self.enclosures.get_utilization.assert_called_once_with(PRESENT_ENCLOSURES[0]['uri'], fields='', filter='', - view='', refresh='') + self.enclosures.get_utilization.assert_called_once_with( + PRESENT_ENCLOSURES[0]["uri"], fields="", filter="", view="", refresh="" + ) def test_should_get_utilization_with_parameters(self): self.enclosures.get_by.return_value = PRESENT_ENCLOSURES @@ -129,8 +111,9 @@ def test_should_get_utilization_with_parameters(self): date_filter = ["startDate=2016-06-30T03:29:42.000Z", "endDate=2016-07-01T03:29:42.000Z"] self.enclosures.get_utilization.assert_called_once_with( - PRESENT_ENCLOSURES[0]['uri'], fields='AveragePower', filter=date_filter, view='day', refresh=True) + PRESENT_ENCLOSURES[0]["uri"], fields="AveragePower", filter=date_filter, view="day", refresh=True + ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_oneview_ethernet_network.py b/tests/unit/plugins/modules/test_oneview_ethernet_network.py index a4ac106b2bd..cc4a1bc5cb5 100644 --- a/tests/unit/plugins/modules/test_oneview_ethernet_network.py +++ b/tests/unit/plugins/modules/test_oneview_ethernet_network.py @@ -13,9 +13,9 @@ from .oneview_module_loader import EthernetNetworkModule from .hpe_test_utils import OneViewBaseTestCase -FAKE_MSG_ERROR = 'Fake message error' -DEFAULT_ETHERNET_NAME = 'Test Ethernet Network' -RENAMED_ETHERNET = 'Renamed Ethernet Network' +FAKE_MSG_ERROR = "Fake message error" +DEFAULT_ETHERNET_NAME = "Test Ethernet Network" +RENAMED_ETHERNET = "Renamed Ethernet Network" DEFAULT_ENET_TEMPLATE = dict( name=DEFAULT_ETHERNET_NAME, @@ -24,20 +24,13 @@ purpose="General", smartLink=False, privateNetwork=False, - connectionTemplateUri=None + connectionTemplateUri=None, ) -PARAMS_FOR_PRESENT = dict( - config='config.json', - state='present', - data=dict(name=DEFAULT_ETHERNET_NAME) -) +PARAMS_FOR_PRESENT = dict(config="config.json", state="present", data=dict(name=DEFAULT_ETHERNET_NAME)) PARAMS_TO_RENAME = dict( - config='config.json', - state='present', - data=dict(name=DEFAULT_ETHERNET_NAME, - newName=RENAMED_ETHERNET) + config="config.json", state="present", data=dict(name=DEFAULT_ETHERNET_NAME, newName=RENAMED_ETHERNET) ) YAML_PARAMS_WITH_CHANGES 
= """ @@ -59,37 +52,26 @@ name: 'network name' """ -PARAMS_FOR_SCOPES_SET = dict( - config='config.json', - state='present', - data=dict(name=DEFAULT_ETHERNET_NAME) -) +PARAMS_FOR_SCOPES_SET = dict(config="config.json", state="present", data=dict(name=DEFAULT_ETHERNET_NAME)) -PARAMS_FOR_ABSENT = dict( - config='config.json', - state='absent', - data=dict(name=DEFAULT_ETHERNET_NAME) -) +PARAMS_FOR_ABSENT = dict(config="config.json", state="absent", data=dict(name=DEFAULT_ETHERNET_NAME)) PARAMS_FOR_BULK_CREATED = dict( - config='config.json', - state='present', - data=dict(namePrefix="TestNetwork", vlanIdRange="1-2,5,9-10") + config="config.json", state="present", data=dict(namePrefix="TestNetwork", vlanIdRange="1-2,5,9-10") ) DEFAULT_BULK_ENET_TEMPLATE = [ - {'name': 'TestNetwork_1', 'vlanId': 1}, - {'name': 'TestNetwork_2', 'vlanId': 2}, - {'name': 'TestNetwork_5', 'vlanId': 5}, - {'name': 'TestNetwork_9', 'vlanId': 9}, - {'name': 'TestNetwork_10', 'vlanId': 10}, + {"name": "TestNetwork_1", "vlanId": 1}, + {"name": "TestNetwork_2", "vlanId": 2}, + {"name": "TestNetwork_5", "vlanId": 5}, + {"name": "TestNetwork_9", "vlanId": 9}, + {"name": "TestNetwork_10", "vlanId": 10}, ] DICT_PARAMS_WITH_CHANGES = yaml.safe_load(YAML_PARAMS_WITH_CHANGES)["data"] -class EthernetNetworkModuleSpec(unittest.TestCase, - OneViewBaseTestCase): +class EthernetNetworkModuleSpec(unittest.TestCase, OneViewBaseTestCase): """ OneViewBaseTestCase provides the mocks used in this test case """ @@ -109,7 +91,7 @@ def test_should_create_new_ethernet_network(self): self.mock_ansible_module.exit_json.assert_called_once_with( changed=True, msg=EthernetNetworkModule.MSG_CREATED, - ansible_facts=dict(ethernet_network=DEFAULT_ENET_TEMPLATE) + ansible_facts=dict(ethernet_network=DEFAULT_ENET_TEMPLATE), ) def test_should_not_update_when_data_is_equals(self): @@ -122,12 +104,12 @@ def test_should_not_update_when_data_is_equals(self): self.mock_ansible_module.exit_json.assert_called_once_with( changed=False, msg=EthernetNetworkModule.MSG_ALREADY_PRESENT, - ansible_facts=dict(ethernet_network=DEFAULT_ENET_TEMPLATE) + ansible_facts=dict(ethernet_network=DEFAULT_ENET_TEMPLATE), ) def test_update_when_data_has_modified_attributes(self): data_merged = DEFAULT_ENET_TEMPLATE.copy() - data_merged['purpose'] = 'Management' + data_merged["purpose"] = "Management" self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE] self.resource.update.return_value = data_merged @@ -138,9 +120,7 @@ def test_update_when_data_has_modified_attributes(self): EthernetNetworkModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( - changed=True, - msg=EthernetNetworkModule.MSG_UPDATED, - ansible_facts=dict(ethernet_network=data_merged) + changed=True, msg=EthernetNetworkModule.MSG_UPDATED, ansible_facts=dict(ethernet_network=data_merged) ) def test_update_when_only_bandwidth_has_modified_attributes(self): @@ -154,31 +134,28 @@ def test_update_when_only_bandwidth_has_modified_attributes(self): self.mock_ansible_module.exit_json.assert_called_once_with( changed=True, msg=EthernetNetworkModule.MSG_UPDATED, - ansible_facts=dict(ethernet_network=DICT_PARAMS_WITH_CHANGES) + ansible_facts=dict(ethernet_network=DICT_PARAMS_WITH_CHANGES), ) def test_update_when_data_has_modified_attributes_but_bandwidth_is_equal(self): data_merged = DEFAULT_ENET_TEMPLATE.copy() - data_merged['purpose'] = 'Management' + data_merged["purpose"] = "Management" self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE] self.resource.update.return_value = data_merged - 
self.mock_ov_client.connection_templates.get.return_value = { - "bandwidth": DICT_PARAMS_WITH_CHANGES['bandwidth']} + self.mock_ov_client.connection_templates.get.return_value = {"bandwidth": DICT_PARAMS_WITH_CHANGES["bandwidth"]} self.mock_ansible_module.params = yaml.safe_load(YAML_PARAMS_WITH_CHANGES) EthernetNetworkModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( - changed=True, - msg=EthernetNetworkModule.MSG_UPDATED, - ansible_facts=dict(ethernet_network=data_merged) + changed=True, msg=EthernetNetworkModule.MSG_UPDATED, ansible_facts=dict(ethernet_network=data_merged) ) def test_update_successfully_even_when_connection_template_uri_not_exists(self): data_merged = DEFAULT_ENET_TEMPLATE.copy() - del data_merged['connectionTemplateUri'] + del data_merged["connectionTemplateUri"] self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE] self.resource.update.return_value = data_merged @@ -188,14 +165,12 @@ def test_update_successfully_even_when_connection_template_uri_not_exists(self): EthernetNetworkModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( - changed=True, - msg=EthernetNetworkModule.MSG_UPDATED, - ansible_facts=dict(ethernet_network=data_merged) + changed=True, msg=EthernetNetworkModule.MSG_UPDATED, ansible_facts=dict(ethernet_network=data_merged) ) def test_rename_when_resource_exists(self): data_merged = DEFAULT_ENET_TEMPLATE.copy() - data_merged['name'] = RENAMED_ETHERNET + data_merged["name"] = RENAMED_ETHERNET params_to_rename = PARAMS_TO_RENAME.copy() self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE] @@ -209,7 +184,7 @@ def test_rename_when_resource_exists(self): def test_create_with_new_name_when_resource_not_exists(self): data_merged = DEFAULT_ENET_TEMPLATE.copy() - data_merged['name'] = RENAMED_ETHERNET + data_merged["name"] = RENAMED_ETHERNET params_to_rename = PARAMS_TO_RENAME.copy() self.resource.get_by.return_value = [] @@ -219,7 +194,7 @@ def test_create_with_new_name_when_resource_not_exists(self): EthernetNetworkModule().run() - self.resource.create.assert_called_once_with(PARAMS_TO_RENAME['data']) + self.resource.create.assert_called_once_with(PARAMS_TO_RENAME["data"]) def test_should_remove_ethernet_network(self): self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE] @@ -228,10 +203,7 @@ def test_should_remove_ethernet_network(self): EthernetNetworkModule().run() - self.mock_ansible_module.exit_json.assert_called_once_with( - changed=True, - msg=EthernetNetworkModule.MSG_DELETED - ) + self.mock_ansible_module.exit_json.assert_called_once_with(changed=True, msg=EthernetNetworkModule.MSG_DELETED) def test_should_do_nothing_when_ethernet_network_not_exist(self): self.resource.get_by.return_value = [] @@ -241,8 +213,7 @@ def test_should_do_nothing_when_ethernet_network_not_exist(self): EthernetNetworkModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - msg=EthernetNetworkModule.MSG_ALREADY_ABSENT + changed=False, msg=EthernetNetworkModule.MSG_ALREADY_ABSENT ) def test_should_create_all_ethernet_networks(self): @@ -253,17 +224,17 @@ def test_should_create_all_ethernet_networks(self): EthernetNetworkModule().run() - self.resource.create_bulk.assert_called_once_with( - dict(namePrefix="TestNetwork", vlanIdRange="1-2,5,9-10")) + self.resource.create_bulk.assert_called_once_with(dict(namePrefix="TestNetwork", vlanIdRange="1-2,5,9-10")) self.mock_ansible_module.exit_json.assert_called_once_with( changed=True, msg=EthernetNetworkModule.MSG_BULK_CREATED, - 
ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE)) + ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE), + ) def test_should_create_missing_ethernet_networks(self): enet_get_range_return = [ - {'name': 'TestNetwork_1', 'vlanId': 1}, - {'name': 'TestNetwork_2', 'vlanId': 2}, + {"name": "TestNetwork_1", "vlanId": 1}, + {"name": "TestNetwork_2", "vlanId": 2}, ] self.resource.get_range.side_effect = [enet_get_range_return, DEFAULT_BULK_ENET_TEMPLATE] @@ -273,16 +244,17 @@ def test_should_create_missing_ethernet_networks(self): EthernetNetworkModule().run() - self.resource.create_bulk.assert_called_once_with( - dict(namePrefix="TestNetwork", vlanIdRange="5,9,10")) + self.resource.create_bulk.assert_called_once_with(dict(namePrefix="TestNetwork", vlanIdRange="5,9,10")) self.mock_ansible_module.exit_json.assert_called_once_with( - changed=True, msg=EthernetNetworkModule.MSG_MISSING_BULK_CREATED, - ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE)) + changed=True, + msg=EthernetNetworkModule.MSG_MISSING_BULK_CREATED, + ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE), + ) def test_should_create_missing_ethernet_networks_with_just_one_difference(self): enet_get_range_return = [ - {'name': 'TestNetwork_1', 'vlanId': 1}, - {'name': 'TestNetwork_2', 'vlanId': 2}, + {"name": "TestNetwork_1", "vlanId": 1}, + {"name": "TestNetwork_2", "vlanId": 2}, ] self.resource.get_range.side_effect = [enet_get_range_return, DEFAULT_BULK_ENET_TEMPLATE] @@ -292,12 +264,13 @@ def test_should_create_missing_ethernet_networks_with_just_one_difference(self): EthernetNetworkModule().run() - self.resource.create_bulk.assert_called_once_with({'vlanIdRange': '5-5', 'namePrefix': 'TestNetwork'}) + self.resource.create_bulk.assert_called_once_with({"vlanIdRange": "5-5", "namePrefix": "TestNetwork"}) self.mock_ansible_module.exit_json.assert_called_once_with( changed=True, msg=EthernetNetworkModule.MSG_MISSING_BULK_CREATED, - ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE)) + ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE), + ) def test_should_do_nothing_when_ethernet_networks_already_exist(self): self.resource.get_range.return_value = DEFAULT_BULK_ENET_TEMPLATE @@ -308,26 +281,27 @@ def test_should_do_nothing_when_ethernet_networks_already_exist(self): EthernetNetworkModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, msg=EthernetNetworkModule.MSG_BULK_ALREADY_EXIST, - ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE)) + changed=False, + msg=EthernetNetworkModule.MSG_BULK_ALREADY_EXIST, + ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE), + ) def test_reset_successfully(self): self.resource.get_by.return_value = [DICT_PARAMS_WITH_CHANGES] - self.mock_ov_client.connection_templates.update.return_value = {'result': 'success'} - self.mock_ov_client.connection_templates.get.return_value = { - "bandwidth": DICT_PARAMS_WITH_CHANGES['bandwidth']} + self.mock_ov_client.connection_templates.update.return_value = {"result": "success"} + self.mock_ov_client.connection_templates.get.return_value = {"bandwidth": DICT_PARAMS_WITH_CHANGES["bandwidth"]} - self.mock_ov_client.connection_templates.get_default.return_value = {"bandwidth": { - "max": 1 - }} + self.mock_ov_client.connection_templates.get_default.return_value = {"bandwidth": {"max": 1}} self.mock_ansible_module.params = yaml.safe_load(YAML_RESET_CONNECTION_TEMPLATE) 
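# The staging above follows the pattern used throughout these OneView tests:
# seed the mocked client with return values, assign the module params, then
# call run() exactly once and assert on the single exit_json call. A minimal
# sketch of that flow, with illustrative names that are not from this diff:
#
#     self.resource.get_by.return_value = [{"name": "Example", "uri": "rest/fake"}]
#     self.mock_ansible_module.params = {
#         "config": "config.json",
#         "state": "present",
#         "data": {"name": "Example"},
#     }
#     SomeOneViewModule().run()
#     self.mock_ansible_module.exit_json.assert_called_once_with(
#         changed=False,
#         msg=SomeOneViewModule.MSG_ALREADY_PRESENT,
#         ansible_facts=dict(some_resource={"name": "Example"}),
#     )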
EthernetNetworkModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( - changed=True, msg=EthernetNetworkModule.MSG_CONNECTION_TEMPLATE_RESET, - ansible_facts=dict(ethernet_network_connection_template={'result': 'success'})) + changed=True, + msg=EthernetNetworkModule.MSG_CONNECTION_TEMPLATE_RESET, + ansible_facts=dict(ethernet_network_connection_template={"result": "success"}), + ) def test_should_fail_when_reset_not_existing_ethernet_network(self): self.resource.get_by.return_value = [None] @@ -337,44 +311,40 @@ def test_should_fail_when_reset_not_existing_ethernet_network(self): EthernetNetworkModule().run() self.mock_ansible_module.fail_json.assert_called_once_with( - exception=mock.ANY, - msg=EthernetNetworkModule.MSG_ETHERNET_NETWORK_NOT_FOUND + exception=mock.ANY, msg=EthernetNetworkModule.MSG_ETHERNET_NETWORK_NOT_FOUND ) def test_update_scopes_when_different(self): params_to_scope = PARAMS_FOR_PRESENT.copy() - params_to_scope['data']['scopeUris'] = ['test'] + params_to_scope["data"]["scopeUris"] = ["test"] self.mock_ansible_module.params = params_to_scope resource_data = DEFAULT_ENET_TEMPLATE.copy() - resource_data['scopeUris'] = ['fake'] - resource_data['uri'] = 'rest/ethernet/fake' + resource_data["scopeUris"] = ["fake"] + resource_data["uri"] = "rest/ethernet/fake" self.resource.get_by.return_value = [resource_data] patch_return = resource_data.copy() - patch_return['scopeUris'] = ['test'] + patch_return["scopeUris"] = ["test"] self.resource.patch.return_value = patch_return EthernetNetworkModule().run() - self.resource.patch.assert_called_once_with('rest/ethernet/fake', - operation='replace', - path='/scopeUris', - value=['test']) + self.resource.patch.assert_called_once_with( + "rest/ethernet/fake", operation="replace", path="/scopeUris", value=["test"] + ) self.mock_ansible_module.exit_json.assert_called_once_with( - changed=True, - ansible_facts=dict(ethernet_network=patch_return), - msg=EthernetNetworkModule.MSG_UPDATED + changed=True, ansible_facts=dict(ethernet_network=patch_return), msg=EthernetNetworkModule.MSG_UPDATED ) def test_should_do_nothing_when_scopes_are_the_same(self): params_to_scope = PARAMS_FOR_PRESENT.copy() - params_to_scope['data']['scopeUris'] = ['test'] + params_to_scope["data"]["scopeUris"] = ["test"] self.mock_ansible_module.params = params_to_scope resource_data = DEFAULT_ENET_TEMPLATE.copy() - resource_data['scopeUris'] = ['test'] + resource_data["scopeUris"] = ["test"] self.resource.get_by.return_value = [resource_data] EthernetNetworkModule().run() @@ -384,9 +354,9 @@ def test_should_do_nothing_when_scopes_are_the_same(self): self.mock_ansible_module.exit_json.assert_called_once_with( changed=False, ansible_facts=dict(ethernet_network=resource_data), - msg=EthernetNetworkModule.MSG_ALREADY_PRESENT + msg=EthernetNetworkModule.MSG_ALREADY_PRESENT, ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_oneview_ethernet_network_info.py b/tests/unit/plugins/modules/test_oneview_ethernet_network_info.py index ca0553244e2..1638d298870 100644 --- a/tests/unit/plugins/modules/test_oneview_ethernet_network_info.py +++ b/tests/unit/plugins/modules/test_oneview_ethernet_network_info.py @@ -9,50 +9,42 @@ from .oneview_module_loader import EthernetNetworkInfoModule from .hpe_test_utils import FactsParamsTestCase -ERROR_MSG = 'Fake message error' +ERROR_MSG = "Fake message error" -PARAMS_GET_ALL = dict( - config='config.json', - name=None -) +PARAMS_GET_ALL = 
dict(config="config.json", name=None) -PARAMS_GET_BY_NAME = dict( - config='config.json', - name="Test Ethernet Network", - options=[] -) +PARAMS_GET_BY_NAME = dict(config="config.json", name="Test Ethernet Network", options=[]) PARAMS_GET_BY_NAME_WITH_OPTIONS = dict( - config='config.json', - name="Test Ethernet Network", - options=['associatedProfiles', 'associatedUplinkGroups'] + config="config.json", name="Test Ethernet Network", options=["associatedProfiles", "associatedUplinkGroups"] ) -PRESENT_ENETS = [{ - "name": "Test Ethernet Network", - "uri": "/rest/ethernet-networks/d34dcf5e-0d8e-441c-b00d-e1dd6a067188" -}] +PRESENT_ENETS = [ + {"name": "Test Ethernet Network", "uri": "/rest/ethernet-networks/d34dcf5e-0d8e-441c-b00d-e1dd6a067188"} +] ENET_ASSOCIATED_UPLINK_GROUP_URIS = [ "/rest/uplink-sets/c6bf9af9-48e7-4236-b08a-77684dc258a5", - "/rest/uplink-sets/e2f0031b-52bd-4223-9ac1-d91cb519d548" + "/rest/uplink-sets/e2f0031b-52bd-4223-9ac1-d91cb519d548", ] ENET_ASSOCIATED_PROFILE_URIS = [ "/rest/server-profiles/83e2e117-59dc-4e33-9f24-462af951cbbe", - "/rest/server-profiles/57d3af2a-b6d2-4446-8645-f38dd808ea4d" + "/rest/server-profiles/57d3af2a-b6d2-4446-8645-f38dd808ea4d", ] -ENET_ASSOCIATED_UPLINK_GROUPS = [dict(uri=ENET_ASSOCIATED_UPLINK_GROUP_URIS[0], name='Uplink Set 1'), - dict(uri=ENET_ASSOCIATED_UPLINK_GROUP_URIS[1], name='Uplink Set 2')] +ENET_ASSOCIATED_UPLINK_GROUPS = [ + dict(uri=ENET_ASSOCIATED_UPLINK_GROUP_URIS[0], name="Uplink Set 1"), + dict(uri=ENET_ASSOCIATED_UPLINK_GROUP_URIS[1], name="Uplink Set 2"), +] -ENET_ASSOCIATED_PROFILES = [dict(uri=ENET_ASSOCIATED_PROFILE_URIS[0], name='Server Profile 1'), - dict(uri=ENET_ASSOCIATED_PROFILE_URIS[1], name='Server Profile 2')] +ENET_ASSOCIATED_PROFILES = [ + dict(uri=ENET_ASSOCIATED_PROFILE_URIS[0], name="Server Profile 1"), + dict(uri=ENET_ASSOCIATED_PROFILE_URIS[1], name="Server Profile 2"), +] -class EthernetNetworkInfoSpec(unittest.TestCase, - FactsParamsTestCase - ): +class EthernetNetworkInfoSpec(unittest.TestCase, FactsParamsTestCase): def setUp(self): self.configure_mocks(self, EthernetNetworkInfoModule) self.ethernet_networks = self.mock_ov_client.ethernet_networks @@ -64,10 +56,7 @@ def test_should_get_all_enets(self): EthernetNetworkInfoModule().run() - self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - ethernet_networks=(PRESENT_ENETS) - ) + self.mock_ansible_module.exit_json.assert_called_once_with(changed=False, ethernet_networks=(PRESENT_ENETS)) def test_should_get_enet_by_name(self): self.ethernet_networks.get_by.return_value = PRESENT_ENETS @@ -75,10 +64,7 @@ def test_should_get_enet_by_name(self): EthernetNetworkInfoModule().run() - self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - ethernet_networks=(PRESENT_ENETS) - ) + self.mock_ansible_module.exit_json.assert_called_once_with(changed=False, ethernet_networks=(PRESENT_ENETS)) def test_should_get_enet_by_name_with_options(self): self.ethernet_networks.get_by.return_value = PRESENT_ENETS @@ -95,9 +81,9 @@ def test_should_get_enet_by_name_with_options(self): changed=False, ethernet_networks=PRESENT_ENETS, enet_associated_profiles=ENET_ASSOCIATED_PROFILES, - enet_associated_uplink_groups=ENET_ASSOCIATED_UPLINK_GROUPS + enet_associated_uplink_groups=ENET_ASSOCIATED_UPLINK_GROUPS, ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_oneview_fc_network.py b/tests/unit/plugins/modules/test_oneview_fc_network.py index 9a0d9cf05c5..953631c3322 
100644 --- a/tests/unit/plugins/modules/test_oneview_fc_network.py +++ b/tests/unit/plugins/modules/test_oneview_fc_network.py @@ -9,37 +9,22 @@ from .oneview_module_loader import FcNetworkModule from .hpe_test_utils import OneViewBaseTestCase -FAKE_MSG_ERROR = 'Fake message error' +FAKE_MSG_ERROR = "Fake message error" -DEFAULT_FC_NETWORK_TEMPLATE = dict( - name='New FC Network 2', - autoLoginRedistribution=True, - fabricType='FabricAttach' -) +DEFAULT_FC_NETWORK_TEMPLATE = dict(name="New FC Network 2", autoLoginRedistribution=True, fabricType="FabricAttach") -PARAMS_FOR_PRESENT = dict( - config='config.json', - state='present', - data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE['name']) -) +PARAMS_FOR_PRESENT = dict(config="config.json", state="present", data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE["name"])) PARAMS_WITH_CHANGES = dict( - config='config.json', - state='present', - data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE['name'], - newName="New Name", - fabricType='DirectAttach') + config="config.json", + state="present", + data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE["name"], newName="New Name", fabricType="DirectAttach"), ) -PARAMS_FOR_ABSENT = dict( - config='config.json', - state='absent', - data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE['name']) -) +PARAMS_FOR_ABSENT = dict(config="config.json", state="absent", data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE["name"])) -class FcNetworkModuleSpec(unittest.TestCase, - OneViewBaseTestCase): +class FcNetworkModuleSpec(unittest.TestCase, OneViewBaseTestCase): """ OneViewBaseTestCase provides the mocks used in this test case """ @@ -57,9 +42,7 @@ def test_should_create_new_fc_network(self): FcNetworkModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( - changed=True, - msg=FcNetworkModule.MSG_CREATED, - ansible_facts=dict(fc_network=DEFAULT_FC_NETWORK_TEMPLATE) + changed=True, msg=FcNetworkModule.MSG_CREATED, ansible_facts=dict(fc_network=DEFAULT_FC_NETWORK_TEMPLATE) ) def test_should_not_update_when_data_is_equals(self): @@ -72,13 +55,13 @@ def test_should_not_update_when_data_is_equals(self): self.mock_ansible_module.exit_json.assert_called_once_with( changed=False, msg=FcNetworkModule.MSG_ALREADY_PRESENT, - ansible_facts=dict(fc_network=DEFAULT_FC_NETWORK_TEMPLATE) + ansible_facts=dict(fc_network=DEFAULT_FC_NETWORK_TEMPLATE), ) def test_update_when_data_has_modified_attributes(self): data_merged = DEFAULT_FC_NETWORK_TEMPLATE.copy() - data_merged['fabricType'] = 'DirectAttach' + data_merged["fabricType"] = "DirectAttach" self.resource.get_by.return_value = [DEFAULT_FC_NETWORK_TEMPLATE] self.resource.update.return_value = data_merged @@ -88,9 +71,7 @@ def test_update_when_data_has_modified_attributes(self): FcNetworkModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( - changed=True, - msg=FcNetworkModule.MSG_UPDATED, - ansible_facts=dict(fc_network=data_merged) + changed=True, msg=FcNetworkModule.MSG_UPDATED, ansible_facts=dict(fc_network=data_merged) ) def test_should_remove_fc_network(self): @@ -100,10 +81,7 @@ def test_should_remove_fc_network(self): FcNetworkModule().run() - self.mock_ansible_module.exit_json.assert_called_once_with( - changed=True, - msg=FcNetworkModule.MSG_DELETED - ) + self.mock_ansible_module.exit_json.assert_called_once_with(changed=True, msg=FcNetworkModule.MSG_DELETED) def test_should_do_nothing_when_fc_network_not_exist(self): self.resource.get_by.return_value = [] @@ -113,44 +91,40 @@ def test_should_do_nothing_when_fc_network_not_exist(self): FcNetworkModule().run() 
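# Two verification styles appear in this same diff: the ocapi tests patch
# basic.AnsibleModule.exit_json/fail_json to raise AnsibleExitJson or
# AnsibleFailJson and assert on the raised exception, while these OneView tests
# receive a MagicMock module from configure_mocks, so exit_json is checked with
# assert_called_once_with as below. A sketch of the raising style, using the
# internal_test_tools helpers shown earlier (the module and args here are
# illustrative, not from this diff):
#
#     with patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json):
#         with self.assertRaises(AnsibleExitJson) as ctx:
#             with set_module_args({"state": "present"}):
#                 module.main()
#     self.assertFalse(ctx.exception.args[0]["changed"])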
self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - msg=FcNetworkModule.MSG_ALREADY_ABSENT + changed=False, msg=FcNetworkModule.MSG_ALREADY_ABSENT ) def test_update_scopes_when_different(self): params_to_scope = PARAMS_FOR_PRESENT.copy() - params_to_scope['data']['scopeUris'] = ['test'] + params_to_scope["data"]["scopeUris"] = ["test"] self.mock_ansible_module.params = params_to_scope resource_data = DEFAULT_FC_NETWORK_TEMPLATE.copy() - resource_data['scopeUris'] = ['fake'] - resource_data['uri'] = 'rest/fc/fake' + resource_data["scopeUris"] = ["fake"] + resource_data["uri"] = "rest/fc/fake" self.resource.get_by.return_value = [resource_data] patch_return = resource_data.copy() - patch_return['scopeUris'] = ['test'] + patch_return["scopeUris"] = ["test"] self.resource.patch.return_value = patch_return FcNetworkModule().run() - self.resource.patch.assert_called_once_with('rest/fc/fake', - operation='replace', - path='/scopeUris', - value=['test']) + self.resource.patch.assert_called_once_with( + "rest/fc/fake", operation="replace", path="/scopeUris", value=["test"] + ) self.mock_ansible_module.exit_json.assert_called_once_with( - changed=True, - ansible_facts=dict(fc_network=patch_return), - msg=FcNetworkModule.MSG_UPDATED + changed=True, ansible_facts=dict(fc_network=patch_return), msg=FcNetworkModule.MSG_UPDATED ) def test_should_do_nothing_when_scopes_are_the_same(self): params_to_scope = PARAMS_FOR_PRESENT.copy() - params_to_scope['data']['scopeUris'] = ['test'] + params_to_scope["data"]["scopeUris"] = ["test"] self.mock_ansible_module.params = params_to_scope resource_data = DEFAULT_FC_NETWORK_TEMPLATE.copy() - resource_data['scopeUris'] = ['test'] + resource_data["scopeUris"] = ["test"] self.resource.get_by.return_value = [resource_data] FcNetworkModule().run() @@ -158,11 +132,9 @@ def test_should_do_nothing_when_scopes_are_the_same(self): self.resource.patch.not_been_called() self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - ansible_facts=dict(fc_network=resource_data), - msg=FcNetworkModule.MSG_ALREADY_PRESENT + changed=False, ansible_facts=dict(fc_network=resource_data), msg=FcNetworkModule.MSG_ALREADY_PRESENT ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_oneview_fc_network_info.py b/tests/unit/plugins/modules/test_oneview_fc_network_info.py index faecfdaa09a..20e0f42427f 100644 --- a/tests/unit/plugins/modules/test_oneview_fc_network_info.py +++ b/tests/unit/plugins/modules/test_oneview_fc_network_info.py @@ -8,26 +8,16 @@ from .oneview_module_loader import FcNetworkInfoModule from .hpe_test_utils import FactsParamsTestCase -ERROR_MSG = 'Fake message error' +ERROR_MSG = "Fake message error" -PARAMS_GET_ALL = dict( - config='config.json', - name=None -) +PARAMS_GET_ALL = dict(config="config.json", name=None) -PARAMS_GET_BY_NAME = dict( - config='config.json', - name="Test FC Network" -) +PARAMS_GET_BY_NAME = dict(config="config.json", name="Test FC Network") -PRESENT_NETWORKS = [{ - "name": "Test FC Network", - "uri": "/rest/fc-networks/c6bf9af9-48e7-4236-b08a-77684dc258a5" -}] +PRESENT_NETWORKS = [{"name": "Test FC Network", "uri": "/rest/fc-networks/c6bf9af9-48e7-4236-b08a-77684dc258a5"}] -class FcNetworkInfoSpec(unittest.TestCase, - FactsParamsTestCase): +class FcNetworkInfoSpec(unittest.TestCase, FactsParamsTestCase): def setUp(self): self.configure_mocks(self, FcNetworkInfoModule) self.fc_networks = self.mock_ov_client.fc_networks @@ -39,10 +29,7 @@ 
def test_should_get_all_fc_networks(self): FcNetworkInfoModule().run() - self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - fc_networks=PRESENT_NETWORKS - ) + self.mock_ansible_module.exit_json.assert_called_once_with(changed=False, fc_networks=PRESENT_NETWORKS) def test_should_get_fc_network_by_name(self): self.fc_networks.get_by.return_value = PRESENT_NETWORKS @@ -50,11 +37,8 @@ def test_should_get_fc_network_by_name(self): FcNetworkInfoModule().run() - self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - fc_networks=PRESENT_NETWORKS - ) + self.mock_ansible_module.exit_json.assert_called_once_with(changed=False, fc_networks=PRESENT_NETWORKS) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_oneview_fcoe_network.py b/tests/unit/plugins/modules/test_oneview_fcoe_network.py index 85df7ec12ed..5e7c074b2a4 100644 --- a/tests/unit/plugins/modules/test_oneview_fcoe_network.py +++ b/tests/unit/plugins/modules/test_oneview_fcoe_network.py @@ -9,37 +9,22 @@ from .oneview_module_loader import FcoeNetworkModule from .hpe_test_utils import OneViewBaseTestCase -FAKE_MSG_ERROR = 'Fake message error' +FAKE_MSG_ERROR = "Fake message error" -DEFAULT_FCOE_NETWORK_TEMPLATE = dict( - name='New FCoE Network 2', - vlanId="201", - connectionTemplateUri=None -) +DEFAULT_FCOE_NETWORK_TEMPLATE = dict(name="New FCoE Network 2", vlanId="201", connectionTemplateUri=None) -PARAMS_FOR_PRESENT = dict( - config='config.json', - state='present', - data=dict(name=DEFAULT_FCOE_NETWORK_TEMPLATE['name']) -) +PARAMS_FOR_PRESENT = dict(config="config.json", state="present", data=dict(name=DEFAULT_FCOE_NETWORK_TEMPLATE["name"])) PARAMS_WITH_CHANGES = dict( - config='config.json', - state='present', - data=dict(name=DEFAULT_FCOE_NETWORK_TEMPLATE['name'], - fabricType='DirectAttach', - newName='New Name') + config="config.json", + state="present", + data=dict(name=DEFAULT_FCOE_NETWORK_TEMPLATE["name"], fabricType="DirectAttach", newName="New Name"), ) -PARAMS_FOR_ABSENT = dict( - config='config.json', - state='absent', - data=dict(name=DEFAULT_FCOE_NETWORK_TEMPLATE['name']) -) +PARAMS_FOR_ABSENT = dict(config="config.json", state="absent", data=dict(name=DEFAULT_FCOE_NETWORK_TEMPLATE["name"])) -class FcoeNetworkSpec(unittest.TestCase, - OneViewBaseTestCase): +class FcoeNetworkSpec(unittest.TestCase, OneViewBaseTestCase): """ OneViewBaseTestCase provides the mocks used in this test case """ @@ -59,7 +44,7 @@ def test_should_create_new_fcoe_network(self): self.mock_ansible_module.exit_json.assert_called_once_with( changed=True, msg=FcoeNetworkModule.MSG_CREATED, - ansible_facts=dict(fcoe_network=DEFAULT_FCOE_NETWORK_TEMPLATE) + ansible_facts=dict(fcoe_network=DEFAULT_FCOE_NETWORK_TEMPLATE), ) def test_should_not_update_when_data_is_equals(self): @@ -71,12 +56,12 @@ def test_should_not_update_when_data_is_equals(self): self.mock_ansible_module.exit_json.assert_called_once_with( changed=False, msg=FcoeNetworkModule.MSG_ALREADY_PRESENT, - ansible_facts=dict(fcoe_network=DEFAULT_FCOE_NETWORK_TEMPLATE) + ansible_facts=dict(fcoe_network=DEFAULT_FCOE_NETWORK_TEMPLATE), ) def test_update_when_data_has_modified_attributes(self): data_merged = DEFAULT_FCOE_NETWORK_TEMPLATE.copy() - data_merged['fabricType'] = 'DirectAttach' + data_merged["fabricType"] = "DirectAttach" self.resource.get_by.return_value = [DEFAULT_FCOE_NETWORK_TEMPLATE] self.resource.update.return_value = data_merged @@ -86,9 +71,7 @@ def 
test_update_when_data_has_modified_attributes(self): FcoeNetworkModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( - changed=True, - msg=FcoeNetworkModule.MSG_UPDATED, - ansible_facts=dict(fcoe_network=data_merged) + changed=True, msg=FcoeNetworkModule.MSG_UPDATED, ansible_facts=dict(fcoe_network=data_merged) ) def test_should_remove_fcoe_network(self): @@ -98,10 +81,7 @@ def test_should_remove_fcoe_network(self): FcoeNetworkModule().run() - self.mock_ansible_module.exit_json.assert_called_once_with( - changed=True, - msg=FcoeNetworkModule.MSG_DELETED - ) + self.mock_ansible_module.exit_json.assert_called_once_with(changed=True, msg=FcoeNetworkModule.MSG_DELETED) def test_should_do_nothing_when_fcoe_network_not_exist(self): self.resource.get_by.return_value = [] @@ -111,44 +91,40 @@ def test_should_do_nothing_when_fcoe_network_not_exist(self): FcoeNetworkModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - msg=FcoeNetworkModule.MSG_ALREADY_ABSENT + changed=False, msg=FcoeNetworkModule.MSG_ALREADY_ABSENT ) def test_update_scopes_when_different(self): params_to_scope = PARAMS_FOR_PRESENT.copy() - params_to_scope['data']['scopeUris'] = ['test'] + params_to_scope["data"]["scopeUris"] = ["test"] self.mock_ansible_module.params = params_to_scope resource_data = DEFAULT_FCOE_NETWORK_TEMPLATE.copy() - resource_data['scopeUris'] = ['fake'] - resource_data['uri'] = 'rest/fcoe/fake' + resource_data["scopeUris"] = ["fake"] + resource_data["uri"] = "rest/fcoe/fake" self.resource.get_by.return_value = [resource_data] patch_return = resource_data.copy() - patch_return['scopeUris'] = ['test'] + patch_return["scopeUris"] = ["test"] self.resource.patch.return_value = patch_return FcoeNetworkModule().run() - self.resource.patch.assert_called_once_with('rest/fcoe/fake', - operation='replace', - path='/scopeUris', - value=['test']) + self.resource.patch.assert_called_once_with( + "rest/fcoe/fake", operation="replace", path="/scopeUris", value=["test"] + ) self.mock_ansible_module.exit_json.assert_called_once_with( - changed=True, - ansible_facts=dict(fcoe_network=patch_return), - msg=FcoeNetworkModule.MSG_UPDATED + changed=True, ansible_facts=dict(fcoe_network=patch_return), msg=FcoeNetworkModule.MSG_UPDATED ) def test_should_do_nothing_when_scopes_are_the_same(self): params_to_scope = PARAMS_FOR_PRESENT.copy() - params_to_scope['data']['scopeUris'] = ['test'] + params_to_scope["data"]["scopeUris"] = ["test"] self.mock_ansible_module.params = params_to_scope resource_data = DEFAULT_FCOE_NETWORK_TEMPLATE.copy() - resource_data['scopeUris'] = ['test'] + resource_data["scopeUris"] = ["test"] self.resource.get_by.return_value = [resource_data] FcoeNetworkModule().run() @@ -156,11 +132,9 @@ def test_should_do_nothing_when_scopes_are_the_same(self): self.resource.patch.not_been_called() self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - ansible_facts=dict(fcoe_network=resource_data), - msg=FcoeNetworkModule.MSG_ALREADY_PRESENT + changed=False, ansible_facts=dict(fcoe_network=resource_data), msg=FcoeNetworkModule.MSG_ALREADY_PRESENT ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_oneview_fcoe_network_info.py b/tests/unit/plugins/modules/test_oneview_fcoe_network_info.py index 351f101385d..90358b02c98 100644 --- a/tests/unit/plugins/modules/test_oneview_fcoe_network_info.py +++ b/tests/unit/plugins/modules/test_oneview_fcoe_network_info.py @@ -9,27 +9,16 @@ 
from .oneview_module_loader import FcoeNetworkInfoModule from .hpe_test_utils import FactsParamsTestCase -ERROR_MSG = 'Fake message error' +ERROR_MSG = "Fake message error" -PARAMS_GET_ALL = dict( - config='config.json', - name=None -) +PARAMS_GET_ALL = dict(config="config.json", name=None) -PARAMS_GET_BY_NAME = dict( - config='config.json', - name="Test FCoE Networks" -) +PARAMS_GET_BY_NAME = dict(config="config.json", name="Test FCoE Networks") -PRESENT_NETWORKS = [{ - "name": "Test FCoE Networks", - "uri": "/rest/fcoe-networks/c6bf9af9-48e7-4236-b08a-77684dc258a5" -}] +PRESENT_NETWORKS = [{"name": "Test FCoE Networks", "uri": "/rest/fcoe-networks/c6bf9af9-48e7-4236-b08a-77684dc258a5"}] -class FcoeNetworkInfoSpec(unittest.TestCase, - FactsParamsTestCase - ): +class FcoeNetworkInfoSpec(unittest.TestCase, FactsParamsTestCase): def setUp(self): self.configure_mocks(self, FcoeNetworkInfoModule) self.fcoe_networks = self.mock_ov_client.fcoe_networks @@ -41,10 +30,7 @@ def test_should_get_all_fcoe_network(self): FcoeNetworkInfoModule().run() - self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - fcoe_networks=PRESENT_NETWORKS - ) + self.mock_ansible_module.exit_json.assert_called_once_with(changed=False, fcoe_networks=PRESENT_NETWORKS) def test_should_get_fcoe_network_by_name(self): self.fcoe_networks.get_by.return_value = PRESENT_NETWORKS @@ -52,11 +38,8 @@ def test_should_get_fcoe_network_by_name(self): FcoeNetworkInfoModule().run() - self.mock_ansible_module.exit_json.assert_called_once_with( - changed=False, - fcoe_networks=PRESENT_NETWORKS - ) + self.mock_ansible_module.exit_json.assert_called_once_with(changed=False, fcoe_networks=PRESENT_NETWORKS) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/unit/plugins/modules/test_oneview_logical_interconnect_group.py b/tests/unit/plugins/modules/test_oneview_logical_interconnect_group.py index 08ffa17931f..50a1b067b03 100644 --- a/tests/unit/plugins/modules/test_oneview_logical_interconnect_group.py +++ b/tests/unit/plugins/modules/test_oneview_logical_interconnect_group.py @@ -9,80 +9,59 @@ from unittest import mock from .hpe_test_utils import OneViewBaseTestCase -from ansible_collections.community.general.plugins.modules.oneview_logical_interconnect_group import LogicalInterconnectGroupModule +from ansible_collections.community.general.plugins.modules.oneview_logical_interconnect_group import ( + LogicalInterconnectGroupModule, +) -FAKE_MSG_ERROR = 'Fake message error' +FAKE_MSG_ERROR = "Fake message error" -DEFAULT_LIG_NAME = 'Test Logical Interconnect Group' -RENAMED_LIG = 'Renamed Logical Interconnect Group' +DEFAULT_LIG_NAME = "Test Logical Interconnect Group" +RENAMED_LIG = "Renamed Logical Interconnect Group" DEFAULT_LIG_TEMPLATE = dict( name=DEFAULT_LIG_NAME, uplinkSets=[], - enclosureType='C7000', - interconnectMapTemplate=dict( - interconnectMapEntryTemplates=[] - ) + enclosureType="C7000", + interconnectMapTemplate=dict(interconnectMapEntryTemplates=[]), ) PARAMS_LIG_TEMPLATE_WITH_MAP = dict( - config='config.json', - state='present', + config="config.json", + state="present", data=dict( name=DEFAULT_LIG_NAME, uplinkSets=[], - enclosureType='C7000', + enclosureType="C7000", interconnectMapTemplate=dict( interconnectMapEntryTemplates=[ { "logicalDownlinkUri": None, "logicalLocation": { "locationEntries": [ - { - "relativeValue": "1", - "type": "Bay" - }, - { - "relativeValue": 1, - "type": "Enclosure" - } + {"relativeValue": "1", "type": "Bay"}, + {"relativeValue": 1, 
"type": "Enclosure"}, ] }, - "permittedInterconnectTypeName": "HP VC Flex-10/10D Module" - }] - ) - )) - -PARAMS_FOR_PRESENT = dict( - config='config.json', - state='present', - data=dict(name=DEFAULT_LIG_NAME) + "permittedInterconnectTypeName": "HP VC Flex-10/10D Module", + } + ] + ), + ), ) -PARAMS_TO_RENAME = dict( - config='config.json', - state='present', - data=dict(name=DEFAULT_LIG_NAME, - newName=RENAMED_LIG) -) +PARAMS_FOR_PRESENT = dict(config="config.json", state="present", data=dict(name=DEFAULT_LIG_NAME)) + +PARAMS_TO_RENAME = dict(config="config.json", state="present", data=dict(name=DEFAULT_LIG_NAME, newName=RENAMED_LIG)) PARAMS_WITH_CHANGES = dict( - config='config.json', - state='present', - data=dict(name=DEFAULT_LIG_NAME, - description='It is an example') + config="config.json", state="present", data=dict(name=DEFAULT_LIG_NAME, description="It is an example") ) -PARAMS_FOR_ABSENT = dict( - config='config.json', - state='absent', - data=dict(name=DEFAULT_LIG_NAME) -) +PARAMS_FOR_ABSENT = dict(config="config.json", state="absent", data=dict(name=DEFAULT_LIG_NAME)) -class LogicalInterconnectGroupGeneralSpec(unittest.TestCase, - OneViewBaseTestCase): +class LogicalInterconnectGroupGeneralSpec(unittest.TestCase, OneViewBaseTestCase): def setUp(self): self.configure_mocks(self, LogicalInterconnectGroupModule) self.resource = self.mock_ov_client.logical_interconnect_groups @@ -98,7 +77,7 @@ def test_should_create_new_lig(self): self.mock_ansible_module.exit_json.assert_called_once_with( changed=True, msg=LogicalInterconnectGroupModule.MSG_CREATED, - ansible_facts=dict(logical_interconnect_group=DEFAULT_LIG_TEMPLATE) + ansible_facts=dict(logical_interconnect_group=DEFAULT_LIG_TEMPLATE), ) def test_should_create_new_with_named_permitted_interconnect_type(self): @@ -112,7 +91,7 @@ def test_should_create_new_with_named_permitted_interconnect_type(self): self.mock_ansible_module.exit_json.assert_called_once_with( changed=True, msg=LogicalInterconnectGroupModule.MSG_CREATED, - ansible_facts=dict(logical_interconnect_group=PARAMS_FOR_PRESENT.copy()) + ansible_facts=dict(logical_interconnect_group=PARAMS_FOR_PRESENT.copy()), ) def test_should_fail_when_permitted_interconnect_type_name_not_exists(self): @@ -125,8 +104,8 @@ def test_should_fail_when_permitted_interconnect_type_name_not_exists(self): LogicalInterconnectGroupModule().run() self.mock_ansible_module.fail_json.assert_called_once_with( - exception=mock.ANY, - msg=LogicalInterconnectGroupModule.MSG_INTERCONNECT_TYPE_NOT_FOUND) + exception=mock.ANY, msg=LogicalInterconnectGroupModule.MSG_INTERCONNECT_TYPE_NOT_FOUND + ) def test_should_not_update_when_data_is_equals(self): self.resource.get_by.return_value = [DEFAULT_LIG_TEMPLATE] @@ -138,12 +117,12 @@ def test_should_not_update_when_data_is_equals(self): self.mock_ansible_module.exit_json.assert_called_once_with( changed=False, msg=LogicalInterconnectGroupModule.MSG_ALREADY_PRESENT, - ansible_facts=dict(logical_interconnect_group=DEFAULT_LIG_TEMPLATE) + ansible_facts=dict(logical_interconnect_group=DEFAULT_LIG_TEMPLATE), ) def test_update_when_data_has_modified_attributes(self): data_merged = DEFAULT_LIG_TEMPLATE.copy() - data_merged['description'] = 'New description' + data_merged["description"] = "New description" self.resource.get_by.return_value = [DEFAULT_LIG_TEMPLATE] self.resource.update.return_value = data_merged @@ -155,12 +134,12 @@ def test_update_when_data_has_modified_attributes(self): self.mock_ansible_module.exit_json.assert_called_once_with( changed=True, 
             msg=LogicalInterconnectGroupModule.MSG_UPDATED,
-            ansible_facts=dict(logical_interconnect_group=data_merged)
+            ansible_facts=dict(logical_interconnect_group=data_merged),
         )

     def test_rename_when_resource_exists(self):
         data_merged = DEFAULT_LIG_TEMPLATE.copy()
-        data_merged['name'] = RENAMED_LIG
+        data_merged["name"] = RENAMED_LIG
         params_to_rename = PARAMS_TO_RENAME.copy()

         self.resource.get_by.return_value = [DEFAULT_LIG_TEMPLATE]
@@ -174,7 +153,7 @@ def test_rename_when_resource_exists(self):

     def test_create_with_newName_when_resource_not_exists(self):
         data_merged = DEFAULT_LIG_TEMPLATE.copy()
-        data_merged['name'] = RENAMED_LIG
+        data_merged["name"] = RENAMED_LIG
         params_to_rename = PARAMS_TO_RENAME.copy()

         self.resource.get_by.return_value = []
@@ -184,7 +163,7 @@ def test_create_with_newName_when_resource_not_exists(self):

         LogicalInterconnectGroupModule().run()

-        self.resource.create.assert_called_once_with(PARAMS_TO_RENAME['data'])
+        self.resource.create.assert_called_once_with(PARAMS_TO_RENAME["data"])

     def test_should_remove_lig(self):
         self.resource.get_by.return_value = [DEFAULT_LIG_TEMPLATE]
@@ -194,8 +173,7 @@ def test_should_remove_lig(self):
         LogicalInterconnectGroupModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=True,
-            msg=LogicalInterconnectGroupModule.MSG_DELETED
+            changed=True, msg=LogicalInterconnectGroupModule.MSG_DELETED
         )

     def test_should_do_nothing_when_lig_not_exist(self):
@@ -206,44 +184,42 @@
         LogicalInterconnectGroupModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=False,
-            msg=LogicalInterconnectGroupModule.MSG_ALREADY_ABSENT
+            changed=False, msg=LogicalInterconnectGroupModule.MSG_ALREADY_ABSENT
         )

     def test_update_scopes_when_different(self):
         params_to_scope = PARAMS_FOR_PRESENT.copy()
-        params_to_scope['data']['scopeUris'] = ['test']
+        params_to_scope["data"]["scopeUris"] = ["test"]
         self.mock_ansible_module.params = params_to_scope

         resource_data = DEFAULT_LIG_TEMPLATE.copy()
-        resource_data['scopeUris'] = ['fake']
-        resource_data['uri'] = 'rest/lig/fake'
+        resource_data["scopeUris"] = ["fake"]
+        resource_data["uri"] = "rest/lig/fake"
         self.resource.get_by.return_value = [resource_data]

         patch_return = resource_data.copy()
-        patch_return['scopeUris'] = ['test']
+        patch_return["scopeUris"] = ["test"]
         self.resource.patch.return_value = patch_return

         LogicalInterconnectGroupModule().run()

-        self.resource.patch.assert_called_once_with('rest/lig/fake',
-                                                    operation='replace',
-                                                    path='/scopeUris',
-                                                    value=['test'])
+        self.resource.patch.assert_called_once_with(
+            "rest/lig/fake", operation="replace", path="/scopeUris", value=["test"]
+        )

         self.mock_ansible_module.exit_json.assert_called_once_with(
             changed=True,
             ansible_facts=dict(logical_interconnect_group=patch_return),
-            msg=LogicalInterconnectGroupModule.MSG_UPDATED
+            msg=LogicalInterconnectGroupModule.MSG_UPDATED,
         )

     def test_should_do_nothing_when_scopes_are_the_same(self):
         params_to_scope = PARAMS_FOR_PRESENT.copy()
-        params_to_scope['data']['scopeUris'] = ['test']
+        params_to_scope["data"]["scopeUris"] = ["test"]
         self.mock_ansible_module.params = params_to_scope

         resource_data = DEFAULT_LIG_TEMPLATE.copy()
-        resource_data['scopeUris'] = ['test']
+        resource_data["scopeUris"] = ["test"]
         self.resource.get_by.return_value = [resource_data]

         LogicalInterconnectGroupModule().run()
@@ -253,9 +229,9 @@ def test_should_do_nothing_when_scopes_are_the_same(self):
         self.mock_ansible_module.exit_json.assert_called_once_with(
             changed=False,
             ansible_facts=dict(logical_interconnect_group=resource_data),
-            msg=LogicalInterconnectGroupModule.MSG_ALREADY_PRESENT
+            msg=LogicalInterconnectGroupModule.MSG_ALREADY_PRESENT,
         )


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/unit/plugins/modules/test_oneview_logical_interconnect_group_info.py b/tests/unit/plugins/modules/test_oneview_logical_interconnect_group_info.py
index 1a1f655b216..7ea1ab84d36 100644
--- a/tests/unit/plugins/modules/test_oneview_logical_interconnect_group_info.py
+++ b/tests/unit/plugins/modules/test_oneview_logical_interconnect_group_info.py
@@ -7,26 +7,22 @@
 import unittest

 from .hpe_test_utils import FactsParamsTestCase
 from ansible_collections.community.general.plugins.modules.oneview_logical_interconnect_group_info import (
-    LogicalInterconnectGroupInfoModule
+    LogicalInterconnectGroupInfoModule,
 )

-ERROR_MSG = 'Fake message error'
+ERROR_MSG = "Fake message error"

-PARAMS_GET_ALL = dict(
-    config='config.json',
-    name=None
-)
+PARAMS_GET_ALL = dict(config="config.json", name=None)

-PARAMS_GET_BY_NAME = dict(
-    config='config.json',
-    name="Test Logical Interconnect Group"
-)
+PARAMS_GET_BY_NAME = dict(config="config.json", name="Test Logical Interconnect Group")

-PRESENT_LIGS = [{
-    "name": "Test Logical Interconnect Group",
-    "uri": "/rest/logical-interconnect-groups/ebb4ada8-08df-400e-8fac-9ff987ac5140"
-}]
+PRESENT_LIGS = [
+    {
+        "name": "Test Logical Interconnect Group",
+        "uri": "/rest/logical-interconnect-groups/ebb4ada8-08df-400e-8fac-9ff987ac5140",
+    }
+]


 class LogicalInterconnectGroupInfoSpec(unittest.TestCase, FactsParamsTestCase):
@@ -42,8 +38,7 @@ def test_should_get_all_ligs(self):
         LogicalInterconnectGroupInfoModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=False,
-            logical_interconnect_groups=(PRESENT_LIGS)
+            changed=False, logical_interconnect_groups=(PRESENT_LIGS)
         )

     def test_should_get_lig_by_name(self):
@@ -53,10 +48,9 @@
         LogicalInterconnectGroupInfoModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=False,
-            logical_interconnect_groups=(PRESENT_LIGS)
+            changed=False, logical_interconnect_groups=(PRESENT_LIGS)
         )


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/unit/plugins/modules/test_oneview_network_set.py b/tests/unit/plugins/modules/test_oneview_network_set.py
index 4aeb960751e..acf433a6bb6 100644
--- a/tests/unit/plugins/modules/test_oneview_network_set.py
+++ b/tests/unit/plugins/modules/test_oneview_network_set.py
@@ -9,39 +9,32 @@
 from .hpe_test_utils import OneViewBaseTestCase
 from .oneview_module_loader import NetworkSetModule

-FAKE_MSG_ERROR = 'Fake message error'
+FAKE_MSG_ERROR = "Fake message error"

-NETWORK_SET = dict(
-    name='OneViewSDK Test Network Set',
-    networkUris=['/rest/ethernet-networks/aaa-bbb-ccc']
-)
+NETWORK_SET = dict(name="OneViewSDK Test Network Set", networkUris=["/rest/ethernet-networks/aaa-bbb-ccc"])

-NETWORK_SET_WITH_NEW_NAME = dict(name='OneViewSDK Test Network Set - Renamed')
+NETWORK_SET_WITH_NEW_NAME = dict(name="OneViewSDK Test Network Set - Renamed")

 PARAMS_FOR_PRESENT = dict(
-    config='config.json',
-    state='present',
-    data=dict(name=NETWORK_SET['name'],
-              networkUris=['/rest/ethernet-networks/aaa-bbb-ccc'])
+    config="config.json",
+    state="present",
+    data=dict(name=NETWORK_SET["name"], networkUris=["/rest/ethernet-networks/aaa-bbb-ccc"]),
 )

 PARAMS_WITH_CHANGES = dict(
-    config='config.json',
-    state='present',
-    data=dict(name=NETWORK_SET['name'],
-              newName=f"{NETWORK_SET['name']} - Renamed",
-              networkUris=['/rest/ethernet-networks/aaa-bbb-ccc', 'Name of a Network'])
+    config="config.json",
+    state="present",
+    data=dict(
+        name=NETWORK_SET["name"],
+        newName=f"{NETWORK_SET['name']} - Renamed",
+        networkUris=["/rest/ethernet-networks/aaa-bbb-ccc", "Name of a Network"],
+    ),
 )

-PARAMS_FOR_ABSENT = dict(
-    config='config.json',
-    state='absent',
-    data=dict(name=NETWORK_SET['name'])
-)
+PARAMS_FOR_ABSENT = dict(config="config.json", state="absent", data=dict(name=NETWORK_SET["name"]))


-class NetworkSetModuleSpec(unittest.TestCase,
-                           OneViewBaseTestCase):
+class NetworkSetModuleSpec(unittest.TestCase, OneViewBaseTestCase):
     """
     OneViewBaseTestCase has common tests for class constructor and main function,
     also provides the mocks used in this test case.
@@ -61,9 +54,7 @@ def test_should_create_new_network_set(self):
         NetworkSetModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=True,
-            msg=NetworkSetModule.MSG_CREATED,
-            ansible_facts=dict(network_set=NETWORK_SET)
+            changed=True, msg=NetworkSetModule.MSG_CREATED, ansible_facts=dict(network_set=NETWORK_SET)
         )

     def test_should_not_update_when_data_is_equals(self):
@@ -74,29 +65,25 @@
         NetworkSetModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=False,
-            msg=NetworkSetModule.MSG_ALREADY_PRESENT,
-            ansible_facts=dict(network_set=NETWORK_SET)
+            changed=False, msg=NetworkSetModule.MSG_ALREADY_PRESENT, ansible_facts=dict(network_set=NETWORK_SET)
         )

     def test_update_when_data_has_modified_attributes(self):
-        data_merged = dict(name=f"{NETWORK_SET['name']} - Renamed",
-                           networkUris=['/rest/ethernet-networks/aaa-bbb-ccc',
-                                        '/rest/ethernet-networks/ddd-eee-fff']
-                           )
+        data_merged = dict(
+            name=f"{NETWORK_SET['name']} - Renamed",
+            networkUris=["/rest/ethernet-networks/aaa-bbb-ccc", "/rest/ethernet-networks/ddd-eee-fff"],
+        )

         self.resource.get_by.side_effect = [NETWORK_SET], []
         self.resource.update.return_value = data_merged
-        self.ethernet_network_client.get_by.return_value = [{'uri': '/rest/ethernet-networks/ddd-eee-fff'}]
+        self.ethernet_network_client.get_by.return_value = [{"uri": "/rest/ethernet-networks/ddd-eee-fff"}]

         self.mock_ansible_module.params = PARAMS_WITH_CHANGES

         NetworkSetModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=True,
-            msg=NetworkSetModule.MSG_UPDATED,
-            ansible_facts=dict(network_set=data_merged)
+            changed=True, msg=NetworkSetModule.MSG_UPDATED, ansible_facts=dict(network_set=data_merged)
         )

     def test_should_raise_exception_when_ethernet_network_not_found(self):
@@ -108,8 +95,7 @@
         NetworkSetModule().run()

         self.mock_ansible_module.fail_json.assert_called_once_with(
-            exception=mock.ANY,
-            msg=f"{NetworkSetModule.MSG_ETHERNET_NETWORK_NOT_FOUND}Name of a Network"
+            exception=mock.ANY, msg=f"{NetworkSetModule.MSG_ETHERNET_NETWORK_NOT_FOUND}Name of a Network"
         )

     def test_should_remove_network(self):
@@ -119,10 +105,7 @@

         NetworkSetModule().run()

-        self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=True,
-            msg=NetworkSetModule.MSG_DELETED
-        )
+        self.mock_ansible_module.exit_json.assert_called_once_with(changed=True, msg=NetworkSetModule.MSG_DELETED)

     def test_should_do_nothing_when_network_set_not_exist(self):
         self.resource.get_by.return_value = []
@@ -132,44 +115,40 @@
         NetworkSetModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=False,
-            msg=NetworkSetModule.MSG_ALREADY_ABSENT
+            changed=False, msg=NetworkSetModule.MSG_ALREADY_ABSENT
         )

     def test_update_scopes_when_different(self):
         params_to_scope = PARAMS_FOR_PRESENT.copy()
-        params_to_scope['data']['scopeUris'] = ['test']
+        params_to_scope["data"]["scopeUris"] = ["test"]
         self.mock_ansible_module.params = params_to_scope

         resource_data = NETWORK_SET.copy()
-        resource_data['scopeUris'] = ['fake']
-        resource_data['uri'] = 'rest/network-sets/fake'
+        resource_data["scopeUris"] = ["fake"]
+        resource_data["uri"] = "rest/network-sets/fake"
         self.resource.get_by.return_value = [resource_data]

         patch_return = resource_data.copy()
-        patch_return['scopeUris'] = ['test']
+        patch_return["scopeUris"] = ["test"]
         self.resource.patch.return_value = patch_return

         NetworkSetModule().run()

-        self.resource.patch.assert_called_once_with('rest/network-sets/fake',
-                                                    operation='replace',
-                                                    path='/scopeUris',
-                                                    value=['test'])
+        self.resource.patch.assert_called_once_with(
+            "rest/network-sets/fake", operation="replace", path="/scopeUris", value=["test"]
+        )

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=True,
-            ansible_facts=dict(network_set=patch_return),
-            msg=NetworkSetModule.MSG_UPDATED
+            changed=True, ansible_facts=dict(network_set=patch_return), msg=NetworkSetModule.MSG_UPDATED
         )

     def test_should_do_nothing_when_scopes_are_the_same(self):
         params_to_scope = PARAMS_FOR_PRESENT.copy()
-        params_to_scope['data']['scopeUris'] = ['test']
+        params_to_scope["data"]["scopeUris"] = ["test"]
         self.mock_ansible_module.params = params_to_scope

         resource_data = NETWORK_SET.copy()
-        resource_data['scopeUris'] = ['test']
+        resource_data["scopeUris"] = ["test"]
         self.resource.get_by.return_value = [resource_data]

         NetworkSetModule().run()
@@ -177,11 +156,9 @@ def test_should_do_nothing_when_scopes_are_the_same(self):
         self.resource.patch.not_been_called()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=False,
-            ansible_facts=dict(network_set=resource_data),
-            msg=NetworkSetModule.MSG_ALREADY_PRESENT
+            changed=False, ansible_facts=dict(network_set=resource_data), msg=NetworkSetModule.MSG_ALREADY_PRESENT
         )


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/unit/plugins/modules/test_oneview_network_set_info.py b/tests/unit/plugins/modules/test_oneview_network_set_info.py
index 89ce0f23203..5732c236395 100644
--- a/tests/unit/plugins/modules/test_oneview_network_set_info.py
+++ b/tests/unit/plugins/modules/test_oneview_network_set_info.py
@@ -8,46 +8,31 @@
 from .oneview_module_loader import NetworkSetInfoModule
 from .hpe_test_utils import FactsParamsTestCase

-ERROR_MSG = 'Fake message error'
+ERROR_MSG = "Fake message error"

-PARAMS_GET_ALL = dict(
-    config='config.json',
-    name=None
-)
+PARAMS_GET_ALL = dict(config="config.json", name=None)

-PARAMS_GET_ALL_WITHOUT_ETHERNET = dict(
-    config='config.json',
-    name=None,
-    options=['withoutEthernet']
-)
+PARAMS_GET_ALL_WITHOUT_ETHERNET = dict(config="config.json", name=None, options=["withoutEthernet"])

-PARAMS_GET_BY_NAME = dict(
-    config='config.json',
-    name='Network Set 1'
-)
+PARAMS_GET_BY_NAME = dict(config="config.json", name="Network Set 1")

-PARAMS_GET_BY_NAME_WITHOUT_ETHERNET = dict(
-    config='config.json',
-    name='Network Set 1',
-    options=['withoutEthernet']
-)
+PARAMS_GET_BY_NAME_WITHOUT_ETHERNET = dict(config="config.json", name="Network Set 1", options=["withoutEthernet"])


-class NetworkSetInfoSpec(unittest.TestCase,
-                         FactsParamsTestCase):
+class NetworkSetInfoSpec(unittest.TestCase, FactsParamsTestCase):
     def setUp(self):
         self.configure_mocks(self, NetworkSetInfoModule)
         self.network_sets = self.mock_ov_client.network_sets
         FactsParamsTestCase.configure_client_mock(self, self.network_sets)

     def test_should_get_all_network_sets(self):
-        network_sets = [{
-            "name": "Network Set 1",
-            "networkUris": ['/rest/ethernet-networks/aaa-bbb-ccc']
-        }, {
-            "name": "Network Set 2",
-            "networkUris": ['/rest/ethernet-networks/ddd-eee-fff', '/rest/ethernet-networks/ggg-hhh-fff']
-        }]
+        network_sets = [
+            {"name": "Network Set 1", "networkUris": ["/rest/ethernet-networks/aaa-bbb-ccc"]},
+            {
+                "name": "Network Set 2",
+                "networkUris": ["/rest/ethernet-networks/ddd-eee-fff", "/rest/ethernet-networks/ggg-hhh-fff"],
+            },
+        ]

         self.network_sets.get_all.return_value = network_sets
         self.mock_ansible_module.params = PARAMS_GET_ALL
@@ -56,18 +41,10 @@ def test_should_get_all_network_sets(self):
         self.network_sets.get_all.assert_called_once_with()

-        self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=False,
-            network_sets=network_sets)
+        self.mock_ansible_module.exit_json.assert_called_once_with(changed=False, network_sets=network_sets)

     def test_should_get_all_network_sets_without_ethernet(self):
-        network_sets = [{
-            "name": "Network Set 1",
-            "networkUris": []
-        }, {
-            "name": "Network Set 2",
-            "networkUris": []
-        }]
+        network_sets = [{"name": "Network Set 1", "networkUris": []}, {"name": "Network Set 2", "networkUris": []}]

         self.network_sets.get_all.return_value = network_sets
         self.mock_ansible_module.params = PARAMS_GET_ALL
@@ -76,32 +53,22 @@
         self.network_sets.get_all.assert_called_once_with()

-        self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=False,
-            network_sets=network_sets)
+        self.mock_ansible_module.exit_json.assert_called_once_with(changed=False, network_sets=network_sets)

     def test_should_get_network_set_by_name(self):
-        network_sets = [{
-            "name": "Network Set 1",
-            "networkUris": ['/rest/ethernet-networks/aaa-bbb-ccc']
-        }]
+        network_sets = [{"name": "Network Set 1", "networkUris": ["/rest/ethernet-networks/aaa-bbb-ccc"]}]

         self.network_sets.get_by.return_value = network_sets
         self.mock_ansible_module.params = PARAMS_GET_BY_NAME

         NetworkSetInfoModule().run()

-        self.network_sets.get_by.assert_called_once_with('name', 'Network Set 1')
+        self.network_sets.get_by.assert_called_once_with("name", "Network Set 1")

-        self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=False,
-            network_sets=network_sets)
+        self.mock_ansible_module.exit_json.assert_called_once_with(changed=False, network_sets=network_sets)

     def test_should_get_network_set_by_name_without_ethernet(self):
-        network_sets = [{
-            "name": "Network Set 1",
-            "networkUris": []
-        }]
+        network_sets = [{"name": "Network Set 1", "networkUris": []}]

         self.network_sets.get_all_without_ethernet.return_value = network_sets
         self.mock_ansible_module.params = PARAMS_GET_BY_NAME_WITHOUT_ETHERNET
@@ -111,10 +78,8 @@ def test_should_get_network_set_by_name_without_ethernet(self):
         expected_filter = "\"'name'='Network Set 1'\""
         self.network_sets.get_all_without_ethernet.assert_called_once_with(filter=expected_filter)

-        self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=False,
-            network_sets=network_sets)
+        self.mock_ansible_module.exit_json.assert_called_once_with(changed=False, network_sets=network_sets)


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/unit/plugins/modules/test_oneview_san_manager.py b/tests/unit/plugins/modules/test_oneview_san_manager.py
index cf3ec44b966..5d91db70cb3 100644
--- a/tests/unit/plugins/modules/test_oneview_san_manager.py
+++ b/tests/unit/plugins/modules/test_oneview_san_manager.py
@@ -11,51 +11,40 @@
 from .oneview_module_loader import SanManagerModule
 from .hpe_test_utils import OneViewBaseTestCase

-FAKE_MSG_ERROR = 'Fake message error'
+FAKE_MSG_ERROR = "Fake message error"

 DEFAULT_SAN_MANAGER_TEMPLATE = dict(
-    name='172.18.15.1',
-    providerDisplayName='Brocade Network Advisor',
-    uri='/rest/fc-sans/device-managers/UUU-AAA-BBB',
-    refreshState='OK',
+    name="172.18.15.1",
+    providerDisplayName="Brocade Network Advisor",
+    uri="/rest/fc-sans/device-managers/UUU-AAA-BBB",
+    refreshState="OK",
     connectionInfo=[
         {
-            'valueFormat': 'IPAddressOrHostname',
-            'displayName': 'Host',
-            'name': 'Host',
-            'valueType': 'String',
-            'required': False,
-            'value': '172.18.15.1'
-        }]
+            "valueFormat": "IPAddressOrHostname",
+            "displayName": "Host",
+            "name": "Host",
+            "valueType": "String",
+            "required": False,
+            "value": "172.18.15.1",
+        }
+    ],
 )


-class SanManagerModuleSpec(unittest.TestCase,
-                           OneViewBaseTestCase):
-    PARAMS_FOR_PRESENT = dict(
-        config='config.json',
-        state='present',
-        data=DEFAULT_SAN_MANAGER_TEMPLATE
-    )
+class SanManagerModuleSpec(unittest.TestCase, OneViewBaseTestCase):
+    PARAMS_FOR_PRESENT = dict(config="config.json", state="present", data=DEFAULT_SAN_MANAGER_TEMPLATE)

     PARAMS_FOR_CONNECTION_INFORMATION_SET = dict(
-        config='config.json',
-        state='connection_information_set',
-        data=DEFAULT_SAN_MANAGER_TEMPLATE.copy()
+        config="config.json", state="connection_information_set", data=DEFAULT_SAN_MANAGER_TEMPLATE.copy()
     )

     PARAMS_WITH_CHANGES = dict(
-        config='config.json',
-        state='present',
-        data=dict(name=DEFAULT_SAN_MANAGER_TEMPLATE['name'],
-                  refreshState='RefreshPending')
+        config="config.json",
+        state="present",
+        data=dict(name=DEFAULT_SAN_MANAGER_TEMPLATE["name"], refreshState="RefreshPending"),
     )

-    PARAMS_FOR_ABSENT = dict(
-        config='config.json',
-        state='absent',
-        data=dict(name=DEFAULT_SAN_MANAGER_TEMPLATE['name'])
-    )
+    PARAMS_FOR_ABSENT = dict(config="config.json", state="absent", data=dict(name=DEFAULT_SAN_MANAGER_TEMPLATE["name"]))

     def setUp(self):
         self.configure_mocks(self, SanManagerModule)
@@ -63,7 +52,7 @@ def setUp(self):

     def test_should_add_new_san_manager(self):
         self.resource.get_by_name.return_value = []
-        self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+        self.resource.get_provider_uri.return_value = "/rest/fc-sans/providers/123/device-managers"
         self.resource.add.return_value = DEFAULT_SAN_MANAGER_TEMPLATE

         self.mock_ansible_module.params = self.PARAMS_FOR_PRESENT
@@ -71,45 +60,41 @@
         SanManagerModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=True,
-            msg=SanManagerModule.MSG_CREATED,
-            ansible_facts=dict(san_manager=DEFAULT_SAN_MANAGER_TEMPLATE)
+            changed=True, msg=SanManagerModule.MSG_CREATED, ansible_facts=dict(san_manager=DEFAULT_SAN_MANAGER_TEMPLATE)
         )

     def test_should_find_provider_uri_to_add(self):
         self.resource.get_by_name.return_value = []
-        self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+        self.resource.get_provider_uri.return_value = "/rest/fc-sans/providers/123/device-managers"
         self.resource.add.return_value = DEFAULT_SAN_MANAGER_TEMPLATE

         self.mock_ansible_module.params = self.PARAMS_FOR_PRESENT

         SanManagerModule().run()

-        provider_display_name = DEFAULT_SAN_MANAGER_TEMPLATE['providerDisplayName']
+        provider_display_name = DEFAULT_SAN_MANAGER_TEMPLATE["providerDisplayName"]
         self.resource.get_provider_uri.assert_called_once_with(provider_display_name)

     def test_should_not_update_when_data_is_equals(self):
         output_data = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
-        output_data.pop('connectionInfo')
+        output_data.pop("connectionInfo")

         self.resource.get_by_name.return_value = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
-        self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+        self.resource.get_provider_uri.return_value = "/rest/fc-sans/providers/123/device-managers"

         self.mock_ansible_module.params = self.PARAMS_FOR_PRESENT

         SanManagerModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=False,
-            msg=SanManagerModule.MSG_ALREADY_PRESENT,
-            ansible_facts=dict(san_manager=output_data)
+            changed=False, msg=SanManagerModule.MSG_ALREADY_PRESENT, ansible_facts=dict(san_manager=output_data)
         )

     def test_update_when_data_has_modified_attributes(self):
         data_merged = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
-        data_merged['fabricType'] = 'DirectAttach'
+        data_merged["fabricType"] = "DirectAttach"

         self.resource.get_by_name.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
-        self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+        self.resource.get_provider_uri.return_value = "/rest/fc-sans/providers/123/device-managers"
         self.resource.update.return_value = data_merged

         self.mock_ansible_module.params = self.PARAMS_WITH_CHANGES
@@ -117,39 +102,34 @@
         SanManagerModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=True,
-            msg=SanManagerModule.MSG_UPDATED,
-            ansible_facts=dict(san_manager=data_merged)
+            changed=True, msg=SanManagerModule.MSG_UPDATED, ansible_facts=dict(san_manager=data_merged)
         )

     def test_update_should_not_send_connection_info_when_not_informed_on_data(self):
         merged_data = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
-        merged_data['refreshState'] = 'RefreshPending'
+        merged_data["refreshState"] = "RefreshPending"

         output_data = deepcopy(merged_data)
-        output_data.pop('connectionInfo')
+        output_data.pop("connectionInfo")

         self.resource.get_by_name.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
-        self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+        self.resource.get_provider_uri.return_value = "/rest/fc-sans/providers/123/device-managers"
         self.resource.update.return_value = merged_data

         self.mock_ansible_module.params = self.PARAMS_WITH_CHANGES

         SanManagerModule().run()

-        self.resource.update.assert_called_once_with(resource=output_data, id_or_uri=output_data['uri'])
+        self.resource.update.assert_called_once_with(resource=output_data, id_or_uri=output_data["uri"])

     def test_should_remove_san_manager(self):
         self.resource.get_by_name.return_value = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
-        self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+        self.resource.get_provider_uri.return_value = "/rest/fc-sans/providers/123/device-managers"

         self.mock_ansible_module.params = self.PARAMS_FOR_ABSENT.copy()

         SanManagerModule().run()

-        self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=True,
-            msg=SanManagerModule.MSG_DELETED
-        )
+        self.mock_ansible_module.exit_json.assert_called_once_with(changed=True, msg=SanManagerModule.MSG_DELETED)

     def test_should_do_nothing_when_san_manager_not_exist(self):
         self.resource.get_by_name.return_value = []
@@ -159,8 +139,7 @@
         SanManagerModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=False,
-            msg=SanManagerModule.MSG_ALREADY_ABSENT
+            changed=False, msg=SanManagerModule.MSG_ALREADY_ABSENT
         )

     def test_should_fail_when_name_not_found(self):
@@ -172,14 +151,13 @@
         SanManagerModule().run()

         self.mock_ansible_module.fail_json.assert_called_once_with(
-            exception=mock.ANY,
-            msg="The provider 'Brocade Network Advisor' was not found."
+            exception=mock.ANY, msg="The provider 'Brocade Network Advisor' was not found."
         )

     def test_should_fail_when_name_and_hosts_in_connectionInfo_missing(self):
         bad_params = deepcopy(self.PARAMS_FOR_PRESENT)
-        bad_params['data'].pop('name')
-        bad_params['data'].pop('connectionInfo')
+        bad_params["data"].pop("name")
+        bad_params["data"].pop("connectionInfo")

         self.mock_ansible_module.params = bad_params
@@ -192,10 +170,10 @@

     def test_connection_information_set_should_set_the_connection_information(self):
         data_merged = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
-        data_merged['fabricType'] = 'DirectAttach'
+        data_merged["fabricType"] = "DirectAttach"

         self.resource.get_by_name.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
-        self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+        self.resource.get_provider_uri.return_value = "/rest/fc-sans/providers/123/device-managers"
         self.resource.update.return_value = data_merged

         self.mock_ansible_module.params = self.PARAMS_FOR_CONNECTION_INFORMATION_SET
@@ -203,14 +181,12 @@ def test_connection_information_set_should_set_the_connection_information(self):
         SanManagerModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=True,
-            msg=SanManagerModule.MSG_UPDATED,
-            ansible_facts=dict(san_manager=data_merged)
+            changed=True, msg=SanManagerModule.MSG_UPDATED, ansible_facts=dict(san_manager=data_merged)
         )

     def test_should_add_new_san_manager_when_connection_information_set_called_without_resource(self):
         self.resource.get_by_name.return_value = []
-        self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+        self.resource.get_provider_uri.return_value = "/rest/fc-sans/providers/123/device-managers"
         self.resource.add.return_value = DEFAULT_SAN_MANAGER_TEMPLATE

         self.mock_ansible_module.params = self.PARAMS_FOR_CONNECTION_INFORMATION_SET
@@ -218,27 +194,25 @@ def test_should_add_new_san_manager_when_connection_information_set_called_witho
         SanManagerModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=True,
-            msg=SanManagerModule.MSG_CREATED,
-            ansible_facts=dict(san_manager=DEFAULT_SAN_MANAGER_TEMPLATE)
+            changed=True, msg=SanManagerModule.MSG_CREATED, ansible_facts=dict(san_manager=DEFAULT_SAN_MANAGER_TEMPLATE)
         )

     def test_should_fail_when_required_attribute_missing(self):
         bad_params = deepcopy(self.PARAMS_FOR_CONNECTION_INFORMATION_SET)
-        bad_params['data'] = self.PARAMS_FOR_CONNECTION_INFORMATION_SET['data'].copy()
-        bad_params['data'].pop('connectionInfo')
+        bad_params["data"] = self.PARAMS_FOR_CONNECTION_INFORMATION_SET["data"].copy()
+        bad_params["data"].pop("connectionInfo")

         self.resource.get_by_name.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
-        self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+        self.resource.get_provider_uri.return_value = "/rest/fc-sans/providers/123/device-managers"

         self.mock_ansible_module.params = bad_params

         SanManagerModule().run()

-        msg = 'A connectionInfo field is required for this operation.'
+        msg = "A connectionInfo field is required for this operation."

         self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=msg)


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/unit/plugins/modules/test_oneview_san_manager_info.py b/tests/unit/plugins/modules/test_oneview_san_manager_info.py
index e3287d11ea7..ed15d764d81 100644
--- a/tests/unit/plugins/modules/test_oneview_san_manager_info.py
+++ b/tests/unit/plugins/modules/test_oneview_san_manager_info.py
@@ -10,22 +10,18 @@
 class SanManagerInfoSpec(unittest.TestCase, FactsParamsTestCase):
-    ERROR_MSG = 'Fake message error'
+    ERROR_MSG = "Fake message error"

-    PARAMS_GET_ALL = dict(
-        config='config.json',
-        provider_display_name=None
-    )
+    PARAMS_GET_ALL = dict(config="config.json", provider_display_name=None)

-    PARAMS_GET_BY_PROVIDER_DISPLAY_NAME = dict(
-        config='config.json',
-        provider_display_name="Brocade Network Advisor"
-    )
+    PARAMS_GET_BY_PROVIDER_DISPLAY_NAME = dict(config="config.json", provider_display_name="Brocade Network Advisor")

-    PRESENT_SAN_MANAGERS = [{
-        "providerDisplayName": "Brocade Network Advisor",
-        "uri": "/rest/fc-sans/device-managers//d60efc8a-15b8-470c-8470-738d16d6b319"
-    }]
+    PRESENT_SAN_MANAGERS = [
+        {
+            "providerDisplayName": "Brocade Network Advisor",
+            "uri": "/rest/fc-sans/device-managers//d60efc8a-15b8-470c-8470-738d16d6b319",
+        }
+    ]

     def setUp(self):
         self.configure_mocks(self, SanManagerInfoModule)
@@ -40,8 +36,7 @@ def test_should_get_all(self):
         SanManagerInfoModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=False,
-            san_managers=self.PRESENT_SAN_MANAGERS
+            changed=False, san_managers=self.PRESENT_SAN_MANAGERS
         )

     def test_should_get_by_display_name(self):
@@ -51,8 +46,7 @@
         SanManagerInfoModule().run()

         self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=False,
-            san_managers=self.PRESENT_SAN_MANAGERS
+            changed=False, san_managers=self.PRESENT_SAN_MANAGERS
         )

     def test_should_return_empty_list_when_get_by_display_name_is_null(self):
@@ -61,11 +55,8 @@
         SanManagerInfoModule().run()

-        self.mock_ansible_module.exit_json.assert_called_once_with(
-            changed=False,
-            san_managers=[]
-        )
+        self.mock_ansible_module.exit_json.assert_called_once_with(changed=False, san_managers=[])


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/unit/plugins/modules/test_pacman.py b/tests/unit/plugins/modules/test_pacman.py
index 39e5cc4fa6b..50aa0755568 100644
--- a/tests/unit/plugins/modules/test_pacman.py
+++ b/tests/unit/plugins/modules/test_pacman.py
@@ -44,9 +44,7 @@ def get_bin_path(self, arg, required=False):
         "sed": "4.8-1",
         "sqlite": "3.36.0-1",
     },
-    "installed_groups": {
-        "base-devel": set(["gawk", "grep", "file", "findutils", "pacman", "sed", "gzip", "gettext"])
-    },
+    "installed_groups": {"base-devel": set(["gawk", "grep", "file", "findutils", "pacman", "sed", "gzip", "gettext"])},
     "available_pkgs": {
         "acl": "2.3.1-1",
         "amd-ucode": "20211027.1d00989-1",
@@ -164,9 +162,7 @@ def test_fail(self, mock_empty_inventory):
         with set_module_args({"update_cache": True}):
             P = pacman.Pacman(pacman.setup_module())

-            args = dict(
-                msg="msg", stdout="something", stderr="somethingelse", cmd=["command", "with", "args"], rc=1
-            )
+            args = dict(msg="msg", stdout="something", stderr="somethingelse", cmd=["command", "with", "args"], rc=1)

             with pytest.raises(AnsibleFailJson) as e:
                 P.fail(**args)
@@ -369,32 +365,44 @@ def test_update_db_check(self, mock_empty_inventory):
         (
             {},
             [
-                (["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'a\nb\nc', ''),
-                (["pacman", "--sync", "--refresh"], {'check_rc': False}, 0, 'stdout', 'stderr'),
-                (["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'b\na\nc', ''),
+                (["pacman", "--sync", "--list"], {"check_rc": True}, 0, "a\nb\nc", ""),
+                (["pacman", "--sync", "--refresh"], {"check_rc": False}, 0, "stdout", "stderr"),
+                (["pacman", "--sync", "--list"], {"check_rc": True}, 0, "b\na\nc", ""),
             ],
             False,
         ),
         (
             {"force": True},
             [
-                (["pacman", "--sync", "--refresh", "--refresh"], {'check_rc': False}, 0, 'stdout', 'stderr'),
+                (["pacman", "--sync", "--refresh", "--refresh"], {"check_rc": False}, 0, "stdout", "stderr"),
             ],
             True,
        ),
         (
             {"update_cache_extra_args": "--some-extra args"},  # shlex test
             [
-                (["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'a\nb\nc', ''),
-                (["pacman", "--sync", "--refresh", "--some-extra", "args"], {'check_rc': False}, 0, 'stdout', 'stderr'),
-                (["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'a changed\nb\nc', ''),
+                (["pacman", "--sync", "--list"], {"check_rc": True}, 0, "a\nb\nc", ""),
+                (
+                    ["pacman", "--sync", "--refresh", "--some-extra", "args"],
+                    {"check_rc": False},
+                    0,
+                    "stdout",
+                    "stderr",
+                ),
+                (["pacman", "--sync", "--list"], {"check_rc": True}, 0, "a changed\nb\nc", ""),
             ],
             True,
         ),
         (
             {"force": True, "update_cache_extra_args": "--some-extra args"},
             [
-                (["pacman", "--sync", "--refresh", "--some-extra", "args", "--refresh"], {'check_rc': False}, 0, 'stdout', 'stderr'),
+                (
+                    ["pacman", "--sync", "--refresh", "--some-extra", "args", "--refresh"],
+                    {"check_rc": False},
+                    0,
+                    "stdout",
+                    "stderr",
+                ),
             ],
             True,
         ),
@@ -402,16 +410,16 @@
             # Test whether pacman --sync --list is not called more than twice
             {"upgrade": True},
             [
-                (["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'core foo 1.0.0-1 [installed]', ''),
-                (["pacman", "--sync", "--refresh"], {'check_rc': False}, 0, 'stdout', 'stderr'),
-                (["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'core foo 1.0.0-1 [installed]', ''),
+                (["pacman", "--sync", "--list"], {"check_rc": True}, 0, "core foo 1.0.0-1 [installed]", ""),
+                (["pacman", "--sync", "--refresh"], {"check_rc": False}, 0, "stdout", "stderr"),
+                (["pacman", "--sync", "--list"], {"check_rc": True}, 0, "core foo 1.0.0-1 [installed]", ""),
                 # The following is _build_inventory:
-                (["pacman", "--query"], {'check_rc': True}, 0, 'foo 1.0.0-1', ''),
-                (["pacman", "--query", "--groups"], {'check_rc': True}, 0, '', ''),
-                (["pacman", "--sync", "--groups", "--groups"], {'check_rc': True}, 0, '', ''),
-                (["pacman", "--query", "--upgrades"], {'check_rc': False}, 0, '', ''),
-                (["pacman", "--query", "--explicit"], {'check_rc': True}, 0, 'foo 1.0.0-1', ''),
-                (["pacman", "--query", "--deps"], {'check_rc': True}, 0, '', ''),
+                (["pacman", "--query"], {"check_rc": True}, 0, "foo 1.0.0-1", ""),
+                (["pacman", "--query", "--groups"], {"check_rc": True}, 0, "", ""),
+                (["pacman", "--sync", "--groups", "--groups"], {"check_rc": True}, 0, "", ""),
+                (["pacman", "--query", "--upgrades"], {"check_rc": False}, 0, "", ""),
+                (["pacman", "--query", "--explicit"], {"check_rc": True}, 0, "foo 1.0.0-1", ""),
+                (["pacman", "--query", "--deps"], {"check_rc": True}, 0, "", ""),
             ],
             False,
         ),
@@ -421,7 +429,6 @@ def test_update_db(self, module_args, expected_calls, changed):
         args = {"update_cache": True}
         args.update(module_args)
         with set_module_args(args):
-
             self.mock_run_command.side_effect = [
                 (rc, stdout, stderr) for expected_call, kwargs, rc, stdout, stderr in expected_calls
             ]
@@ -429,9 +436,12 @@
                 P = pacman.Pacman(pacman.setup_module())
                 P.run()

-            self.mock_run_command.assert_has_calls([
-                mock.call(mock.ANY, expected_call, **kwargs) for expected_call, kwargs, rc, stdout, stderr in expected_calls
-            ])
+            self.mock_run_command.assert_has_calls(
+                [
+                    mock.call(mock.ANY, expected_call, **kwargs)
+                    for expected_call, kwargs, rc, stdout, stderr in expected_calls
+                ]
+            )
             out = e.value.args[0]
             assert out["cache_updated"] == changed
             assert out["changed"] == changed
@@ -474,7 +484,6 @@ def test_upgrade(self, mock_valid_inventory, check_mode_value, run_command_data,
         if upgrade_extra_args:
             args["upgrade_extra_args"] = upgrade_extra_args
         with set_module_args(args):
-
             if run_command_data and "return_value" in run_command_data:
                 self.mock_run_command.return_value = run_command_data["return_value"]
@@ -628,9 +637,7 @@ def test_upgrade_fail(self, mock_valid_inventory):
             ),
         ],
     )
-    def test_package_list(
-        self, mock_valid_inventory, state, pkg_names, expected, run_command_data, raises
-    ):
+    def test_package_list(self, mock_valid_inventory, state, pkg_names, expected, run_command_data, raises):
         with set_module_args({"name": pkg_names, "state": state}):
             P = pacman.Pacman(pacman.setup_module())
             P.inventory = P._build_inventory()
diff --git a/tests/unit/plugins/modules/test_pacman_key.py b/tests/unit/plugins/modules/test_pacman_key.py
index 550336c8b75..256e1cbacbb 100644
--- a/tests/unit/plugins/modules/test_pacman_key.py
+++ b/tests/unit/plugins/modules/test_pacman_key.py
@@ -10,11 +10,11 @@
 import json

 # path used for mocking get_bin_path()
-MOCK_BIN_PATH = '/mocked/path'
+MOCK_BIN_PATH = "/mocked/path"

 # Key ID used for tests
-TESTING_KEYID = '14F26682D0916CDD81E37B6D61B7B526D98F0353'
-TESTING_KEYFILE_PATH = '/tmp/pubkey.asc'
+TESTING_KEYID = "14F26682D0916CDD81E37B6D61B7B526D98F0353"
+TESTING_KEYFILE_PATH = "/tmp/pubkey.asc"

 # gpg --{show,list}-key output (key present, but expired)
 GPG_SHOWKEY_OUTPUT_EXPIRED = """
@@ -90,60 +90,60 @@
 # expected command for gpg --list-keys KEYID
 RUN_CMD_LISTKEYS = [
     MOCK_BIN_PATH,
-    '--homedir=/etc/pacman.d/gnupg',
-    '--no-permission-warning',
-    '--with-colons',
-    '--quiet',
-    '--batch',
-    '--no-tty',
-    '--no-default-keyring',
-    '--list-keys',
+    "--homedir=/etc/pacman.d/gnupg",
+    "--no-permission-warning",
+    "--with-colons",
+    "--quiet",
+    "--batch",
+    "--no-tty",
+    "--no-default-keyring",
+    "--list-keys",
     TESTING_KEYID,
 ]

 # expected command for gpg --show-keys KEYFILE
 RUN_CMD_SHOW_KEYFILE = [
     MOCK_BIN_PATH,
-    '--no-permission-warning',
-    '--with-colons',
-    '--quiet',
-    '--batch',
-    '--no-tty',
-    '--with-fingerprint',
-    '--show-keys',
+    "--no-permission-warning",
+    "--with-colons",
+    "--quiet",
+    "--batch",
+    "--no-tty",
+    "--with-fingerprint",
+    "--show-keys",
     TESTING_KEYFILE_PATH,
 ]

 # expected command for pacman-key --lsign-key KEYID
 RUN_CMD_LSIGN_KEY = [
     MOCK_BIN_PATH,
-    '--gpgdir',
-    '/etc/pacman.d/gnupg',
-    '--lsign-key',
+    "--gpgdir",
+    "/etc/pacman.d/gnupg",
+    "--lsign-key",
     TESTING_KEYID,
 ]

 RUN_CMD_LIST_SECRET_KEY = [
     MOCK_BIN_PATH,
-    '--homedir=/etc/pacman.d/gnupg',
-    '--no-permission-warning',
-    '--with-colons',
-    '--quiet',
-    '--batch',
-    '--no-tty',
-    '--list-secret-key',
+    "--homedir=/etc/pacman.d/gnupg",
+    "--no-permission-warning",
+    "--with-colons",
+    "--quiet",
+    "--batch",
+    "--no-tty",
+    "--list-secret-key",
 ]

 # expected command for gpg --check-signatures
 RUN_CMD_CHECK_SIGNATURES = [
     MOCK_BIN_PATH,
-    '--homedir=/etc/pacman.d/gnupg',
-    '--no-permission-warning',
-    '--with-colons',
-    '--quiet',
-    '--batch',
-    '--no-tty',
-    '--check-signatures',
+    "--homedir=/etc/pacman.d/gnupg",
+    "--no-permission-warning",
+    "--with-colons",
+    "--quiet",
+    "--batch",
+    "--no-tty",
+    "--check-signatures",
     TESTING_KEYID,
 ]
@@ -154,60 +154,60 @@
     # state: present, id: absent
     [
         {
-            'state': 'present',
+            "state": "present",
         },
         {
-            'id': 'param_missing_id',
-            'msg': 'missing required arguments: id',
-            'failed': True,
+            "id": "param_missing_id",
+            "msg": "missing required arguments: id",
+            "failed": True,
         },
     ],
     # state: present, required parameters: missing
     [
         {
-            'state': 'present',
-            'id': '0xDOESNTMATTER',
+            "state": "present",
+            "id": "0xDOESNTMATTER",
         },
         {
-            'id': 'param_missing_method',
-            'msg': 'state is present but any of the following are missing: data, file, url, keyserver',
-            'failed': True,
+            "id": "param_missing_method",
+            "msg": "state is present but any of the following are missing: data, file, url, keyserver",
+            "failed": True,
         },
     ],
     # state: present, id: invalid (not full-length)
     [
         {
-            'id': '0xDOESNTMATTER',
-            'data': 'FAKEDATA',
+            "id": "0xDOESNTMATTER",
+            "data": "FAKEDATA",
         },
         {
-            'id': 'param_id_not_full',
-            'msg': 'key ID is not full-length: DOESNTMATTER',
-            'failed': True,
+            "id": "param_id_not_full",
+            "msg": "key ID is not full-length: DOESNTMATTER",
+            "failed": True,
         },
     ],
     # state: present, id: invalid (not hexadecimal)
     [
         {
-            'state': 'present',
-            'id': '01234567890ABCDE01234567890ABCDE1234567M',
-            'data': 'FAKEDATA',
+            "state": "present",
+            "id": "01234567890ABCDE01234567890ABCDE1234567M",
+            "data": "FAKEDATA",
        },
         {
-            'id': 'param_id_not_hex',
-            'msg': 'key ID is not hexadecimal: 01234567890ABCDE01234567890ABCDE1234567M',
-            'failed': True,
+            "id": "param_id_not_hex",
+            "msg": "key ID is not hexadecimal: 01234567890ABCDE01234567890ABCDE1234567M",
+            "failed": True,
         },
     ],
     # state: absent, id: absent
     [
         {
-            'state': 'absent',
+            "state": "absent",
         },
         {
-            'id': 'param_absent_state_missing_id',
-            'msg': 'missing required arguments: id',
-            'failed': True,
+            "id": "param_absent_state_missing_id",
+            "msg": "missing required arguments: id",
+            "failed": True,
         },
     ],
     #
@@ -216,95 +216,95 @@
     # state & key present
     [
         {
-            'state': 'present',
-            'id': TESTING_KEYID,
-            'data': 'FAKEDATA',
-            '_ansible_check_mode': True,
+            "state": "present",
+            "id": TESTING_KEYID,
+            "data": "FAKEDATA",
+            "_ansible_check_mode": True,
         },
         {
-            'id': 'checkmode_state_and_key_present',
-            'run_command.calls': [
+            "id": "checkmode_state_and_key_present",
+            "run_command.calls": [
                 (
                     RUN_CMD_LISTKEYS,
-                    {'check_rc': False},
+                    {"check_rc": False},
                     (
                         0,
                         GPG_SHOWKEY_OUTPUT_EXPIRED,
-                        '',
+                        "",
                     ),
                 ),
             ],
-            'changed': False,
+            "changed": False,
         },
     ],
     # state present, key absent
     [
         {
-            'state': 'present',
-            'id': TESTING_KEYID,
-            'data': 'FAKEDATA',
-            '_ansible_check_mode': True,
+            "state": "present",
+            "id": TESTING_KEYID,
+            "data": "FAKEDATA",
+            "_ansible_check_mode": True,
         },
         {
-            'id': 'checkmode_state_present_key_absent',
-            'run_command.calls': [
+            "id": "checkmode_state_present_key_absent",
+            "run_command.calls": [
                 (
                     RUN_CMD_LISTKEYS,
-                    {'check_rc': False},
+                    {"check_rc": False},
                     (
                         2,
-                        '',
+                        "",
                         GPG_NOKEY_OUTPUT,
                     ),
                 ),
             ],
-            'changed': True,
+            "changed": True,
         },
     ],
     # state & key absent
     [
         {
-            'state': 'absent',
-            'id': TESTING_KEYID,
-            '_ansible_check_mode': True,
+            "state": "absent",
+            "id": TESTING_KEYID,
+            "_ansible_check_mode": True,
         },
         {
-            'id': 'checkmode_state_and_key_absent',
-            'run_command.calls': [
+            "id": "checkmode_state_and_key_absent",
+            "run_command.calls": [
                 (
                     RUN_CMD_LISTKEYS,
-                    {'check_rc': False},
+                    {"check_rc": False},
                     (
                         2,
-                        '',
+                        "",
                         GPG_NOKEY_OUTPUT,
                     ),
                 ),
             ],
-            'changed': False,
+            "changed": False,
         },
     ],
     # state absent, key present
     [
         {
-            'state': 'absent',
-            'id': TESTING_KEYID,
-            '_ansible_check_mode': True,
+            "state": "absent",
+            "id": TESTING_KEYID,
+            "_ansible_check_mode": True,
         },
         {
-            'id': 'check_mode_state_absent_key_present',
-            'run_command.calls': [
+            "id": "check_mode_state_absent_key_present",
+            "run_command.calls": [
                 (
                     RUN_CMD_LISTKEYS,
-                    {'check_rc': False},
+                    {"check_rc": False},
                     (
                         0,
                         GPG_SHOWKEY_OUTPUT_EXPIRED,
-                        '',
+                        "",
                     ),
                 ),
             ],
-            'changed': True,
+            "changed": True,
         },
     ],
     #
@@ -313,74 +313,74 @@
     # state & key present
     [
         {
-            'state': 'present',
-            'id': TESTING_KEYID,
-            'data': 'FAKEDATA',
+            "state": "present",
+            "id": TESTING_KEYID,
+            "data": "FAKEDATA",
         },
         {
-            'id': 'state_and_key_present',
-            'run_command.calls': [
+            "id": "state_and_key_present",
+            "run_command.calls": [
                 (
                     RUN_CMD_LISTKEYS,
-                    {'check_rc': False},
+                    {"check_rc": False},
                     (
                         0,
                         GPG_SHOWKEY_OUTPUT_EXPIRED,
-                        '',
+                        "",
                     ),
                 ),
             ],
-            'changed': False,
+            "changed": False,
         },
     ],
     # state present, ensure_trusted & key expired
     [
         {
-            'state': 'present',
-            'ensure_trusted': True,
-            'id': TESTING_KEYID,
-            'data': 'FAKEDATA',
-            '_ansible_check_mode': True,
+            "state": "present",
+            "ensure_trusted": True,
+            "id": TESTING_KEYID,
+            "data": "FAKEDATA",
+            "_ansible_check_mode": True,
         },
         {
-            'id': 'state_present_trusted_key_expired',
-            'run_command.calls': [
+            "id": "state_present_trusted_key_expired",
+            "run_command.calls": [
                 (
                     RUN_CMD_LISTKEYS,
                     {
-                        'check_rc': False,
+                        "check_rc": False,
                     },
                     (
                         0,
                         GPG_SHOWKEY_OUTPUT_EXPIRED,
-                        '',
+                        "",
                     ),
                 ),
             ],
-            'changed': True,
+            "changed": True,
         },
     ],
     # state present & key trusted
     [
         {
-            'state': 'present',
-            'ensure_trusted': True,
-            'id': TESTING_KEYID,
-            'data': 'FAKEDATA',
-            '_ansible_check_mode': True,
+            "state": "present",
+            "ensure_trusted": True,
+            "id": TESTING_KEYID,
+            "data": "FAKEDATA",
+            "_ansible_check_mode": True,
         },
         {
-            'id': 'state_present_and_key_trusted',
-            'run_command.calls': [
+            "id": "state_present_and_key_trusted",
+            "run_command.calls": [
                 (
                     RUN_CMD_LISTKEYS,
                     {
-                        'check_rc': False,
+                        "check_rc": False,
                     },
                     (
                         0,
                         GPG_SHOWKEY_OUTPUT_TRUSTED,
-                        '',
+                        "",
                     ),
                 ),
                 (
@@ -389,7 +389,7 @@
                     (
                         0,
                         GPG_CHECK_SIGNATURES_OUTPUT,
-                        '',
+                        "",
                     ),
                 ),
                 (
@@ -398,273 +398,273 @@
                     (
                         0,
                         GPG_LIST_SECRET_KEY_OUTPUT,
-                        '',
+                        "",
                     ),
                 ),
             ],
-            'changed': False,
+            "changed": False,
         },
     ],
     # state absent, key present
     [
         {
-            'state': 'absent',
-            'id': TESTING_KEYID,
+            "state": "absent",
+            "id": TESTING_KEYID,
         },
         {
-            'id': 'state_absent_key_present',
-            'run_command.calls': [
+            "id": "state_absent_key_present",
+            "run_command.calls": [
                 (
                     RUN_CMD_LISTKEYS,
-                    {'check_rc': False},
+                    {"check_rc": False},
                     (
                         0,
                         GPG_SHOWKEY_OUTPUT_EXPIRED,
-                        '',
+                        "",
                     ),
                 ),
                 (
                     [
                         MOCK_BIN_PATH,
-                        '--gpgdir',
-                        '/etc/pacman.d/gnupg',
-                        '--delete',
+                        "--gpgdir",
+                        "/etc/pacman.d/gnupg",
+                        "--delete",
                         TESTING_KEYID,
                     ],
-                    {'check_rc': True},
+                    {"check_rc": True},
                     (
                         0,
                         PACMAN_KEY_SUCCESS,
-                        '',
+                        "",
                     ),
                 ),
             ],
-            'changed': True,
+            "changed": True,
         },
     ],
     # state & key absent
     [
         {
-            'state': 'absent',
-            'id': TESTING_KEYID,
+            "state": "absent",
+            "id": TESTING_KEYID,
         },
         {
-            'id': 'state_and_key_absent',
-            'run_command.calls': [
+            "id": "state_and_key_absent",
+            "run_command.calls": [
                 (
                     RUN_CMD_LISTKEYS,
-                    {'check_rc': False},
+                    {"check_rc": False},
                     (
                         2,
-                        '',
+                        "",
                         GPG_NOKEY_OUTPUT,
                     ),
                 ),
             ],
-            'changed': False,
+            "changed": False,
         },
     ],
     # state: present, key: absent, method: file
     [
         {
-            'state': 'present',
-            'id': TESTING_KEYID,
-            'file': TESTING_KEYFILE_PATH,
+            "state": "present",
+            "id": TESTING_KEYID,
+            "file": TESTING_KEYFILE_PATH,
         },
         {
-            'id': 'state_present_key_absent_method_file',
-            'run_command.calls': [
+            "id": "state_present_key_absent_method_file",
+            "run_command.calls": [
                 (
                     RUN_CMD_LISTKEYS,
-                    {'check_rc': False},
+                    {"check_rc": False},
                     (
                         2,
-                        '',
+                        "",
                         GPG_NOKEY_OUTPUT,
                     ),
                 ),
                 (
                     RUN_CMD_SHOW_KEYFILE,
-                    {'check_rc': True},
+                    {"check_rc": True},
                     (
                         0,
                         GPG_SHOWKEY_OUTPUT_EXPIRED,
-                        '',
+                        "",
                     ),
                 ),
                 (
                     [
                         MOCK_BIN_PATH,
-                        '--gpgdir',
-                        '/etc/pacman.d/gnupg',
-                        '--add',
-                        '/tmp/pubkey.asc',
+                        "--gpgdir",
+                        "/etc/pacman.d/gnupg",
+                        "--add",
+                        "/tmp/pubkey.asc",
                     ],
-                    {'check_rc': True},
+                    {"check_rc": True},
                     (
                         0,
                         PACMAN_KEY_SUCCESS,
-                        '',
+                        "",
                     ),
                 ),
                 (
                     RUN_CMD_LSIGN_KEY,
-                    {'check_rc': True},
+                    {"check_rc": True},
                     (
                         0,
                         PACMAN_KEY_SUCCESS,
-                        '',
+                        "",
                     ),
                 ),
             ],
-            'changed': True,
+            "changed": True,
         },
     ],
     # state: present, key: absent, method: file
     # failure: keyid & keyfile don't match
     [
         {
-            'state': 'present',
-            'id': TESTING_KEYID,
-            'file': TESTING_KEYFILE_PATH,
+            "state": "present",
+            "id": TESTING_KEYID,
+            "file": TESTING_KEYFILE_PATH,
         },
         {
-            'id': 'state_present_key_absent_verify_failed',
-            'msg': 'key ID does not match. expected 14F26682D0916CDD81E37B6D61B7B526D98F0353, got 14F26682D0916CDD81E37B6D61B7B526D98F0354',
-            'run_command.calls': [
+            "id": "state_present_key_absent_verify_failed",
+            "msg": "key ID does not match. expected 14F26682D0916CDD81E37B6D61B7B526D98F0353, got 14F26682D0916CDD81E37B6D61B7B526D98F0354",
+            "run_command.calls": [
                 (
                     RUN_CMD_LISTKEYS,
-                    {'check_rc': False},
+                    {"check_rc": False},
                     (
                         2,
-                        '',
+                        "",
                         GPG_NOKEY_OUTPUT,
                     ),
                 ),
                 (
                     RUN_CMD_SHOW_KEYFILE,
-                    {'check_rc': True},
+                    {"check_rc": True},
                     (
                         0,
-                        GPG_SHOWKEY_OUTPUT_EXPIRED.replace('61B7B526D98F0353', '61B7B526D98F0354'),
-                        '',
+                        GPG_SHOWKEY_OUTPUT_EXPIRED.replace("61B7B526D98F0353", "61B7B526D98F0354"),
+                        "",
                     ),
                 ),
             ],
-            'failed': True,
+            "failed": True,
         },
     ],
     # state: present, key: absent, method: keyserver
     [
         {
-            'state': 'present',
-            'id': TESTING_KEYID,
-            'keyserver': 'pgp.mit.edu',
+            "state": "present",
+            "id": TESTING_KEYID,
+            "keyserver": "pgp.mit.edu",
         },
         {
-            'id': 'state_present_key_absent_method_keyserver',
-            'run_command.calls': [
+            "id": "state_present_key_absent_method_keyserver",
+            "run_command.calls": [
                 (
                     RUN_CMD_LISTKEYS,
-                    {'check_rc': False},
+                    {"check_rc": False},
                     (
                         2,
-                        '',
+                        "",
                         GPG_NOKEY_OUTPUT,
                     ),
                 ),
                 (
                     [
                         MOCK_BIN_PATH,
-                        '--gpgdir',
-                        '/etc/pacman.d/gnupg',
-                        '--keyserver',
-                        'pgp.mit.edu',
-                        '--recv-keys',
+                        "--gpgdir",
+                        "/etc/pacman.d/gnupg",
+                        "--keyserver",
+                        "pgp.mit.edu",
+                        "--recv-keys",
                         TESTING_KEYID,
                     ],
-                    {'check_rc': True},
+                    {"check_rc": True},
                     (
                         0,
-                        '''
+                        """
 gpg: key 0x61B7B526D98F0353: 32 signatures not checked due to missing keys
 gpg: key 0x61B7B526D98F0353: public key "Mozilla Software Releases " imported
 gpg: marginals needed: 3 completes needed: 1 trust model: pgp
 gpg: depth: 0 valid: 1 signed: 0 trust: 0-, 0q, 0n, 0m, 0f, 1u
 gpg: Total number processed: 1
 gpg: imported: 1
-''',
-                        '',
+""",
+                        "",
                     ),
                 ),
                 (
                     RUN_CMD_LSIGN_KEY,
-                    {'check_rc': True},
+                    {"check_rc": True},
                     (
                         0,
                         PACMAN_KEY_SUCCESS,
-                        '',
+                        "",
                     ),
                 ),
             ],
-            'changed': True,
+            "changed": True,
         },
     ],
     # state: present, key: absent, method: data
     [
         {
-            'state': 'present',
-            'id': TESTING_KEYID,
-            'data': 'PGP_DATA',
+            "state": "present",
+            "id": TESTING_KEYID,
+            "data": "PGP_DATA",
         },
         {
-            'id': 'state_present_key_absent_method_data',
-            'run_command.calls': [
+            "id": "state_present_key_absent_method_data",
+            "run_command.calls": [
                 (
                     RUN_CMD_LISTKEYS,
-                    {'check_rc': False},
+                    {"check_rc": False},
                     (
                         2,
-                        '',
+                        "",
                         GPG_NOKEY_OUTPUT,
                     ),
                 ),
                 (
                     RUN_CMD_SHOW_KEYFILE,
-                    {'check_rc': True},
+                    {"check_rc": True},
                     (
                         0,
                         GPG_SHOWKEY_OUTPUT_EXPIRED,
-                        '',
+                        "",
                     ),
                 ),
                 (
                     [
                         MOCK_BIN_PATH,
-                        '--gpgdir',
-                        '/etc/pacman.d/gnupg',
-                        '--add',
-                        '/tmp/pubkey.asc',
+                        "--gpgdir",
+                        "/etc/pacman.d/gnupg",
+                        "--add",
+                        "/tmp/pubkey.asc",
                     ],
-                    {'check_rc': True},
+                    {"check_rc": True},
                     (
                         0,
                         PACMAN_KEY_SUCCESS,
-                        '',
+                        "",
                     ),
                 ),
                 (
                     RUN_CMD_LSIGN_KEY,
-                    {'check_rc': True},
+                    {"check_rc": True},
                     (
                         0,
                         PACMAN_KEY_SUCCESS,
-                        '',
+                        "",
                     ),
                 ),
             ],
-            'save_key_output': TESTING_KEYFILE_PATH,
-            'changed': True,
+            "save_key_output": TESTING_KEYFILE_PATH,
+            "changed": True,
         },
     ],
 ]
@@ -674,33 +674,33 @@ def patch_get_bin_path(mocker):
     get_bin_path = mocker.patch.object(
         AnsibleModule,
-        'get_bin_path',
+        "get_bin_path",
         return_value=MOCK_BIN_PATH,
     )


 @pytest.mark.parametrize(
-    'patch_ansible_module, expected',
+    "patch_ansible_module, expected",
     TESTCASES,
-    ids=[item[1]['id'] for item in TESTCASES],  # type: ignore
-    indirect=['patch_ansible_module']
+    ids=[item[1]["id"] for item in TESTCASES],  # type: ignore
+    indirect=["patch_ansible_module"],
 )
-@pytest.mark.usefixtures('patch_ansible_module')
+@pytest.mark.usefixtures("patch_ansible_module")
 def test_operation(mocker, capfd, patch_get_bin_path, expected):
     # patch run_command invocations with mock data
-    if 'run_command.calls' in expected:
+    if "run_command.calls" in expected:
         mock_run_command = mocker.patch.object(
             AnsibleModule,
-            'run_command',
-            side_effect=[item[2] for item in expected['run_command.calls']],
+            "run_command",
+            side_effect=[item[2] for item in expected["run_command.calls"]],
         )

     # patch save_key invocations with mock data
-    if 'save_key_output' in expected:
+    if "save_key_output" in expected:
         mock_save_key = mocker.patch.object(
             pacman_key.PacmanKey,
-            'save_key',
-            return_value=expected['save_key_output'],
+            "save_key",
+            return_value=expected["save_key_output"],
         )

     # invoke module
@@ -712,15 +712,15 @@ def test_operation(mocker, capfd, patch_get_bin_path, expected):
     results = json.loads(out)

     # assertion time!
-    if 'msg' in expected:
-        assert results['msg'] == expected['msg']
-    if 'changed' in expected:
-        assert results['changed'] == expected['changed']
-    if 'failed' in expected:
-        assert results['failed'] == expected['failed']
+    if "msg" in expected:
+        assert results["msg"] == expected["msg"]
+    if "changed" in expected:
+        assert results["changed"] == expected["changed"]
+    if "failed" in expected:
+        assert results["failed"] == expected["failed"]

-    if 'run_command.calls' in expected:
-        assert AnsibleModule.run_command.call_count == len(expected['run_command.calls'])
+    if "run_command.calls" in expected:
+        assert AnsibleModule.run_command.call_count == len(expected["run_command.calls"])
         call_args_list = [(item[0][0], item[1]) for item in AnsibleModule.run_command.call_args_list]
-        expected_call_args_list = [(item[0], item[1]) for item in expected['run_command.calls']]
+        expected_call_args_list = [(item[0], item[1]) for item in expected["run_command.calls"]]
         assert call_args_list == expected_call_args_list
diff --git a/tests/unit/plugins/modules/test_pagerduty.py b/tests/unit/plugins/modules/test_pagerduty.py
index 0fa8ed5c861..cc101b57745 100644
--- a/tests/unit/plugins/modules/test_pagerduty.py
+++ b/tests/unit/plugins/modules/test_pagerduty.py
@@ -12,88 +12,81 @@
 class PagerDutyTest(unittest.TestCase):
     def setUp(self):
-        self.pd = pagerduty.PagerDutyRequest(module=pagerduty, name='name', user='user', token='token')
+        self.pd = pagerduty.PagerDutyRequest(module=pagerduty, name="name", user="user", token="token")

     def _assert_ongoing_maintenance_windows(self, module, url, headers):
-        self.assertEqual('https://api.pagerduty.com/maintenance_windows?filter=ongoing', url)
-        return object(), {'status': 200}
+        self.assertEqual("https://api.pagerduty.com/maintenance_windows?filter=ongoing", url)
+        return object(), {"status": 200}

     def _assert_ongoing_window_with_v1_compatible_header(self, module, url, headers, data=None, method=None):
         self.assertEqual(
-            'application/vnd.pagerduty+json;version=2',
-            headers.get('Accept'),
-            'Accept:application/vnd.pagerduty+json;version=2 HTTP header not found'
+            "application/vnd.pagerduty+json;version=2",
+            headers.get("Accept"),
+            "Accept:application/vnd.pagerduty+json;version=2 HTTP header not found",
         )
-        return object(), {'status': 200}
+        return object(), {"status": 200}

     def _assert_create_a_maintenance_window_url(self, module, url, headers, data=None, method=None):
-        self.assertEqual('https://api.pagerduty.com/maintenance_windows', url)
-        return object(), {'status': 201}
+        self.assertEqual("https://api.pagerduty.com/maintenance_windows", url)
+        return object(), {"status": 201}

     def _assert_create_a_maintenance_window_http_method(self, module, url, headers, data=None, method=None):
-        self.assertEqual('POST', method)
-        return object(), {'status': 201}
+        self.assertEqual("POST", method)
+        return object(), {"status": 201}

     def _assert_create_a_maintenance_window_from_header(self, module, url, headers, data=None, method=None):
-        self.assertEqual(
-            'requester_id',
-            headers.get('From'),
-            'From:requester_id HTTP header not found'
-        )
-        return object(), {'status': 201}
+        self.assertEqual("requester_id", headers.get("From"), "From:requester_id HTTP header not found")
+        return object(), {"status": 201}

     def _assert_create_window_with_v1_compatible_header(self, module, url, headers, data=None, method=None):
         self.assertEqual(
-            'application/vnd.pagerduty+json;version=2',
-            headers.get('Accept'),
-            'Accept:application/vnd.pagerduty+json;version=2 HTTP header not found'
+            "application/vnd.pagerduty+json;version=2",
+            headers.get("Accept"),
+            "Accept:application/vnd.pagerduty+json;version=2 HTTP header not found",
         )
-        return object(), {'status': 201}
+        return object(), {"status": 201}

     def _assert_create_window_payload(self, module, url, headers, data=None, method=None):
         payload = json.loads(data)
-        window_data = payload['maintenance_window']
-        self.assertTrue('start_time' in window_data, '"start_time" is required attribute')
-        self.assertTrue('end_time' in window_data, '"end_time" is required attribute')
-        self.assertTrue('services' in window_data, '"services" is required attribute')
-        return object(), {'status': 201}
+        window_data = payload["maintenance_window"]
+        self.assertTrue("start_time" in window_data, '"start_time" is required attribute')
+        self.assertTrue("end_time" in window_data, '"end_time" is required attribute')
+        self.assertTrue("services" in window_data, '"services" is required attribute')
+        return object(), {"status": 201}

     def _assert_create_window_single_service(self, module, url, headers, data=None, method=None):
         payload = json.loads(data)
-        window_data = payload['maintenance_window']
-        services = window_data['services']
-        self.assertEqual(
-            [{'id': 'service_id', 'type': 'service_reference'}],
-            services
-        )
-        return object(), {'status': 201}
+        window_data = payload["maintenance_window"]
+        services = window_data["services"]
+        self.assertEqual([{"id": "service_id", "type": "service_reference"}], services)
+        return object(), {"status": 201}

     def _assert_create_window_multiple_service(self, module, url, headers, data=None, method=None):
         payload = json.loads(data)
-        window_data = payload['maintenance_window']
-        services = window_data['services']
+        window_data = payload["maintenance_window"]
+        services = window_data["services"]
         print(services)
         self.assertEqual(
             [
-                {'id': 'service_id_1', 'type': 'service_reference'},
-                {'id': 'service_id_2', 'type': 'service_reference'},
-                {'id': 'service_id_3', 'type': 'service_reference'},
+                {"id": "service_id_1", "type": "service_reference"},
+                {"id": "service_id_2", "type": "service_reference"},
+                {"id": "service_id_3", "type": "service_reference"},
             ],
-            services
+            services,
         )
-        return object(), {'status': 201}
+        return object(), {"status": 201}

     def _assert_absent_maintenance_window_url(self, module, url, headers, method=None):
-        self.assertEqual('https://api.pagerduty.com/maintenance_windows/window_id', url)
-        return object(), {'status': 204}
+        self.assertEqual("https://api.pagerduty.com/maintenance_windows/window_id", url)
+        return object(), {"status": 204}

     def _assert_absent_window_with_v1_compatible_header(self, module, url, headers, method=None):
         self.assertEqual(
-            'application/vnd.pagerduty+json;version=2',
-            headers.get('Accept'),
-            'Accept:application/vnd.pagerduty+json;version=2 HTTP header not found'
+            "application/vnd.pagerduty+json;version=2",
+            headers.get("Accept"),
+            "Accept:application/vnd.pagerduty+json;version=2 HTTP header not found",
        )
-        return object(), {'status': 204}
+        return object(), {"status": 204}

     def test_ongoing_maintenance_windos_url(self):
         self.pd.ongoing(http_call=self._assert_ongoing_maintenance_windows)
@@ -102,28 +95,41 @@ def test_ongoing_maintenance_windos_compatibility_header(self):
         self.pd.ongoing(http_call=self._assert_ongoing_window_with_v1_compatible_header)

     def test_create_maintenance_window_url(self):
-        self.pd.create('requester_id', 'service', 1, 0, 'desc', http_call=self._assert_create_a_maintenance_window_url)
+        self.pd.create("requester_id", "service", 1, 0, "desc", http_call=self._assert_create_a_maintenance_window_url)

     def test_create_maintenance_window_http_method(self):
-        self.pd.create('requester_id', 'service', 1, 0, 'desc', http_call=self._assert_create_a_maintenance_window_http_method)
+        self.pd.create(
+            "requester_id", "service", 1, 0, "desc", http_call=self._assert_create_a_maintenance_window_http_method
+        )

     def test_create_maintenance_from_header(self):
-        self.pd.create('requester_id', 'service', 1, 0, 'desc', http_call=self._assert_create_a_maintenance_window_from_header)
+        self.pd.create(
+            "requester_id", "service", 1, 0, "desc", http_call=self._assert_create_a_maintenance_window_from_header
+        )

     def test_create_maintenance_compatibility_header(self):
-        self.pd.create('requester_id', 'service', 1, 0, 'desc', http_call=self._assert_create_window_with_v1_compatible_header)
+        self.pd.create(
+            "requester_id", "service", 1, 0, "desc", http_call=self._assert_create_window_with_v1_compatible_header
+        )

     def test_create_maintenance_request_payload(self):
-        self.pd.create('requester_id', 'service', 1, 0, 'desc', http_call=self._assert_create_window_payload)
+        self.pd.create("requester_id", "service", 1, 0, "desc", http_call=self._assert_create_window_payload)

     def test_create_maintenance_for_single_service(self):
-        self.pd.create('requester_id', 'service_id', 1, 0, 'desc', http_call=self._assert_create_window_single_service)
+        self.pd.create("requester_id", "service_id", 1, 0, "desc", http_call=self._assert_create_window_single_service)

     def test_create_maintenance_for_multiple_services(self):
-        self.pd.create('requester_id', ['service_id_1', 'service_id_2', 'service_id_3'], 1, 0, 'desc', http_call=self._assert_create_window_multiple_service)
+        self.pd.create(
+            "requester_id",
+            ["service_id_1", "service_id_2", "service_id_3"],
+            1,
+            0,
+            "desc",
+            http_call=self._assert_create_window_multiple_service,
+        )

     def test_absent_maintenance_window_url(self):
-        self.pd.absent('window_id', http_call=self._assert_absent_maintenance_window_url)
+        self.pd.absent("window_id", http_call=self._assert_absent_maintenance_window_url)

     def test_absent_maintenance_compatibility_header(self):
-        self.pd.absent('window_id', http_call=self._assert_absent_window_with_v1_compatible_header)
+        self.pd.absent("window_id", http_call=self._assert_absent_window_with_v1_compatible_header)
diff --git a/tests/unit/plugins/modules/test_pagerduty_alert.py b/tests/unit/plugins/modules/test_pagerduty_alert.py
index 900150cd409..2724e81b510 100644
--- a/tests/unit/plugins/modules/test_pagerduty_alert.py
+++ b/tests/unit/plugins/modules/test_pagerduty_alert.py
@@ -11,38 +11,60 @@
 import pytest

 from ansible_collections.community.general.plugins.modules import pagerduty_alert
-from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
+    AnsibleExitJson,
+    AnsibleFailJson,
+    ModuleTestCase,
+    set_module_args,
+)


 class PagerDutyAlertsTest(unittest.TestCase):
     def _assert_incident_api(self, module, url, method, headers):
-        self.assertTrue('https://api.pagerduty.com/incidents' in url, 'url must contain REST API v2 network path')
-        self.assertTrue('service_ids%5B%5D=service_id' in url, 'url must contain service id to filter incidents')
-        self.assertTrue('sort_by=incident_number%3Adesc' in url, 'url should contain sorting parameter')
-        self.assertTrue('time_zone=UTC' in url, 'url should contain time zone parameter')
-        return Response(), {'status': 200}
+        self.assertTrue("https://api.pagerduty.com/incidents" in url, "url must contain REST API v2 network path")
+        self.assertTrue("service_ids%5B%5D=service_id" in url, "url must contain service id to filter incidents")
+        self.assertTrue("sort_by=incident_number%3Adesc" in url, "url should contain sorting parameter")
+        self.assertTrue("time_zone=UTC" in url, "url should contain time zone parameter")
+        return Response(), {"status": 200}

     def _assert_compatibility_header(self, module, url, method, headers):
         self.assertEqual(
-            'application/vnd.pagerduty+json;version=2',
-            headers.get('Accept'),
-            'Accept:application/vnd.pagerduty+json;version=2 HTTP header not found'
+            "application/vnd.pagerduty+json;version=2",
+            headers.get("Accept"),
+            "Accept:application/vnd.pagerduty+json;version=2 HTTP header not found",
         )
-        return Response(), {'status': 200}
+        return Response(), {"status": 200}

     def _assert_incident_key(self, module, url, method, headers):
-        self.assertTrue('incident_key=incident_key_value' in url, 'url must contain incident key')
-        return Response(), {'status': 200}
+        self.assertTrue("incident_key=incident_key_value" in url, "url must contain incident key")
+        return Response(), {"status": 200}

     def test_incident_url(self):
-        pagerduty_alert.check(None, 'name', 'state', 'service_id', 'integration_key', 'api_key', http_call=self._assert_incident_api)
+        pagerduty_alert.check(
+            None, "name", "state", "service_id", "integration_key", "api_key", http_call=self._assert_incident_api
+        )

     def test_compatibility_header(self):
-        pagerduty_alert.check(None, 'name', 'state', 'service_id', 'integration_key', 'api_key', http_call=self._assert_compatibility_header)
+        pagerduty_alert.check(
+            None,
+            "name",
+            "state",
+            "service_id",
+            "integration_key",
+            "api_key",
+            http_call=self._assert_compatibility_header,
+        )

     def test_incident_key_in_url_when_it_is_given(self):
         pagerduty_alert.check(
-            None, 'name', 'state', 'service_id', 'integration_key', 'api_key', incident_key='incident_key_value', http_call=self._assert_incident_key
+            None,
+            "name",
+            "state",
+            "service_id",
+            "integration_key",
+            "api_key",
+            incident_key="incident_key_value",
+            http_call=self._assert_incident_key,
         )
@@ -61,7 +83,7 @@ def tearDown(self):

     @pytest.fixture
     def fetch_url_mock(self, mocker):
-        return mocker.patch('ansible.module_utils.monitoring.pagerduty_change.fetch_url')
+        return mocker.patch("ansible.module_utils.monitoring.pagerduty_change.fetch_url")

     def test_module_fail_when_required_args_missing(self):
         with self.assertRaises(AnsibleFailJson):
@@ -69,86 +91,89 @@
             self.module.main()

     def test_ensure_alert_created_with_minimal_data(self):
-        with set_module_args({
-            'state': 'triggered',
-            'api_version': 'v2',
-            'integration_key': 'test',
-            'source': 'My Ansible Script',
-            'desc': 'Description for alert'
-        }):
-
-            with patch.object(pagerduty_alert, 'fetch_url') as fetch_url_mock:
+        with set_module_args(
+            {
+                "state": "triggered",
+                "api_version": "v2",
+                "integration_key": "test",
+                "source": "My Ansible Script",
+                "desc": "Description for alert",
+            }
+        ):
+            with patch.object(pagerduty_alert, "fetch_url") as fetch_url_mock:
                 fetch_url_mock.return_value = (Response(), {"status": 202})
                 with self.assertRaises(AnsibleExitJson):
                     self.module.main()

                 assert fetch_url_mock.call_count == 1
                 url = fetch_url_mock.call_args[0][1]
-                json_data = fetch_url_mock.call_args[1]['data']
+                json_data = fetch_url_mock.call_args[1]["data"]
                 data = json.loads(json_data)

-                assert url == 'https://events.pagerduty.com/v2/enqueue'
-                assert data['routing_key'] == 'test'
-                assert data['event_action'] == 'trigger'
-                assert data['payload']['summary'] == 'Description for alert'
-                assert data['payload']['source'] == 'My Ansible Script'
-                assert data['payload']['severity'] == 'critical'
-                assert data['payload']['timestamp'] is not None
+                assert url == "https://events.pagerduty.com/v2/enqueue"
+                assert data["routing_key"] == "test"
+                assert data["event_action"] == "trigger"
+                assert data["payload"]["summary"] == "Description for alert"
+                assert data["payload"]["source"] == "My Ansible Script"
+                assert data["payload"]["severity"] == "critical"
+                assert data["payload"]["timestamp"] is not None

     def test_ensure_alert_created_with_full_data(self):
-        with set_module_args({
-            'api_version': 'v2',
-            'component': 'mysql',
-            'custom_details': {'environment': 'production', 'notes': 'this is a test note'},
-            'desc': 'Description for alert',
-            'incident_class': 'ping failure',
-            'integration_key': 'test',
-            'link_url': 'https://pagerduty.com',
-            'link_text': 'PagerDuty',
-            'state': 'triggered',
-            'source': 'My Ansible Script',
-        }):
-
-            with patch.object(pagerduty_alert, 'fetch_url') as fetch_url_mock:
+        with set_module_args(
+            {
+                "api_version": "v2",
+                "component": "mysql",
+                "custom_details": {"environment": "production", "notes": "this is a test note"},
+                "desc": "Description for alert",
+                "incident_class": "ping failure",
+                "integration_key": "test",
+                "link_url": "https://pagerduty.com",
+                "link_text": "PagerDuty",
+                "state": "triggered",
+                "source": "My Ansible Script",
+            }
+        ):
+            with patch.object(pagerduty_alert, "fetch_url") as fetch_url_mock:
                 fetch_url_mock.return_value = (Response(), {"status": 202})
                 with self.assertRaises(AnsibleExitJson):
                     self.module.main()

                 assert fetch_url_mock.call_count == 1
                 url = fetch_url_mock.call_args[0][1]
-                json_data = fetch_url_mock.call_args[1]['data']
+                json_data = fetch_url_mock.call_args[1]["data"]
                 data = json.loads(json_data)

-                assert url == 'https://events.pagerduty.com/v2/enqueue'
-                assert data['routing_key'] == 'test'
-                assert data['payload']['summary'] == 'Description for alert'
-                assert data['payload']['source'] == 'My Ansible Script'
-                assert data['payload']['class'] == 'ping failure'
-                assert data['payload']['component'] == 'mysql'
-                assert data['payload']['custom_details']['environment'] == 'production'
-                assert data['payload']['custom_details']['notes'] == 'this is a test note'
-                assert data['links'][0]['href'] == 'https://pagerduty.com'
-                assert data['links'][0]['text'] == 'PagerDuty'
+                assert url == "https://events.pagerduty.com/v2/enqueue"
+                assert data["routing_key"] == "test"
+                assert data["payload"]["summary"] == "Description for alert"
+                assert data["payload"]["source"] == "My Ansible Script"
+                assert data["payload"]["class"] == "ping failure"
+                assert data["payload"]["component"] == "mysql"
+                assert data["payload"]["custom_details"]["environment"] == "production"
+                assert data["payload"]["custom_details"]["notes"] == "this is a test note"
+                assert data["links"][0]["href"] == "https://pagerduty.com"
+                assert data["links"][0]["text"] == "PagerDuty"

     def test_ensure_alert_acknowledged(self):
-        with set_module_args({
-            'state': 'acknowledged',
-            'api_version': 'v2',
-            'integration_key': 'test',
-            'incident_key': 'incident_test_id',
-        }):
-
-            with patch.object(pagerduty_alert, 'fetch_url') as fetch_url_mock:
+        with set_module_args(
+            {
+                "state": "acknowledged",
+                "api_version": "v2",
+                "integration_key": "test",
+                "incident_key": "incident_test_id",
+            }
+        ):
+            with patch.object(pagerduty_alert, "fetch_url") as fetch_url_mock:
                 fetch_url_mock.return_value = (Response(), {"status": 202})
                 with self.assertRaises(AnsibleExitJson):
                     self.module.main()

                 assert fetch_url_mock.call_count == 1
                 url = fetch_url_mock.call_args[0][1]
-                json_data = fetch_url_mock.call_args[1]['data']
+                json_data = fetch_url_mock.call_args[1]["data"]
                 data = json.loads(json_data)

-                assert url == 'https://events.pagerduty.com/v2/enqueue'
-                assert data['routing_key'] == 'test'
-                assert data['event_action'] == 'acknowledge'
-                assert data['dedup_key'] == 'incident_test_id'
+                assert url == "https://events.pagerduty.com/v2/enqueue"
+                assert data["routing_key"] == "test"
+                assert data["event_action"] == "acknowledge"
+                assert data["dedup_key"] == "incident_test_id"
diff --git a/tests/unit/plugins/modules/test_pagerduty_change.py b/tests/unit/plugins/modules/test_pagerduty_change.py
index 960c331e683..264a947317c 100644
--- a/tests/unit/plugins/modules/test_pagerduty_change.py
+++ b/tests/unit/plugins/modules/test_pagerduty_change.py
@@ -10,7 +10,12 @@
 import pytest

 from ansible_collections.community.general.plugins.modules import pagerduty_change
-from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
+    AnsibleExitJson,
+    AnsibleFailJson,
+    ModuleTestCase,
+    set_module_args,
+)


 class TestPagerDutyChangeModule(ModuleTestCase):
@@ -23,7 +28,7 @@ def tearDown(self):

     @pytest.fixture
     def fetch_url_mock(self, mocker):
-        return mocker.patch('ansible.module_utils.monitoring.pagerduty_change.fetch_url')
+        return mocker.patch("ansible.module_utils.monitoring.pagerduty_change.fetch_url")

     def test_module_fail_when_required_args_missing(self):
         with self.assertRaises(AnsibleFailJson):
@@ -31,56 +36,53 @@
             self.module.main()

     def test_ensure_change_event_created_with_minimal_data(self):
-        with set_module_args({
-            'integration_key': 'test',
-            'summary': 'Testing'
-        }):
-
-            with patch.object(pagerduty_change, 'fetch_url') as fetch_url_mock:
+        with set_module_args({"integration_key": "test", "summary": "Testing"}):
+            with patch.object(pagerduty_change, "fetch_url") as fetch_url_mock:
                 fetch_url_mock.return_value = (None, {"status": 202})
                 with self.assertRaises(AnsibleExitJson):
                     self.module.main()

                 assert fetch_url_mock.call_count == 1
                 url = fetch_url_mock.call_args[0][1]
-                json_data = fetch_url_mock.call_args[1]['data']
+                json_data = fetch_url_mock.call_args[1]["data"]
                 data = json.loads(json_data)

-                assert url ==
'https://events.pagerduty.com/v2/change/enqueue' - assert data['routing_key'] == 'test' - assert data['payload']['summary'] == 'Testing' - assert data['payload']['source'] == 'Ansible' + assert url == "https://events.pagerduty.com/v2/change/enqueue" + assert data["routing_key"] == "test" + assert data["payload"]["summary"] == "Testing" + assert data["payload"]["source"] == "Ansible" def test_ensure_change_event_created_with_full_data(self): - with set_module_args({ - 'integration_key': 'test', - 'summary': 'Testing', - 'source': 'My Ansible Script', - 'user': 'ansible', - 'repo': 'github.com/ansible/ansible', - 'revision': '8c67432', - 'environment': 'production', - 'link_url': 'https://pagerduty.com', - 'link_text': 'PagerDuty' - }): - - with patch.object(pagerduty_change, 'fetch_url') as fetch_url_mock: + with set_module_args( + { + "integration_key": "test", + "summary": "Testing", + "source": "My Ansible Script", + "user": "ansible", + "repo": "github.com/ansible/ansible", + "revision": "8c67432", + "environment": "production", + "link_url": "https://pagerduty.com", + "link_text": "PagerDuty", + } + ): + with patch.object(pagerduty_change, "fetch_url") as fetch_url_mock: fetch_url_mock.return_value = (None, {"status": 202}) with self.assertRaises(AnsibleExitJson): self.module.main() assert fetch_url_mock.call_count == 1 url = fetch_url_mock.call_args[0][1] - json_data = fetch_url_mock.call_args[1]['data'] + json_data = fetch_url_mock.call_args[1]["data"] data = json.loads(json_data) - assert url == 'https://events.pagerduty.com/v2/change/enqueue' - assert data['routing_key'] == 'test' - assert data['payload']['summary'] == 'Testing' - assert data['payload']['source'] == 'My Ansible Script' - assert data['payload']['custom_details']['user'] == 'ansible' - assert data['payload']['custom_details']['repo'] == 'github.com/ansible/ansible' - assert data['payload']['custom_details']['revision'] == '8c67432' - assert data['payload']['custom_details']['environment'] == 'production' - assert data['links'][0]['href'] == 'https://pagerduty.com' - assert data['links'][0]['text'] == 'PagerDuty' + assert url == "https://events.pagerduty.com/v2/change/enqueue" + assert data["routing_key"] == "test" + assert data["payload"]["summary"] == "Testing" + assert data["payload"]["source"] == "My Ansible Script" + assert data["payload"]["custom_details"]["user"] == "ansible" + assert data["payload"]["custom_details"]["repo"] == "github.com/ansible/ansible" + assert data["payload"]["custom_details"]["revision"] == "8c67432" + assert data["payload"]["custom_details"]["environment"] == "production" + assert data["links"][0]["href"] == "https://pagerduty.com" + assert data["links"][0]["text"] == "PagerDuty" diff --git a/tests/unit/plugins/modules/test_pamd.py b/tests/unit/plugins/modules/test_pamd.py index 18a56b68547..3400a5baaba 100644 --- a/tests/unit/plugins/modules/test_pamd.py +++ b/tests/unit/plugins/modules/test_pamd.py @@ -14,7 +14,6 @@ class PamdLineTestCase(unittest.TestCase): - def setUp(self): self.pamd_line = PamdLine("This is a test") @@ -26,7 +25,6 @@ def test_matches(self): class PamdIncludeTestCase(unittest.TestCase): - def setUp(self): self.good_include = PamdInclude("@include foobar") self.bad_include = PamdInclude("include foobar") @@ -43,7 +41,6 @@ def test_valid(self): class PamdCommentTestCase(unittest.TestCase): - def setUp(self): self.good_comment = PamdComment("# This is a test comment") self.bad_comment = PamdComment("This is a bad test comment") @@ -61,20 +58,20 @@ def 
test_valid(self): class PamdRuleTestCase(unittest.TestCase): def setUp(self): - self.rule = PamdRule('account', 'optional', 'pam_keyinit.so', 'revoke') + self.rule = PamdRule("account", "optional", "pam_keyinit.so", "revoke") def test_type(self): - self.assertEqual(self.rule.rule_type, 'account') + self.assertEqual(self.rule.rule_type, "account") def test_control(self): - self.assertEqual(self.rule.rule_control, 'optional') - self.assertEqual(self.rule._control, 'optional') + self.assertEqual(self.rule.rule_control, "optional") + self.assertEqual(self.rule._control, "optional") def test_path(self): - self.assertEqual(self.rule.rule_path, 'pam_keyinit.so') + self.assertEqual(self.rule.rule_path, "pam_keyinit.so") def test_args(self): - self.assertEqual(self.rule.rule_args, ['revoke']) + self.assertEqual(self.rule.rule_args, ["revoke"]) def test_valid(self): self.assertTrue(self.rule.validate()[0]) @@ -82,10 +79,10 @@ def test_valid(self): class PamdRuleBadValidationTestCase(unittest.TestCase): def setUp(self): - self.bad_type = PamdRule('foobar', 'optional', 'pam_keyinit.so', 'revoke') - self.bad_control_simple = PamdRule('account', 'foobar', 'pam_keyinit.so', 'revoke') - self.bad_control_value = PamdRule('account', '[foobar=1 default=ignore]', 'pam_keyinit.so', 'revoke') - self.bad_control_action = PamdRule('account', '[success=1 default=foobar]', 'pam_keyinit.so', 'revoke') + self.bad_type = PamdRule("foobar", "optional", "pam_keyinit.so", "revoke") + self.bad_control_simple = PamdRule("account", "foobar", "pam_keyinit.so", "revoke") + self.bad_control_value = PamdRule("account", "[foobar=1 default=ignore]", "pam_keyinit.so", "revoke") + self.bad_control_action = PamdRule("account", "[success=1 default=foobar]", "pam_keyinit.so", "revoke") def test_validate_bad_type(self): self.assertFalse(self.bad_type.validate()[0]) @@ -153,114 +150,142 @@ def test_properly_parsed(self): self.assertEqual(num_lines, num_lines_processed) def test_has_rule(self): - self.assertTrue(self.pamd.has_rule('account', 'required', 'pam_permit.so')) - self.assertTrue(self.pamd.has_rule('account', '[success=1 default=ignore]', 'pam_succeed_if.so')) + self.assertTrue(self.pamd.has_rule("account", "required", "pam_permit.so")) + self.assertTrue(self.pamd.has_rule("account", "[success=1 default=ignore]", "pam_succeed_if.so")) def test_doesnt_have_rule(self): - self.assertFalse(self.pamd.has_rule('account', 'requisite', 'pam_permit.so')) + self.assertFalse(self.pamd.has_rule("account", "requisite", "pam_permit.so")) # Test Update def test_update_rule_type(self): - self.assertTrue(self.pamd.update_rule('session', 'optional', 'pam_keyinit.so', new_type='account')) - self.assertTrue(self.pamd.has_rule('account', 'optional', 'pam_keyinit.so')) - test_rule = PamdRule('account', 'optional', 'pam_keyinit.so', 'revoke') + self.assertTrue(self.pamd.update_rule("session", "optional", "pam_keyinit.so", new_type="account")) + self.assertTrue(self.pamd.has_rule("account", "optional", "pam_keyinit.so")) + test_rule = PamdRule("account", "optional", "pam_keyinit.so", "revoke") self.assertIn(str(test_rule), str(self.pamd)) def test_update_rule_that_doesnt_exist(self): - self.assertFalse(self.pamd.update_rule('blah', 'blah', 'blah', new_type='account')) - self.assertFalse(self.pamd.has_rule('blah', 'blah', 'blah')) - test_rule = PamdRule('blah', 'blah', 'blah', 'account') + self.assertFalse(self.pamd.update_rule("blah", "blah", "blah", new_type="account")) + self.assertFalse(self.pamd.has_rule("blah", "blah", "blah")) + test_rule = 
PamdRule("blah", "blah", "blah", "account") self.assertNotIn(str(test_rule), str(self.pamd)) def test_update_rule_type_two(self): - self.assertTrue(self.pamd.update_rule('session', '[success=1 default=ignore]', 'pam_succeed_if.so', new_type='account')) - self.assertTrue(self.pamd.has_rule('account', '[success=1 default=ignore]', 'pam_succeed_if.so')) - test_rule = PamdRule('account', '[success=1 default=ignore]', 'pam_succeed_if.so') + self.assertTrue( + self.pamd.update_rule("session", "[success=1 default=ignore]", "pam_succeed_if.so", new_type="account") + ) + self.assertTrue(self.pamd.has_rule("account", "[success=1 default=ignore]", "pam_succeed_if.so")) + test_rule = PamdRule("account", "[success=1 default=ignore]", "pam_succeed_if.so") self.assertIn(str(test_rule), str(self.pamd)) def test_update_rule_control_simple(self): - self.assertTrue(self.pamd.update_rule('session', 'optional', 'pam_keyinit.so', new_control='required')) - self.assertTrue(self.pamd.has_rule('session', 'required', 'pam_keyinit.so')) - test_rule = PamdRule('session', 'required', 'pam_keyinit.so') + self.assertTrue(self.pamd.update_rule("session", "optional", "pam_keyinit.so", new_control="required")) + self.assertTrue(self.pamd.has_rule("session", "required", "pam_keyinit.so")) + test_rule = PamdRule("session", "required", "pam_keyinit.so") self.assertIn(str(test_rule), str(self.pamd)) def test_update_rule_control_complex(self): - self.assertTrue(self.pamd.update_rule('session', - '[success=1 default=ignore]', - 'pam_succeed_if.so', - new_control='[success=2 test=me default=ignore]')) - self.assertTrue(self.pamd.has_rule('session', '[success=2 test=me default=ignore]', 'pam_succeed_if.so')) - test_rule = PamdRule('session', '[success=2 test=me default=ignore]', 'pam_succeed_if.so') + self.assertTrue( + self.pamd.update_rule( + "session", + "[success=1 default=ignore]", + "pam_succeed_if.so", + new_control="[success=2 test=me default=ignore]", + ) + ) + self.assertTrue(self.pamd.has_rule("session", "[success=2 test=me default=ignore]", "pam_succeed_if.so")) + test_rule = PamdRule("session", "[success=2 test=me default=ignore]", "pam_succeed_if.so") self.assertIn(str(test_rule), str(self.pamd)) def test_update_rule_control_more_complex(self): - - self.assertTrue(self.pamd.update_rule('session', - '[success=1 test=me default=ignore]', - 'pam_succeed_if.so', - new_control='[success=2 test=me default=ignore]')) - self.assertTrue(self.pamd.has_rule('session', '[success=2 test=me default=ignore]', 'pam_succeed_if.so')) - test_rule = PamdRule('session', '[success=2 test=me default=ignore]', 'pam_succeed_if.so') + self.assertTrue( + self.pamd.update_rule( + "session", + "[success=1 test=me default=ignore]", + "pam_succeed_if.so", + new_control="[success=2 test=me default=ignore]", + ) + ) + self.assertTrue(self.pamd.has_rule("session", "[success=2 test=me default=ignore]", "pam_succeed_if.so")) + test_rule = PamdRule("session", "[success=2 test=me default=ignore]", "pam_succeed_if.so") self.assertIn(str(test_rule), str(self.pamd)) def test_update_rule_module_path(self): - self.assertTrue(self.pamd.update_rule('auth', 'required', 'pam_env.so', new_path='pam_limits.so')) - self.assertTrue(self.pamd.has_rule('auth', 'required', 'pam_limits.so')) + self.assertTrue(self.pamd.update_rule("auth", "required", "pam_env.so", new_path="pam_limits.so")) + self.assertTrue(self.pamd.has_rule("auth", "required", "pam_limits.so")) def test_update_rule_module_path_slash(self): - self.assertTrue(self.pamd.update_rule('auth', 'required', 
'pam_env.so', new_path='/lib64/security/pam_duo.so')) - self.assertTrue(self.pamd.has_rule('auth', 'required', '/lib64/security/pam_duo.so')) + self.assertTrue(self.pamd.update_rule("auth", "required", "pam_env.so", new_path="/lib64/security/pam_duo.so")) + self.assertTrue(self.pamd.has_rule("auth", "required", "/lib64/security/pam_duo.so")) def test_update_rule_module_args(self): - self.assertTrue(self.pamd.update_rule('auth', 'sufficient', 'pam_unix.so', new_args='uid uid')) - test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', 'uid uid') + self.assertTrue(self.pamd.update_rule("auth", "sufficient", "pam_unix.so", new_args="uid uid")) + test_rule = PamdRule("auth", "sufficient", "pam_unix.so", "uid uid") self.assertIn(str(test_rule), str(self.pamd)) - test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', 'nullok try_first_pass') + test_rule = PamdRule("auth", "sufficient", "pam_unix.so", "nullok try_first_pass") self.assertNotIn(str(test_rule), str(self.pamd)) def test_update_rule_remove_module_args(self): - self.assertTrue(self.pamd.update_rule('auth', 'sufficient', 'pam_unix.so', new_args='')) - test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', '') + self.assertTrue(self.pamd.update_rule("auth", "sufficient", "pam_unix.so", new_args="")) + test_rule = PamdRule("auth", "sufficient", "pam_unix.so", "") self.assertIn(str(test_rule), str(self.pamd)) - test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', 'nullok try_first_pass') + test_rule = PamdRule("auth", "sufficient", "pam_unix.so", "nullok try_first_pass") self.assertNotIn(str(test_rule), str(self.pamd)) def test_update_first_three(self): - self.assertTrue(self.pamd.update_rule('auth', 'required', 'pam_env.so', - new_type='one', new_control='two', new_path='three')) - self.assertTrue(self.pamd.has_rule('one', 'two', 'three')) + self.assertTrue( + self.pamd.update_rule("auth", "required", "pam_env.so", new_type="one", new_control="two", new_path="three") + ) + self.assertTrue(self.pamd.has_rule("one", "two", "three")) def test_update_first_three_with_module_args(self): - self.assertTrue(self.pamd.update_rule('auth', 'sufficient', 'pam_unix.so', - new_type='one', new_control='two', new_path='three')) - self.assertTrue(self.pamd.has_rule('one', 'two', 'three')) - test_rule = PamdRule('one', 'two', 'three') + self.assertTrue( + self.pamd.update_rule( + "auth", "sufficient", "pam_unix.so", new_type="one", new_control="two", new_path="three" + ) + ) + self.assertTrue(self.pamd.has_rule("one", "two", "three")) + test_rule = PamdRule("one", "two", "three") self.assertIn(str(test_rule), str(self.pamd)) self.assertIn(str(test_rule), str(self.pamd)) def test_update_all_four(self): - self.assertTrue(self.pamd.update_rule('auth', 'sufficient', 'pam_unix.so', - new_type='one', new_control='two', new_path='three', - new_args='four five')) - test_rule = PamdRule('one', 'two', 'three', 'four five') + self.assertTrue( + self.pamd.update_rule( + "auth", + "sufficient", + "pam_unix.so", + new_type="one", + new_control="two", + new_path="three", + new_args="four five", + ) + ) + test_rule = PamdRule("one", "two", "three", "four five") self.assertIn(str(test_rule), str(self.pamd)) - test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', 'nullok try_first_pass') + test_rule = PamdRule("auth", "sufficient", "pam_unix.so", "nullok try_first_pass") self.assertNotIn(str(test_rule), str(self.pamd)) def test_update_rule_with_slash(self): - self.assertTrue(self.pamd.update_rule('account', '[success=1 default=ignore]', 'pam_succeed_if.so', 
- new_type='session', new_path='pam_access.so')) - test_rule = PamdRule('session', '[success=1 default=ignore]', 'pam_access.so') + self.assertTrue( + self.pamd.update_rule( + "account", + "[success=1 default=ignore]", + "pam_succeed_if.so", + new_type="session", + new_path="pam_access.so", + ) + ) + test_rule = PamdRule("session", "[success=1 default=ignore]", "pam_access.so") self.assertIn(str(test_rule), str(self.pamd)) # Insert Before def test_insert_before_rule(self): - - count = self.pamd.insert_before('account', 'required', 'pam_access.so', - new_type='account', new_control='required', new_path='pam_limits.so') + count = self.pamd.insert_before( + "account", "required", "pam_access.so", new_type="account", new_control="required", new_path="pam_limits.so" + ) self.assertEqual(count, 1) rules = self.pamd.get("account", "required", "pam_access.so") @@ -268,26 +293,48 @@ def test_insert_before_rule(self): self.assertTrue(current_rule.prev.matches("account", "required", "pam_limits.so")) def test_insert_before_rule_where_rule_doesnt_exist(self): - - count = self.pamd.insert_before('account', 'sufficient', 'pam_access.so', - new_type='account', new_control='required', new_path='pam_limits.so') + count = self.pamd.insert_before( + "account", + "sufficient", + "pam_access.so", + new_type="account", + new_control="required", + new_path="pam_limits.so", + ) self.assertFalse(count) def test_insert_before_rule_with_args(self): - self.assertTrue(self.pamd.insert_before('account', 'required', 'pam_access.so', - new_type='account', new_control='required', new_path='pam_limits.so', - new_args='uid')) + self.assertTrue( + self.pamd.insert_before( + "account", + "required", + "pam_access.so", + new_type="account", + new_control="required", + new_path="pam_limits.so", + new_args="uid", + ) + ) rules = self.pamd.get("account", "required", "pam_access.so") for current_rule in rules: - self.assertTrue(current_rule.prev.matches("account", "required", "pam_limits.so", 'uid')) + self.assertTrue(current_rule.prev.matches("account", "required", "pam_limits.so", "uid")) def test_insert_before_rule_test_duplicates(self): - self.assertTrue(self.pamd.insert_before('account', 'required', 'pam_access.so', - new_type='account', new_control='required', new_path='pam_limits.so')) - - self.pamd.insert_before('account', 'required', 'pam_access.so', - new_type='account', new_control='required', new_path='pam_limits.so') + self.assertTrue( + self.pamd.insert_before( + "account", + "required", + "pam_access.so", + new_type="account", + new_control="required", + new_path="pam_limits.so", + ) + ) + + self.pamd.insert_before( + "account", "required", "pam_access.so", new_type="account", new_control="required", new_path="pam_limits.so" + ) rules = self.pamd.get("account", "required", "pam_access.so") for current_rule in rules: @@ -296,37 +343,75 @@ def test_insert_before_rule_test_duplicates(self): self.assertFalse(previous_rule.prev.matches("account", "required", "pam_limits.so")) def test_insert_before_first_rule(self): - self.assertTrue(self.pamd.insert_before('auth', 'required', 'pam_env.so', - new_type='account', new_control='required', new_path='pam_limits.so')) + self.assertTrue( + self.pamd.insert_before( + "auth", "required", "pam_env.so", new_type="account", new_control="required", new_path="pam_limits.so" + ) + ) def test_insert_before_first_rule_simple(self): simple_service = PamdService(self.simple_system_auth_string) - self.assertTrue(simple_service.insert_before('auth', 'required', 'pam_env.so', - 
new_type='account', new_control='required', new_path='pam_limits.so')) + self.assertTrue( + simple_service.insert_before( + "auth", "required", "pam_env.so", new_type="account", new_control="required", new_path="pam_limits.so" + ) + ) # Insert After def test_insert_after_rule(self): - self.assertTrue(self.pamd.insert_after('account', 'required', 'pam_unix.so', - new_type='account', new_control='required', new_path='pam_permit.so')) + self.assertTrue( + self.pamd.insert_after( + "account", + "required", + "pam_unix.so", + new_type="account", + new_control="required", + new_path="pam_permit.so", + ) + ) rules = self.pamd.get("account", "required", "pam_unix.so") for current_rule in rules: self.assertTrue(current_rule.next.matches("account", "required", "pam_permit.so")) def test_insert_after_rule_with_args(self): - self.assertTrue(self.pamd.insert_after('account', 'required', 'pam_access.so', - new_type='account', new_control='required', new_path='pam_permit.so', - new_args='uid')) + self.assertTrue( + self.pamd.insert_after( + "account", + "required", + "pam_access.so", + new_type="account", + new_control="required", + new_path="pam_permit.so", + new_args="uid", + ) + ) rules = self.pamd.get("account", "required", "pam_access.so") for current_rule in rules: self.assertTrue(current_rule.next.matches("account", "required", "pam_permit.so", "uid")) def test_insert_after_test_duplicates(self): - self.assertTrue(self.pamd.insert_after('account', 'required', 'pam_access.so', - new_type='account', new_control='required', new_path='pam_permit.so', - new_args='uid')) - self.assertFalse(self.pamd.insert_after('account', 'required', 'pam_access.so', - new_type='account', new_control='required', new_path='pam_permit.so', - new_args='uid')) + self.assertTrue( + self.pamd.insert_after( + "account", + "required", + "pam_access.so", + new_type="account", + new_control="required", + new_path="pam_permit.so", + new_args="uid", + ) + ) + self.assertFalse( + self.pamd.insert_after( + "account", + "required", + "pam_access.so", + new_type="account", + new_control="required", + new_path="pam_permit.so", + new_args="uid", + ) + ) rules = self.pamd.get("account", "required", "pam_access.so") for current_rule in rules: @@ -334,52 +419,70 @@ def test_insert_after_test_duplicates(self): self.assertFalse(current_rule.next.next.matches("account", "required", "pam_permit.so", "uid")) def test_insert_after_rule_last_rule(self): - self.assertTrue(self.pamd.insert_after('session', 'required', 'pam_unix.so', - new_type='account', new_control='required', new_path='pam_permit.so', - new_args='uid')) + self.assertTrue( + self.pamd.insert_after( + "session", + "required", + "pam_unix.so", + new_type="account", + new_control="required", + new_path="pam_permit.so", + new_args="uid", + ) + ) rules = self.pamd.get("session", "required", "pam_unix.so") for current_rule in rules: self.assertTrue(current_rule.next.matches("account", "required", "pam_permit.so", "uid")) # Remove Module Arguments def test_remove_module_arguments_one(self): - self.assertTrue(self.pamd.remove_module_arguments('auth', 'sufficient', 'pam_unix.so', 'nullok')) + self.assertTrue(self.pamd.remove_module_arguments("auth", "sufficient", "pam_unix.so", "nullok")) def test_remove_module_arguments_one_list(self): - self.assertTrue(self.pamd.remove_module_arguments('auth', 'sufficient', 'pam_unix.so', ['nullok'])) + self.assertTrue(self.pamd.remove_module_arguments("auth", "sufficient", "pam_unix.so", ["nullok"])) def test_remove_module_arguments_two(self): - 
self.assertTrue(self.pamd.remove_module_arguments('session', '[success=1 default=ignore]', 'pam_succeed_if.so', 'service crond')) + self.assertTrue( + self.pamd.remove_module_arguments( + "session", "[success=1 default=ignore]", "pam_succeed_if.so", "service crond" + ) + ) def test_remove_module_arguments_two_list(self): - self.assertTrue(self.pamd.remove_module_arguments('session', '[success=1 default=ignore]', 'pam_succeed_if.so', ['service', 'crond'])) + self.assertTrue( + self.pamd.remove_module_arguments( + "session", "[success=1 default=ignore]", "pam_succeed_if.so", ["service", "crond"] + ) + ) def test_remove_module_arguments_where_none_existed(self): - self.assertTrue(self.pamd.add_module_arguments('session', 'required', 'pam_limits.so', 'arg1 arg2= arg3=arg3')) + self.assertTrue(self.pamd.add_module_arguments("session", "required", "pam_limits.so", "arg1 arg2= arg3=arg3")) def test_add_module_arguments_where_none_existed(self): - self.assertTrue(self.pamd.add_module_arguments('account', 'required', 'pam_unix.so', 'arg1 arg2= arg3=arg3')) + self.assertTrue(self.pamd.add_module_arguments("account", "required", "pam_unix.so", "arg1 arg2= arg3=arg3")) def test_add_module_arguments_where_none_existed_list(self): - self.assertTrue(self.pamd.add_module_arguments('account', 'required', 'pam_unix.so', ['arg1', 'arg2=', 'arg3=arg3'])) + self.assertTrue( + self.pamd.add_module_arguments("account", "required", "pam_unix.so", ["arg1", "arg2=", "arg3=arg3"]) + ) def test_add_module_arguments_where_some_existed(self): - self.assertTrue(self.pamd.add_module_arguments('auth', 'sufficient', 'pam_unix.so', 'arg1 arg2= arg3=arg3')) + self.assertTrue(self.pamd.add_module_arguments("auth", "sufficient", "pam_unix.so", "arg1 arg2= arg3=arg3")) def test_remove_rule(self): - self.assertTrue(self.pamd.remove('account', 'required', 'pam_unix.so')) + self.assertTrue(self.pamd.remove("account", "required", "pam_unix.so")) # Second run should not change anything - self.assertFalse(self.pamd.remove('account', 'required', 'pam_unix.so')) - test_rule = PamdRule('account', 'required', 'pam_unix.so') + self.assertFalse(self.pamd.remove("account", "required", "pam_unix.so")) + test_rule = PamdRule("account", "required", "pam_unix.so") self.assertNotIn(str(test_rule), str(self.pamd)) def test_remove_first_rule(self): no_header_service = PamdService(self.no_header_system_auth_string) - self.assertTrue(no_header_service.remove('auth', 'required', 'pam_env.so')) - test_rule = PamdRule('auth', 'required', 'pam_env.so') + self.assertTrue(no_header_service.remove("auth", "required", "pam_env.so")) + test_rule = PamdRule("auth", "required", "pam_env.so") self.assertNotIn(str(test_rule), str(no_header_service)) def test_remove_last_rule(self): - self.assertTrue(self.pamd.remove('session', 'required', 'pam_unix.so')) - test_rule = PamdRule('session', 'required', 'pam_unix.so') + self.assertTrue(self.pamd.remove("session", "required", "pam_unix.so")) + test_rule = PamdRule("session", "required", "pam_unix.so") self.assertNotIn(str(test_rule), str(self.pamd)) diff --git a/tests/unit/plugins/modules/test_parted.py b/tests/unit/plugins/modules/test_parted.py index 0210202aee8..1774c88a22c 100644 --- a/tests/unit/plugins/modules/test_parted.py +++ b/tests/unit/plugins/modules/test_parted.py @@ -9,7 +9,12 @@ from ansible_collections.community.general.plugins.modules import parted as parted_module from ansible_collections.community.general.plugins.modules.parted import parse_parted_version from 
ansible_collections.community.general.plugins.modules.parted import parse_partition_info -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) # Example of output : parted -s -m /dev/sdb -- unit 'MB' print parted_output1 = """ @@ -19,7 +24,8 @@ 2:106MB:368MB:262MB:ext2::; 3:368MB:256061MB:255692MB:::;""" -parted_version_info = {""" +parted_version_info = { + """ parted (GNU parted) 3.3 Copyright (C) 2019 Free Software Foundation, Inc. License GPLv3+: GNU GPL version 3 or later . @@ -27,7 +33,8 @@ There is NO WARRANTY, to the extent permitted by law. Written by . - """: (3, 3, 0), """ + """: (3, 3, 0), + """ parted (GNU parted) 3.4.5 Copyright (C) 2019 Free Software Foundation, Inc. License GPLv3+: GNU GPL version 3 or later . @@ -35,7 +42,8 @@ There is NO WARRANTY, to the extent permitted by law. Written by . - """: (3, 4, 5), """ + """: (3, 4, 5), + """ parted (GNU parted) 3.3.14-dfc61 Copyright (C) 2019 Free Software Foundation, Inc. License GPLv3+: GNU GPL version 3 or later . @@ -43,7 +51,8 @@ There is NO WARRANTY, to the extent permitted by law. Written by . - """: (3, 3, 14)} + """: (3, 3, 14), +} # corresponding dictionary after parsing by parse_partition_info parted_dict1 = { @@ -54,36 +63,40 @@ "table": "msdos", "model": "ATA TOSHIBA THNSFJ25", "logical_block": 512, - "physical_block": 512 + "physical_block": 512, }, - "partitions": [{ - "num": 1, - "begin": 1.05, - "end": 106.0, - "size": 105.0, - "fstype": "fat32", - "name": '', - "flags": ["esp"], - "unit": "mb" - }, { - "num": 2, - "begin": 106.0, - "end": 368.0, - "size": 262.0, - "fstype": "ext2", - "name": '', - "flags": [], - "unit": "mb" - }, { - "num": 3, - "begin": 368.0, - "end": 256061.0, - "size": 255692.0, - "fstype": "", - "name": '', - "flags": [], - "unit": "mb" - }] + "partitions": [ + { + "num": 1, + "begin": 1.05, + "end": 106.0, + "size": 105.0, + "fstype": "fat32", + "name": "", + "flags": ["esp"], + "unit": "mb", + }, + { + "num": 2, + "begin": 106.0, + "end": 368.0, + "size": 262.0, + "fstype": "ext2", + "name": "", + "flags": [], + "unit": "mb", + }, + { + "num": 3, + "begin": 368.0, + "end": 256061.0, + "size": 255692.0, + "fstype": "", + "name": "", + "flags": [], + "unit": "mb", + }, + ], } parted_output2 = """ @@ -99,9 +112,9 @@ "table": "msdos", "model": "ATA TOSHIBA THNSFJ25", "logical_block": 512, - "physical_block": 512 + "physical_block": 512, }, - "partitions": [] + "partitions": [], } # fake some_flag exists @@ -113,18 +126,20 @@ "table": "msdos", "model": "ATA TOSHIBA THNSFJ25", "logical_block": 512, - "physical_block": 512 + "physical_block": 512, }, - "partitions": [{ - "num": 1, - "begin": 1.05, - "end": 106.0, - "size": 105.0, - "fstype": "fat32", - "name": '', - "flags": ["some_flag"], - "unit": "mb" - }] + "partitions": [ + { + "num": 1, + "begin": 1.05, + "end": 106.0, + "size": 105.0, + "fstype": "fat32", + "name": "", + "flags": ["some_flag"], + "unit": "mb", + } + ], } @@ -133,16 +148,18 @@ def setUp(self): super().setUp() self.module = parted_module - self.mock_check_parted_label = (patch('ansible_collections.community.general.plugins.modules.parted.check_parted_label', return_value=False)) + self.mock_check_parted_label = patch( + 
"ansible_collections.community.general.plugins.modules.parted.check_parted_label", return_value=False + ) self.check_parted_label = self.mock_check_parted_label.start() - self.mock_parted = (patch('ansible_collections.community.general.plugins.modules.parted.parted')) + self.mock_parted = patch("ansible_collections.community.general.plugins.modules.parted.parted") self.parted = self.mock_parted.start() - self.mock_run_command = (patch('ansible.module_utils.basic.AnsibleModule.run_command')) + self.mock_run_command = patch("ansible.module_utils.basic.AnsibleModule.run_command") self.run_command = self.mock_run_command.start() - self.mock_get_bin_path = (patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')) + self.mock_get_bin_path = patch("ansible.module_utils.basic.AnsibleModule.get_bin_path") self.get_bin_path = self.mock_get_bin_path.start() def tearDown(self): @@ -155,13 +172,13 @@ def tearDown(self): def execute_module(self, failed=False, changed=False, script=None): if failed: result = self.failed() - self.assertTrue(result['failed'], result) + self.assertTrue(result["failed"], result) else: result = self.changed(changed) - self.assertEqual(result['changed'], changed, result) + self.assertEqual(result["changed"], changed, result) if script: - self.assertEqual(script, result['script'], result['script']) + self.assertEqual(script, result["script"], result["script"]) return result @@ -170,7 +187,7 @@ def failed(self): self.module.main() result = exc.exception.args[0] - self.assertTrue(result['failed'], result) + self.assertTrue(result["failed"], result) return result def changed(self, changed=False): @@ -178,170 +195,262 @@ def changed(self, changed=False): self.module.main() result = exc.exception.args[0] - self.assertEqual(result['changed'], changed, result) + self.assertEqual(result["changed"], changed, result) return result def test_parse_partition_info(self): """Test that the parse_partition_info returns the expected dictionary""" - self.assertEqual(parse_partition_info(parted_output1, 'MB'), parted_dict1) - self.assertEqual(parse_partition_info(parted_output2, 'MB'), parted_dict2) + self.assertEqual(parse_partition_info(parted_output1, "MB"), parted_dict1) + self.assertEqual(parse_partition_info(parted_output2, "MB"), parted_dict2) def test_partition_already_exists(self): - with set_module_args({ - 'device': '/dev/sdb', - 'number': 1, - 'state': 'present', - }): - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): + with set_module_args( + { + "device": "/dev/sdb", + "number": 1, + "state": "present", + } + ): + with patch( + "ansible_collections.community.general.plugins.modules.parted.get_device_info", + return_value=parted_dict1, + ): self.execute_module(changed=False) def test_create_new_partition(self): - with set_module_args({ - 'device': '/dev/sdb', - 'number': 4, - 'state': 'present', - }): - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.execute_module(changed=True, script=['unit', 'KiB', 'mkpart', 'primary', '0%', '100%']) + with set_module_args( + { + "device": "/dev/sdb", + "number": 4, + "state": "present", + } + ): + with patch( + "ansible_collections.community.general.plugins.modules.parted.get_device_info", + return_value=parted_dict1, + ): + self.execute_module(changed=True, script=["unit", "KiB", "mkpart", "primary", "0%", "100%"]) def test_create_new_partition_1G(self): - with set_module_args({ - 'device': 
'/dev/sdb', - 'number': 4, - 'state': 'present', - 'part_end': '1GiB', - }): - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.execute_module(changed=True, script=['unit', 'KiB', 'mkpart', 'primary', '0%', '1GiB']) + with set_module_args( + { + "device": "/dev/sdb", + "number": 4, + "state": "present", + "part_end": "1GiB", + } + ): + with patch( + "ansible_collections.community.general.plugins.modules.parted.get_device_info", + return_value=parted_dict1, + ): + self.execute_module(changed=True, script=["unit", "KiB", "mkpart", "primary", "0%", "1GiB"]) def test_create_new_partition_minus_1G(self): - with set_module_args({ - 'device': '/dev/sdb', - 'number': 4, - 'state': 'present', - 'fs_type': 'ext2', - 'part_start': '-1GiB', - }): - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.execute_module(changed=True, script=['unit', 'KiB', 'mkpart', 'primary', 'ext2', '-1GiB', '100%']) + with set_module_args( + { + "device": "/dev/sdb", + "number": 4, + "state": "present", + "fs_type": "ext2", + "part_start": "-1GiB", + } + ): + with patch( + "ansible_collections.community.general.plugins.modules.parted.get_device_info", + return_value=parted_dict1, + ): + self.execute_module(changed=True, script=["unit", "KiB", "mkpart", "primary", "ext2", "-1GiB", "100%"]) def test_remove_partition_number_1(self): - with set_module_args({ - 'device': '/dev/sdb', - 'number': 1, - 'state': 'absent', - }): - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.execute_module(changed=True, script=['rm', '1']) + with set_module_args( + { + "device": "/dev/sdb", + "number": 1, + "state": "absent", + } + ): + with patch( + "ansible_collections.community.general.plugins.modules.parted.get_device_info", + return_value=parted_dict1, + ): + self.execute_module(changed=True, script=["rm", "1"]) def test_resize_partition(self): - with set_module_args({ - 'device': '/dev/sdb', - 'number': 3, - 'state': 'present', - 'part_end': '100%', - 'resize': True - }): - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.execute_module(changed=True, script=['resizepart', '3', '100%']) + with set_module_args( + {"device": "/dev/sdb", "number": 3, "state": "present", "part_end": "100%", "resize": True} + ): + with patch( + "ansible_collections.community.general.plugins.modules.parted.get_device_info", + return_value=parted_dict1, + ): + self.execute_module(changed=True, script=["resizepart", "3", "100%"]) def test_change_flag(self): # Flags are set in a second run of parted(). # Between the two runs, the partition dict is updated. # use checkmode here allow us to continue even if the dictionary is # not updated. 
- with set_module_args({ - 'device': '/dev/sdb', - 'number': 3, - 'state': 'present', - 'flags': ['lvm', 'boot'], - '_ansible_check_mode': True, - }): - - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): + with set_module_args( + { + "device": "/dev/sdb", + "number": 3, + "state": "present", + "flags": ["lvm", "boot"], + "_ansible_check_mode": True, + } + ): + with patch( + "ansible_collections.community.general.plugins.modules.parted.get_device_info", + return_value=parted_dict1, + ): self.parted.reset_mock() self.execute_module(changed=True) # When using multiple flags: # order of execution is non deterministic, because set() operations are used in # the current implementation. - expected_calls_order1 = [call(['unit', 'KiB', 'set', '3', 'lvm', 'on', 'set', '3', 'boot', 'on'], - '/dev/sdb', 'optimal')] - expected_calls_order2 = [call(['unit', 'KiB', 'set', '3', 'boot', 'on', 'set', '3', 'lvm', 'on'], - '/dev/sdb', 'optimal')] - self.assertTrue(self.parted.mock_calls == expected_calls_order1 or - self.parted.mock_calls == expected_calls_order2) + expected_calls_order1 = [ + call(["unit", "KiB", "set", "3", "lvm", "on", "set", "3", "boot", "on"], "/dev/sdb", "optimal") + ] + expected_calls_order2 = [ + call(["unit", "KiB", "set", "3", "boot", "on", "set", "3", "lvm", "on"], "/dev/sdb", "optimal") + ] + self.assertTrue( + self.parted.mock_calls == expected_calls_order1 or self.parted.mock_calls == expected_calls_order2 + ) def test_create_new_primary_lvm_partition(self): # use check_mode, see previous test comment - with set_module_args({ - 'device': '/dev/sdb', - 'number': 4, - 'flags': ["boot"], - 'state': 'present', - 'part_start': '257GiB', - 'fs_type': 'ext3', - '_ansible_check_mode': True, - }): - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.execute_module(changed=True, - script=['unit', 'KiB', 'mkpart', 'primary', 'ext3', '257GiB', '100%', 'unit', 'KiB', 'set', '4', 'boot', 'on']) + with set_module_args( + { + "device": "/dev/sdb", + "number": 4, + "flags": ["boot"], + "state": "present", + "part_start": "257GiB", + "fs_type": "ext3", + "_ansible_check_mode": True, + } + ): + with patch( + "ansible_collections.community.general.plugins.modules.parted.get_device_info", + return_value=parted_dict1, + ): + self.execute_module( + changed=True, + script=[ + "unit", + "KiB", + "mkpart", + "primary", + "ext3", + "257GiB", + "100%", + "unit", + "KiB", + "set", + "4", + "boot", + "on", + ], + ) def test_create_label_gpt(self): # Like previous test, current implementation use parted to create the partition and # then retrieve and update the dictionary. Use check_mode to force to continue even if # dictionary is not updated. 
- with set_module_args({ - 'device': '/dev/sdb', - 'number': 1, - 'flags': ["lvm"], - 'label': 'gpt', - 'name': 'lvmpartition', - 'state': 'present', - '_ansible_check_mode': True, - }): - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict2): + with set_module_args( + { + "device": "/dev/sdb", + "number": 1, + "flags": ["lvm"], + "label": "gpt", + "name": "lvmpartition", + "state": "present", + "_ansible_check_mode": True, + } + ): + with patch( + "ansible_collections.community.general.plugins.modules.parted.get_device_info", + return_value=parted_dict2, + ): self.execute_module( changed=True, - script=['unit', 'KiB', 'mklabel', 'gpt', 'mkpart', 'primary', '0%', '100%', - 'unit', 'KiB', 'name', '1', '"lvmpartition"', 'set', '1', 'lvm', 'on']) + script=[ + "unit", + "KiB", + "mklabel", + "gpt", + "mkpart", + "primary", + "0%", + "100%", + "unit", + "KiB", + "name", + "1", + '"lvmpartition"', + "set", + "1", + "lvm", + "on", + ], + ) def test_change_label_gpt(self): # When partitions already exists and label is changed, mkpart should be called even when partition already exists, # because new empty label will be created anyway - with set_module_args({ - 'device': '/dev/sdb', - 'number': 1, - 'state': 'present', - 'label': 'gpt', - '_ansible_check_mode': True, - }): - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1): - self.execute_module(changed=True, script=['unit', 'KiB', 'mklabel', 'gpt', 'mkpart', 'primary', '0%', '100%']) + with set_module_args( + { + "device": "/dev/sdb", + "number": 1, + "state": "present", + "label": "gpt", + "_ansible_check_mode": True, + } + ): + with patch( + "ansible_collections.community.general.plugins.modules.parted.get_device_info", + return_value=parted_dict1, + ): + self.execute_module( + changed=True, script=["unit", "KiB", "mklabel", "gpt", "mkpart", "primary", "0%", "100%"] + ) def test_check_mode_unchanged(self): # Test that get_device_info result is checked in check mode too # No change on partition 1 - with set_module_args({ - 'device': '/dev/sdb', - 'number': 1, - 'state': 'present', - 'flags': ['some_flag'], - '_ansible_check_mode': True, - }): - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict3): + with set_module_args( + { + "device": "/dev/sdb", + "number": 1, + "state": "present", + "flags": ["some_flag"], + "_ansible_check_mode": True, + } + ): + with patch( + "ansible_collections.community.general.plugins.modules.parted.get_device_info", + return_value=parted_dict3, + ): self.execute_module(changed=False) def test_check_mode_changed(self): # Test that get_device_info result is checked in check mode too # Flag change on partition 1 - with set_module_args({ - 'device': '/dev/sdb', - 'number': 1, - 'state': 'present', - 'flags': ['other_flag'], - '_ansible_check_mode': True, - }): - with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict3): + with set_module_args( + { + "device": "/dev/sdb", + "number": 1, + "state": "present", + "flags": ["other_flag"], + "_ansible_check_mode": True, + } + ): + with patch( + "ansible_collections.community.general.plugins.modules.parted.get_device_info", + return_value=parted_dict3, + ): self.execute_module(changed=True) def test_version_info(self): diff --git a/tests/unit/plugins/modules/test_pkgin.py b/tests/unit/plugins/modules/test_pkgin.py index 
e4378eb8285..f15760bae60 100644 --- a/tests/unit/plugins/modules/test_pkgin.py +++ b/tests/unit/plugins/modules/test_pkgin.py @@ -11,14 +11,13 @@ class TestPkginQueryPackage(unittest.TestCase): - def setUp(self): pkgin.PKGIN_PATH = "" - @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule') + @mock.patch("ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule") def test_package_without_version_is_present(self, mock_module): # given - package = 'py37-conan' + package = "py37-conan" parseable_flag_not_supported = 1 mock_module.run_command.side_effect = [ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None), @@ -31,10 +30,10 @@ def test_package_without_version_is_present(self, mock_module): # then self.assertEqual(command_result, pkgin.PackageState.PRESENT) - @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule') + @mock.patch("ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule") def test_package_with_version_is_present(self, mock_module): # given - package = 'py37-conan-1.21.0' + package = "py37-conan-1.21.0" parseable_flag_not_supported = 1 mock_module.run_command.side_effect = [ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None), @@ -47,14 +46,18 @@ def test_package_with_version_is_present(self, mock_module): # then self.assertEqual(command_result, pkgin.PackageState.PRESENT) - @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule') + @mock.patch("ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule") def test_package_found_but_not_installed(self, mock_module): # given - package = 'cmake' + package = "cmake" parseable_flag_not_supported = 1 mock_module.run_command.side_effect = [ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None), - (0, "cmake316-3.16.0nb1 = Cross platform make\ncmake314-3.14.6nb1 = Cross platform make\ncmake-3.14.0 Cross platform make", None), + ( + 0, + "cmake316-3.16.0nb1 = Cross platform make\ncmake314-3.14.6nb1 = Cross platform make\ncmake-3.14.0 Cross platform make", + None, + ), ] # when @@ -63,10 +66,10 @@ def test_package_found_but_not_installed(self, mock_module): # then self.assertEqual(command_result, pkgin.PackageState.NOT_INSTALLED) - @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule') + @mock.patch("ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule") def test_package_found_outdated(self, mock_module): # given - package = 'cmake316' + package = "cmake316" parseable_flag_not_supported = 1 mock_module.run_command.side_effect = [ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None), @@ -79,10 +82,10 @@ def test_package_found_outdated(self, mock_module): # then self.assertEqual(command_result, pkgin.PackageState.OUTDATED) - @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule') + @mock.patch("ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule") def test_package_with_version_found_outdated(self, mock_module): # given - package = 'cmake316-3.16.0nb1' + package = "cmake316-3.16.0nb1" parseable_flag_not_supported = 1 mock_module.run_command.side_effect = [ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None), @@ -95,10 +98,10 @@ def 
test_package_with_version_found_outdated(self, mock_module): # then self.assertEqual(command_result, pkgin.PackageState.OUTDATED) - @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule') + @mock.patch("ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule") def test_package_not_found(self, mock_module): # given - package = 'cmake320-3.20.0nb1' + package = "cmake320-3.20.0nb1" parseable_flag_not_supported = 1 mock_module.run_command.side_effect = [ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None), @@ -111,10 +114,10 @@ def test_package_not_found(self, mock_module): # then self.assertEqual(command_result, pkgin.PackageState.NOT_FOUND) - @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule') + @mock.patch("ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule") def test_with_parseable_flag_supported_package_is_present(self, mock_module): # given - package = 'py37-conan' + package = "py37-conan" parseable_flag_supported = 0 mock_module.run_command.side_effect = [ (parseable_flag_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None), @@ -127,10 +130,10 @@ def test_with_parseable_flag_supported_package_is_present(self, mock_module): # then self.assertEqual(command_result, pkgin.PackageState.PRESENT) - @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule') + @mock.patch("ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule") def test_with_parseable_flag_not_supported_package_is_present(self, mock_module): # given - package = 'py37-conan' + package = "py37-conan" parseable_flag_not_supported = 1 mock_module.run_command.side_effect = [ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None), diff --git a/tests/unit/plugins/modules/test_pmem.py b/tests/unit/plugins/modules/test_pmem.py index 84768801e62..90b92004aab 100644 --- a/tests/unit/plugins/modules/test_pmem.py +++ b/tests/unit/plugins/modules/test_pmem.py @@ -8,9 +8,14 @@ import json from unittest.mock import patch -pytest.importorskip('xmltodict') +pytest.importorskip("xmltodict") -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args, AnsibleFailJson, AnsibleExitJson +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + ModuleTestCase, + set_module_args, + AnsibleFailJson, + AnsibleExitJson, +) from ansible_collections.community.general.plugins.modules import pmem as pmem_module @@ -267,16 +272,19 @@ def setUp(self): super().setUp() self.module = pmem_module - self.mock_run_command = (patch('ansible.module_utils.basic.AnsibleModule.run_command')) - self.mock_get_bin_path = (patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')) + self.mock_run_command = patch("ansible.module_utils.basic.AnsibleModule.run_command") + self.mock_get_bin_path = patch("ansible.module_utils.basic.AnsibleModule.get_bin_path") self.run_command = self.mock_run_command.start() self.get_bin_path = self.mock_get_bin_path.start() - self.mock_pmem_is_dcpmm_installed = (patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_is_dcpmm_installed', return_value="")) - self.mock_pmem_init_env = (patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_init_env', return_value="")) + 
self.mock_pmem_is_dcpmm_installed = patch( + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_is_dcpmm_installed", + return_value="", + ) + self.mock_pmem_init_env = patch( + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_init_env", return_value="" + ) self.pmem_is_dcpmm_installed = self.mock_pmem_is_dcpmm_installed.start() self.pmem_init_env = self.mock_pmem_init_env.start() @@ -289,10 +297,10 @@ def tearDown(self): self.mock_pmem_init_env.stop() def result_check(self, result, socket, appdirect, memmode, reserved): - self.assertTrue(result.exception.args[0]['changed']) - self.assertTrue(result.exception.args[0]['reboot_required']) + self.assertTrue(result.exception.args[0]["changed"]) + self.assertTrue(result.exception.args[0]["reboot_required"]) - test_result = result.exception.args[0]['result'] + test_result = result.exception.args[0]["result"] if socket: maxIndex = 1 @@ -300,22 +308,22 @@ def result_check(self, result, socket, appdirect, memmode, reserved): maxIndex = 0 for i in range(0, maxIndex): - self.assertAlmostEqual(test_result[i]['appdirect'], appdirect[i]) - self.assertAlmostEqual(test_result[i]['memorymode'], memmode[i]) - self.assertAlmostEqual(test_result[i]['reserved'], reserved[i]) + self.assertAlmostEqual(test_result[i]["appdirect"], appdirect[i]) + self.assertAlmostEqual(test_result[i]["memorymode"], memmode[i]) + self.assertAlmostEqual(test_result[i]["reserved"], reserved[i]) if socket: - self.assertAlmostEqual(test_result[i]['socket'], i) + self.assertAlmostEqual(test_result[i]["socket"], i) def result_check_ns(self, result, namespace): - self.assertTrue(result.exception.args[0]['changed']) - self.assertFalse(result.exception.args[0]['reboot_required']) + self.assertTrue(result.exception.args[0]["changed"]) + self.assertFalse(result.exception.args[0]["reboot_required"]) - test_result = result.exception.args[0]['result'] + test_result = result.exception.args[0]["result"] expected = json.loads(namespace) for i, result in enumerate(test_result): - self.assertEqual(result['dev'], expected[i]['dev']) - self.assertEqual(result['size'], expected[i]['size']) + self.assertEqual(result["dev"], expected[i]["dev"]) + self.assertEqual(result["size"], expected[i]["size"]) def test_fail_when_required_args_missing(self): with self.assertRaises(AnsibleFailJson): @@ -324,382 +332,401 @@ def test_fail_when_required_args_missing(self): def test_fail_when_appdirect_only(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'appdirect': 10, - }): + with set_module_args( + { + "appdirect": 10, + } + ): pmem_module.main() def test_fail_when_MemosyMode_only(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'memorymode': 70, - }): + with set_module_args( + { + "memorymode": 70, + } + ): pmem_module.main() def test_fail_when_reserved_only(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'reserved': 10, - }): + with set_module_args( + { + "reserved": 10, + } + ): pmem_module.main() def test_fail_when_appdirect_memorymode_reserved_total_not_100(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'appdirect': 10, - 'memorymode': 70, - 'reserved': 10, - }): + with set_module_args( + { + "appdirect": 10, + "memorymode": 70, + "reserved": 10, + } + ): pmem_module.main() def test_when_appdirect_memorymode(self): - with set_module_args({ - 'appdirect': 10, - 'memorymode': 70, - }): + with set_module_args( + { + "appdirect": 10, + 
"memorymode": 70, + } + ): with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[goal_plain, goal, dimmlist]): + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command", + side_effect=[goal_plain, goal, dimmlist], + ): with self.assertRaises(AnsibleExitJson) as result: pmem_module.main() self.result_check(result, False, [25769803776], [188978561024], [328230764544]) def test_when_appdirect_memorymode_reserved(self): - with set_module_args({ - 'appdirect': 10, - 'memorymode': 70, - 'reserved': 20, - }): + with set_module_args( + { + "appdirect": 10, + "memorymode": 70, + "reserved": 20, + } + ): with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[goal_plain, goal, dimmlist]): + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command", + side_effect=[goal_plain, goal, dimmlist], + ): with self.assertRaises(AnsibleExitJson) as result: pmem_module.main() self.result_check(result, False, [25769803776], [188978561024], [328230764544]) def test_when_appdirect_notinterleaved_memorymode_reserved(self): - with set_module_args({ - 'appdirect': 10, - 'appdirect_interleaved': False, - 'memorymode': 70, - 'reserved': 20, - }): + with set_module_args( + { + "appdirect": 10, + "appdirect_interleaved": False, + "memorymode": 70, + "reserved": 20, + } + ): with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[goal_plain, goal, dimmlist]): + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command", + side_effect=[goal_plain, goal, dimmlist], + ): with self.assertRaises(AnsibleExitJson) as result: pmem_module.main() self.result_check(result, False, [25769803776], [188978561024], [328230764544]) def test_fail_when_socket_id_appdirect(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'socket': [ - { - 'id': 0, - 'appdirect': 10, - }, - { - 'id': 1, - 'appdirect': 10, - }, - ], - }): + with set_module_args( + { + "socket": [ + { + "id": 0, + "appdirect": 10, + }, + { + "id": 1, + "appdirect": 10, + }, + ], + } + ): pmem_module.main() def test_fail_when_socket0_id_memorymode_socket1_id_appdirect(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'socket': [ - { - 'id': 0, - ' memorymode': 70, - }, - { - 'id': 1, - 'appdirect': 10, - }, - ], - }): + with set_module_args( + { + "socket": [ + { + "id": 0, + " memorymode": 70, + }, + { + "id": 1, + "appdirect": 10, + }, + ], + } + ): pmem_module.main() def test_fail_when_socket0_without_id(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'socket': [ + with set_module_args( + { + "socket": [ + { + "appdirect": 10, + "memorymode": 70, + }, + { + "id": 1, + "appdirect": 10, + "memorymode": 70, + }, + ], + } + ): + pmem_module.main() + + def test_when_socket0_and_1_appdirect_memorymode(self): + with set_module_args( + { + "socket": [ { - 'appdirect': 10, - 'memorymode': 70, + "id": 0, + "appdirect": 10, + "memorymode": 70, }, { - 'id': 1, - 'appdirect': 10, - 'memorymode': 70, + "id": 1, + "appdirect": 10, + "memorymode": 70, }, ], - }): - pmem_module.main() - - def test_when_socket0_and_1_appdirect_memorymode(self): - with set_module_args({ - 'socket': [ - { - 'id': 0, - 'appdirect': 10, - 'memorymode': 70, - }, - { - 'id': 1, - 'appdirect': 10, - 'memorymode': 70, - }, - 
], - }): + } + ): with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ - show_skt, goal_plain_sk0, goal_sk0, dimmlist_sk0, goal_plain_sk1, goal_sk1, dimmlist_sk1]): + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command", + side_effect=[show_skt, goal_plain_sk0, goal_sk0, dimmlist_sk0, goal_plain_sk1, goal_sk1, dimmlist_sk1], + ): with self.assertRaises(AnsibleExitJson) as result: pmem_module.main() self.result_check( - result, True, [12884901888, 12884901888], [94489280512, 94489280512], [164115382272, 164115382272]) + result, True, [12884901888, 12884901888], [94489280512, 94489280512], [164115382272, 164115382272] + ) def test_when_socket0_and_1_appdirect_memorymode_reserved(self): - with set_module_args({ - 'socket': [ - { - 'id': 0, - 'appdirect': 10, - 'memorymode': 70, - 'reserved': 20, - }, - { - 'id': 1, - 'appdirect': 10, - 'memorymode': 70, - 'reserved': 20, - }, - ], - }): + with set_module_args( + { + "socket": [ + { + "id": 0, + "appdirect": 10, + "memorymode": 70, + "reserved": 20, + }, + { + "id": 1, + "appdirect": 10, + "memorymode": 70, + "reserved": 20, + }, + ], + } + ): with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ - show_skt, goal_plain_sk0, goal_sk0, dimmlist_sk0, goal_plain_sk1, goal_sk1, dimmlist_sk1]): + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command", + side_effect=[show_skt, goal_plain_sk0, goal_sk0, dimmlist_sk0, goal_plain_sk1, goal_sk1, dimmlist_sk1], + ): with self.assertRaises(AnsibleExitJson) as result: pmem_module.main() self.result_check( - result, True, [12884901888, 12884901888], [94489280512, 94489280512], [164115382272, 164115382272]) + result, True, [12884901888, 12884901888], [94489280512, 94489280512], [164115382272, 164115382272] + ) def test_when_socket0_appdirect_notinterleaved_memorymode_reserved_socket1_appdirect_memorymode_reserved(self): - with set_module_args({ - 'socket': [ - { - 'id': 0, - 'appdirect': 10, - 'appdirect_interleaved': False, - 'memorymode': 70, - 'reserved': 20, - }, - { - 'id': 1, - 'appdirect': 10, - 'memorymode': 70, - 'reserved': 20, - }, - ], - }): + with set_module_args( + { + "socket": [ + { + "id": 0, + "appdirect": 10, + "appdirect_interleaved": False, + "memorymode": 70, + "reserved": 20, + }, + { + "id": 1, + "appdirect": 10, + "memorymode": 70, + "reserved": 20, + }, + ], + } + ): with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ - show_skt, goal_plain_sk0, goal_sk0, dimmlist_sk0, goal_plain_sk1, goal_sk1, dimmlist_sk1]): + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command", + side_effect=[show_skt, goal_plain_sk0, goal_sk0, dimmlist_sk0, goal_plain_sk1, goal_sk1, dimmlist_sk1], + ): with self.assertRaises(AnsibleExitJson) as result: pmem_module.main() self.result_check( - result, True, [12884901888, 12884901888], [94489280512, 94489280512], [164115382272, 164115382272]) + result, True, [12884901888, 12884901888], [94489280512, 94489280512], [164115382272, 164115382272] + ) def test_fail_when_namespace_without_mode(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'namespace': [ - { - 'size': '1GB', - 'type': 'pmem', - }, - { - 'size': '2GB', - 'type': 'blk', - }, - ], - }): + with set_module_args( + { + "namespace": [ + { + "size": 
"1GB", + "type": "pmem", + }, + { + "size": "2GB", + "type": "blk", + }, + ], + } + ): pmem_module.main() def test_fail_when_region_is_empty(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'namespace': [ - { - 'size': '1GB', - 'type': 'pmem', - 'mode': 'sector', - }, - ], - }): + with set_module_args( + { + "namespace": [ + { + "size": "1GB", + "type": "pmem", + "mode": "sector", + }, + ], + } + ): with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region_empty]): + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command", + side_effect=[ndctl_region_empty], + ): pmem_module.main() def test_fail_when_namespace_invalid_size(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'namespace': [ - { - 'size': '1XXX', - 'type': 'pmem', - 'mode': 'sector', - }, - ], - }): + with set_module_args( + { + "namespace": [ + { + "size": "1XXX", + "type": "pmem", + "mode": "sector", + }, + ], + } + ): with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region]): + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command", + side_effect=[ndctl_region], + ): pmem_module.main() def test_fail_when_size_is_invalid_alignment(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'namespace': [ - { - 'size': '400MB', - 'type': 'pmem', - 'mode': 'sector' - }, - { - 'size': '500MB', - 'type': 'pmem', - 'mode': 'sector' - }, - ], - }): + with set_module_args( + { + "namespace": [ + {"size": "400MB", "type": "pmem", "mode": "sector"}, + {"size": "500MB", "type": "pmem", "mode": "sector"}, + ], + } + ): with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region]): + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command", + side_effect=[ndctl_region], + ): pmem_module.main() def test_fail_when_blk_is_unsupported_type(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'namespace': [ - { - 'size': '4GB', - 'type': 'pmem', - 'mode': 'sector' - }, - { - 'size': '5GB', - 'type': 'blk', - 'mode': 'sector' - }, - ], - }): + with set_module_args( + { + "namespace": [ + {"size": "4GB", "type": "pmem", "mode": "sector"}, + {"size": "5GB", "type": "blk", "mode": "sector"}, + ], + } + ): with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region]): + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command", + side_effect=[ndctl_region], + ): pmem_module.main() def test_fail_when_size_isnot_set_to_multiple_namespaces(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'namespace': [ - { - 'type': 'pmem', - 'mode': 'sector' - }, - { - 'size': '500GB', - 'type': 'blk', - 'mode': 'sector' - }, - ], - }): + with set_module_args( + { + "namespace": [ + {"type": "pmem", "mode": "sector"}, + {"size": "500GB", "type": "blk", "mode": "sector"}, + ], + } + ): with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region]): + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command", + side_effect=[ndctl_region], + ): pmem_module.main() def 
test_fail_when_size_of_namespace_over_available(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'namespace': [ - { - 'size': '400GB', - 'type': 'pmem', - 'mode': 'sector' - }, - { - 'size': '500GB', - 'type': 'pmem', - 'mode': 'sector' - }, - ], - }): + with set_module_args( + { + "namespace": [ + {"size": "400GB", "type": "pmem", "mode": "sector"}, + {"size": "500GB", "type": "pmem", "mode": "sector"}, + ], + } + ): with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region]): + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command", + side_effect=[ndctl_region], + ): pmem_module.main() def test_when_namespace0_without_size(self): - with set_module_args({ - 'namespace': [ - { - 'type': 'pmem', - 'mode': 'sector' - }, - ], - }): + with set_module_args( + { + "namespace": [ + {"type": "pmem", "mode": "sector"}, + ], + } + ): with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region, ndctl_create_without_size, ndctl_list_N]): + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command", + side_effect=[ndctl_region, ndctl_create_without_size, ndctl_list_N], + ): with self.assertRaises(AnsibleExitJson) as result: pmem_module.main() self.result_check_ns(result, ndctl_list_N) def test_when_namespace0_with_namespace_append(self): - with set_module_args({ - 'namespace': [ - { - 'size': '640MB', - 'type': 'pmem', - 'mode': 'raw' - }, - ], - 'namespace_append': True, - }): + with set_module_args( + { + "namespace": [ + {"size": "640MB", "type": "pmem", "mode": "raw"}, + ], + "namespace_append": True, + } + ): with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region, ndctl_create_640M, ndctl_list_N_two_namespaces]): + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command", + side_effect=[ndctl_region, ndctl_create_640M, ndctl_list_N_two_namespaces], + ): with self.assertRaises(AnsibleExitJson) as result: pmem_module.main() self.result_check_ns(result, ndctl_list_N_two_namespaces) def test_when_namespace0_1GiB_pmem_sector_namespace1_640MiB_pmem_raw(self): - with set_module_args({ - 'namespace': [ - { - 'size': '1GB', - 'type': 'pmem', - 'mode': 'sector' - }, - { - 'size': '640MB', - 'type': 'pmem', - 'mode': 'raw', - }, - ], - }): + with set_module_args( + { + "namespace": [ + {"size": "1GB", "type": "pmem", "mode": "sector"}, + { + "size": "640MB", + "type": "pmem", + "mode": "raw", + }, + ], + } + ): with patch( - 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command', - side_effect=[ndctl_region, ndctl_create_1G, ndctl_create_640M, ndctl_list_N_two_namespaces]): + "ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command", + side_effect=[ndctl_region, ndctl_create_1G, ndctl_create_640M, ndctl_list_N_two_namespaces], + ): with self.assertRaises(AnsibleExitJson) as result: pmem_module.main() self.result_check_ns(result, ndctl_list_N_two_namespaces) diff --git a/tests/unit/plugins/modules/test_pritunl_org.py b/tests/unit/plugins/modules/test_pritunl_org.py index 42d975e440e..157dfa8c3f2 100644 --- a/tests/unit/plugins/modules/test_pritunl_org.py +++ b/tests/unit/plugins/modules/test_pritunl_org.py @@ -41,21 +41,21 @@ def 
patch_add_pritunl_organization(self, **kwds): return patch( "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._post_pritunl_organization", autospec=True, - **kwds + **kwds, ) def patch_delete_pritunl_organization(self, **kwds): return patch( "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._delete_pritunl_organization", autospec=True, - **kwds + **kwds, ) def patch_get_pritunl_organizations(self, **kwds): return patch( "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_organizations", autospec=True, - **kwds + **kwds, ) def test_without_parameters(self): @@ -78,12 +78,8 @@ def test_present(self): ) ): # Test creation - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as mock_get: - with self.patch_add_pritunl_organization( - side_effect=PritunlPostOrganizationMock - ) as mock_add: + with self.patch_get_pritunl_organizations(side_effect=PritunlListOrganizationMock) as mock_get: + with self.patch_add_pritunl_organization(side_effect=PritunlPostOrganizationMock) as mock_add: with self.assertRaises(AnsibleExitJson) as create_result: self.module.main() @@ -94,12 +90,8 @@ def test_present(self): self.assertEqual(create_exc["response"]["user_count"], 0) # Test module idempotency - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationAfterPostMock - ) as mock_get: - with self.patch_add_pritunl_organization( - side_effect=PritunlPostOrganizationMock - ) as mock_add: + with self.patch_get_pritunl_organizations(side_effect=PritunlListOrganizationAfterPostMock) as mock_get: + with self.patch_add_pritunl_organization(side_effect=PritunlPostOrganizationMock) as mock_add: with self.assertRaises(AnsibleExitJson) as idempotent_result: self.module.main() @@ -128,12 +120,8 @@ def test_absent(self): ) ): # Test deletion - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationAfterPostMock - ) as mock_get: - with self.patch_delete_pritunl_organization( - side_effect=PritunlDeleteOrganizationMock - ) as mock_delete: + with self.patch_get_pritunl_organizations(side_effect=PritunlListOrganizationAfterPostMock) as mock_get: + with self.patch_delete_pritunl_organization(side_effect=PritunlDeleteOrganizationMock) as mock_delete: with self.assertRaises(AnsibleExitJson) as delete_result: self.module.main() @@ -143,12 +131,8 @@ def test_absent(self): self.assertEqual(delete_exc["response"], {}) # Test module idempotency - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as mock_get: - with self.patch_delete_pritunl_organization( - side_effect=PritunlDeleteOrganizationMock - ) as mock_add: + with self.patch_get_pritunl_organizations(side_effect=PritunlListOrganizationMock) as mock_get: + with self.patch_delete_pritunl_organization(side_effect=PritunlDeleteOrganizationMock) as mock_add: with self.assertRaises(AnsibleExitJson) as idempotent_result: self.module.main() @@ -170,12 +154,8 @@ def test_absent_with_existing_users(self): } with set_module_args(module_args): # Test deletion - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as mock_get: - with self.patch_delete_pritunl_organization( - side_effect=PritunlDeleteOrganizationMock - ) as mock_delete: + with self.patch_get_pritunl_organizations(side_effect=PritunlListOrganizationMock) as mock_get: + with self.patch_delete_pritunl_organization(side_effect=PritunlDeleteOrganizationMock) as 
mock_delete: with self.assertRaises(AnsibleFailJson) as failure_result: self.module.main() @@ -185,9 +165,7 @@ def test_absent_with_existing_users(self): # Switch force=True which should run successfully with set_module_args(dict_merge(module_args, {"force": True})): - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as mock_get: + with self.patch_get_pritunl_organizations(side_effect=PritunlListOrganizationMock) as mock_get: with self.patch_delete_pritunl_organization( side_effect=PritunlDeleteOrganizationMock ) as mock_delete: diff --git a/tests/unit/plugins/modules/test_pritunl_org_info.py b/tests/unit/plugins/modules/test_pritunl_org_info.py index f0c67ae5d6b..b6c07e2329b 100644 --- a/tests/unit/plugins/modules/test_pritunl_org_info.py +++ b/tests/unit/plugins/modules/test_pritunl_org_info.py @@ -38,14 +38,12 @@ def patch_get_pritunl_organizations(self, **kwds): return patch( "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_organizations", autospec=True, - **kwds + **kwds, ) def test_without_parameters(self): """Test without parameters""" - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as org_mock: + with self.patch_get_pritunl_organizations(side_effect=PritunlListOrganizationMock) as org_mock: with set_module_args({}): with self.assertRaises(AnsibleFailJson): self.module.main() @@ -54,9 +52,7 @@ def test_without_parameters(self): def test_list_empty_organizations(self): """Listing all organizations even when no org exists should be valid.""" - with self.patch_get_pritunl_organizations( - side_effect=PritunlEmptyOrganizationMock - ) as org_mock: + with self.patch_get_pritunl_organizations(side_effect=PritunlEmptyOrganizationMock) as org_mock: with self.assertRaises(AnsibleExitJson) as result: with set_module_args( { @@ -74,9 +70,7 @@ def test_list_empty_organizations(self): def test_list_specific_organization(self): """Listing a specific organization should be valid.""" - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as org_mock: + with self.patch_get_pritunl_organizations(side_effect=PritunlListOrganizationMock) as org_mock: with self.assertRaises(AnsibleExitJson) as result: with set_module_args( { @@ -95,9 +89,7 @@ def test_list_specific_organization(self): def test_list_unknown_organization(self): """Listing an unknown organization should result in a failure.""" - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as org_mock: + with self.patch_get_pritunl_organizations(side_effect=PritunlListOrganizationMock) as org_mock: with self.assertRaises(AnsibleFailJson) as result: with set_module_args( { @@ -116,9 +108,7 @@ def test_list_unknown_organization(self): def test_list_all_organizations(self): """Listing all organizations should be valid.""" - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as org_mock: + with self.patch_get_pritunl_organizations(side_effect=PritunlListOrganizationMock) as org_mock: with self.assertRaises(AnsibleExitJson) as result: with set_module_args( { diff --git a/tests/unit/plugins/modules/test_pritunl_user.py b/tests/unit/plugins/modules/test_pritunl_user.py index 175995687c3..767f9ae72af 100644 --- a/tests/unit/plugins/modules/test_pritunl_user.py +++ b/tests/unit/plugins/modules/test_pritunl_user.py @@ -28,14 +28,10 @@ def mock_pritunl_api(func, **kwargs): def wrapped(self=None): - with 
self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ): + with self.patch_get_pritunl_organizations(side_effect=PritunlListOrganizationMock): with self.patch_get_pritunl_users(side_effect=PritunlListUserMock): with self.patch_add_pritunl_users(side_effect=PritunlPostUserMock): - with self.patch_delete_pritunl_users( - side_effect=PritunlDeleteUserMock - ): + with self.patch_delete_pritunl_users(side_effect=PritunlDeleteUserMock): func(self, **kwargs) return wrapped @@ -57,35 +53,35 @@ def patch_get_pritunl_users(self, **kwds): return patch( "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_users", autospec=True, - **kwds + **kwds, ) def patch_add_pritunl_users(self, **kwds): return patch( "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._post_pritunl_user", autospec=True, - **kwds + **kwds, ) def patch_update_pritunl_users(self, **kwds): return patch( "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._put_pritunl_user", autospec=True, - **kwds + **kwds, ) def patch_delete_pritunl_users(self, **kwds): return patch( "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._delete_pritunl_user", autospec=True, - **kwds + **kwds, ) def patch_get_pritunl_organizations(self, **kwds): return patch( "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_organizations", autospec=True, - **kwds + **kwds, ) def test_without_parameters(self): @@ -112,9 +108,7 @@ def test_present(self): user_params, ) ): - with self.patch_update_pritunl_users( - side_effect=PritunlPostUserMock - ) as post_mock: + with self.patch_update_pritunl_users(side_effect=PritunlPostUserMock) as post_mock: with self.assertRaises(AnsibleExitJson) as create_result: self.module.main() @@ -143,9 +137,7 @@ def test_present(self): new_user_params, ) ): - with self.patch_update_pritunl_users( - side_effect=PritunlPutUserMock - ) as put_mock: + with self.patch_update_pritunl_users(side_effect=PritunlPutUserMock) as put_mock: with self.assertRaises(AnsibleExitJson) as update_result: self.module.main() diff --git a/tests/unit/plugins/modules/test_pritunl_user_info.py b/tests/unit/plugins/modules/test_pritunl_user_info.py index 2adf99fffdc..1e05e5e14e0 100644 --- a/tests/unit/plugins/modules/test_pritunl_user_info.py +++ b/tests/unit/plugins/modules/test_pritunl_user_info.py @@ -38,24 +38,20 @@ def patch_get_pritunl_users(self, **kwds): return patch( "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_users", autospec=True, - **kwds + **kwds, ) def patch_get_pritunl_organizations(self, **kwds): return patch( "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_organizations", autospec=True, - **kwds + **kwds, ) def test_without_parameters(self): """Test without parameters""" - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as org_mock: - with self.patch_get_pritunl_users( - side_effect=PritunlListUserMock - ) as user_mock: + with self.patch_get_pritunl_organizations(side_effect=PritunlListOrganizationMock) as org_mock: + with self.patch_get_pritunl_users(side_effect=PritunlListUserMock) as user_mock: with set_module_args({}): with self.assertRaises(AnsibleFailJson): self.module.main() @@ -65,12 +61,8 @@ def test_without_parameters(self): def test_missing_organization(self): """Failure must occur when the requested 
organization is not found.""" - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as org_mock: - with self.patch_get_pritunl_users( - side_effect=PritunlListUserMock - ) as user_mock: + with self.patch_get_pritunl_organizations(side_effect=PritunlListOrganizationMock) as org_mock: + with self.patch_get_pritunl_users(side_effect=PritunlListUserMock) as user_mock: with self.assertRaises(AnsibleFailJson) as result: with set_module_args( { @@ -93,12 +85,8 @@ def test_get_all_client_users_from_organization(self): The list of all Pritunl client users from the organization must be returned when no user specified. """ expected_user_type = "client" - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as org_mock: - with self.patch_get_pritunl_users( - side_effect=PritunlListUserMock - ) as user_mock: + with self.patch_get_pritunl_organizations(side_effect=PritunlListOrganizationMock) as org_mock: + with self.patch_get_pritunl_users(side_effect=PritunlListUserMock) as user_mock: with self.assertRaises(AnsibleExitJson) as result: with set_module_args( { @@ -127,12 +115,8 @@ def test_get_specific_server_user_from_organization(self): """ expected_user_type = "server" expected_user_name = "ops" - with self.patch_get_pritunl_organizations( - side_effect=PritunlListOrganizationMock - ) as org_mock: - with self.patch_get_pritunl_users( - side_effect=PritunlListUserMock - ) as user_mock: + with self.patch_get_pritunl_organizations(side_effect=PritunlListOrganizationMock) as org_mock: + with self.patch_get_pritunl_users(side_effect=PritunlListUserMock) as user_mock: with self.assertRaises(AnsibleExitJson) as result: with set_module_args( { diff --git a/tests/unit/plugins/modules/test_redhat_subscription.py b/tests/unit/plugins/modules/test_redhat_subscription.py index 1edc8e6840a..7193654da0b 100644 --- a/tests/unit/plugins/modules/test_redhat_subscription.py +++ b/tests/unit/plugins/modules/test_redhat_subscription.py @@ -20,40 +20,44 @@ def patch_redhat_subscription(mocker): """ Function used for mocking some parts of redhat_subscription module """ - mocker.patch('ansible_collections.community.general.plugins.modules.redhat_subscription.Rhsm.REDHAT_REPO') - mocker.patch('ansible_collections.community.general.plugins.modules.redhat_subscription.isfile', return_value=False) - mocker.patch('ansible_collections.community.general.plugins.modules.redhat_subscription.unlink', return_value=True) - mocker.patch('ansible_collections.community.general.plugins.modules.redhat_subscription.AnsibleModule.get_bin_path', - return_value='/testbin/subscription-manager') - mocker.patch('ansible_collections.community.general.plugins.modules.redhat_subscription.Rhsm._can_connect_to_dbus', - return_value=False) - mocker.patch('ansible_collections.community.general.plugins.modules.redhat_subscription.Rhsm._has_dbus_interface', - return_value=False) - mocker.patch('ansible_collections.community.general.plugins.modules.redhat_subscription.getuid', - return_value=0) + mocker.patch("ansible_collections.community.general.plugins.modules.redhat_subscription.Rhsm.REDHAT_REPO") + mocker.patch("ansible_collections.community.general.plugins.modules.redhat_subscription.isfile", return_value=False) + mocker.patch("ansible_collections.community.general.plugins.modules.redhat_subscription.unlink", return_value=True) + mocker.patch( + "ansible_collections.community.general.plugins.modules.redhat_subscription.AnsibleModule.get_bin_path", + 
return_value="/testbin/subscription-manager", + ) + mocker.patch( + "ansible_collections.community.general.plugins.modules.redhat_subscription.Rhsm._can_connect_to_dbus", + return_value=False, + ) + mocker.patch( + "ansible_collections.community.general.plugins.modules.redhat_subscription.Rhsm._has_dbus_interface", + return_value=False, + ) + mocker.patch("ansible_collections.community.general.plugins.modules.redhat_subscription.getuid", return_value=0) -@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module']) -@pytest.mark.usefixtures('patch_ansible_module') +@pytest.mark.parametrize("patch_ansible_module", [{}], indirect=["patch_ansible_module"]) +@pytest.mark.usefixtures("patch_ansible_module") def test_without_required_parameters_unregistered(mocker, capfd, patch_redhat_subscription): """ Failure must occurs when all parameters are missing """ mock_run_command = mocker.patch.object( - basic.AnsibleModule, - 'run_command', - return_value=(1, 'This system is not yet registered.', '')) + basic.AnsibleModule, "run_command", return_value=(1, "This system is not yet registered.", "") + ) with pytest.raises(SystemExit): redhat_subscription.main() out, err = capfd.readouterr() results = json.loads(out) - assert results['failed'] - assert 'state is present but any of the following are missing' in results['msg'] + assert results["failed"] + assert "state is present but any of the following are missing" in results["msg"] -@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module']) -@pytest.mark.usefixtures('patch_ansible_module') +@pytest.mark.parametrize("patch_ansible_module", [{}], indirect=["patch_ansible_module"]) +@pytest.mark.usefixtures("patch_ansible_module") def test_without_required_parameters_registered(mocker, capfd, patch_redhat_subscription): """ System already registered, no parameters required (state=present is the @@ -61,404 +65,396 @@ def test_without_required_parameters_registered(mocker, capfd, patch_redhat_subs """ mock_run_command = mocker.patch.object( basic.AnsibleModule, - 'run_command', - return_value=(0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '')) + "run_command", + return_value=(0, "system identity: b26df632-25ed-4452-8f89-0308bfd167cb", ""), + ) with pytest.raises(SystemExit): redhat_subscription.main() out, err = capfd.readouterr() results = json.loads(out) - assert 'changed' in results - if 'msg' in results: - assert results['msg'] == 'System already registered.' + assert "changed" in results + if "msg" in results: + assert results["msg"] == "System already registered." TEST_CASES = [ # Test the case, when the system is already registered [ { - 'state': 'present', - 'server_hostname': 'subscription.rhsm.redhat.com', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin' + "state": "present", + "server_hostname": "subscription.rhsm.redhat.com", + "username": "admin", + "password": "admin", + "org_id": "admin", }, { - 'id': 'test_already_registered_system', - 'run_command.calls': [ + "id": "test_already_registered_system", + "run_command.calls": [ ( # Calling of following command will be asserted - ['/testbin/subscription-manager', 'identity'], + ["/testbin/subscription-manager", "identity"], # Was return code checked? 
- {'check_rc': False}, + {"check_rc": False}, # Mock of returned code, stdout and stderr - (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '') + (0, "system identity: b26df632-25ed-4452-8f89-0308bfd167cb", ""), ) ], - 'changed': False, - 'msg': 'System already registered.' - } + "changed": False, + "msg": "System already registered.", + }, ], # Already registered system without credentials specified [ { - 'state': 'present', + "state": "present", }, { - 'id': 'test_already_registered_system', - 'run_command.calls': [ + "id": "test_already_registered_system", + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (0, "system identity: b26df632-25ed-4452-8f89-0308bfd167cb", ""), ) ], - 'changed': False, - 'msg': 'System already registered.' - } + "changed": False, + "msg": "System already registered.", + }, ], # Test simple registration using username and password [ { - 'state': 'present', - 'server_hostname': 'satellite.company.com', - 'username': 'admin', - 'password': 'admin', + "state": "present", + "server_hostname": "satellite.company.com", + "username": "admin", + "password": "admin", }, { - 'id': 'test_registeration_username_password', - 'run_command.calls': [ + "id": "test_registeration_username_password", + "run_command.calls": [ + (["/testbin/subscription-manager", "identity"], {"check_rc": False}, (1, "", "")), ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (1, '', '') + ["/testbin/subscription-manager", "config", "--server.hostname=satellite.company.com"], + {"check_rc": True}, + (0, "", ""), ), ( - ['/testbin/subscription-manager', 'config', '--server.hostname=satellite.company.com'], - {'check_rc': True}, - (0, '', '') + ["/testbin/subscription-manager", "register", "--username", "admin", "--password", "admin"], + {"check_rc": True, "expand_user_and_vars": False}, + (0, "", ""), ), - ( - ['/testbin/subscription-manager', 'register', - '--username', 'admin', - '--password', 'admin'], - {'check_rc': True, 'expand_user_and_vars': False}, - (0, '', '') - ) ], - 'changed': True, - 'msg': "System successfully registered to 'satellite.company.com'." 
- } + "changed": True, + "msg": "System successfully registered to 'satellite.company.com'.", + }, ], # Test simple registration using token [ { - 'state': 'present', - 'server_hostname': 'satellite.company.com', - 'token': 'fake_token', + "state": "present", + "server_hostname": "satellite.company.com", + "token": "fake_token", }, { - 'id': 'test_registeration_token', - 'run_command.calls': [ + "id": "test_registeration_token", + "run_command.calls": [ + (["/testbin/subscription-manager", "identity"], {"check_rc": False}, (1, "", "")), ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (1, '', '') + ["/testbin/subscription-manager", "config", "--server.hostname=satellite.company.com"], + {"check_rc": True}, + (0, "", ""), ), ( - ['/testbin/subscription-manager', 'config', '--server.hostname=satellite.company.com'], - {'check_rc': True}, - (0, '', '') + ["/testbin/subscription-manager", "register", "--token", "fake_token"], + {"check_rc": True, "expand_user_and_vars": False}, + (0, "", ""), ), - ( - ['/testbin/subscription-manager', 'register', - '--token', 'fake_token'], - {'check_rc': True, 'expand_user_and_vars': False}, - (0, '', '') - ) ], - 'changed': True, - 'msg': "System successfully registered to 'satellite.company.com'." - } + "changed": True, + "msg": "System successfully registered to 'satellite.company.com'.", + }, ], # Test unregistration, when system is unregistered [ { - 'state': 'absent', - 'server_hostname': 'subscription.rhsm.redhat.com', - 'username': 'admin', - 'password': 'admin', + "state": "absent", + "server_hostname": "subscription.rhsm.redhat.com", + "username": "admin", + "password": "admin", }, { - 'id': 'test_unregisteration', - 'run_command.calls': [ + "id": "test_unregisteration", + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (0, "system identity: b26df632-25ed-4452-8f89-0308bfd167cb", ""), ), - ( - ['/testbin/subscription-manager', 'unregister'], - {'check_rc': True}, - (0, '', '') - ) + (["/testbin/subscription-manager", "unregister"], {"check_rc": True}, (0, "", "")), ], - 'changed': True, - 'msg': "System successfully unregistered from subscription.rhsm.redhat.com." - } + "changed": True, + "msg": "System successfully unregistered from subscription.rhsm.redhat.com.", + }, ], # Test unregistration of already unregistered system [ { - 'state': 'absent', - 'server_hostname': 'subscription.rhsm.redhat.com', - 'username': 'admin', - 'password': 'admin', + "state": "absent", + "server_hostname": "subscription.rhsm.redhat.com", + "username": "admin", + "password": "admin", }, { - 'id': 'test_unregisteration_of_unregistered_system', - 'run_command.calls': [ + "id": "test_unregisteration_of_unregistered_system", + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (1, 'This system is not yet registered.', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (1, "This system is not yet registered.", ""), ) ], - 'changed': False, - 'msg': "System already unregistered." 
- } + "changed": False, + "msg": "System already unregistered.", + }, ], # Test registration using activation key [ { - 'state': 'present', - 'server_hostname': 'satellite.company.com', - 'activationkey': 'some-activation-key', - 'org_id': 'admin' + "state": "present", + "server_hostname": "satellite.company.com", + "activationkey": "some-activation-key", + "org_id": "admin", }, { - 'id': 'test_registeration_activation_key', - 'run_command.calls': [ + "id": "test_registeration_activation_key", + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (1, 'This system is not yet registered.', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (1, "This system is not yet registered.", ""), ), ( - ['/testbin/subscription-manager', 'config', '--server.hostname=satellite.company.com'], - {'check_rc': True}, - (0, '', '') + ["/testbin/subscription-manager", "config", "--server.hostname=satellite.company.com"], + {"check_rc": True}, + (0, "", ""), ), ( [ - '/testbin/subscription-manager', - 'register', - '--org', 'admin', - '--activationkey', 'some-activation-key' + "/testbin/subscription-manager", + "register", + "--org", + "admin", + "--activationkey", + "some-activation-key", ], - {'check_rc': True, 'expand_user_and_vars': False}, - (0, '', '') - ) + {"check_rc": True, "expand_user_and_vars": False}, + (0, "", ""), + ), ], - 'changed': True, - 'msg': "System successfully registered to 'satellite.company.com'." - } + "changed": True, + "msg": "System successfully registered to 'satellite.company.com'.", + }, ], # Test of registration using username and password with auto-attach option [ + {"state": "present", "username": "admin", "password": "admin", "org_id": "admin", "auto_attach": "true"}, { - 'state': 'present', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin', - 'auto_attach': 'true' - }, - { - 'id': 'test_registeration_username_password_auto_attach', - 'run_command.calls': [ + "id": "test_registeration_username_password_auto_attach", + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (1, 'This system is not yet registered.', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (1, "This system is not yet registered.", ""), ), ( [ - '/testbin/subscription-manager', - 'register', - '--org', 'admin', - '--auto-attach', - '--username', 'admin', - '--password', 'admin' + "/testbin/subscription-manager", + "register", + "--org", + "admin", + "--auto-attach", + "--username", + "admin", + "--password", + "admin", ], - {'check_rc': True, 'expand_user_and_vars': False}, - (0, '', '') - ) + {"check_rc": True, "expand_user_and_vars": False}, + (0, "", ""), + ), ], - 'changed': True, - 'msg': "System successfully registered to 'None'." 
- } + "changed": True, + "msg": "System successfully registered to 'None'.", + }, ], # Test of force registration despite the system is already registered [ + {"state": "present", "username": "admin", "password": "admin", "org_id": "admin", "force_register": "true"}, { - 'state': 'present', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin', - 'force_register': 'true' - }, - { - 'id': 'test_force_registeration_username_password', - 'run_command.calls': [ + "id": "test_force_registeration_username_password", + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (0, 'This system already registered.', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (0, "This system already registered.", ""), ), ( [ - '/testbin/subscription-manager', - 'register', - '--force', - '--org', 'admin', - '--username', 'admin', - '--password', 'admin' + "/testbin/subscription-manager", + "register", + "--force", + "--org", + "admin", + "--username", + "admin", + "--password", + "admin", ], - {'check_rc': True, 'expand_user_and_vars': False}, - (0, '', '') - ) + {"check_rc": True, "expand_user_and_vars": False}, + (0, "", ""), + ), ], - 'changed': True, - 'msg': "System successfully registered to 'None'." - } + "changed": True, + "msg": "System successfully registered to 'None'.", + }, ], # Test of registration with arguments that are not part of register options but needs to be configured [ { - 'state': 'present', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin', - 'force_register': 'true', - 'server_prefix': '/rhsm', - 'server_port': '443' + "state": "present", + "username": "admin", + "password": "admin", + "org_id": "admin", + "force_register": "true", + "server_prefix": "/rhsm", + "server_port": "443", }, { - 'id': 'test_arguments_not_in_register_options', - 'run_command.calls': [ + "id": "test_arguments_not_in_register_options", + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (0, 'This system already registered.', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (0, "This system already registered.", ""), ), ( - ['/testbin/subscription-manager', 'config', - '--server.port=443', - '--server.prefix=/rhsm' - ], - {'check_rc': True}, - (0, '', '') + ["/testbin/subscription-manager", "config", "--server.port=443", "--server.prefix=/rhsm"], + {"check_rc": True}, + (0, "", ""), ), ( - ['/testbin/subscription-manager', 'register', - '--force', - '--org', 'admin', - '--username', 'admin', - '--password', 'admin'], - {'check_rc': True, 'expand_user_and_vars': False}, - (0, '', '') - ) + [ + "/testbin/subscription-manager", + "register", + "--force", + "--org", + "admin", + "--username", + "admin", + "--password", + "admin", + ], + {"check_rc": True, "expand_user_and_vars": False}, + (0, "", ""), + ), ], - 'changed': True, - 'msg': "System successfully registered to 'None'." 
- } + "changed": True, + "msg": "System successfully registered to 'None'.", + }, ], # Test of registration using username, password and proxy options [ { - 'state': 'present', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin', - 'force_register': 'true', - 'server_proxy_hostname': 'proxy.company.com', - 'server_proxy_scheme': 'https', - 'server_proxy_port': '12345', - 'server_proxy_user': 'proxy_user', - 'server_proxy_password': 'secret_proxy_password' + "state": "present", + "username": "admin", + "password": "admin", + "org_id": "admin", + "force_register": "true", + "server_proxy_hostname": "proxy.company.com", + "server_proxy_scheme": "https", + "server_proxy_port": "12345", + "server_proxy_user": "proxy_user", + "server_proxy_password": "secret_proxy_password", }, { - 'id': 'test_registeration_username_password_proxy_options', - 'run_command.calls': [ + "id": "test_registeration_username_password_proxy_options", + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (0, 'This system already registered.', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (0, "This system already registered.", ""), ), ( [ - '/testbin/subscription-manager', - 'config', - '--server.proxy_hostname=proxy.company.com', - '--server.proxy_password=secret_proxy_password', - '--server.proxy_port=12345', - '--server.proxy_scheme=https', - '--server.proxy_user=proxy_user' + "/testbin/subscription-manager", + "config", + "--server.proxy_hostname=proxy.company.com", + "--server.proxy_password=secret_proxy_password", + "--server.proxy_port=12345", + "--server.proxy_scheme=https", + "--server.proxy_user=proxy_user", ], - {'check_rc': True}, - (0, '', '') + {"check_rc": True}, + (0, "", ""), ), ( [ - '/testbin/subscription-manager', - 'register', - '--force', - '--org', 'admin', - '--username', 'admin', - '--password', 'admin' + "/testbin/subscription-manager", + "register", + "--force", + "--org", + "admin", + "--username", + "admin", + "--password", + "admin", ], - {'check_rc': True, 'expand_user_and_vars': False}, - (0, '', '') - ) + {"check_rc": True, "expand_user_and_vars": False}, + (0, "", ""), + ), ], - 'changed': True, - 'msg': "System successfully registered to 'None'." 
- } + "changed": True, + "msg": "System successfully registered to 'None'.", + }, ], # Test of registration using username and password and attach to pool ID and quantities [ { - 'state': 'present', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin', - 'pool_ids': [{'ff8080816b8e967f016b8e99632804a6': 2}, {'ff8080816b8e967f016b8e99747107e9': 4}] + "state": "present", + "username": "admin", + "password": "admin", + "org_id": "admin", + "pool_ids": [{"ff8080816b8e967f016b8e99632804a6": 2}, {"ff8080816b8e967f016b8e99747107e9": 4}], }, { - 'id': 'test_registeration_username_password_pool_ids_quantities', - 'run_command.calls': [ + "id": "test_registeration_username_password_pool_ids_quantities", + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (1, 'This system is not yet registered.', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (1, "This system is not yet registered.", ""), ), ( [ - '/testbin/subscription-manager', - 'register', - '--org', 'admin', - '--username', 'admin', - '--password', 'admin' + "/testbin/subscription-manager", + "register", + "--org", + "admin", + "--username", + "admin", + "--password", + "admin", ], - {'check_rc': True, 'expand_user_and_vars': False}, - (0, '', '') + {"check_rc": True, "expand_user_and_vars": False}, + (0, "", ""), ), ( [ - 'subscription-manager list --available', - {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}}, - (0, - ''' + "subscription-manager list --available", + {"check_rc": True, "environ_update": {"LANG": "C", "LC_ALL": "C", "LC_MESSAGES": "C"}}, + ( + 0, + """ +-------------------------------------------+ Available Subscriptions +-------------------------------------------+ @@ -497,68 +493,78 @@ def test_without_required_parameters_registered(mocker, capfd, patch_redhat_subs Starts: 06/25/19 Ends: 06/24/20 Entitlement Type: Physical -''', '') +""", + "", + ), ] ), ( [ - '/testbin/subscription-manager', - 'attach', - '--pool', 'ff8080816b8e967f016b8e99632804a6', - '--quantity', '2' + "/testbin/subscription-manager", + "attach", + "--pool", + "ff8080816b8e967f016b8e99632804a6", + "--quantity", + "2", ], - {'check_rc': True}, - (0, '', '') + {"check_rc": True}, + (0, "", ""), ), ( [ - '/testbin/subscription-manager', - 'attach', - '--pool', 'ff8080816b8e967f016b8e99747107e9', - '--quantity', '4' + "/testbin/subscription-manager", + "attach", + "--pool", + "ff8080816b8e967f016b8e99747107e9", + "--quantity", + "4", ], - {'check_rc': True}, - (0, '', '') - ) + {"check_rc": True}, + (0, "", ""), + ), ], - 'changed': True, - 'msg': "System successfully registered to 'None'." 
- } + "changed": True, + "msg": "System successfully registered to 'None'.", + }, ], # Test of registration using username and password and attach to pool ID without quantities [ { - 'state': 'present', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin', - 'pool_ids': ['ff8080816b8e967f016b8e99632804a6', 'ff8080816b8e967f016b8e99747107e9'] + "state": "present", + "username": "admin", + "password": "admin", + "org_id": "admin", + "pool_ids": ["ff8080816b8e967f016b8e99632804a6", "ff8080816b8e967f016b8e99747107e9"], }, { - 'id': 'test_registeration_username_password_pool_ids', - 'run_command.calls': [ + "id": "test_registeration_username_password_pool_ids", + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (1, 'This system is not yet registered.', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (1, "This system is not yet registered.", ""), ), ( [ - '/testbin/subscription-manager', - 'register', - '--org', 'admin', - '--username', 'admin', - '--password', 'admin' + "/testbin/subscription-manager", + "register", + "--org", + "admin", + "--username", + "admin", + "--password", + "admin", ], - {'check_rc': True, 'expand_user_and_vars': False}, - (0, '', '') + {"check_rc": True, "expand_user_and_vars": False}, + (0, "", ""), ), ( [ - 'subscription-manager list --available', - {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}}, - (0, - ''' + "subscription-manager list --available", + {"check_rc": True, "environ_update": {"LANG": "C", "LC_ALL": "C", "LC_MESSAGES": "C"}}, + ( + 0, + """ +-------------------------------------------+ Available Subscriptions +-------------------------------------------+ @@ -597,66 +603,64 @@ def test_without_required_parameters_registered(mocker, capfd, patch_redhat_subs Starts: 06/25/19 Ends: 06/24/20 Entitlement Type: Physical -''', '') +""", + "", + ), ] ), ( - [ - '/testbin/subscription-manager', - 'attach', - '--pool', 'ff8080816b8e967f016b8e99632804a6' - ], - {'check_rc': True}, - (0, '', '') + ["/testbin/subscription-manager", "attach", "--pool", "ff8080816b8e967f016b8e99632804a6"], + {"check_rc": True}, + (0, "", ""), ), ( - [ - '/testbin/subscription-manager', - 'attach', - '--pool', 'ff8080816b8e967f016b8e99747107e9' - ], - {'check_rc': True}, - (0, '', '') - ) + ["/testbin/subscription-manager", "attach", "--pool", "ff8080816b8e967f016b8e99747107e9"], + {"check_rc": True}, + (0, "", ""), + ), ], - 'changed': True, - 'msg': "System successfully registered to 'None'." 
- } + "changed": True, + "msg": "System successfully registered to 'None'.", + }, ], # Test of registration using username and password and attach to pool ID (one pool) [ { - 'state': 'present', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin', - 'pool_ids': ['ff8080816b8e967f016b8e99632804a6'] + "state": "present", + "username": "admin", + "password": "admin", + "org_id": "admin", + "pool_ids": ["ff8080816b8e967f016b8e99632804a6"], }, { - 'id': 'test_registeration_username_password_one_pool_id', - 'run_command.calls': [ + "id": "test_registeration_username_password_one_pool_id", + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (1, 'This system is not yet registered.', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (1, "This system is not yet registered.", ""), ), ( [ - '/testbin/subscription-manager', - 'register', - '--org', 'admin', - '--username', 'admin', - '--password', 'admin' + "/testbin/subscription-manager", + "register", + "--org", + "admin", + "--username", + "admin", + "--password", + "admin", ], - {'check_rc': True, 'expand_user_and_vars': False}, - (0, '', '') + {"check_rc": True, "expand_user_and_vars": False}, + (0, "", ""), ), ( [ - 'subscription-manager list --available', - {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}}, - (0, - ''' + "subscription-manager list --available", + {"check_rc": True, "environ_update": {"LANG": "C", "LC_ALL": "C", "LC_MESSAGES": "C"}}, + ( + 0, + """ +-------------------------------------------+ Available Subscriptions +-------------------------------------------+ @@ -695,41 +699,46 @@ def test_without_required_parameters_registered(mocker, capfd, patch_redhat_subs Starts: 06/25/19 Ends: 06/24/20 Entitlement Type: Physical -''', '') +""", + "", + ), ] ), ( [ - '/testbin/subscription-manager', - 'attach', - '--pool', 'ff8080816b8e967f016b8e99632804a6', + "/testbin/subscription-manager", + "attach", + "--pool", + "ff8080816b8e967f016b8e99632804a6", ], - {'check_rc': True}, - (0, '', '') - ) + {"check_rc": True}, + (0, "", ""), + ), ], - 'changed': True, - 'msg': "System successfully registered to 'None'." 
- } + "changed": True, + "msg": "System successfully registered to 'None'.", + }, ], # Test attaching different set of pool IDs [ { - 'state': 'present', - 'pool_ids': [{'ff8080816b8e967f016b8e99632804a6': 2}, {'ff8080816b8e967f016b8e99747107e9': 4}] + "state": "present", + "pool_ids": [{"ff8080816b8e967f016b8e99632804a6": 2}, {"ff8080816b8e967f016b8e99747107e9": 4}], }, { - 'id': 'test_attaching_different_pool_ids', - 'run_command.calls': [ + "id": "test_attaching_different_pool_ids", + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', ''), + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (0, "system identity: b26df632-25ed-4452-8f89-0308bfd167cb", ""), ), ( - 'subscription-manager list --consumed', - {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}}, - (0, ''' + "subscription-manager list --consumed", + {"check_rc": True, "environ_update": {"LANG": "C", "LC_ALL": "C", "LC_MESSAGES": "C"}}, + ( + 0, + """ +-------------------------------------------+ Consumed Subscriptions +-------------------------------------------+ @@ -753,23 +762,26 @@ def test_without_required_parameters_registered(mocker, capfd, patch_redhat_subs Starts: 06/25/19 Ends: 06/24/20 Entitlement Type: Physical -''', '') +""", + "", + ), ), ( [ - '/testbin/subscription-manager', - 'remove', - '--serial=7807912223970164816', + "/testbin/subscription-manager", + "remove", + "--serial=7807912223970164816", ], - {'check_rc': True}, - (0, '', '') + {"check_rc": True}, + (0, "", ""), ), ( [ - 'subscription-manager list --available', - {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}}, - (0, - ''' + "subscription-manager list --available", + {"check_rc": True, "environ_update": {"LANG": "C", "LC_ALL": "C", "LC_MESSAGES": "C"}}, + ( + 0, + """ +-------------------------------------------+ Available Subscriptions +-------------------------------------------+ @@ -826,52 +838,57 @@ def test_without_required_parameters_registered(mocker, capfd, patch_redhat_subs Starts: 11.7.2019 Ends: 10.7.2020 Entitlement Type: Physical -''', '') +""", + "", + ), ] ), ( [ - '/testbin/subscription-manager', - 'attach', - '--pool', 'ff8080816b8e967f016b8e99632804a6', - '--quantity', '2' + "/testbin/subscription-manager", + "attach", + "--pool", + "ff8080816b8e967f016b8e99632804a6", + "--quantity", + "2", ], - {'check_rc': True}, - (0, '', '') + {"check_rc": True}, + (0, "", ""), ), ( [ - '/testbin/subscription-manager', - 'attach', - '--pool', 'ff8080816b8e967f016b8e99747107e9', - '--quantity', '4' + "/testbin/subscription-manager", + "attach", + "--pool", + "ff8080816b8e967f016b8e99747107e9", + "--quantity", + "4", ], - {'check_rc': True}, - (0, '', '') - ) + {"check_rc": True}, + (0, "", ""), + ), ], - 'changed': True, - } - ] + "changed": True, + }, + ], ] -TEST_CASES_IDS: list[str] = [item[1]['id'] for item in TEST_CASES] # type: ignore +TEST_CASES_IDS: list[str] = [item[1]["id"] for item in TEST_CASES] # type: ignore -@pytest.mark.parametrize('patch_ansible_module, testcase', TEST_CASES, ids=TEST_CASES_IDS, indirect=['patch_ansible_module']) -@pytest.mark.usefixtures('patch_ansible_module') +@pytest.mark.parametrize( + "patch_ansible_module, testcase", TEST_CASES, ids=TEST_CASES_IDS, indirect=["patch_ansible_module"] +) +@pytest.mark.usefixtures("patch_ansible_module") def test_redhat_subscription(mocker, capfd, patch_redhat_subscription, 
testcase): """ Run unit tests for test cases listen in TEST_CASES """ # Mock function used for running commands first - call_results = [item[2] for item in testcase['run_command.calls']] - mock_run_command = mocker.patch.object( - basic.AnsibleModule, - 'run_command', - side_effect=call_results) + call_results = [item[2] for item in testcase["run_command.calls"]] + mock_run_command = mocker.patch.object(basic.AnsibleModule, "run_command", side_effect=call_results) # Try to run test case with pytest.raises(SystemExit): @@ -880,15 +897,15 @@ def test_redhat_subscription(mocker, capfd, patch_redhat_subscription, testcase) out, err = capfd.readouterr() results = json.loads(out) - assert 'changed' in results - assert results['changed'] == testcase['changed'] - if 'msg' in results: - assert results['msg'] == testcase['msg'] + assert "changed" in results + assert results["changed"] == testcase["changed"] + if "msg" in results: + assert results["msg"] == testcase["msg"] - assert basic.AnsibleModule.run_command.call_count == len(testcase['run_command.calls']) + assert basic.AnsibleModule.run_command.call_count == len(testcase["run_command.calls"]) if basic.AnsibleModule.run_command.call_count: call_args_list = [(item[0][0], item[1]) for item in basic.AnsibleModule.run_command.call_args_list] - expected_call_args_list = [(item[0], item[1]) for item in testcase['run_command.calls']] + expected_call_args_list = [(item[0], item[1]) for item in testcase["run_command.calls"]] assert call_args_list == expected_call_args_list @@ -897,343 +914,365 @@ def test_redhat_subscription(mocker, capfd, patch_redhat_subscription, testcase) # and synchronization with candlepin server [ { - 'state': 'present', - 'server_hostname': 'subscription.rhsm.redhat.com', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin', - 'syspurpose': { - 'role': 'AwesomeOS', - 'usage': 'Production', - 'service_level_agreement': 'Premium', - 'addons': ['ADDON1', 'ADDON2'], - 'sync': True - } + "state": "present", + "server_hostname": "subscription.rhsm.redhat.com", + "username": "admin", + "password": "admin", + "org_id": "admin", + "syspurpose": { + "role": "AwesomeOS", + "usage": "Production", + "service_level_agreement": "Premium", + "addons": ["ADDON1", "ADDON2"], + "sync": True, + }, }, { - 'id': 'test_setting_syspurpose_attributes', - 'existing_syspurpose': {}, - 'expected_syspurpose': { - 'role': 'AwesomeOS', - 'usage': 'Production', - 'service_level_agreement': 'Premium', - 'addons': ['ADDON1', 'ADDON2'], + "id": "test_setting_syspurpose_attributes", + "existing_syspurpose": {}, + "expected_syspurpose": { + "role": "AwesomeOS", + "usage": "Production", + "service_level_agreement": "Premium", + "addons": ["ADDON1", "ADDON2"], }, - 'run_command.calls': [ + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (0, "system identity: b26df632-25ed-4452-8f89-0308bfd167cb", ""), ), ( - ['/testbin/subscription-manager', 'status'], - {'check_rc': False}, - (0, ''' + ["/testbin/subscription-manager", "status"], + {"check_rc": False}, + ( + 0, + """ +-------------------------------------------+ System Status Details +-------------------------------------------+ Overall Status: Current System Purpose Status: Matched -''', '') - ) +""", + "", + ), + ), ], - 'changed': True, - 'msg': 'Syspurpose attributes changed.' 
- } + "changed": True, + "msg": "Syspurpose attributes changed.", + }, ], # Test setting unsupported attributes [ { - 'state': 'present', - 'server_hostname': 'subscription.rhsm.redhat.com', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin', - 'syspurpose': { - 'foo': 'Bar', - 'role': 'AwesomeOS', - 'usage': 'Production', - 'service_level_agreement': 'Premium', - 'addons': ['ADDON1', 'ADDON2'], - 'sync': True - } + "state": "present", + "server_hostname": "subscription.rhsm.redhat.com", + "username": "admin", + "password": "admin", + "org_id": "admin", + "syspurpose": { + "foo": "Bar", + "role": "AwesomeOS", + "usage": "Production", + "service_level_agreement": "Premium", + "addons": ["ADDON1", "ADDON2"], + "sync": True, + }, }, { - 'id': 'test_setting_syspurpose_wrong_attributes', - 'existing_syspurpose': {}, - 'expected_syspurpose': {}, - 'run_command.calls': [], - 'failed': True - } + "id": "test_setting_syspurpose_wrong_attributes", + "existing_syspurpose": {}, + "expected_syspurpose": {}, + "run_command.calls": [], + "failed": True, + }, ], # Test setting addons not a list [ { - 'state': 'present', - 'server_hostname': 'subscription.rhsm.redhat.com', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin', - 'syspurpose': { - 'role': 'AwesomeOS', - 'usage': 'Production', - 'service_level_agreement': 'Premium', - 'addons': 'ADDON1', - 'sync': True - } + "state": "present", + "server_hostname": "subscription.rhsm.redhat.com", + "username": "admin", + "password": "admin", + "org_id": "admin", + "syspurpose": { + "role": "AwesomeOS", + "usage": "Production", + "service_level_agreement": "Premium", + "addons": "ADDON1", + "sync": True, + }, }, { - 'id': 'test_setting_syspurpose_addons_not_list', - 'existing_syspurpose': {}, - 'expected_syspurpose': { - 'role': 'AwesomeOS', - 'usage': 'Production', - 'service_level_agreement': 'Premium', - 'addons': ['ADDON1'] + "id": "test_setting_syspurpose_addons_not_list", + "existing_syspurpose": {}, + "expected_syspurpose": { + "role": "AwesomeOS", + "usage": "Production", + "service_level_agreement": "Premium", + "addons": ["ADDON1"], }, - 'run_command.calls': [ + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (0, "system identity: b26df632-25ed-4452-8f89-0308bfd167cb", ""), ), ( - ['/testbin/subscription-manager', 'status'], - {'check_rc': False}, - (0, ''' + ["/testbin/subscription-manager", "status"], + {"check_rc": False}, + ( + 0, + """ +-------------------------------------------+ System Status Details +-------------------------------------------+ Overall Status: Current System Purpose Status: Matched -''', '') - ) +""", + "", + ), + ), ], - 'changed': True, - 'msg': 'Syspurpose attributes changed.' - } + "changed": True, + "msg": "Syspurpose attributes changed.", + }, ], # Test setting syspurpose attributes (system is already registered) # without synchronization with candlepin server.
Some syspurpose attributes were set # in the past [ { - 'state': 'present', - 'server_hostname': 'subscription.rhsm.redhat.com', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin', - 'syspurpose': { - 'role': 'AwesomeOS', - 'service_level_agreement': 'Premium', - 'addons': ['ADDON1', 'ADDON2'], - 'sync': False - } + "state": "present", + "server_hostname": "subscription.rhsm.redhat.com", + "username": "admin", + "password": "admin", + "org_id": "admin", + "syspurpose": { + "role": "AwesomeOS", + "service_level_agreement": "Premium", + "addons": ["ADDON1", "ADDON2"], + "sync": False, + }, }, { - 'id': 'test_changing_syspurpose_attributes', - 'existing_syspurpose': { - 'role': 'CoolOS', - 'usage': 'Production', - 'service_level_agreement': 'Super', - 'addons': [], - 'foo': 'bar' + "id": "test_changing_syspurpose_attributes", + "existing_syspurpose": { + "role": "CoolOS", + "usage": "Production", + "service_level_agreement": "Super", + "addons": [], + "foo": "bar", }, - 'expected_syspurpose': { - 'role': 'AwesomeOS', - 'service_level_agreement': 'Premium', - 'addons': ['ADDON1', 'ADDON2'], - 'foo': 'bar' + "expected_syspurpose": { + "role": "AwesomeOS", + "service_level_agreement": "Premium", + "addons": ["ADDON1", "ADDON2"], + "foo": "bar", }, - 'run_command.calls': [ + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (0, "system identity: b26df632-25ed-4452-8f89-0308bfd167cb", ""), ), ], - 'changed': True, - 'msg': 'Syspurpose attributes changed.' - } + "changed": True, + "msg": "Syspurpose attributes changed.", + }, ], # Test trying to set syspurpose attributes (system is already registered) # without synchronization with candlepin server. Some syspurpose attributes were set # in the past. Syspurpose attributes are the same as before [ { - 'state': 'present', - 'server_hostname': 'subscription.rhsm.redhat.com', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin', - 'syspurpose': { - 'role': 'AwesomeOS', - 'service_level_agreement': 'Premium', - 'addons': ['ADDON1', 'ADDON2'], - 'sync': False - } + "state": "present", + "server_hostname": "subscription.rhsm.redhat.com", + "username": "admin", + "password": "admin", + "org_id": "admin", + "syspurpose": { + "role": "AwesomeOS", + "service_level_agreement": "Premium", + "addons": ["ADDON1", "ADDON2"], + "sync": False, + }, }, { - 'id': 'test_not_changing_syspurpose_attributes', - 'existing_syspurpose': { - 'role': 'AwesomeOS', - 'service_level_agreement': 'Premium', - 'addons': ['ADDON1', 'ADDON2'], + "id": "test_not_changing_syspurpose_attributes", + "existing_syspurpose": { + "role": "AwesomeOS", + "service_level_agreement": "Premium", + "addons": ["ADDON1", "ADDON2"], }, - 'expected_syspurpose': { - 'role': 'AwesomeOS', - 'service_level_agreement': 'Premium', - 'addons': ['ADDON1', 'ADDON2'], + "expected_syspurpose": { + "role": "AwesomeOS", + "service_level_agreement": "Premium", + "addons": ["ADDON1", "ADDON2"], }, - 'run_command.calls': [ + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (0, "system identity: b26df632-25ed-4452-8f89-0308bfd167cb", ""), ), ], - 'changed': False, - 'msg': 'System already registered.'
- } + "changed": False, + "msg": "System already registered.", + }, ], # Test of registration using username and password with auto-attach option, when # syspurpose attributes are set [ { - 'state': 'present', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin', - 'auto_attach': 'true', - 'syspurpose': { - 'role': 'AwesomeOS', - 'usage': 'Testing', - 'service_level_agreement': 'Super', - 'addons': ['ADDON1'], - 'sync': False + "state": "present", + "username": "admin", + "password": "admin", + "org_id": "admin", + "auto_attach": "true", + "syspurpose": { + "role": "AwesomeOS", + "usage": "Testing", + "service_level_agreement": "Super", + "addons": ["ADDON1"], + "sync": False, }, }, { - 'id': 'test_registeration_username_password_auto_attach_syspurpose', - 'existing_syspurpose': None, - 'expected_syspurpose': { - 'role': 'AwesomeOS', - 'usage': 'Testing', - 'service_level_agreement': 'Super', - 'addons': ['ADDON1'], + "id": "test_registeration_username_password_auto_attach_syspurpose", + "existing_syspurpose": None, + "expected_syspurpose": { + "role": "AwesomeOS", + "usage": "Testing", + "service_level_agreement": "Super", + "addons": ["ADDON1"], }, - 'run_command.calls': [ + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (1, 'This system is not yet registered.', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (1, "This system is not yet registered.", ""), ), ( [ - '/testbin/subscription-manager', - 'register', - '--org', 'admin', - '--auto-attach', - '--username', 'admin', - '--password', 'admin' + "/testbin/subscription-manager", + "register", + "--org", + "admin", + "--auto-attach", + "--username", + "admin", + "--password", + "admin", ], - {'check_rc': True, 'expand_user_and_vars': False}, - (0, '', '') - ) + {"check_rc": True, "expand_user_and_vars": False}, + (0, "", ""), + ), ], - 'changed': True, - 'msg': "System successfully registered to 'None'." - } + "changed": True, + "msg": "System successfully registered to 'None'.", + }, ], # Test of registration using username and password with auto-attach option, when # syspurpose attributes are set. 
Syspurpose attributes are also synchronized # in this case [ { - 'state': 'present', - 'username': 'admin', - 'password': 'admin', - 'org_id': 'admin', - 'auto_attach': 'true', - 'syspurpose': { - 'role': 'AwesomeOS', - 'usage': 'Testing', - 'service_level_agreement': 'Super', - 'addons': ['ADDON1'], - 'sync': True + "state": "present", + "username": "admin", + "password": "admin", + "org_id": "admin", + "auto_attach": "true", + "syspurpose": { + "role": "AwesomeOS", + "usage": "Testing", + "service_level_agreement": "Super", + "addons": ["ADDON1"], + "sync": True, }, }, { - 'id': 'test_registeration_username_password_auto_attach_syspurpose_sync', - 'existing_syspurpose': None, - 'expected_syspurpose': { - 'role': 'AwesomeOS', - 'usage': 'Testing', - 'service_level_agreement': 'Super', - 'addons': ['ADDON1'], + "id": "test_registeration_username_password_auto_attach_syspurpose_sync", + "existing_syspurpose": None, + "expected_syspurpose": { + "role": "AwesomeOS", + "usage": "Testing", + "service_level_agreement": "Super", + "addons": ["ADDON1"], }, - 'run_command.calls': [ + "run_command.calls": [ ( - ['/testbin/subscription-manager', 'identity'], - {'check_rc': False}, - (1, 'This system is not yet registered.', '') + ["/testbin/subscription-manager", "identity"], + {"check_rc": False}, + (1, "This system is not yet registered.", ""), ), ( [ - '/testbin/subscription-manager', - 'register', - '--org', 'admin', - '--auto-attach', - '--username', 'admin', - '--password', 'admin' + "/testbin/subscription-manager", + "register", + "--org", + "admin", + "--auto-attach", + "--username", + "admin", + "--password", + "admin", ], - {'check_rc': True, 'expand_user_and_vars': False}, - (0, '', '') + {"check_rc": True, "expand_user_and_vars": False}, + (0, "", ""), ), ( - ['/testbin/subscription-manager', 'status'], - {'check_rc': False}, - (0, ''' + ["/testbin/subscription-manager", "status"], + {"check_rc": False}, + ( + 0, + """ +-------------------------------------------+ System Status Details +-------------------------------------------+ Overall Status: Current System Purpose Status: Matched -''', '') - ) +""", + "", + ), + ), ], - 'changed': True, - 'msg': "System successfully registered to 'None'." 
- } + "changed": True, + "msg": "System successfully registered to 'None'.", + }, ], ] -SYSPURPOSE_TEST_CASES_IDS: list[str] = [item[1]['id'] for item in SYSPURPOSE_TEST_CASES] # type: ignore +SYSPURPOSE_TEST_CASES_IDS: list[str] = [item[1]["id"] for item in SYSPURPOSE_TEST_CASES] # type: ignore -@pytest.mark.parametrize('patch_ansible_module, testcase', SYSPURPOSE_TEST_CASES, ids=SYSPURPOSE_TEST_CASES_IDS, indirect=['patch_ansible_module']) -@pytest.mark.usefixtures('patch_ansible_module') -def test_redhat_subscription_syspurpose(mocker, capfd, patch_redhat_subscription, patch_ansible_module, testcase, tmpdir): +@pytest.mark.parametrize( + "patch_ansible_module, testcase", + SYSPURPOSE_TEST_CASES, + ids=SYSPURPOSE_TEST_CASES_IDS, + indirect=["patch_ansible_module"], +) +@pytest.mark.usefixtures("patch_ansible_module") +def test_redhat_subscription_syspurpose( + mocker, capfd, patch_redhat_subscription, patch_ansible_module, testcase, tmpdir +): """ Run unit tests for test cases listed in SYSPURPOSE_TEST_CASES (syspurpose specific cases) """ # Mock function used for running commands first - call_results = [item[2] for item in testcase['run_command.calls']] - mock_run_command = mocker.patch.object( - basic.AnsibleModule, - 'run_command', - side_effect=call_results) + call_results = [item[2] for item in testcase["run_command.calls"]] + mock_run_command = mocker.patch.object(basic.AnsibleModule, "run_command", side_effect=call_results) mock_syspurpose_file = tmpdir.mkdir("syspurpose").join("syspurpose.json") # When there are some existing syspurpose attributes specified, then # write them to the file first - if testcase['existing_syspurpose'] is not None: - mock_syspurpose_file.write(json.dumps(testcase['existing_syspurpose'])) + if testcase["existing_syspurpose"] is not None: + mock_syspurpose_file.write(json.dumps(testcase["existing_syspurpose"])) else: mock_syspurpose_file.write("{}") @@ -1246,20 +1285,20 @@ def test_redhat_subscription_syspurpose(mocker, capfd, patch_redhat_subscription out, err = capfd.readouterr() results = json.loads(out) - if 'failed' in testcase: - assert results['failed'] == testcase['failed'] + if "failed" in testcase: + assert results["failed"] == testcase["failed"] else: - assert 'changed' in results - assert results['changed'] == testcase['changed'] - if 'msg' in results: - assert results['msg'] == testcase['msg'] + assert "changed" in results + assert results["changed"] == testcase["changed"] + if "msg" in results: + assert results["msg"] == testcase["msg"] mock_file_content = mock_syspurpose_file.read_text("utf-8") current_syspurpose = json.loads(mock_file_content) - assert current_syspurpose == testcase['expected_syspurpose'] + assert current_syspurpose == testcase["expected_syspurpose"] - assert basic.AnsibleModule.run_command.call_count == len(testcase['run_command.calls']) + assert basic.AnsibleModule.run_command.call_count == len(testcase["run_command.calls"]) if basic.AnsibleModule.run_command.call_count: call_args_list = [(item[0][0], item[1]) for item in basic.AnsibleModule.run_command.call_args_list] - expected_call_args_list = [(item[0], item[1]) for item in testcase['run_command.calls']] + expected_call_args_list = [(item[0], item[1]) for item in testcase["run_command.calls"]] assert call_args_list == expected_call_args_list diff --git a/tests/unit/plugins/modules/test_redis_data.py b/tests/unit/plugins/modules/test_redis_data.py index d4b902bae29..0e960202481 100644 --- a/tests/unit/plugins/modules/test_redis_data.py +++
b/tests/unit/plugins/modules/test_redis_data.py @@ -14,7 +14,7 @@ from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args HAS_REDIS_USERNAME_OPTION = True -if tuple(map(int, __version__.split('.'))) < (3, 4, 0): +if tuple(map(int, __version__.split("."))) < (3, 4, 0): HAS_REDIS_USERNAME_OPTION = False @@ -24,253 +24,290 @@ def test_redis_data_without_arguments(capfd): redis_data.main() out, err = capfd.readouterr() assert not err - assert json.loads(out)['failed'] + assert json.loads(out)["failed"] @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_key(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'value': 'baz', - '_ansible_check_mode': False}): - mocker.patch('redis.Redis.get', return_value='bar') - mocker.patch('redis.Redis.set', return_value=True) + with set_module_args( + { + "login_host": "localhost", + "login_user": "root", + "login_password": "secret", + "key": "foo", + "value": "baz", + "_ansible_check_mode": False, + } + ): + mocker.patch("redis.Redis.get", return_value="bar") + mocker.patch("redis.Redis.set", return_value=True) with pytest.raises(SystemExit): redis_data.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['old_value'] == 'bar' - assert json.loads(out)['value'] == 'baz' - assert json.loads(out)['msg'] == 'Set key: foo' - assert json.loads(out)['changed'] is True + assert json.loads(out)["old_value"] == "bar" + assert json.loads(out)["value"] == "baz" + assert json.loads(out)["msg"] == "Set key: foo" + assert json.loads(out)["changed"] is True @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_existing_key_nx(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'value': 'baz', - 'non_existing': True, - '_ansible_check_mode': False}): - mocker.patch('redis.Redis.get', return_value='bar') - mocker.patch('redis.Redis.set', return_value=None) + with set_module_args( + { + "login_host": "localhost", + "login_user": "root", + "login_password": "secret", + "key": "foo", + "value": "baz", + "non_existing": True, + "_ansible_check_mode": False, + } + ): + mocker.patch("redis.Redis.get", return_value="bar") + mocker.patch("redis.Redis.set", return_value=None) with pytest.raises(SystemExit): redis_data.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['old_value'] == 'bar' - assert 'value' not in json.loads(out) - assert json.loads( - out)['msg'] == 'Could not set key: foo. Key already present.' - assert json.loads(out)['changed'] is False - assert json.loads(out)['failed'] is True + assert json.loads(out)["old_value"] == "bar" + assert "value" not in json.loads(out) + assert json.loads(out)["msg"] == "Could not set key: foo. Key already present." 
+ assert json.loads(out)["changed"] is False + assert json.loads(out)["failed"] is True @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_non_existing_key_xx(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'value': 'baz', - 'existing': True, - '_ansible_check_mode': False}): - mocker.patch('redis.Redis.get', return_value=None) - mocker.patch('redis.Redis.set', return_value=None) + with set_module_args( + { + "login_host": "localhost", + "login_user": "root", + "login_password": "secret", + "key": "foo", + "value": "baz", + "existing": True, + "_ansible_check_mode": False, + } + ): + mocker.patch("redis.Redis.get", return_value=None) + mocker.patch("redis.Redis.set", return_value=None) with pytest.raises(SystemExit): redis_data.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['old_value'] is None - assert 'value' not in json.loads(out) - assert json.loads( - out)['msg'] == 'Could not set key: foo. Key not present.' - assert json.loads(out)['changed'] is False - assert json.loads(out)['failed'] is True + assert json.loads(out)["old_value"] is None + assert "value" not in json.loads(out) + assert json.loads(out)["msg"] == "Could not set key: foo. Key not present." + assert json.loads(out)["changed"] is False + assert json.loads(out)["failed"] is True @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_delete_present_key(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'absent'}): - mocker.patch('redis.Redis.get', return_value='bar') - mocker.patch('redis.Redis.delete', return_value=1) + with set_module_args( + {"login_host": "localhost", "login_user": "root", "login_password": "secret", "key": "foo", "state": "absent"} + ): + mocker.patch("redis.Redis.get", return_value="bar") + mocker.patch("redis.Redis.delete", return_value=1) with pytest.raises(SystemExit): redis_data.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['msg'] == 'Deleted key: foo' - assert json.loads(out)['changed'] is True + assert json.loads(out)["msg"] == "Deleted key: foo" + assert json.loads(out)["changed"] is True @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_delete_absent_key(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'absent'}): - mocker.patch('redis.Redis.delete', return_value=0) - mocker.patch('redis.Redis.get', return_value=None) + with set_module_args( + {"login_host": "localhost", "login_user": "root", "login_password": "secret", "key": "foo", "state": "absent"} + ): + mocker.patch("redis.Redis.delete", return_value=0) + mocker.patch("redis.Redis.get", return_value=None) with pytest.raises(SystemExit): redis_data.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['msg'] == 'Key: foo not present' - assert json.loads(out)['changed'] is False + assert json.loads(out)["msg"] == "Key: foo not present" + assert json.loads(out)["changed"] is False @pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") def test_redis_data_fail_username(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 
'login_password': 'secret', - 'key': 'foo', - 'value': 'baz', - '_ansible_check_mode': False}): + with set_module_args( + { + "login_host": "localhost", + "login_user": "root", + "login_password": "secret", + "key": "foo", + "value": "baz", + "_ansible_check_mode": False, + } + ): with pytest.raises(SystemExit): redis_data.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['failed'] - assert json.loads( - out)['msg'] == 'The option `username` in only supported with redis >= 3.4.0.' + assert json.loads(out)["failed"] + assert json.loads(out)["msg"] == "The option `username` in only supported with redis >= 3.4.0." def test_redis_data_key_no_username(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'value': 'baz', - '_ansible_check_mode': False}): - mocker.patch('redis.Redis.get', return_value='bar') - mocker.patch('redis.Redis.set', return_value=True) + with set_module_args( + { + "login_host": "localhost", + "login_password": "secret", + "key": "foo", + "value": "baz", + "_ansible_check_mode": False, + } + ): + mocker.patch("redis.Redis.get", return_value="bar") + mocker.patch("redis.Redis.set", return_value=True) with pytest.raises(SystemExit): redis_data.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['old_value'] == 'bar' - assert json.loads(out)['value'] == 'baz' - assert json.loads(out)['msg'] == 'Set key: foo' - assert json.loads(out)['changed'] is True + assert json.loads(out)["old_value"] == "bar" + assert json.loads(out)["value"] == "baz" + assert json.loads(out)["msg"] == "Set key: foo" + assert json.loads(out)["changed"] is True def test_redis_delete_key_no_username(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'absent', - '_ansible_check_mode': False}): - mocker.patch('redis.Redis.get', return_value='bar') - mocker.patch('redis.Redis.delete', return_value=1) + with set_module_args( + { + "login_host": "localhost", + "login_password": "secret", + "key": "foo", + "state": "absent", + "_ansible_check_mode": False, + } + ): + mocker.patch("redis.Redis.get", return_value="bar") + mocker.patch("redis.Redis.delete", return_value=1) with pytest.raises(SystemExit): redis_data.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['msg'] == 'Deleted key: foo' - assert json.loads(out)['changed'] is True + assert json.loads(out)["msg"] == "Deleted key: foo" + assert json.loads(out)["changed"] is True def test_redis_delete_key_non_existent_key(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'absent', - '_ansible_check_mode': False}): - mocker.patch('redis.Redis.get', return_value=None) - mocker.patch('redis.Redis.delete', return_value=0) + with set_module_args( + { + "login_host": "localhost", + "login_password": "secret", + "key": "foo", + "state": "absent", + "_ansible_check_mode": False, + } + ): + mocker.patch("redis.Redis.get", return_value=None) + mocker.patch("redis.Redis.delete", return_value=0) with pytest.raises(SystemExit): redis_data.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['msg'] == 'Key: foo not present' - assert json.loads(out)['changed'] is False + assert json.loads(out)["msg"] == "Key: foo not present" + assert json.loads(out)["changed"] is False def test_redis_set_key_check_mode_nochange(capfd, mocker): 
- with set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'present', - 'value': 'bar', - '_ansible_check_mode': True}): - mocker.patch('redis.Redis.get', return_value='bar') + with set_module_args( + { + "login_host": "localhost", + "login_password": "secret", + "key": "foo", + "state": "present", + "value": "bar", + "_ansible_check_mode": True, + } + ): + mocker.patch("redis.Redis.get", return_value="bar") with pytest.raises(SystemExit): redis_data.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['msg'] == 'Key foo already has desired value' - assert json.loads(out)['value'] == 'bar' - assert not json.loads(out)['changed'] - assert json.loads(out)['old_value'] == 'bar' + assert json.loads(out)["msg"] == "Key foo already has desired value" + assert json.loads(out)["value"] == "bar" + assert not json.loads(out)["changed"] + assert json.loads(out)["old_value"] == "bar" def test_redis_set_key_check_mode_delete_nx(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'present', - 'value': 'baz', - '_ansible_check_mode': True}): - mocker.patch('redis.Redis.get', return_value=None) + with set_module_args( + { + "login_host": "localhost", + "login_password": "secret", + "key": "foo", + "state": "present", + "value": "baz", + "_ansible_check_mode": True, + } + ): + mocker.patch("redis.Redis.get", return_value=None) with pytest.raises(SystemExit): redis_data.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['msg'] == 'Set key: foo' - assert json.loads(out)['value'] == 'baz' - assert json.loads(out)['old_value'] is None + assert json.loads(out)["msg"] == "Set key: foo" + assert json.loads(out)["value"] == "baz" + assert json.loads(out)["old_value"] is None def test_redis_set_key_check_mode_delete(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'present', - 'value': 'baz', - '_ansible_check_mode': True}): - mocker.patch('redis.Redis.get', return_value='bar') + with set_module_args( + { + "login_host": "localhost", + "login_password": "secret", + "key": "foo", + "state": "present", + "value": "baz", + "_ansible_check_mode": True, + } + ): + mocker.patch("redis.Redis.get", return_value="bar") with pytest.raises(SystemExit): redis_data.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['msg'] == 'Set key: foo' - assert json.loads(out)['value'] == 'baz' - assert json.loads(out)['old_value'] == 'bar' + assert json.loads(out)["msg"] == "Set key: foo" + assert json.loads(out)["value"] == "baz" + assert json.loads(out)["old_value"] == "bar" def test_redis_set_key_check_mode(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'state': 'present', - 'value': 'baz', - '_ansible_check_mode': True}): - mocker.patch('redis.Redis.get', return_value='bar') + with set_module_args( + { + "login_host": "localhost", + "login_password": "secret", + "key": "foo", + "state": "present", + "value": "baz", + "_ansible_check_mode": True, + } + ): + mocker.patch("redis.Redis.get", return_value="bar") with pytest.raises(SystemExit): redis_data.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['msg'] == 'Set key: foo' - assert json.loads(out)['value'] == 'baz' - assert json.loads(out)['old_value'] == 'bar' + assert 
json.loads(out)["msg"] == "Set key: foo" + assert json.loads(out)["value"] == "baz" + assert json.loads(out)["old_value"] == "bar" diff --git a/tests/unit/plugins/modules/test_redis_data_incr.py b/tests/unit/plugins/modules/test_redis_data_incr.py index 620042d18da..8e80e914591 100644 --- a/tests/unit/plugins/modules/test_redis_data_incr.py +++ b/tests/unit/plugins/modules/test_redis_data_incr.py @@ -16,7 +16,7 @@ HAS_REDIS_USERNAME_OPTION = True -if tuple(map(int, __version__.split('.'))) < (3, 4, 0): +if tuple(map(int, __version__.split("."))) < (3, 4, 0): HAS_REDIS_USERNAME_OPTION = False if HAS_REDIS_USERNAME_OPTION: from redis.exceptions import NoPermissionError @@ -28,181 +28,186 @@ def test_redis_data_incr_without_arguments(capfd): redis_data_incr.main() out, err = capfd.readouterr() assert not err - assert json.loads(out)['failed'] + assert json.loads(out)["failed"] @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_incr(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', }): - mocker.patch('redis.Redis.incr', return_value=57) + with set_module_args( + { + "login_host": "localhost", + "login_user": "root", + "login_password": "secret", + "key": "foo", + } + ): + mocker.patch("redis.Redis.incr", return_value=57) with pytest.raises(SystemExit): redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['value'] == 57.0 - assert json.loads( - out)['msg'] == 'Incremented key: foo to 57' - assert json.loads(out)['changed'] + assert json.loads(out)["value"] == 57.0 + assert json.loads(out)["msg"] == "Incremented key: foo to 57" + assert json.loads(out)["changed"] @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_incr_int(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'increment_int': 10}): - mocker.patch('redis.Redis.incrby', return_value=57) + with set_module_args( + {"login_host": "localhost", "login_user": "root", "login_password": "secret", "key": "foo", "increment_int": 10} + ): + mocker.patch("redis.Redis.incrby", return_value=57) with pytest.raises(SystemExit): redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['value'] == 57.0 - assert json.loads( - out)['msg'] == 'Incremented key: foo by 10 to 57' - assert json.loads(out)['changed'] + assert json.loads(out)["value"] == 57.0 + assert json.loads(out)["msg"] == "Incremented key: foo by 10 to 57" + assert json.loads(out)["changed"] @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_inc_float(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'increment_float': '5.5'}): - mocker.patch('redis.Redis.incrbyfloat', return_value=57.45) + with set_module_args( + { + "login_host": "localhost", + "login_user": "root", + "login_password": "secret", + "key": "foo", + "increment_float": "5.5", + } + ): + mocker.patch("redis.Redis.incrbyfloat", return_value=57.45) with pytest.raises(SystemExit): redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['value'] == 57.45 - assert json.loads( - out)['msg'] == 'Incremented key: foo by 5.5 to 57.45' - assert json.loads(out)['changed'] + assert 
json.loads(out)["value"] == 57.45 + assert json.loads(out)["msg"] == "Incremented key: foo by 5.5 to 57.45" + assert json.loads(out)["changed"] @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_incr_float_wrong_value(capfd): - with set_module_args({ - 'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - 'increment_float': 'not_a_number' - }): + with set_module_args( + { + "login_host": "localhost", + "login_user": "root", + "login_password": "secret", + "key": "foo", + "increment_float": "not_a_number", + } + ): with pytest.raises(SystemExit): redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['failed'] + assert json.loads(out)["failed"] @pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") def test_redis_data_incr_fail_username(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': False}): + with set_module_args( + { + "login_host": "localhost", + "login_user": "root", + "login_password": "secret", + "key": "foo", + "_ansible_check_mode": False, + } + ): with pytest.raises(SystemExit): redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['failed'] - assert json.loads( - out)['msg'] == 'The option `username` in only supported with redis >= 3.4.0.' + assert json.loads(out)["failed"] + assert json.loads(out)["msg"] == "The option `username` in only supported with redis >= 3.4.0." def test_redis_data_incr_no_username(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', }): - mocker.patch('redis.Redis.incr', return_value=57) + with set_module_args( + { + "login_host": "localhost", + "login_password": "secret", + "key": "foo", + } + ): + mocker.patch("redis.Redis.incr", return_value=57) with pytest.raises(SystemExit): redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['value'] == 57.0 - assert json.loads( - out)['msg'] == 'Incremented key: foo to 57' - assert json.loads(out)['changed'] + assert json.loads(out)["value"] == 57.0 + assert json.loads(out)["msg"] == "Incremented key: foo to 57" + assert json.loads(out)["changed"] def test_redis_data_incr_float_no_username(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - 'increment_float': '5.5'}): - mocker.patch('redis.Redis.incrbyfloat', return_value=57.45) + with set_module_args( + {"login_host": "localhost", "login_password": "secret", "key": "foo", "increment_float": "5.5"} + ): + mocker.patch("redis.Redis.incrbyfloat", return_value=57.45) with pytest.raises(SystemExit): redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['value'] == 57.45 - assert json.loads( - out)['msg'] == 'Incremented key: foo by 5.5 to 57.45' - assert json.loads(out)['changed'] + assert json.loads(out)["value"] == 57.45 + assert json.loads(out)["msg"] == "Incremented key: foo by 5.5 to 57.45" + assert json.loads(out)["changed"] def test_redis_data_incr_check_mode(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': True}): - mocker.patch('redis.Redis.get', return_value=10) + with set_module_args( + {"login_host": "localhost", 
"login_password": "secret", "key": "foo", "_ansible_check_mode": True} + ): + mocker.patch("redis.Redis.get", return_value=10) with pytest.raises(SystemExit): redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['value'] == 11.0 - assert json.loads(out)['msg'] == 'Incremented key: foo by 1 to 11.0' - assert not json.loads(out)['changed'] + assert json.loads(out)["value"] == 11.0 + assert json.loads(out)["msg"] == "Incremented key: foo by 1 to 11.0" + assert not json.loads(out)["changed"] def test_redis_data_incr_check_mode_not_incrementable(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': True}): - mocker.patch('redis.Redis.get', return_value='bar') + with set_module_args( + {"login_host": "localhost", "login_password": "secret", "key": "foo", "_ansible_check_mode": True} + ): + mocker.patch("redis.Redis.get", return_value="bar") with pytest.raises(SystemExit): redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['failed'] - assert json.loads(out)[ - 'msg'] == "Value: bar of key: foo is not incrementable(int or float)" - assert 'value' not in json.loads(out) - assert not json.loads(out)['changed'] + assert json.loads(out)["failed"] + assert json.loads(out)["msg"] == "Value: bar of key: foo is not incrementable(int or float)" + assert "value" not in json.loads(out) + assert not json.loads(out)["changed"] @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_incr_check_mode_permissions(capfd, mocker): - with set_module_args({'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': True}): - redis.Redis.get = mocker.Mock(side_effect=NoPermissionError( - "this user has no permissions to run the 'get' command or its subcommand")) + with set_module_args( + {"login_host": "localhost", "login_password": "secret", "key": "foo", "_ansible_check_mode": True} + ): + redis.Redis.get = mocker.Mock( + side_effect=NoPermissionError("this user has no permissions to run the 'get' command or its subcommand") + ) with pytest.raises(SystemExit): redis_data_incr.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['failed'] - assert json.loads(out)['msg'].startswith( - 'Failed to get value of key: foo with exception:') - assert 'value' not in json.loads(out) - assert not json.loads(out)['changed'] + assert json.loads(out)["failed"] + assert json.loads(out)["msg"].startswith("Failed to get value of key: foo with exception:") + assert "value" not in json.loads(out) + assert not json.loads(out)["changed"] diff --git a/tests/unit/plugins/modules/test_redis_data_info.py b/tests/unit/plugins/modules/test_redis_data_info.py index 218b594e581..06f2831b424 100644 --- a/tests/unit/plugins/modules/test_redis_data_info.py +++ b/tests/unit/plugins/modules/test_redis_data_info.py @@ -10,13 +10,12 @@ import json from redis import __version__ -from ansible_collections.community.general.plugins.modules import ( - redis_data_info) +from ansible_collections.community.general.plugins.modules import redis_data_info from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args HAS_REDIS_USERNAME_OPTION = True -if tuple(map(int, __version__.split('.'))) < (3, 4, 0): +if tuple(map(int, __version__.split("."))) < (3, 4, 0): HAS_REDIS_USERNAME_OPTION = False @@ -26,97 +25,96 @@ 
def test_redis_data_info_without_arguments(capfd): redis_data_info.main() out, err = capfd.readouterr() assert not err - assert json.loads(out)['failed'] + assert json.loads(out)["failed"] @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_info_existing_key(capfd, mocker): - with set_module_args({ - 'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': False - }): - mocker.patch('redis.Redis.get', return_value='bar') + with set_module_args( + { + "login_host": "localhost", + "login_user": "root", + "login_password": "secret", + "key": "foo", + "_ansible_check_mode": False, + } + ): + mocker.patch("redis.Redis.get", return_value="bar") with pytest.raises(SystemExit): redis_data_info.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['exists'] - assert json.loads(out)['value'] == 'bar' + assert json.loads(out)["exists"] + assert json.loads(out)["value"] == "bar" @pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") def test_redis_data_info_absent_key(capfd, mocker): - with set_module_args({ - 'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': False - }): - mocker.patch('redis.Redis.get', return_value=None) + with set_module_args( + { + "login_host": "localhost", + "login_user": "root", + "login_password": "secret", + "key": "foo", + "_ansible_check_mode": False, + } + ): + mocker.patch("redis.Redis.get", return_value=None) with pytest.raises(SystemExit): redis_data_info.main() out, err = capfd.readouterr() print(out) assert not err - assert not json.loads(out)['exists'] - assert 'value' not in json.loads(out) + assert not json.loads(out)["exists"] + assert "value" not in json.loads(out) @pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") def test_redis_data_fail_username(capfd, mocker): - with set_module_args({ - 'login_host': 'localhost', - 'login_user': 'root', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': False - }): + with set_module_args( + { + "login_host": "localhost", + "login_user": "root", + "login_password": "secret", + "key": "foo", + "_ansible_check_mode": False, + } + ): with pytest.raises(SystemExit): redis_data_info.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['failed'] - assert json.loads( - out)['msg'] == 'The option `username` in only supported with redis >= 3.4.0.' + assert json.loads(out)["failed"] + assert json.loads(out)["msg"] == "The option `username` in only supported with redis >= 3.4.0." 
@pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") def test_redis_data_info_absent_key_no_username(capfd, mocker): - with set_module_args({ - 'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': False - }): - mocker.patch('redis.Redis.get', return_value=None) + with set_module_args( + {"login_host": "localhost", "login_password": "secret", "key": "foo", "_ansible_check_mode": False} + ): + mocker.patch("redis.Redis.get", return_value=None) with pytest.raises(SystemExit): redis_data_info.main() out, err = capfd.readouterr() print(out) assert not err - assert not json.loads(out)['exists'] - assert 'value' not in json.loads(out) + assert not json.loads(out)["exists"] + assert "value" not in json.loads(out) @pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") def test_redis_data_info_existing_key_no_username(capfd, mocker): - with set_module_args({ - 'login_host': 'localhost', - 'login_password': 'secret', - 'key': 'foo', - '_ansible_check_mode': False - }): - mocker.patch('redis.Redis.get', return_value='bar') + with set_module_args( + {"login_host": "localhost", "login_password": "secret", "key": "foo", "_ansible_check_mode": False} + ): + mocker.patch("redis.Redis.get", return_value="bar") with pytest.raises(SystemExit): redis_data_info.main() out, err = capfd.readouterr() print(out) assert not err - assert json.loads(out)['exists'] - assert json.loads(out)['value'] == 'bar' + assert json.loads(out)["exists"] + assert json.loads(out)["value"] == "bar" diff --git a/tests/unit/plugins/modules/test_redis_info.py b/tests/unit/plugins/modules/test_redis_info.py index f6e19cd9a14..02841ccc74d 100644 --- a/tests/unit/plugins/modules/test_redis_info.py +++ b/tests/unit/plugins/modules/test_redis_info.py @@ -1,4 +1,3 @@ - # Copyright (c) 2020, Pavlo Bashynskyi (@levonet) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,29 +6,31 @@ from unittest.mock import patch, MagicMock from ansible_collections.community.general.plugins.modules import redis_info -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) class FakeRedisClient(MagicMock): - def ping(self): pass def info(self): - return {'redis_version': '999.999.999'} + return {"redis_version": "999.999.999"} class FakeRedisClientFail(MagicMock): - def ping(self): - raise Exception('Test Error') + raise Exception("Test Error") def info(self): pass class TestRedisInfoModule(ModuleTestCase): - def setUp(self): super().setUp() redis_info.HAS_REDIS_PACKAGE = True @@ -39,7 +40,9 @@ def tearDown(self): super().tearDown() def patch_redis_client(self, **kwds): - return patch('ansible_collections.community.general.plugins.modules.redis_info.redis_client', autospec=True, **kwds) + return patch( + "ansible_collections.community.general.plugins.modules.redis_info.redis_client", autospec=True, **kwds + ) def test_without_parameters(self): """Test without parameters""" @@ -48,62 +51,81 @@ def test_without_parameters(self): with set_module_args({}): self.module.main() self.assertEqual(redis_client.call_count, 1) - self.assertEqual(redis_client.call_args, ({'host': 'localhost', - 
'port': 6379, - 'password': None, - 'ssl': False, - 'ssl_ca_certs': None, - 'ssl_certfile': None, - 'ssl_keyfile': None, - 'ssl_cert_reqs': 'required'},)) - self.assertEqual(result.exception.args[0]['info']['redis_version'], '999.999.999') + self.assertEqual( + redis_client.call_args, + ( + { + "host": "localhost", + "port": 6379, + "password": None, + "ssl": False, + "ssl_ca_certs": None, + "ssl_certfile": None, + "ssl_keyfile": None, + "ssl_cert_reqs": "required", + }, + ), + ) + self.assertEqual(result.exception.args[0]["info"]["redis_version"], "999.999.999") def test_with_parameters(self): """Test with all parameters""" with self.patch_redis_client(side_effect=FakeRedisClient) as redis_client: with self.assertRaises(AnsibleExitJson) as result: - with set_module_args({ - 'login_host': 'test', - 'login_port': 1234, - 'login_password': 'PASS' - }): + with set_module_args({"login_host": "test", "login_port": 1234, "login_password": "PASS"}): self.module.main() self.assertEqual(redis_client.call_count, 1) - self.assertEqual(redis_client.call_args, ({'host': 'test', - 'port': 1234, - 'password': 'PASS', - 'ssl': False, - 'ssl_ca_certs': None, - 'ssl_certfile': None, - 'ssl_keyfile': None, - 'ssl_cert_reqs': 'required'},)) - self.assertEqual(result.exception.args[0]['info']['redis_version'], '999.999.999') + self.assertEqual( + redis_client.call_args, + ( + { + "host": "test", + "port": 1234, + "password": "PASS", + "ssl": False, + "ssl_ca_certs": None, + "ssl_certfile": None, + "ssl_keyfile": None, + "ssl_cert_reqs": "required", + }, + ), + ) + self.assertEqual(result.exception.args[0]["info"]["redis_version"], "999.999.999") def test_with_tls_parameters(self): """Test with tls parameters""" with self.patch_redis_client(side_effect=FakeRedisClient) as redis_client: with self.assertRaises(AnsibleExitJson) as result: - with set_module_args({ - 'login_host': 'test', - 'login_port': 1234, - 'login_password': 'PASS', - 'tls': True, - 'ca_certs': '/etc/ssl/ca.pem', - 'client_cert_file': '/etc/ssl/client.pem', - 'client_key_file': '/etc/ssl/client.key', - 'validate_certs': False - }): + with set_module_args( + { + "login_host": "test", + "login_port": 1234, + "login_password": "PASS", + "tls": True, + "ca_certs": "/etc/ssl/ca.pem", + "client_cert_file": "/etc/ssl/client.pem", + "client_key_file": "/etc/ssl/client.key", + "validate_certs": False, + } + ): self.module.main() self.assertEqual(redis_client.call_count, 1) - self.assertEqual(redis_client.call_args, ({'host': 'test', - 'port': 1234, - 'password': 'PASS', - 'ssl': True, - 'ssl_ca_certs': '/etc/ssl/ca.pem', - 'ssl_certfile': '/etc/ssl/client.pem', - 'ssl_keyfile': '/etc/ssl/client.key', - 'ssl_cert_reqs': None},)) - self.assertEqual(result.exception.args[0]['info']['redis_version'], '999.999.999') + self.assertEqual( + redis_client.call_args, + ( + { + "host": "test", + "port": 1234, + "password": "PASS", + "ssl": True, + "ssl_ca_certs": "/etc/ssl/ca.pem", + "ssl_certfile": "/etc/ssl/client.pem", + "ssl_keyfile": "/etc/ssl/client.key", + "ssl_cert_reqs": None, + }, + ), + ) + self.assertEqual(result.exception.args[0]["info"]["redis_version"], "999.999.999") def test_with_fail_client(self): """Test failure message""" @@ -112,4 +134,4 @@ def test_with_fail_client(self): with set_module_args({}): self.module.main() self.assertEqual(redis_client.call_count, 1) - self.assertEqual(result.exception.args[0]['msg'], 'unable to connect to database: Test Error') + self.assertEqual(result.exception.args[0]["msg"], "unable to connect to database: Test 
Error") diff --git a/tests/unit/plugins/modules/test_rhsm_release.py b/tests/unit/plugins/modules/test_rhsm_release.py index 9241b0256a4..65237f6d62f 100644 --- a/tests/unit/plugins/modules/test_rhsm_release.py +++ b/tests/unit/plugins/modules/test_rhsm_release.py @@ -7,7 +7,11 @@ from unittest.mock import call, patch from ansible_collections.community.general.plugins.modules import rhsm_release from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( - AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args) + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) class RhsmRepositoryReleaseModuleTestCase(ModuleTestCase): @@ -21,19 +25,20 @@ def setUp(self): # Mainly interested that the subscription-manager calls are right # based on the module args, so patch out run_command in the module. # returns (rc, out, err) structure - self.mock_run_command = patch('ansible_collections.community.general.plugins.modules.rhsm_release.' - 'AnsibleModule.run_command') + self.mock_run_command = patch( + "ansible_collections.community.general.plugins.modules.rhsm_release.AnsibleModule.run_command" + ) self.module_main_command = self.mock_run_command.start() # Module does a get_bin_path check before every run_command call - self.mock_get_bin_path = patch('ansible_collections.community.general.plugins.modules.rhsm_release.' - 'AnsibleModule.get_bin_path') + self.mock_get_bin_path = patch( + "ansible_collections.community.general.plugins.modules.rhsm_release.AnsibleModule.get_bin_path" + ) self.get_bin_path = self.mock_get_bin_path.start() - self.get_bin_path.return_value = '/testbin/subscription-manager' + self.get_bin_path.return_value = "/testbin/subscription-manager" # subscription-manager needs to be run as root - self.mock_os_getuid = patch('ansible_collections.community.general.plugins.modules.rhsm_release.' 
- 'os.getuid') + self.mock_os_getuid = patch("ansible_collections.community.general.plugins.modules.rhsm_release.os.getuid") self.os_getuid = self.mock_os_getuid.start() self.os_getuid.return_value = 0 @@ -51,97 +56,105 @@ def module_main(self, exit_exc): def test_release_set(self): # test that the module attempts to change the release when the current # release is not the same as the user-specified target release - with set_module_args({'release': '7.5'}): + with set_module_args({"release": "7.5"}): self.module_main_command.side_effect = [ # first call, get_release: returns different version so set_release is called - (0, '7.4', ''), + (0, "7.4", ""), # second call, set_release: just needs to exit with 0 rc - (0, '', ''), + (0, "", ""), ] result = self.module_main(AnsibleExitJson) - self.assertTrue(result['changed']) - self.assertEqual('7.5', result['current_release']) - self.module_main_command.assert_has_calls([ - call(['/testbin/subscription-manager', 'release', '--show'], **self.SUBMAN_KWARGS), - call(['/testbin/subscription-manager', 'release', '--set', '7.5'], **self.SUBMAN_KWARGS), - ]) + self.assertTrue(result["changed"]) + self.assertEqual("7.5", result["current_release"]) + self.module_main_command.assert_has_calls( + [ + call(["/testbin/subscription-manager", "release", "--show"], **self.SUBMAN_KWARGS), + call(["/testbin/subscription-manager", "release", "--set", "7.5"], **self.SUBMAN_KWARGS), + ] + ) def test_release_set_idempotent(self): # test that the module does not attempt to change the release when # the current release matches the user-specified target release - with set_module_args({'release': '7.5'}): + with set_module_args({"release": "7.5"}): self.module_main_command.side_effect = [ # first call, get_release: returns same version, set_release is not called - (0, '7.5', ''), + (0, "7.5", ""), ] result = self.module_main(AnsibleExitJson) - self.assertFalse(result['changed']) - self.assertEqual('7.5', result['current_release']) - self.module_main_command.assert_has_calls([ - call(['/testbin/subscription-manager', 'release', '--show'], **self.SUBMAN_KWARGS), - ]) + self.assertFalse(result["changed"]) + self.assertEqual("7.5", result["current_release"]) + self.module_main_command.assert_has_calls( + [ + call(["/testbin/subscription-manager", "release", "--show"], **self.SUBMAN_KWARGS), + ] + ) def test_release_unset(self): # test that the module attempts to change the release when the current # release is not the same as the user-specified target release - with set_module_args({'release': None}): + with set_module_args({"release": None}): self.module_main_command.side_effect = [ # first call, get_release: returns version so set_release is called - (0, '7.5', ''), + (0, "7.5", ""), # second call, set_release: just needs to exit with 0 rc - (0, '', ''), + (0, "", ""), ] result = self.module_main(AnsibleExitJson) - self.assertTrue(result['changed']) - self.assertIsNone(result['current_release']) - self.module_main_command.assert_has_calls([ - call(['/testbin/subscription-manager', 'release', '--show'], **self.SUBMAN_KWARGS), - call(['/testbin/subscription-manager', 'release', '--unset'], **self.SUBMAN_KWARGS), - ]) + self.assertTrue(result["changed"]) + self.assertIsNone(result["current_release"]) + self.module_main_command.assert_has_calls( + [ + call(["/testbin/subscription-manager", "release", "--show"], **self.SUBMAN_KWARGS), + call(["/testbin/subscription-manager", "release", "--unset"], **self.SUBMAN_KWARGS), + ] + ) def test_release_unset_idempotent(self): # test that
the module does not attempt to change the release when # the release is already unset - with set_module_args({'release': None}): + with set_module_args({"release": None}): self.module_main_command.side_effect = [ # first call, get_release: returns no version, set_release is not called - (0, 'Release not set', ''), + (0, "Release not set", ""), ] result = self.module_main(AnsibleExitJson) - self.assertFalse(result['changed']) - self.assertIsNone(result['current_release']) - self.module_main_command.assert_has_calls([ - call(['/testbin/subscription-manager', 'release', '--show'], **self.SUBMAN_KWARGS), - ]) + self.assertFalse(result["changed"]) + self.assertIsNone(result["current_release"]) + self.module_main_command.assert_has_calls( + [ + call(["/testbin/subscription-manager", "release", "--show"], **self.SUBMAN_KWARGS), + ] + ) def test_release_insane(self): # test that insane values for release trigger fail_json - insane_value = 'this is an insane release value' - with set_module_args({'release': insane_value}): + insane_value = "this is an insane release value" + with set_module_args({"release": insane_value}): result = self.module_main(AnsibleFailJson) # also ensure that the fail msg includes the insane value - self.assertIn(insane_value, result['msg']) + self.assertIn(insane_value, result["msg"]) def test_release_matcher(self): # throw a few values at the release matcher -- only sane_values should match - sane_values = ['1Server', '1Client', '10Server', '1.10', '10.0', '9'] + sane_values = ["1Server", "1Client", "10Server", "1.10", "10.0", "9"] insane_values = [ - '6server', # lowercase 's' - '100Server', # excessively long 'x' component - '100.100', # excessively long 'x' and 'y' components - '+.-', # illegal characters + "6server", # lowercase 's' + "100Server", # excessively long 'x' component + "100.100", # excessively long 'x' and 'y' components + "+.-", # illegal characters ] - matches = self.module.release_matcher.findall(' '.join(sane_values + insane_values)) + matches = self.module.release_matcher.findall(" ".join(sane_values + insane_values)) # matches should be returned in the same order they were parsed, # so sorting shouldn't be necessary here diff --git a/tests/unit/plugins/modules/test_rhsm_repository.py b/tests/unit/plugins/modules/test_rhsm_repository.py index 3dc0bc10d51..fa4c263855f 100644 --- a/tests/unit/plugins/modules/test_rhsm_repository.py +++ b/tests/unit/plugins/modules/test_rhsm_repository.py @@ -28,10 +28,11 @@ def patch_rhsm_repository(mocker): """ Function used for mocking some parts of rhsm_repository module """ - mocker.patch('ansible_collections.community.general.plugins.modules.rhsm_repository.AnsibleModule.get_bin_path', - return_value='/testbin/subscription-manager') - mocker.patch('ansible_collections.community.general.plugins.modules.rhsm_repository.os.getuid', - return_value=0) + mocker.patch( + "ansible_collections.community.general.plugins.modules.rhsm_repository.AnsibleModule.get_bin_path", + return_value="/testbin/subscription-manager", + ) + mocker.patch("ansible_collections.community.general.plugins.modules.rhsm_repository.os.getuid", return_value=0) class Repos: @@ -79,8 +80,8 @@ def copy(self): def _set_status(self, repo_id, status): for repo in self.repos: - if fnmatch.fnmatch(repo['id'], repo_id): - repo['enabled'] = status + if fnmatch.fnmatch(repo["id"], repo_id): + repo["enabled"] = status def enable(self, repo_ids): """ @@ -113,13 +114,9 @@ def disable(self, repo_ids): return self def
_filter_by_status(self, filter, status): - return [ - repo['id'] - for repo in self.repos - if repo['enabled'] == status and fnmatch.fnmatch(repo['id'], filter) - ] + return [repo["id"] for repo in self.repos if repo["enabled"] == status and fnmatch.fnmatch(repo["id"], filter)] - def ids_enabled(self, filter='*'): + def ids_enabled(self, filter="*"): """ Get a list with the enabled repositories. @@ -127,7 +124,7 @@ def ids_enabled(self, filter='*'): """ return self._filter_by_status(filter, True) - def ids_disabled(self, filter='*'): + def ids_disabled(self, filter="*"): """ Get a list with the disabled repositories. @@ -244,9 +241,9 @@ def flatten(iter_of_iters): # MUST match what's in the Rhsm class in the module. SUBMAN_KWARGS = { - 'environ_update': dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'), - 'expand_user_and_vars': False, - 'use_unsafe_shell': False, + "environ_update": dict(LANG="C", LC_ALL="C", LC_MESSAGES="C"), + "expand_user_and_vars": False, + "use_unsafe_shell": False, } @@ -254,527 +251,529 @@ def flatten(iter_of_iters): # enable a disabled repository [ { - 'name': 'awesomeos-1000000000000023', + "name": "awesomeos-1000000000000023", }, { - 'id': 'test_enable_single', - 'run_command.calls': [ + "id": "test_enable_single", + "run_command.calls": [ ( [ - '/testbin/subscription-manager', - 'repos', - '--list', + "/testbin/subscription-manager", + "repos", + "--list", ], SUBMAN_KWARGS, - (0, REPOS_LIST_OUTPUT, '') + (0, REPOS_LIST_OUTPUT, ""), ), ( [ - '/testbin/subscription-manager', - 'repos', - '--enable', - 'awesomeos-1000000000000023', + "/testbin/subscription-manager", + "repos", + "--enable", + "awesomeos-1000000000000023", ], SUBMAN_KWARGS, - (0, '', '') + (0, "", ""), ), ], - 'changed': True, - 'repositories': REPOS.copy().enable('awesomeos-1000000000000023'), - } + "changed": True, + "repositories": REPOS.copy().enable("awesomeos-1000000000000023"), + }, ], # enable an already enabled repository [ { - 'name': 'fake-content-38072', + "name": "fake-content-38072", }, { - 'id': 'test_enable_already_enabled', - 'run_command.calls': [ + "id": "test_enable_already_enabled", + "run_command.calls": [ ( [ - '/testbin/subscription-manager', - 'repos', - '--list', + "/testbin/subscription-manager", + "repos", + "--list", ], SUBMAN_KWARGS, - (0, REPOS_LIST_OUTPUT, '') + (0, REPOS_LIST_OUTPUT, ""), ), ], - 'changed': False, - 'repositories': REPOS.copy(), - } + "changed": False, + "repositories": REPOS.copy(), + }, ], # enable two disabled repositories [ { - 'name': ['awesomeos-1000000000000023', 'content-label-no-gpg-32060'], + "name": ["awesomeos-1000000000000023", "content-label-no-gpg-32060"], }, { - 'id': 'test_enable_multiple', - 'run_command.calls': [ + "id": "test_enable_multiple", + "run_command.calls": [ ( [ - '/testbin/subscription-manager', - 'repos', - '--list', + "/testbin/subscription-manager", + "repos", + "--list", ], SUBMAN_KWARGS, - (0, REPOS_LIST_OUTPUT, '') + (0, REPOS_LIST_OUTPUT, ""), ), ( [ - '/testbin/subscription-manager', - 'repos', - '--enable', - 'awesomeos-1000000000000023', - '--enable', - 'content-label-no-gpg-32060', + "/testbin/subscription-manager", + "repos", + "--enable", + "awesomeos-1000000000000023", + "--enable", + "content-label-no-gpg-32060", ], SUBMAN_KWARGS, - (0, '', '') + (0, "", ""), ), ], - 'changed': True, - 'repositories': REPOS.copy().enable('awesomeos-1000000000000023').enable('content-label-no-gpg-32060'), - } + "changed": True, + "repositories": 
REPOS.copy().enable("awesomeos-1000000000000023").enable("content-label-no-gpg-32060"), + }, ], # enable two repositories, one disabled and one already enabled [ { - 'name': ['awesomeos-1000000000000023', 'fake-content-38072'], + "name": ["awesomeos-1000000000000023", "fake-content-38072"], }, { - 'id': 'test_enable_multiple_mixed', - 'run_command.calls': [ + "id": "test_enable_multiple_mixed", + "run_command.calls": [ ( [ - '/testbin/subscription-manager', - 'repos', - '--list', + "/testbin/subscription-manager", + "repos", + "--list", ], SUBMAN_KWARGS, - (0, REPOS_LIST_OUTPUT, '') + (0, REPOS_LIST_OUTPUT, ""), ), ( [ - '/testbin/subscription-manager', - 'repos', - '--enable', - 'awesomeos-1000000000000023', - '--enable', - 'fake-content-38072', + "/testbin/subscription-manager", + "repos", + "--enable", + "awesomeos-1000000000000023", + "--enable", + "fake-content-38072", ], SUBMAN_KWARGS, - (0, '', '') + (0, "", ""), ), ], - 'changed': True, - 'repositories': REPOS.copy().enable('awesomeos-1000000000000023'), - } + "changed": True, + "repositories": REPOS.copy().enable("awesomeos-1000000000000023"), + }, ], # purge everything but never-enabled-content-801 (disabled) [ { - 'name': 'never-enabled-content-801', - 'purge': True, + "name": "never-enabled-content-801", + "purge": True, }, { - 'id': 'test_purge_everything_but_one_disabled', - 'run_command.calls': [ + "id": "test_purge_everything_but_one_disabled", + "run_command.calls": [ ( [ - '/testbin/subscription-manager', - 'repos', - '--list', + "/testbin/subscription-manager", + "repos", + "--list", ], SUBMAN_KWARGS, - (0, REPOS_LIST_OUTPUT, '') + (0, REPOS_LIST_OUTPUT, ""), ), ( [ - '/testbin/subscription-manager', - 'repos', - '--enable', - 'never-enabled-content-801', - ] + flatten([['--disable', i] for i in REPOS.ids_enabled() if i != 'never-enabled-content-801']), + "/testbin/subscription-manager", + "repos", + "--enable", + "never-enabled-content-801", + ] + + flatten([["--disable", i] for i in REPOS.ids_enabled() if i != "never-enabled-content-801"]), SUBMAN_KWARGS, - (0, '', '') + (0, "", ""), ), ], - 'changed': True, - 'repositories': REPOS.copy().disable('*').enable('never-enabled-content-801'), - } + "changed": True, + "repositories": REPOS.copy().disable("*").enable("never-enabled-content-801"), + }, ], # purge everything but awesomeos-99000 (already enabled) [ { - 'name': 'awesomeos-99000', - 'purge': True, + "name": "awesomeos-99000", + "purge": True, }, { - 'id': 'test_purge_everything_but_one_enabled', - 'run_command.calls': [ + "id": "test_purge_everything_but_one_enabled", + "run_command.calls": [ ( [ - '/testbin/subscription-manager', - 'repos', - '--list', + "/testbin/subscription-manager", + "repos", + "--list", ], SUBMAN_KWARGS, - (0, REPOS_LIST_OUTPUT, '') + (0, REPOS_LIST_OUTPUT, ""), ), ( [ - '/testbin/subscription-manager', - 'repos', - '--enable', - 'awesomeos-99000', - '--disable', - 'content-label-27060', - '--disable', - 'awesomeos-x86_64-99000', - '--disable', - 'fake-content-38072', + "/testbin/subscription-manager", + "repos", + "--enable", + "awesomeos-99000", + "--disable", + "content-label-27060", + "--disable", + "awesomeos-x86_64-99000", + "--disable", + "fake-content-38072", ], SUBMAN_KWARGS, - (0, '', '') + (0, "", ""), ), ], - 'changed': True, - 'repositories': REPOS.copy().disable('*').enable('awesomeos-99000'), - } + "changed": True, + "repositories": REPOS.copy().disable("*").enable("awesomeos-99000"), + }, ], # enable everything, then purge everything but content-label-27060 [ { - 'name': 
'content-label-27060', - 'purge': True, + "name": "content-label-27060", + "purge": True, }, { - 'id': 'test_enable_everything_purge_everything_but_one_enabled', - 'run_command.calls': [ + "id": "test_enable_everything_purge_everything_but_one_enabled", + "run_command.calls": [ ( [ - '/testbin/subscription-manager', - 'repos', - '--list', + "/testbin/subscription-manager", + "repos", + "--list", ], SUBMAN_KWARGS, - (0, REPOS.copy().enable('*').to_subman_list_output(), '') + (0, REPOS.copy().enable("*").to_subman_list_output(), ""), ), ( [ - '/testbin/subscription-manager', - 'repos', - '--enable', - 'content-label-27060', - '--disable', - 'never-enabled-content-801', - '--disable', - 'never-enabled-content-100000000000060', - '--disable', - 'awesomeos-x86_64-1000000000000023', - '--disable', - 'awesomeos-ppc64-100000000000011', - '--disable', - 'awesomeos-99000', - '--disable', - 'content-label-no-gpg-32060', - '--disable', - 'awesomeos-1000000000000023', - '--disable', - 'awesomeos-x86-100000000000020', - '--disable', - 'awesomeos-x86_64-99000', - '--disable', - 'awesomeos-s390x-99000', - '--disable', - 'awesomeos-modifier-37080', - '--disable', - 'awesomeos-i686-99000', - '--disable', - 'fake-content-38072', + "/testbin/subscription-manager", + "repos", + "--enable", + "content-label-27060", + "--disable", + "never-enabled-content-801", + "--disable", + "never-enabled-content-100000000000060", + "--disable", + "awesomeos-x86_64-1000000000000023", + "--disable", + "awesomeos-ppc64-100000000000011", + "--disable", + "awesomeos-99000", + "--disable", + "content-label-no-gpg-32060", + "--disable", + "awesomeos-1000000000000023", + "--disable", + "awesomeos-x86-100000000000020", + "--disable", + "awesomeos-x86_64-99000", + "--disable", + "awesomeos-s390x-99000", + "--disable", + "awesomeos-modifier-37080", + "--disable", + "awesomeos-i686-99000", + "--disable", + "fake-content-38072", ], SUBMAN_KWARGS, - (0, '', '') + (0, "", ""), ), ], - 'changed': True, - 'repositories': REPOS.copy().disable('*').enable('content-label-27060'), - } + "changed": True, + "repositories": REPOS.copy().disable("*").enable("content-label-27060"), + }, ], # enable all awesomeos-* [ { - 'name': 'awesomeos-*', + "name": "awesomeos-*", }, { - 'id': 'test_enable_all_awesomeos_star', - 'run_command.calls': [ + "id": "test_enable_all_awesomeos_star", + "run_command.calls": [ ( [ - '/testbin/subscription-manager', - 'repos', - '--list', + "/testbin/subscription-manager", + "repos", + "--list", ], SUBMAN_KWARGS, - (0, REPOS_LIST_OUTPUT, '') + (0, REPOS_LIST_OUTPUT, ""), ), ( [ - '/testbin/subscription-manager', - 'repos', - '--enable', - 'awesomeos-x86_64-1000000000000023', - '--enable', - 'awesomeos-ppc64-100000000000011', - '--enable', - 'awesomeos-99000', - '--enable', - 'awesomeos-1000000000000023', - '--enable', - 'awesomeos-x86-100000000000020', - '--enable', - 'awesomeos-x86_64-99000', - '--enable', - 'awesomeos-s390x-99000', - '--enable', - 'awesomeos-modifier-37080', - '--enable', - 'awesomeos-i686-99000', + "/testbin/subscription-manager", + "repos", + "--enable", + "awesomeos-x86_64-1000000000000023", + "--enable", + "awesomeos-ppc64-100000000000011", + "--enable", + "awesomeos-99000", + "--enable", + "awesomeos-1000000000000023", + "--enable", + "awesomeos-x86-100000000000020", + "--enable", + "awesomeos-x86_64-99000", + "--enable", + "awesomeos-s390x-99000", + "--enable", + "awesomeos-modifier-37080", + "--enable", + "awesomeos-i686-99000", ], SUBMAN_KWARGS, - (0, '', '') + (0, "", ""), ), ], - 'changed': 
True, - 'repositories': REPOS.copy().enable('awesomeos-*'), - } + "changed": True, + "repositories": REPOS.copy().enable("awesomeos-*"), + }, ], # purge everything but awesomeos-* [ { - 'name': REPOS.ids_enabled('awesomeos-*'), - 'purge': True, + "name": REPOS.ids_enabled("awesomeos-*"), + "purge": True, }, { - 'id': 'test_purge_everything_but_awesomeos_list', - 'run_command.calls': [ + "id": "test_purge_everything_but_awesomeos_list", + "run_command.calls": [ ( [ - '/testbin/subscription-manager', - 'repos', - '--list', + "/testbin/subscription-manager", + "repos", + "--list", ], SUBMAN_KWARGS, - (0, REPOS_LIST_OUTPUT, '') + (0, REPOS_LIST_OUTPUT, ""), ), ( [ - '/testbin/subscription-manager', - 'repos', - '--enable', - 'awesomeos-99000', - '--enable', - 'awesomeos-x86_64-99000', - '--disable', - 'content-label-27060', - '--disable', - 'fake-content-38072', + "/testbin/subscription-manager", + "repos", + "--enable", + "awesomeos-99000", + "--enable", + "awesomeos-x86_64-99000", + "--disable", + "content-label-27060", + "--disable", + "fake-content-38072", ], SUBMAN_KWARGS, - (0, '', '') + (0, "", ""), ), ], - 'changed': True, - 'repositories': REPOS.copy().disable('*').enable(REPOS.ids_enabled('awesomeos-*')), - } + "changed": True, + "repositories": REPOS.copy().disable("*").enable(REPOS.ids_enabled("awesomeos-*")), + }, ], # enable a repository that does not exist [ { - 'name': 'repo-that-does-not-exist', + "name": "repo-that-does-not-exist", }, { - 'id': 'test_enable_nonexisting', - 'run_command.calls': [ + "id": "test_enable_nonexisting", + "run_command.calls": [ ( [ - '/testbin/subscription-manager', - 'repos', - '--list', + "/testbin/subscription-manager", + "repos", + "--list", ], SUBMAN_KWARGS, - (0, REPOS_LIST_OUTPUT, '') + (0, REPOS_LIST_OUTPUT, ""), ), ], - 'failed': True, - 'msg': 'repo-that-does-not-exist is not a valid repository ID', - } + "failed": True, + "msg": "repo-that-does-not-exist is not a valid repository ID", + }, ], # disable an enabled repository [ { - 'name': 'awesomeos-99000', - 'state': 'disabled', + "name": "awesomeos-99000", + "state": "disabled", }, { - 'id': 'test_disable_single', - 'run_command.calls': [ + "id": "test_disable_single", + "run_command.calls": [ ( [ - '/testbin/subscription-manager', - 'repos', - '--list', + "/testbin/subscription-manager", + "repos", + "--list", ], SUBMAN_KWARGS, - (0, REPOS_LIST_OUTPUT, '') + (0, REPOS_LIST_OUTPUT, ""), ), ( [ - '/testbin/subscription-manager', - 'repos', - '--disable', - 'awesomeos-99000', + "/testbin/subscription-manager", + "repos", + "--disable", + "awesomeos-99000", ], SUBMAN_KWARGS, - (0, '', '') + (0, "", ""), ), ], - 'changed': True, - 'repositories': REPOS.copy().disable('awesomeos-99000'), - } + "changed": True, + "repositories": REPOS.copy().disable("awesomeos-99000"), + }, ], # disable an already disabled repository [ { - 'name': 'never-enabled-content-801', - 'state': 'disabled', + "name": "never-enabled-content-801", + "state": "disabled", }, { - 'id': 'test_disable_already_disabled', - 'run_command.calls': [ + "id": "test_disable_already_disabled", + "run_command.calls": [ ( [ - '/testbin/subscription-manager', - 'repos', - '--list', + "/testbin/subscription-manager", + "repos", + "--list", ], SUBMAN_KWARGS, - (0, REPOS_LIST_OUTPUT, '') + (0, REPOS_LIST_OUTPUT, ""), ), ], - 'changed': False, - 'repositories': REPOS.copy(), - } + "changed": False, + "repositories": REPOS.copy(), + }, ], # disable an already disabled repository, and purge [ { - 'name': 'never-enabled-content-801', - 'state': 
'disabled', - 'purge': True, + "name": "never-enabled-content-801", + "state": "disabled", + "purge": True, }, { - 'id': 'test_disable_already_disabled_and_purge', - 'run_command.calls': [ + "id": "test_disable_already_disabled_and_purge", + "run_command.calls": [ ( [ - '/testbin/subscription-manager', - 'repos', - '--list', + "/testbin/subscription-manager", + "repos", + "--list", ], SUBMAN_KWARGS, - (0, REPOS_LIST_OUTPUT, '') + (0, REPOS_LIST_OUTPUT, ""), ), ( [ - '/testbin/subscription-manager', - 'repos', - ] + flatten([['--disable', i] for i in REPOS.ids_enabled()]), + "/testbin/subscription-manager", + "repos", + ] + + flatten([["--disable", i] for i in REPOS.ids_enabled()]), SUBMAN_KWARGS, - (0, '', '') + (0, "", ""), ), ], - 'changed': True, - 'repositories': REPOS.copy().disable('*'), - } + "changed": True, + "repositories": REPOS.copy().disable("*"), + }, ], # disable an enabled repository, and purge [ { - 'name': 'awesomeos-99000', - 'state': 'disabled', - 'purge': True, + "name": "awesomeos-99000", + "state": "disabled", + "purge": True, }, { - 'id': 'test_disable_single_and_purge', - 'run_command.calls': [ + "id": "test_disable_single_and_purge", + "run_command.calls": [ ( [ - '/testbin/subscription-manager', - 'repos', - '--list', + "/testbin/subscription-manager", + "repos", + "--list", ], SUBMAN_KWARGS, - (0, REPOS_LIST_OUTPUT, '') + (0, REPOS_LIST_OUTPUT, ""), ), ( [ - '/testbin/subscription-manager', - 'repos', - ] + flatten([['--disable', i] for i in REPOS.ids_enabled()]), + "/testbin/subscription-manager", + "repos", + ] + + flatten([["--disable", i] for i in REPOS.ids_enabled()]), SUBMAN_KWARGS, - (0, '', '') + (0, "", ""), ), ], - 'changed': True, - 'repositories': REPOS.copy().disable('*'), - } + "changed": True, + "repositories": REPOS.copy().disable("*"), + }, ], # disable a repository that does not exist [ { - 'name': 'repo-that-does-not-exist', - 'state': 'disabled', + "name": "repo-that-does-not-exist", + "state": "disabled", }, { - 'id': 'test_disable_nonexisting', - 'run_command.calls': [ + "id": "test_disable_nonexisting", + "run_command.calls": [ ( [ - '/testbin/subscription-manager', - 'repos', - '--list', + "/testbin/subscription-manager", + "repos", + "--list", ], SUBMAN_KWARGS, - (0, REPOS_LIST_OUTPUT, '') + (0, REPOS_LIST_OUTPUT, ""), ), ], - 'failed': True, - 'msg': 'repo-that-does-not-exist is not a valid repository ID', - } + "failed": True, + "msg": "repo-that-does-not-exist is not a valid repository ID", + }, ], ] -TEST_CASES_IDS: list[str] = [item[1]['id'] for item in TEST_CASES] # type: ignore +TEST_CASES_IDS: list[str] = [item[1]["id"] for item in TEST_CASES] # type: ignore -@pytest.mark.parametrize('patch_ansible_module, testcase', TEST_CASES, ids=TEST_CASES_IDS, indirect=['patch_ansible_module']) -@pytest.mark.usefixtures('patch_ansible_module') +@pytest.mark.parametrize( + "patch_ansible_module, testcase", TEST_CASES, ids=TEST_CASES_IDS, indirect=["patch_ansible_module"] +) +@pytest.mark.usefixtures("patch_ansible_module") def test_rhsm_repository(mocker, capfd, patch_rhsm_repository, testcase): """ Run unit tests for test cases listed in TEST_CASES """ # Mock function used for running commands first - call_results = [item[2] for item in testcase['run_command.calls']] - mock_run_command = mocker.patch.object( - basic.AnsibleModule, - 'run_command', - side_effect=call_results) + call_results = [item[2] for item in testcase["run_command.calls"]] + mock_run_command = mocker.patch.object(basic.AnsibleModule, "run_command", 
side_effect=call_results) # Try to run test case with pytest.raises(SystemExit): @@ -783,15 +782,15 @@ def test_rhsm_repository(mocker, capfd, patch_rhsm_repository, testcase): out, err = capfd.readouterr() results = json.loads(out) - if 'failed' in testcase: - assert results['failed'] == testcase['failed'] - assert results['msg'] == testcase['msg'] + if "failed" in testcase: + assert results["failed"] == testcase["failed"] + assert results["msg"] == testcase["msg"] else: - assert 'changed' in results - assert results['changed'] == testcase['changed'] - assert results['repositories'] == testcase['repositories'].to_list() + assert "changed" in results + assert results["changed"] == testcase["changed"] + assert results["repositories"] == testcase["repositories"].to_list() - assert basic.AnsibleModule.run_command.call_count == len(testcase['run_command.calls']) + assert basic.AnsibleModule.run_command.call_count == len(testcase["run_command.calls"]) # FIXME ideally we also need to compare the actual calls with the expected # ones; the problem is that the module uses a dict to collect the repositories # to enable and disable, so the order of the --enable/--disable parameters to diff --git a/tests/unit/plugins/modules/test_rpm_ostree_pkg.py b/tests/unit/plugins/modules/test_rpm_ostree_pkg.py index 37404395783..49c845d83e7 100644 --- a/tests/unit/plugins/modules/test_rpm_ostree_pkg.py +++ b/tests/unit/plugins/modules/test_rpm_ostree_pkg.py @@ -8,7 +8,11 @@ from unittest.mock import call, patch from ansible_collections.community.general.plugins.modules import rpm_ostree_pkg from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( - AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args) + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) class RpmOSTreeModuleTestCase(ModuleTestCase): @@ -17,11 +21,11 @@ class RpmOSTreeModuleTestCase(ModuleTestCase): def setUp(self): super().setUp() ansible_module_path = "ansible_collections.community.general.plugins.modules.rpm_ostree_pkg.AnsibleModule" - self.mock_run_command = patch(f'{ansible_module_path}.run_command') + self.mock_run_command = patch(f"{ansible_module_path}.run_command") self.module_main_command = self.mock_run_command.start() - self.mock_get_bin_path = patch(f'{ansible_module_path}.get_bin_path') + self.mock_get_bin_path = patch(f"{ansible_module_path}.get_bin_path") self.get_bin_path = self.mock_get_bin_path.start() - self.get_bin_path.return_value = '/testbin/rpm-ostree' + self.get_bin_path.return_value = "/testbin/rpm-ostree" def tearDown(self): self.mock_run_command.stop() @@ -34,74 +38,129 @@ def module_main(self, exit_exc): return exc.exception.args[0] def test_present(self): - with set_module_args({'name': 'nfs-utils', 'state': 'present'}): + with set_module_args({"name": "nfs-utils", "state": "present"}): self.module_main_command.side_effect = [ - (0, '', ''), + (0, "", ""), ] result = self.module_main(AnsibleExitJson) - self.assertTrue(result['changed']) - self.assertEqual(['nfs-utils'], result['packages']) - self.module_main_command.assert_has_calls([ - call(['/testbin/rpm-ostree', 'install', '--allow-inactive', '--idempotent', '--unchanged-exit-77', 'nfs-utils']), - ]) + self.assertTrue(result["changed"]) + self.assertEqual(["nfs-utils"], result["packages"]) + self.module_main_command.assert_has_calls( + [ + call( + [ + "/testbin/rpm-ostree", + "install", + "--allow-inactive", + "--idempotent", + "--unchanged-exit-77", + "nfs-utils", + ] + ), + ] + ) def 
test_present_unchanged(self): - with set_module_args({'name': 'nfs-utils', 'state': 'present'}): + with set_module_args({"name": "nfs-utils", "state": "present"}): self.module_main_command.side_effect = [ - (77, '', ''), + (77, "", ""), ] result = self.module_main(AnsibleExitJson) - self.assertFalse(result['changed']) - self.assertEqual(0, result['rc']) - self.assertEqual(['nfs-utils'], result['packages']) - self.module_main_command.assert_has_calls([ - call(['/testbin/rpm-ostree', 'install', '--allow-inactive', '--idempotent', '--unchanged-exit-77', 'nfs-utils']), - ]) + self.assertFalse(result["changed"]) + self.assertEqual(0, result["rc"]) + self.assertEqual(["nfs-utils"], result["packages"]) + self.module_main_command.assert_has_calls( + [ + call( + [ + "/testbin/rpm-ostree", + "install", + "--allow-inactive", + "--idempotent", + "--unchanged-exit-77", + "nfs-utils", + ] + ), + ] + ) def test_present_failed(self): - with set_module_args({'name': 'nfs-utils', 'state': 'present'}): + with set_module_args({"name": "nfs-utils", "state": "present"}): self.module_main_command.side_effect = [ - (1, '', ''), + (1, "", ""), ] result = self.module_main(AnsibleFailJson) - self.assertFalse(result['changed']) - self.assertEqual(1, result['rc']) - self.assertEqual(['nfs-utils'], result['packages']) - self.module_main_command.assert_has_calls([ - call(['/testbin/rpm-ostree', 'install', '--allow-inactive', '--idempotent', '--unchanged-exit-77', 'nfs-utils']), - ]) + self.assertFalse(result["changed"]) + self.assertEqual(1, result["rc"]) + self.assertEqual(["nfs-utils"], result["packages"]) + self.module_main_command.assert_has_calls( + [ + call( + [ + "/testbin/rpm-ostree", + "install", + "--allow-inactive", + "--idempotent", + "--unchanged-exit-77", + "nfs-utils", + ] + ), + ] + ) def test_absent(self): - with set_module_args({'name': 'nfs-utils', 'state': 'absent'}): + with set_module_args({"name": "nfs-utils", "state": "absent"}): self.module_main_command.side_effect = [ - (0, '', ''), + (0, "", ""), ] result = self.module_main(AnsibleExitJson) - self.assertTrue(result['changed']) - self.assertEqual(['nfs-utils'], result['packages']) - self.module_main_command.assert_has_calls([ - call(['/testbin/rpm-ostree', 'uninstall', '--allow-inactive', '--idempotent', '--unchanged-exit-77', 'nfs-utils']), - ]) + self.assertTrue(result["changed"]) + self.assertEqual(["nfs-utils"], result["packages"]) + self.module_main_command.assert_has_calls( + [ + call( + [ + "/testbin/rpm-ostree", + "uninstall", + "--allow-inactive", + "--idempotent", + "--unchanged-exit-77", + "nfs-utils", + ] + ), + ] + ) def test_absent_failed(self): - with set_module_args({'name': 'nfs-utils', 'state': 'absent'}): + with set_module_args({"name": "nfs-utils", "state": "absent"}): self.module_main_command.side_effect = [ - (1, '', ''), + (1, "", ""), ] result = self.module_main(AnsibleFailJson) - self.assertFalse(result['changed']) - self.assertEqual(1, result['rc']) - self.assertEqual(['nfs-utils'], result['packages']) - self.module_main_command.assert_has_calls([ - call(['/testbin/rpm-ostree', 'uninstall', '--allow-inactive', '--idempotent', '--unchanged-exit-77', 'nfs-utils']), - ]) + self.assertFalse(result["changed"]) + self.assertEqual(1, result["rc"]) + self.assertEqual(["nfs-utils"], result["packages"]) + self.module_main_command.assert_has_calls( + [ + call( + [ + "/testbin/rpm-ostree", + "uninstall", + "--allow-inactive", + "--idempotent", + "--unchanged-exit-77", + "nfs-utils", + ] + ), + ] + ) diff --git 
a/tests/unit/plugins/modules/test_rundeck_acl_policy.py b/tests/unit/plugins/modules/test_rundeck_acl_policy.py index 597b569b711..e9af79aadc1 100644 --- a/tests/unit/plugins/modules/test_rundeck_acl_policy.py +++ b/tests/unit/plugins/modules/test_rundeck_acl_policy.py @@ -1,4 +1,3 @@ - # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -13,7 +12,7 @@ set_module_args, AnsibleExitJson, exit_json, - fail_json + fail_json, ) @@ -35,25 +34,25 @@ def module(): @pytest.mark.parametrize("project, prefix", PROJECT_TABLE) -@patch.object(rundeck_acl_policy, 'api_request') +@patch.object(rundeck_acl_policy, "api_request") def test_acl_create(api_request_mock, project, prefix): """Test creating a new ACL, both system-level and project-level.""" name = "my_policy" policy = "test_policy_yaml" # simulate: GET→404, POST→201, final GET→200 api_request_mock.side_effect = [ - (None, {'status': 404}), - (None, {'status': 201}), - ({"contents": policy}, {'status': 200}), + (None, {"status": 404}), + (None, {"status": 201}), + ({"contents": policy}, {"status": 200}), ] args = { - 'name': name, - 'url': "https://rundeck.example.org", - 'api_token': "mytoken", - 'policy': policy, + "name": name, + "url": "https://rundeck.example.org", + "api_token": "mytoken", + "policy": policy, } if project: - args['project'] = project + args["project"] = project with pytest.raises(AnsibleExitJson): with set_module_args(args): @@ -62,27 +61,27 @@ def test_acl_create(api_request_mock, project, prefix): # should have done GET → POST → GET assert api_request_mock.call_count == 3 args, kwargs = api_request_mock.call_args_list[1] - assert kwargs['endpoint'] == f"{prefix}/{name}.aclpolicy" - assert kwargs['method'] == 'POST' + assert kwargs["endpoint"] == f"{prefix}/{name}.aclpolicy" + assert kwargs["method"] == "POST" @pytest.mark.parametrize("project, prefix", PROJECT_TABLE) -@patch.object(rundeck_acl_policy, 'api_request') +@patch.object(rundeck_acl_policy, "api_request") def test_acl_unchanged(api_request_mock, project, prefix): """Test no-op when existing ACL contents match the desired policy.""" name = "unchanged_policy" policy = "same_policy_yaml" # first GET returns matching contents - api_request_mock.return_value = ({"contents": policy}, {'status': 200}) + api_request_mock.return_value = ({"contents": policy}, {"status": 200}) args = { - 'name': name, - 'url': "https://rundeck.example.org", - 'api_token': "mytoken", - 'policy': policy, + "name": name, + "url": "https://rundeck.example.org", + "api_token": "mytoken", + "policy": policy, } if project: - args['project'] = project + args["project"] = project with pytest.raises(AnsibleExitJson): with set_module_args(args): @@ -91,30 +90,30 @@ def test_acl_unchanged(api_request_mock, project, prefix): # only a single GET assert api_request_mock.call_count == 1 args, kwargs = api_request_mock.call_args - assert kwargs['endpoint'] == f"{prefix}/{name}.aclpolicy" + assert kwargs["endpoint"] == f"{prefix}/{name}.aclpolicy" # default method is GET - assert kwargs.get('method', 'GET') == 'GET' + assert kwargs.get("method", "GET") == "GET" @pytest.mark.parametrize("project, prefix", PROJECT_TABLE) -@patch.object(rundeck_acl_policy, 'api_request') +@patch.object(rundeck_acl_policy, "api_request") def test_acl_remove(api_request_mock, project, prefix): """Test removing an existing ACL, both system- and project-level.""" name = "remove_me" # GET 
finds it, DELETE removes it api_request_mock.side_effect = [ - ({"contents": "old_yaml"}, {'status': 200}), - (None, {'status': 204}), + ({"contents": "old_yaml"}, {"status": 200}), + (None, {"status": 204}), ] args = { - 'name': name, - 'url': "https://rundeck.example.org", - 'api_token': "mytoken", - 'state': 'absent', + "name": name, + "url": "https://rundeck.example.org", + "api_token": "mytoken", + "state": "absent", } if project: - args['project'] = project + args["project"] = project with pytest.raises(AnsibleExitJson): with set_module_args(args): @@ -123,26 +122,26 @@ def test_acl_remove(api_request_mock, project, prefix): # GET → DELETE assert api_request_mock.call_count == 2 args, kwargs = api_request_mock.call_args_list[1] - assert kwargs['endpoint'] == f"{prefix}/{name}.aclpolicy" - assert kwargs['method'] == 'DELETE' + assert kwargs["endpoint"] == f"{prefix}/{name}.aclpolicy" + assert kwargs["method"] == "DELETE" @pytest.mark.parametrize("project, prefix", PROJECT_TABLE) -@patch.object(rundeck_acl_policy, 'api_request') +@patch.object(rundeck_acl_policy, "api_request") def test_acl_remove_nonexistent(api_request_mock, project, prefix): """Test removing a non-existent ACL results in no change.""" name = "not_there" # GET returns 404 - api_request_mock.return_value = (None, {'status': 404}) + api_request_mock.return_value = (None, {"status": 404}) args = { - 'name': name, - 'url': "https://rundeck.example.org", - 'api_token': "mytoken", - 'state': 'absent', + "name": name, + "url": "https://rundeck.example.org", + "api_token": "mytoken", + "state": "absent", } if project: - args['project'] = project + args["project"] = project with pytest.raises(AnsibleExitJson): with set_module_args(args): @@ -151,5 +150,5 @@ def test_acl_remove_nonexistent(api_request_mock, project, prefix): # only the initial GET assert api_request_mock.call_count == 1 args, kwargs = api_request_mock.call_args - assert kwargs['endpoint'] == f"{prefix}/{name}.aclpolicy" - assert kwargs.get('method', 'GET') == 'GET' + assert kwargs["endpoint"] == f"{prefix}/{name}.aclpolicy" + assert kwargs.get("method", "GET") == "GET" diff --git a/tests/unit/plugins/modules/test_scaleway_compute_private_network.py b/tests/unit/plugins/modules/test_scaleway_compute_private_network.py index 15d0a694120..244ebc590bd 100644 --- a/tests/unit/plugins/modules/test_scaleway_compute_private_network.py +++ b/tests/unit/plugins/modules/test_scaleway_compute_private_network.py @@ -15,43 +15,45 @@ def response_without_nics(): - info = {"status": 200, - "body": '{ "private_nics": []}' - } + info = {"status": 200, "body": '{ "private_nics": []}'} return Response(None, info) def response_with_nics(): - info = {"status": 200, - "body": ('{ "private_nics": [{' - '"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",' - '"private_network_id": "b589b4cd-ef5g-678h-90i1-jk2345678l90",' - '"server_id": "c004b4cd-ef5g-678h-90i1-jk2345678l90",' - '"mac_address": "02:00:00:00:12:23",' - '"state": "available",' - '"creation_date": "2022-03-30T06:25:28.155973+00:00",' - '"modification_date": "2022-03-30T06:25:28.155973+00:00",' - '"zone": "fr-par-1"' - '}]}' - ) - } + info = { + "status": 200, + "body": ( + '{ "private_nics": [{' + '"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",' + '"private_network_id": "b589b4cd-ef5g-678h-90i1-jk2345678l90",' + '"server_id": "c004b4cd-ef5g-678h-90i1-jk2345678l90",' + '"mac_address": "02:00:00:00:12:23",' + '"state": "available",' + '"creation_date": "2022-03-30T06:25:28.155973+00:00",' + '"modification_date": 
"2022-03-30T06:25:28.155973+00:00",' + '"zone": "fr-par-1"' + "}]}" + ), + } return Response(None, info) def response_when_add_nics(): - info = {"status": 200, - "body": ('{ "private_nics": {' - '"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",' - '"private_network_id": "b589b4cd-ef5g-678h-90i1-jk2345678l90",' - '"server_id": "c004b4cd-ef5g-678h-90i1-jk2345678l90",' - '"mac_address": "02:00:00:00:12:23",' - '"state": "available",' - '"creation_date": "2022-03-30T06:25:28.155973+00:00",' - '"modification_date": "2022-03-30T06:25:28.155973+00:00",' - '"zone": "fr-par-1"' - '}}' - ) - } + info = { + "status": 200, + "body": ( + '{ "private_nics": {' + '"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",' + '"private_network_id": "b589b4cd-ef5g-678h-90i1-jk2345678l90",' + '"server_id": "c004b4cd-ef5g-678h-90i1-jk2345678l90",' + '"mac_address": "02:00:00:00:12:23",' + '"state": "available",' + '"creation_date": "2022-03-30T06:25:28.155973+00:00",' + '"modification_date": "2022-03-30T06:25:28.155973+00:00",' + '"zone": "fr-par-1"' + "}}" + ), + } return Response(None, info) @@ -67,26 +69,27 @@ def test_scaleway_private_network_without_arguments(capfd): out, err = capfd.readouterr() assert not err - assert json.loads(out)['failed'] + assert json.loads(out)["failed"] def test_scaleway_add_nic(capfd): - os.environ['SCW_API_TOKEN'] = 'notrealtoken' - pnid = 'b589b4cd-ef5g-678h-90i1-jk2345678l90' - cid = 'c004b4cd-ef5g-678h-90i1-jk2345678l90' + os.environ["SCW_API_TOKEN"] = "notrealtoken" + pnid = "b589b4cd-ef5g-678h-90i1-jk2345678l90" + cid = "c004b4cd-ef5g-678h-90i1-jk2345678l90" url = f"servers/{cid}/private_nics" - with set_module_args({ - "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "state": "present", - "region": "par1", - "compute_id": cid, - "private_network_id": pnid - }): - - with patch.object(Scaleway, 'get') as mock_scw_get: + with set_module_args( + { + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "state": "present", + "region": "par1", + "compute_id": cid, + "private_network_id": pnid, + } + ): + with patch.object(Scaleway, "get") as mock_scw_get: mock_scw_get.return_value = response_without_nics() - with patch.object(Scaleway, 'post') as mock_scw_post: + with patch.object(Scaleway, "post") as mock_scw_post: mock_scw_post.return_value = response_when_add_nics() with pytest.raises(SystemExit) as results: scaleway_compute_private_network.main() @@ -94,56 +97,58 @@ def test_scaleway_add_nic(capfd): mock_scw_get.assert_any_call(url) out, err = capfd.readouterr() - del os.environ['SCW_API_TOKEN'] + del os.environ["SCW_API_TOKEN"] assert not err - assert json.loads(out)['changed'] + assert json.loads(out)["changed"] def test_scaleway_add_existing_nic(capfd): - os.environ['SCW_API_TOKEN'] = 'notrealtoken' - pnid = 'b589b4cd-ef5g-678h-90i1-jk2345678l90' - cid = 'c004b4cd-ef5g-678h-90i1-jk2345678l90' + os.environ["SCW_API_TOKEN"] = "notrealtoken" + pnid = "b589b4cd-ef5g-678h-90i1-jk2345678l90" + cid = "c004b4cd-ef5g-678h-90i1-jk2345678l90" url = f"servers/{cid}/private_nics" - with set_module_args({ - "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "state": "present", - "region": "par1", - "compute_id": cid, - "private_network_id": pnid - }): - - with patch.object(Scaleway, 'get') as mock_scw_get: + with set_module_args( + { + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "state": "present", + "region": "par1", + "compute_id": cid, + "private_network_id": pnid, + } + ): + with patch.object(Scaleway, "get") as mock_scw_get: mock_scw_get.return_value = response_with_nics() with 
pytest.raises(SystemExit) as results: scaleway_compute_private_network.main() mock_scw_get.assert_any_call(url) out, err = capfd.readouterr() - del os.environ['SCW_API_TOKEN'] + del os.environ["SCW_API_TOKEN"] assert not err - assert not json.loads(out)['changed'] + assert not json.loads(out)["changed"] def test_scaleway_remove_existing_nic(capfd): - os.environ['SCW_API_TOKEN'] = 'notrealtoken' - pnid = 'b589b4cd-ef5g-678h-90i1-jk2345678l90' - cid = 'c004b4cd-ef5g-678h-90i1-jk2345678l90' - nicid = 'c123b4cd-ef5g-678h-90i1-jk2345678l90' + os.environ["SCW_API_TOKEN"] = "notrealtoken" + pnid = "b589b4cd-ef5g-678h-90i1-jk2345678l90" + cid = "c004b4cd-ef5g-678h-90i1-jk2345678l90" + nicid = "c123b4cd-ef5g-678h-90i1-jk2345678l90" url = f"servers/{cid}/private_nics" urlremove = f"servers/{cid}/private_nics/{nicid}" - with set_module_args({ - "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "state": "absent", - "region": "par1", - "compute_id": cid, - "private_network_id": pnid - }): - - with patch.object(Scaleway, 'get') as mock_scw_get: + with set_module_args( + { + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "state": "absent", + "region": "par1", + "compute_id": cid, + "private_network_id": pnid, + } + ): + with patch.object(Scaleway, "get") as mock_scw_get: mock_scw_get.return_value = response_with_nics() - with patch.object(Scaleway, 'delete') as mock_scw_delete: + with patch.object(Scaleway, "delete") as mock_scw_delete: mock_scw_delete.return_value = response_remove_nics() with pytest.raises(SystemExit) as results: scaleway_compute_private_network.main() @@ -152,32 +157,33 @@ def test_scaleway_remove_existing_nic(capfd): out, err = capfd.readouterr() - del os.environ['SCW_API_TOKEN'] + del os.environ["SCW_API_TOKEN"] assert not err - assert json.loads(out)['changed'] + assert json.loads(out)["changed"] def test_scaleway_remove_absent_nic(capfd): - os.environ['SCW_API_TOKEN'] = 'notrealtoken' - pnid = 'b589b4cd-ef5g-678h-90i1-jk2345678l90' - cid = 'c004b4cd-ef5g-678h-90i1-jk2345678l90' + os.environ["SCW_API_TOKEN"] = "notrealtoken" + pnid = "b589b4cd-ef5g-678h-90i1-jk2345678l90" + cid = "c004b4cd-ef5g-678h-90i1-jk2345678l90" url = f"servers/{cid}/private_nics" - with set_module_args({ - "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "state": "absent", - "region": "par1", - "compute_id": cid, - "private_network_id": pnid - }): - - with patch.object(Scaleway, 'get') as mock_scw_get: + with set_module_args( + { + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "state": "absent", + "region": "par1", + "compute_id": cid, + "private_network_id": pnid, + } + ): + with patch.object(Scaleway, "get") as mock_scw_get: mock_scw_get.return_value = response_without_nics() with pytest.raises(SystemExit) as results: scaleway_compute_private_network.main() mock_scw_get.assert_any_call(url) out, err = capfd.readouterr() - del os.environ['SCW_API_TOKEN'] + del os.environ["SCW_API_TOKEN"] assert not err - assert not json.loads(out)['changed'] + assert not json.loads(out)["changed"] diff --git a/tests/unit/plugins/modules/test_scaleway_private_network.py b/tests/unit/plugins/modules/test_scaleway_private_network.py index 0d8089c0ccc..0424e269e73 100644 --- a/tests/unit/plugins/modules/test_scaleway_private_network.py +++ b/tests/unit/plugins/modules/test_scaleway_private_network.py @@ -1,4 +1,3 @@ - # Copyright (c) 2019, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: 
GPL-3.0-or-later @@ -16,51 +15,55 @@ def response_with_zero_network(): - info = {"status": 200, - "body": '{ "private_networks": [], "total_count": 0}' - } + info = {"status": 200, "body": '{ "private_networks": [], "total_count": 0}'} return Response(None, info) def response_with_new_network(): - info = {"status": 200, - "body": ('{ "private_networks": [{' - '"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",' - '"name": "new_network_name",' - '"tags": ["tag1"]' - '}], "total_count": 1}' - ) - } + info = { + "status": 200, + "body": ( + '{ "private_networks": [{' + '"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",' + '"name": "new_network_name",' + '"tags": ["tag1"]' + '}], "total_count": 1}' + ), + } return Response(None, info) def response_create_new(): - info = {"status": 200, - "body": ('{"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",' - '"name": "anoter_network",' - '"organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",' - '"project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",' - '"zone": "fr-par-2",' - '"tags": ["tag1"],' - '"created_at": "2019-04-18T15:27:24.177854Z",' - '"updated_at": "2019-04-18T15:27:24.177854Z"}' - ) - } + info = { + "status": 200, + "body": ( + '{"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",' + '"name": "anoter_network",' + '"organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",' + '"project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",' + '"zone": "fr-par-2",' + '"tags": ["tag1"],' + '"created_at": "2019-04-18T15:27:24.177854Z",' + '"updated_at": "2019-04-18T15:27:24.177854Z"}' + ), + } return Response(None, info) def response_create_new_newtag(): - info = {"status": 200, - "body": ('{"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",' - '"name": "anoter_network",' - '"organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",' - '"project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",' - '"zone": "fr-par-2",' - '"tags": ["newtag"],' - '"created_at": "2019-04-18T15:27:24.177854Z",' - '"updated_at": "2020-01-18T15:27:24.177854Z"}' - ) - } + info = { + "status": 200, + "body": ( + '{"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",' + '"name": "anoter_network",' + '"organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",' + '"project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",' + '"zone": "fr-par-2",' + '"tags": ["newtag"],' + '"created_at": "2019-04-18T15:27:24.177854Z",' + '"updated_at": "2020-01-18T15:27:24.177854Z"}' + ), + } return Response(None, info) @@ -76,127 +79,146 @@ def test_scaleway_private_network_without_arguments(capfd): out, err = capfd.readouterr() assert not err - assert json.loads(out)['failed'] + assert json.loads(out)["failed"] def test_scaleway_create_pn(capfd): - with set_module_args({ - "state": "present", - "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "region": "par2", - "name": "new_network_name", - "tags": ["tag1"] - }): - - os.environ['SCW_API_TOKEN'] = 'notrealtoken' - with patch.object(Scaleway, 'get') as mock_scw_get: + with set_module_args( + { + "state": "present", + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "region": "par2", + "name": "new_network_name", + "tags": ["tag1"], + } + ): + os.environ["SCW_API_TOKEN"] = "notrealtoken" + with patch.object(Scaleway, "get") as mock_scw_get: mock_scw_get.return_value = response_with_zero_network() - with patch.object(Scaleway, 'post') as mock_scw_post: + with patch.object(Scaleway, "post") as mock_scw_post: mock_scw_post.return_value = response_create_new() with pytest.raises(SystemExit) as results: scaleway_private_network.main() - 
mock_scw_post.assert_any_call(path='private-networks/', data={'name': 'new_network_name', - 'project_id': 'a123b4cd-ef5g-678h-90i1-jk2345678l90', - 'tags': ['tag1']}) - mock_scw_get.assert_any_call('private-networks', params={'name': 'new_network_name', 'order_by': 'name_asc', 'page': 1, 'page_size': 10}) + mock_scw_post.assert_any_call( + path="private-networks/", + data={"name": "new_network_name", "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", "tags": ["tag1"]}, + ) + mock_scw_get.assert_any_call( + "private-networks", params={"name": "new_network_name", "order_by": "name_asc", "page": 1, "page_size": 10} + ) out, err = capfd.readouterr() - del os.environ['SCW_API_TOKEN'] + del os.environ["SCW_API_TOKEN"] def test_scaleway_existing_pn(capfd): - with set_module_args({ - "state": "present", - "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "region": "par2", - "name": "new_network_name", - "tags": ["tag1"] - }): - - os.environ['SCW_API_TOKEN'] = 'notrealtoken' - with patch.object(Scaleway, 'get') as mock_scw_get: + with set_module_args( + { + "state": "present", + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "region": "par2", + "name": "new_network_name", + "tags": ["tag1"], + } + ): + os.environ["SCW_API_TOKEN"] = "notrealtoken" + with patch.object(Scaleway, "get") as mock_scw_get: mock_scw_get.return_value = response_with_new_network() with pytest.raises(SystemExit) as results: scaleway_private_network.main() - mock_scw_get.assert_any_call('private-networks', params={'name': 'new_network_name', 'order_by': 'name_asc', 'page': 1, 'page_size': 10}) + mock_scw_get.assert_any_call( + "private-networks", params={"name": "new_network_name", "order_by": "name_asc", "page": 1, "page_size": 10} + ) out, err = capfd.readouterr() - del os.environ['SCW_API_TOKEN'] + del os.environ["SCW_API_TOKEN"] assert not err - assert not json.loads(out)['changed'] + assert not json.loads(out)["changed"] def test_scaleway_add_tag_pn(capfd): - with set_module_args({ - "state": "present", - "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "region": "par2", - "name": "new_network_name", - "tags": ["newtag"] - }): - - os.environ['SCW_API_TOKEN'] = 'notrealtoken' - with patch.object(Scaleway, 'get') as mock_scw_get: + with set_module_args( + { + "state": "present", + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "region": "par2", + "name": "new_network_name", + "tags": ["newtag"], + } + ): + os.environ["SCW_API_TOKEN"] = "notrealtoken" + with patch.object(Scaleway, "get") as mock_scw_get: mock_scw_get.return_value = response_with_new_network() - with patch.object(Scaleway, 'patch') as mock_scw_patch: + with patch.object(Scaleway, "patch") as mock_scw_patch: mock_scw_patch.return_value = response_create_new_newtag() with pytest.raises(SystemExit) as results: scaleway_private_network.main() - mock_scw_patch.assert_any_call(path='private-networks/c123b4cd-ef5g-678h-90i1-jk2345678l90', data={'name': 'new_network_name', 'tags': ['newtag']}) - mock_scw_get.assert_any_call('private-networks', params={'name': 'new_network_name', 'order_by': 'name_asc', 'page': 1, 'page_size': 10}) + mock_scw_patch.assert_any_call( + path="private-networks/c123b4cd-ef5g-678h-90i1-jk2345678l90", + data={"name": "new_network_name", "tags": ["newtag"]}, + ) + mock_scw_get.assert_any_call( + "private-networks", params={"name": "new_network_name", "order_by": "name_asc", "page": 1, "page_size": 10} + ) out, err = capfd.readouterr() - del os.environ['SCW_API_TOKEN'] + del os.environ["SCW_API_TOKEN"] assert not err - 
assert json.loads(out)['changed'] + assert json.loads(out)["changed"] def test_scaleway_remove_pn(capfd): - with set_module_args({ - "state": "absent", - "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "region": "par2", - "name": "new_network_name", - "tags": ["newtag"] - }): - - os.environ['SCW_API_TOKEN'] = 'notrealtoken' - with patch.object(Scaleway, 'get') as mock_scw_get: + with set_module_args( + { + "state": "absent", + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "region": "par2", + "name": "new_network_name", + "tags": ["newtag"], + } + ): + os.environ["SCW_API_TOKEN"] = "notrealtoken" + with patch.object(Scaleway, "get") as mock_scw_get: mock_scw_get.return_value = response_with_new_network() - with patch.object(Scaleway, 'delete') as mock_scw_delete: + with patch.object(Scaleway, "delete") as mock_scw_delete: mock_scw_delete.return_value = response_delete() with pytest.raises(SystemExit) as results: scaleway_private_network.main() - mock_scw_delete.assert_any_call('private-networks/c123b4cd-ef5g-678h-90i1-jk2345678l90') - mock_scw_get.assert_any_call('private-networks', params={'name': 'new_network_name', 'order_by': 'name_asc', 'page': 1, 'page_size': 10}) + mock_scw_delete.assert_any_call("private-networks/c123b4cd-ef5g-678h-90i1-jk2345678l90") + mock_scw_get.assert_any_call( + "private-networks", params={"name": "new_network_name", "order_by": "name_asc", "page": 1, "page_size": 10} + ) out, err = capfd.readouterr() - del os.environ['SCW_API_TOKEN'] + del os.environ["SCW_API_TOKEN"] assert not err - assert json.loads(out)['changed'] + assert json.loads(out)["changed"] def test_scaleway_absent_pn_not_exists(capfd): - with set_module_args({ - "state": "absent", - "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "region": "par2", - "name": "new_network_name", - "tags": ["newtag"] - }): - - os.environ['SCW_API_TOKEN'] = 'notrealtoken' - with patch.object(Scaleway, 'get') as mock_scw_get: + with set_module_args( + { + "state": "absent", + "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "region": "par2", + "name": "new_network_name", + "tags": ["newtag"], + } + ): + os.environ["SCW_API_TOKEN"] = "notrealtoken" + with patch.object(Scaleway, "get") as mock_scw_get: mock_scw_get.return_value = response_with_zero_network() with pytest.raises(SystemExit) as results: scaleway_private_network.main() - mock_scw_get.assert_any_call('private-networks', params={'name': 'new_network_name', 'order_by': 'name_asc', 'page': 1, 'page_size': 10}) + mock_scw_get.assert_any_call( + "private-networks", params={"name": "new_network_name", "order_by": "name_asc", "page": 1, "page_size": 10} + ) out, err = capfd.readouterr() - del os.environ['SCW_API_TOKEN'] + del os.environ["SCW_API_TOKEN"] assert not err - assert not json.loads(out)['changed'] + assert not json.loads(out)["changed"] diff --git a/tests/unit/plugins/modules/test_simpleinit_msb.py b/tests/unit/plugins/modules/test_simpleinit_msb.py index 3daa7d4c342..5103dfab865 100644 --- a/tests/unit/plugins/modules/test_simpleinit_msb.py +++ b/tests/unit/plugins/modules/test_simpleinit_msb.py @@ -6,7 +6,11 @@ from unittest.mock import patch -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) from ansible_collections.community.general.plugins.modules.simpleinit_msb import SimpleinitMSB, 
build_module @@ -83,44 +87,49 @@ class TestSimpleinitMSB(ModuleTestCase): - def setUp(self): super().setUp() def tearDown(self): super().tearDown() - @patch('os.path.exists', return_value=True) - @patch('ansible.module_utils.basic.AnsibleModule.get_bin_path', return_value="/sbin/telinit") + @patch("os.path.exists", return_value=True) + @patch("ansible.module_utils.basic.AnsibleModule.get_bin_path", return_value="/sbin/telinit") def test_get_service_tools(self, *args, **kwargs): - with set_module_args({ - 'name': 'smgl-suspend-single', - 'state': 'running', - }): + with set_module_args( + { + "name": "smgl-suspend-single", + "state": "running", + } + ): simpleinit_msb = SimpleinitMSB(build_module()) simpleinit_msb.get_service_tools() self.assertEqual(simpleinit_msb.telinit_cmd, "/sbin/telinit") - @patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.execute_command') + @patch("ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.execute_command") def test_service_exists(self, execute_command): - with set_module_args({ - 'name': 'smgl-suspend-single', - 'state': 'running', - }): + with set_module_args( + { + "name": "smgl-suspend-single", + "state": "running", + } + ): simpleinit_msb = SimpleinitMSB(build_module()) execute_command.return_value = (0, _TELINIT_LIST, "") simpleinit_msb.service_exists() - @patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.execute_command') + @patch("ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.execute_command") def test_service_exists_not(self, execute_command): - with set_module_args({ - 'name': 'ntp', - 'state': 'running', - }): + with set_module_args( + { + "name": "ntp", + "state": "running", + } + ): simpleinit_msb = SimpleinitMSB(build_module()) execute_command.return_value = (0, _TELINIT_LIST, "") @@ -130,14 +139,16 @@ def test_service_exists_not(self, execute_command): self.assertEqual("telinit could not find the requested service: ntp", context.exception.args[0]["msg"]) - @patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_exists') - @patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.execute_command') + @patch("ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_exists") + @patch("ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.execute_command") def test_check_service_enabled(self, execute_command, service_exists): - with set_module_args({ - 'name': 'nscd', - 'state': 'running', - 'enabled': 'true', - }): + with set_module_args( + { + "name": "nscd", + "state": "running", + "enabled": "true", + } + ): simpleinit_msb = SimpleinitMSB(build_module()) service_exists.return_value = True @@ -146,21 +157,26 @@ def test_check_service_enabled(self, execute_command, service_exists): self.assertTrue(simpleinit_msb.service_enabled()) # Race condition check - with patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_enabled', return_value=False): + with patch( + "ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_enabled", + return_value=False, + ): execute_command.return_value = (0, "", _TELINIT_ALREADY_ENABLED) simpleinit_msb.service_enable() self.assertFalse(simpleinit_msb.changed) - 
@patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_exists') - @patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.execute_command') + @patch("ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_exists") + @patch("ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.execute_command") def test_check_service_disabled(self, execute_command, service_exists): - with set_module_args({ - 'name': 'sysstat', - 'state': 'stopped', - 'enabled': 'false', - }): + with set_module_args( + { + "name": "sysstat", + "state": "stopped", + "enabled": "false", + } + ): simpleinit_msb = SimpleinitMSB(build_module()) service_exists.return_value = True @@ -169,31 +185,38 @@ def test_check_service_disabled(self, execute_command, service_exists): self.assertFalse(simpleinit_msb.service_enabled()) # Race condition check - with patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_enabled', return_value=True): + with patch( + "ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_enabled", + return_value=True, + ): execute_command.return_value = (0, "", _TELINIT_ALREADY_DISABLED) simpleinit_msb.service_enable() self.assertFalse(simpleinit_msb.changed) - @patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_control') + @patch("ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_control") def test_check_service_running(self, service_control): - with set_module_args({ - 'name': 'sshd', - 'state': 'running', - }): + with set_module_args( + { + "name": "sshd", + "state": "running", + } + ): simpleinit_msb = SimpleinitMSB(build_module()) service_control.return_value = (0, _TELINIT_STATUS_RUNNING, "") self.assertFalse(simpleinit_msb.get_service_status()) - @patch('ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_control') + @patch("ansible_collections.community.general.plugins.modules.simpleinit_msb.SimpleinitMSB.service_control") def test_check_service_running_not(self, service_control): - with set_module_args({ - 'name': 'smgl-metalog', - 'state': 'running', - }): + with set_module_args( + { + "name": "smgl-metalog", + "state": "running", + } + ): simpleinit_msb = SimpleinitMSB(build_module()) service_control.return_value = (0, _TELINIT_STATUS_RUNNING_NOT, "") diff --git a/tests/unit/plugins/modules/test_slack.py b/tests/unit/plugins/modules/test_slack.py index 4fdea48c99d..d43a455a692 100644 --- a/tests/unit/plugins/modules/test_slack.py +++ b/tests/unit/plugins/modules/test_slack.py @@ -8,11 +8,15 @@ import pytest from unittest.mock import Mock, patch from ansible_collections.community.general.plugins.modules import slack -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) class TestSlackModule(ModuleTestCase): - def setUp(self): super().setUp() self.module = slack @@ -22,7 +26,7 @@ def tearDown(self): @pytest.fixture def fetch_url_mock(self, mocker): - return mocker.patch('ansible.module_utils.notification.slack.fetch_url') + return 
mocker.patch("ansible.module_utils.notification.slack.fetch_url") def test_without_required_parameters(self): """Failure must occurs when all parameters are missing""" @@ -32,66 +36,55 @@ def test_without_required_parameters(self): def test_invalid_old_token(self): """Failure if there is an old style token""" - with set_module_args({ - 'token': 'test', - }): + with set_module_args( + { + "token": "test", + } + ): with self.assertRaises(AnsibleFailJson): self.module.main() def test_successful_message(self): """tests sending a message. This is example 1 from the docs""" - with set_module_args({ - 'token': 'XXXX/YYYY/ZZZZ', - 'msg': 'test' - }): + with set_module_args({"token": "XXXX/YYYY/ZZZZ", "msg": "test"}): with patch.object(slack, "fetch_url") as fetch_url_mock: fetch_url_mock.return_value = (None, {"status": 200}) with self.assertRaises(AnsibleExitJson): self.module.main() self.assertTrue(fetch_url_mock.call_count, 1) - call_data = json.loads(fetch_url_mock.call_args[1]['data']) - assert call_data['username'] == "Ansible" - assert call_data['text'] == "test" - assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ" + call_data = json.loads(fetch_url_mock.call_args[1]["data"]) + assert call_data["username"] == "Ansible" + assert call_data["text"] == "test" + assert fetch_url_mock.call_args[1]["url"] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ" def test_failed_message(self): """tests failing to send a message""" - with set_module_args({ - 'token': 'XXXX/YYYY/ZZZZ', - 'msg': 'test' - }): + with set_module_args({"token": "XXXX/YYYY/ZZZZ", "msg": "test"}): with patch.object(slack, "fetch_url") as fetch_url_mock: - fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'test'}) + fetch_url_mock.return_value = (None, {"status": 404, "msg": "test"}) with self.assertRaises(AnsibleFailJson): self.module.main() def test_message_with_thread(self): """tests sending a message with a thread""" - with set_module_args({ - 'token': 'XXXX/YYYY/ZZZZ', - 'msg': 'test', - 'thread_id': '100.00' - }): + with set_module_args({"token": "XXXX/YYYY/ZZZZ", "msg": "test", "thread_id": "100.00"}): with patch.object(slack, "fetch_url") as fetch_url_mock: fetch_url_mock.return_value = (None, {"status": 200}) with self.assertRaises(AnsibleExitJson): self.module.main() self.assertTrue(fetch_url_mock.call_count, 1) - call_data = json.loads(fetch_url_mock.call_args[1]['data']) - assert call_data['username'] == "Ansible" - assert call_data['text'] == "test" - assert call_data['thread_ts'] == '100.00' - assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ" + call_data = json.loads(fetch_url_mock.call_args[1]["data"]) + assert call_data["username"] == "Ansible" + assert call_data["text"] == "test" + assert call_data["thread_ts"] == "100.00" + assert fetch_url_mock.call_args[1]["url"] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ" # https://github.com/ansible-collections/community.general/issues/1097 def test_ts_in_message_does_not_cause_edit(self): - with set_module_args({ - 'token': 'xoxa-123456789abcdef', - 'msg': 'test with ts' - }): + with set_module_args({"token": "xoxa-123456789abcdef", "msg": "test with ts"}): with patch.object(slack, "fetch_url") as fetch_url_mock: mock_response = Mock() mock_response.read.return_value = '{"fake":"data"}' @@ -100,14 +93,10 @@ def test_ts_in_message_does_not_cause_edit(self): self.module.main() self.assertTrue(fetch_url_mock.call_count, 1) - 
self.assertEqual(fetch_url_mock.call_args[1]['url'], "https://slack.com/api/chat.postMessage") + self.assertEqual(fetch_url_mock.call_args[1]["url"], "https://slack.com/api/chat.postMessage") def test_govslack_message(self): - with set_module_args({ - 'token': 'xoxa-123456789abcdef', - 'domain': 'slack-gov.com', - 'msg': 'test with ts' - }): + with set_module_args({"token": "xoxa-123456789abcdef", "domain": "slack-gov.com", "msg": "test with ts"}): with patch.object(slack, "fetch_url") as fetch_url_mock: mock_response = Mock() mock_response.read.return_value = '{"fake":"data"}' @@ -116,14 +105,10 @@ def test_govslack_message(self): self.module.main() self.assertTrue(fetch_url_mock.call_count, 1) - self.assertEqual(fetch_url_mock.call_args[1]['url'], "https://slack-gov.com/api/chat.postMessage") + self.assertEqual(fetch_url_mock.call_args[1]["url"], "https://slack-gov.com/api/chat.postMessage") def test_edit_message(self): - with set_module_args({ - 'token': 'xoxa-123456789abcdef', - 'msg': 'test2', - 'message_id': '12345' - }): + with set_module_args({"token": "xoxa-123456789abcdef", "msg": "test2", "message_id": "12345"}): with patch.object(slack, "fetch_url") as fetch_url_mock: mock_response = Mock() mock_response.read.return_value = '{"messages":[{"ts":"12345","msg":"test1"}]}' @@ -135,74 +120,73 @@ def test_edit_message(self): self.module.main() self.assertTrue(fetch_url_mock.call_count, 2) - self.assertEqual(fetch_url_mock.call_args[1]['url'], "https://slack.com/api/chat.update") - call_data = json.loads(fetch_url_mock.call_args[1]['data']) - self.assertEqual(call_data['ts'], "12345") + self.assertEqual(fetch_url_mock.call_args[1]["url"], "https://slack.com/api/chat.update") + call_data = json.loads(fetch_url_mock.call_args[1]["data"]) + self.assertEqual(call_data["ts"], "12345") def test_message_with_blocks(self): """tests sending a message with blocks""" - with set_module_args({ - 'token': 'XXXX/YYYY/ZZZZ', - 'msg': 'test', - 'blocks': [{ - 'type': 'section', - 'text': { - 'type': 'mrkdwn', - 'text': '*test*' - }, - 'accessory': { - 'type': 'image', - 'image_url': 'https://docs.ansible.com/favicon.ico', - 'alt_text': 'test' - } - }, { - 'type': 'section', - 'text': { - 'type': 'plain_text', - 'text': 'test', - 'emoji': True - } - }] - }): + with set_module_args( + { + "token": "XXXX/YYYY/ZZZZ", + "msg": "test", + "blocks": [ + { + "type": "section", + "text": {"type": "mrkdwn", "text": "*test*"}, + "accessory": { + "type": "image", + "image_url": "https://docs.ansible.com/favicon.ico", + "alt_text": "test", + }, + }, + {"type": "section", "text": {"type": "plain_text", "text": "test", "emoji": True}}, + ], + } + ): with patch.object(slack, "fetch_url") as fetch_url_mock: fetch_url_mock.return_value = (None, {"status": 200}) with self.assertRaises(AnsibleExitJson): self.module.main() self.assertTrue(fetch_url_mock.call_count, 1) - call_data = json.loads(fetch_url_mock.call_args[1]['data']) - assert call_data['username'] == "Ansible" - assert call_data['blocks'][1]['text']['text'] == "test" - assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ" + call_data = json.loads(fetch_url_mock.call_args[1]["data"]) + assert call_data["username"] == "Ansible" + assert call_data["blocks"][1]["text"]["text"] == "test" + assert fetch_url_mock.call_args[1]["url"] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ" def test_message_with_invalid_color(self): """tests sending invalid color value to module""" - with set_module_args({ - 'token': 
'XXXX/YYYY/ZZZZ', - 'msg': 'test', - 'color': 'aa', - }): + with set_module_args( + { + "token": "XXXX/YYYY/ZZZZ", + "msg": "test", + "color": "aa", + } + ): with self.assertRaises(AnsibleFailJson) as exec_info: self.module.main() - msg = "Color value specified should be either one of" \ - " ['normal', 'good', 'warning', 'danger'] or any valid" \ - " hex value with length 3 or 6." - assert exec_info.exception.args[0]['msg'] == msg + msg = ( + "Color value specified should be either one of" + " ['normal', 'good', 'warning', 'danger'] or any valid" + " hex value with length 3 or 6." + ) + assert exec_info.exception.args[0]["msg"] == msg color_test = [ - ('#111111', True), - ('#00aabb', True), - ('#abc', True), - ('#gghhjj', False), - ('#ghj', False), - ('#a', False), - ('#aaaaaaaa', False), - ('', False), - ('aaaa', False), - ('$00aabb', False), - ('$00a', False), + ("#111111", True), + ("#00aabb", True), + ("#abc", True), + ("#gghhjj", False), + ("#ghj", False), + ("#a", False), + ("#aaaaaaaa", False), + ("", False), + ("aaaa", False), + ("$00aabb", False), + ("$00a", False), ] diff --git a/tests/unit/plugins/modules/test_snap.py b/tests/unit/plugins/modules/test_snap.py index e52f9689ef4..f2b0bafc97c 100644 --- a/tests/unit/plugins/modules/test_snap.py +++ b/tests/unit/plugins/modules/test_snap.py @@ -17,365 +17,365 @@ """ issue_6803_microk8s_out = ( - "\rEnsure prerequisites for \"microk8s\" are available /" - "\rDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" " - "\rDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" \\" - "\rDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" " - "\rDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" /\u001b[?25" - "\r\u001b[7m\u001b[0mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 0% 0B/s ages" - "\r\u001b[7m\u001b[0mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 0% 0B/s ages" - "\r\u001b[7m\u001b[0mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 0% 0B/s ages" - "\r\u001b[7m\u001b[0mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 0% 880kB/s 3m21" - "\r\u001b[7m\u001b[0mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 1% 2.82MB/s 1m02" - "\r\u001b[7mD\u001b[0mownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 2% 4.71MB/s 37.0" - "\r\u001b[7mDo\u001b[0mwnload snap \"microk8s\" (5372) from channel \"1.27/stable\" 4% 9.09MB/s 18.8" - "\r\u001b[7mDown\u001b[0mload snap \"microk8s\" (5372) from channel \"1.27/stable\" 6% 12.4MB/s 13.5" - "\r\u001b[7mDownl\u001b[0moad snap \"microk8s\" (5372) from channel \"1.27/stable\" 7% 14.5MB/s 11.3" - "\r\u001b[7mDownloa\u001b[0md snap \"microk8s\" (5372) from channel \"1.27/stable\" 9% 15.9MB/s 10.1" - "\r\u001b[7mDownload \u001b[0msnap \"microk8s\" (5372) from channel \"1.27/stable\" 11% 18.0MB/s 8.75" - "\r\u001b[7mDownload s\u001b[0mnap \"microk8s\" (5372) from channel \"1.27/stable\" 13% 19.4MB/s 7.91" - "\r\u001b[7mDownload sn\u001b[0map \"microk8s\" (5372) from channel \"1.27/stable\" 15% 20.1MB/s 7.50" - "\r\u001b[7mDownload snap\u001b[0m \"microk8s\" (5372) from channel \"1.27/stable\" 17% 20.9MB/s 7.05" - "\r\u001b[7mDownload snap \"\u001b[0mmicrok8s\" (5372) from channel \"1.27/stable\" 19% 22.1MB/s 6.50" - "\r\u001b[7mDownload snap \"m\u001b[0microk8s\" (5372) from channel \"1.27/stable\" 21% 22.9MB/s 6.11" - "\r\u001b[7mDownload snap \"mic\u001b[0mrok8s\" (5372) from channel \"1.27/stable\" 23% 23.2MB/s 5.90" - "\r\u001b[7mDownload snap \"micr\u001b[0mok8s\" (5372) from 
channel \"1.27/stable\" 25% 23.9MB/s 5.58" - "\r\u001b[7mDownload snap \"microk\u001b[0m8s\" (5372) from channel \"1.27/stable\" 27% 24.5MB/s 5.30" - "\r\u001b[7mDownload snap \"microk8\u001b[0ms\" (5372) from channel \"1.27/stable\" 29% 24.9MB/s 5.09" - "\r\u001b[7mDownload snap \"microk8s\"\u001b[0m (5372) from channel \"1.27/stable\" 31% 25.4MB/s 4.85" - "\r\u001b[7mDownload snap \"microk8s\" (\u001b[0m5372) from channel \"1.27/stable\" 33% 25.8MB/s 4.63" - "\r\u001b[7mDownload snap \"microk8s\" (5\u001b[0m372) from channel \"1.27/stable\" 35% 26.2MB/s 4.42" - "\r\u001b[7mDownload snap \"microk8s\" (53\u001b[0m72) from channel \"1.27/stable\" 36% 26.3MB/s 4.30" - "\r\u001b[7mDownload snap \"microk8s\" (5372\u001b[0m) from channel \"1.27/stable\" 38% 26.7MB/s 4.10" - "\r\u001b[7mDownload snap \"microk8s\" (5372) \u001b[0mfrom channel \"1.27/stable\" 40% 26.9MB/s 3.95" - "\r\u001b[7mDownload snap \"microk8s\" (5372) f\u001b[0mrom channel \"1.27/stable\" 42% 27.2MB/s 3.77" - "\r\u001b[7mDownload snap \"microk8s\" (5372) fro\u001b[0mm channel \"1.27/stable\" 44% 27.4MB/s 3.63" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from\u001b[0m channel \"1.27/stable\" 46% 27.8MB/s 3.44" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from c\u001b[0mhannel \"1.27/stable\" 48% 27.9MB/s 3.31" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from cha\u001b[0mnnel \"1.27/stable\" 50% 28.1MB/s 3.15" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from chan\u001b[0mnel \"1.27/stable\" 52% 28.3MB/s 3.02" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channe\u001b[0ml \"1.27/stable\" 54% 28.5MB/s 2.87" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel\u001b[0m \"1.27/stable\" 56% 28.6MB/s 2.75" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \u001b[0m\"1.27/stable\" 57% 28.7MB/s 2.63" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1\u001b[0m.27/stable\" 60% 28.9MB/s 2.47" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.2\u001b[0m7/stable\" 62% 29.0MB/s 2.35" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27\u001b[0m/stable\" 63% 29.1MB/s 2.23" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/s\u001b[0mtable\" 65% 29.2MB/s 2.10" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/st\u001b[0mable\" 67% 29.4MB/s 1.97" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stab\u001b[0mle\" 69% 29.5MB/s 1.85" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stabl\u001b[0me\" 71% 29.5MB/s 1.74" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\"\u001b[0m 73% 29.7MB/s 1.59" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" \u001b[0m 75% 29.8MB/s 1.48" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" \u001b[0m 77% 29.8MB/s 1.37" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 7\u001b[0m9% 29.9MB/s 1.26" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 81\u001b[0m% 30.0MB/s 1.14" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 83% \u001b[0m30.1MB/s 1.01" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 84% 3\u001b[0m0.1MB/s 919m" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 86% 30.\u001b[0m1MB/s 810m" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 88% 30.2\u001b[0mMB/s 676m" - "\r\u001b[7mDownload snap \"microk8s\" 
(5372) from channel \"1.27/stable\" 91% 30.3MB\u001b[0m/s 555m" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 93% 30.4MB/s\u001b[0m 436m" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 95% 30.5MB/s \u001b[0m317m" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 96% 30.5MB/s 21\u001b[0m1m" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 98% 30.5MB/s 117\u001b[0mm" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 100% 30.5MB/s 11m\u001b[0m" - "\r\u001b[7mDownload snap \"microk8s\" (5372) from channel \"1.27/stable\" 100% 30.0MB/s 0.0ns\u001b[0" - "\rFetch and check assertions for snap \"microk8s\" (5372) " - "\rMount snap \"microk8s\" (5372) \\" - "\rMount snap \"microk8s\" (5372) " - "\rMount snap \"microk8s\" (5372) " - "\rMount snap \"microk8s\" (5372) " - "\rSetup snap \"microk8s\" (5372) security profiles \\" - "\rSetup snap \"microk8s\" (5372) security profiles " - "\rSetup snap \"microk8s\" (5372) security profiles " - "\rSetup snap \"microk8s\" (5372) security profiles " - "\rSetup snap \"microk8s\" (5372) security profiles \\" - "\rSetup snap \"microk8s\" (5372) security profiles " - "\rSetup snap \"microk8s\" (5372) security profiles " - "\rSetup snap \"microk8s\" (5372) security profiles " - "\rSetup snap \"microk8s\" (5372) security profiles \\" - "\rSetup snap \"microk8s\" (5372) security profiles " - "\rSetup snap \"microk8s\" (5372) security profiles " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present \\" - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present \\" - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present \\" - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present \\" - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present \\" - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present \\" - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present \\" - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present \\" - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present \\" - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap 
if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present \\" - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present \\" - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present \\" - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present \\" - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present \\" - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rRun install hook of \"microk8s\" snap if present " - "\rStart snap \"microk8s\" (5372) services \\" - "\rStart snap \"microk8s\" (5372) services " - "\rStart snap \"microk8s\" (5372) services " - "\rStart snap \"microk8s\" (5372) services " - "\rStart snap \"microk8s\" (5372) services \\" - "\rStart snap \"microk8s\" (5372) services " - "\rStart snap \"microk8s\" (5372) services " - "\rStart snap \"microk8s\" (5372) services " - "\rStart snap \"microk8s\" (5372) services \\" - "\rStart snap \"microk8s\" (5372) services " - "\rStart snap \"microk8s\" (5372) services " - "\rStart snap \"microk8s\" (5372) services " - "\rStart snap \"microk8s\" (5372) services \\" - "\rStart snap \"microk8s\" (5372) services " - "\rStart snap \"microk8s\" (5372) services " - "\rStart snap \"microk8s\" (5372) services " - "\rStart snap \"microk8s\" (5372) services \\" - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun configure hook of \"microk8s\" snap if present \\" - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun configure hook of \"microk8s\" snap if present \\" - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun configure hook of \"microk8s\" snap if present \\" - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun configure hook of \"microk8s\" snap if present \\" - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun configure hook of \"microk8s\" snap if present \\" - "\rRun configure hook of \"microk8s\" snap if present " - "\rRun service command \"restart\" for services [\"daemon-apiserver-proxy\"] of snap \"" + '\rEnsure prerequisites for "microk8s" are available /' + '\rDownload snap "microk8s" (5372) from channel "1.27/stable" ' + '\rDownload snap "microk8s" (5372) from channel "1.27/stable" \\' + '\rDownload snap "microk8s" 
(5372) from channel "1.27/stable" ' + '\rDownload snap "microk8s" (5372) from channel "1.27/stable" /\u001b[?25' + '\r\u001b[7m\u001b[0mDownload snap "microk8s" (5372) from channel "1.27/stable" 0% 0B/s ages' + '\r\u001b[7m\u001b[0mDownload snap "microk8s" (5372) from channel "1.27/stable" 0% 0B/s ages' + '\r\u001b[7m\u001b[0mDownload snap "microk8s" (5372) from channel "1.27/stable" 0% 0B/s ages' + '\r\u001b[7m\u001b[0mDownload snap "microk8s" (5372) from channel "1.27/stable" 0% 880kB/s 3m21' + '\r\u001b[7m\u001b[0mDownload snap "microk8s" (5372) from channel "1.27/stable" 1% 2.82MB/s 1m02' + '\r\u001b[7mD\u001b[0mownload snap "microk8s" (5372) from channel "1.27/stable" 2% 4.71MB/s 37.0' + '\r\u001b[7mDo\u001b[0mwnload snap "microk8s" (5372) from channel "1.27/stable" 4% 9.09MB/s 18.8' + '\r\u001b[7mDown\u001b[0mload snap "microk8s" (5372) from channel "1.27/stable" 6% 12.4MB/s 13.5' + '\r\u001b[7mDownl\u001b[0moad snap "microk8s" (5372) from channel "1.27/stable" 7% 14.5MB/s 11.3' + '\r\u001b[7mDownloa\u001b[0md snap "microk8s" (5372) from channel "1.27/stable" 9% 15.9MB/s 10.1' + '\r\u001b[7mDownload \u001b[0msnap "microk8s" (5372) from channel "1.27/stable" 11% 18.0MB/s 8.75' + '\r\u001b[7mDownload s\u001b[0mnap "microk8s" (5372) from channel "1.27/stable" 13% 19.4MB/s 7.91' + '\r\u001b[7mDownload sn\u001b[0map "microk8s" (5372) from channel "1.27/stable" 15% 20.1MB/s 7.50' + '\r\u001b[7mDownload snap\u001b[0m "microk8s" (5372) from channel "1.27/stable" 17% 20.9MB/s 7.05' + '\r\u001b[7mDownload snap "\u001b[0mmicrok8s" (5372) from channel "1.27/stable" 19% 22.1MB/s 6.50' + '\r\u001b[7mDownload snap "m\u001b[0microk8s" (5372) from channel "1.27/stable" 21% 22.9MB/s 6.11' + '\r\u001b[7mDownload snap "mic\u001b[0mrok8s" (5372) from channel "1.27/stable" 23% 23.2MB/s 5.90' + '\r\u001b[7mDownload snap "micr\u001b[0mok8s" (5372) from channel "1.27/stable" 25% 23.9MB/s 5.58' + '\r\u001b[7mDownload snap "microk\u001b[0m8s" (5372) from channel "1.27/stable" 27% 24.5MB/s 5.30' + '\r\u001b[7mDownload snap "microk8\u001b[0ms" (5372) from channel "1.27/stable" 29% 24.9MB/s 5.09' + '\r\u001b[7mDownload snap "microk8s"\u001b[0m (5372) from channel "1.27/stable" 31% 25.4MB/s 4.85' + '\r\u001b[7mDownload snap "microk8s" (\u001b[0m5372) from channel "1.27/stable" 33% 25.8MB/s 4.63' + '\r\u001b[7mDownload snap "microk8s" (5\u001b[0m372) from channel "1.27/stable" 35% 26.2MB/s 4.42' + '\r\u001b[7mDownload snap "microk8s" (53\u001b[0m72) from channel "1.27/stable" 36% 26.3MB/s 4.30' + '\r\u001b[7mDownload snap "microk8s" (5372\u001b[0m) from channel "1.27/stable" 38% 26.7MB/s 4.10' + '\r\u001b[7mDownload snap "microk8s" (5372) \u001b[0mfrom channel "1.27/stable" 40% 26.9MB/s 3.95' + '\r\u001b[7mDownload snap "microk8s" (5372) f\u001b[0mrom channel "1.27/stable" 42% 27.2MB/s 3.77' + '\r\u001b[7mDownload snap "microk8s" (5372) fro\u001b[0mm channel "1.27/stable" 44% 27.4MB/s 3.63' + '\r\u001b[7mDownload snap "microk8s" (5372) from\u001b[0m channel "1.27/stable" 46% 27.8MB/s 3.44' + '\r\u001b[7mDownload snap "microk8s" (5372) from c\u001b[0mhannel "1.27/stable" 48% 27.9MB/s 3.31' + '\r\u001b[7mDownload snap "microk8s" (5372) from cha\u001b[0mnnel "1.27/stable" 50% 28.1MB/s 3.15' + '\r\u001b[7mDownload snap "microk8s" (5372) from chan\u001b[0mnel "1.27/stable" 52% 28.3MB/s 3.02' + '\r\u001b[7mDownload snap "microk8s" (5372) from channe\u001b[0ml "1.27/stable" 54% 28.5MB/s 2.87' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel\u001b[0m "1.27/stable" 56% 28.6MB/s 2.75' + '\r\u001b[7mDownload snap 
"microk8s" (5372) from channel \u001b[0m"1.27/stable" 57% 28.7MB/s 2.63' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1\u001b[0m.27/stable" 60% 28.9MB/s 2.47' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.2\u001b[0m7/stable" 62% 29.0MB/s 2.35' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27\u001b[0m/stable" 63% 29.1MB/s 2.23' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/s\u001b[0mtable" 65% 29.2MB/s 2.10' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/st\u001b[0mable" 67% 29.4MB/s 1.97' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stab\u001b[0mle" 69% 29.5MB/s 1.85' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stabl\u001b[0me" 71% 29.5MB/s 1.74' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable"\u001b[0m 73% 29.7MB/s 1.59' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable" \u001b[0m 75% 29.8MB/s 1.48' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable" \u001b[0m 77% 29.8MB/s 1.37' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable" 7\u001b[0m9% 29.9MB/s 1.26' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable" 81\u001b[0m% 30.0MB/s 1.14' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable" 83% \u001b[0m30.1MB/s 1.01' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable" 84% 3\u001b[0m0.1MB/s 919m' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable" 86% 30.\u001b[0m1MB/s 810m' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable" 88% 30.2\u001b[0mMB/s 676m' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable" 91% 30.3MB\u001b[0m/s 555m' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable" 93% 30.4MB/s\u001b[0m 436m' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable" 95% 30.5MB/s \u001b[0m317m' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable" 96% 30.5MB/s 21\u001b[0m1m' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable" 98% 30.5MB/s 117\u001b[0mm' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable" 100% 30.5MB/s 11m\u001b[0m' + '\r\u001b[7mDownload snap "microk8s" (5372) from channel "1.27/stable" 100% 30.0MB/s 0.0ns\u001b[0' + '\rFetch and check assertions for snap "microk8s" (5372) ' + '\rMount snap "microk8s" (5372) \\' + '\rMount snap "microk8s" (5372) ' + '\rMount snap "microk8s" (5372) ' + '\rMount snap "microk8s" (5372) ' + '\rSetup snap "microk8s" (5372) security profiles \\' + '\rSetup snap "microk8s" (5372) security profiles ' + '\rSetup snap "microk8s" (5372) security profiles ' + '\rSetup snap "microk8s" (5372) security profiles ' + '\rSetup snap "microk8s" (5372) security profiles \\' + '\rSetup snap "microk8s" (5372) security profiles ' + '\rSetup snap "microk8s" (5372) security profiles ' + '\rSetup snap "microk8s" (5372) security profiles ' + '\rSetup snap "microk8s" (5372) security profiles \\' + '\rSetup snap "microk8s" (5372) security profiles ' + '\rSetup snap "microk8s" (5372) security profiles ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present \\' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present \\' + '\rRun install 
hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present \\' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present \\' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present \\' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present \\' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present \\' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present \\' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present \\' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present \\' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present \\' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present \\' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present \\' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present \\' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rRun install hook of "microk8s" snap if present ' + '\rStart snap "microk8s" (5372) services \\' + '\rStart snap "microk8s" (5372) services ' + '\rStart snap "microk8s" (5372) services ' + '\rStart snap "microk8s" (5372) services ' + '\rStart snap "microk8s" (5372) services \\' + '\rStart snap "microk8s" (5372) services ' + '\rStart snap "microk8s" (5372) services ' + '\rStart snap "microk8s" (5372) services ' + '\rStart snap "microk8s" (5372) services \\' + '\rStart snap "microk8s" (5372) services ' + '\rStart snap "microk8s" (5372) services ' + '\rStart snap "microk8s" (5372) services ' + '\rStart snap "microk8s" (5372) services \\' + '\rStart snap "microk8s" (5372) services ' + '\rStart snap "microk8s" (5372) services ' + '\rStart snap "microk8s" (5372) services ' + '\rStart snap "microk8s" (5372) services \\' + '\rRun configure 
hook of "microk8s" snap if present ' + '\rRun configure hook of "microk8s" snap if present ' + '\rRun configure hook of "microk8s" snap if present ' + '\rRun configure hook of "microk8s" snap if present \\' + '\rRun configure hook of "microk8s" snap if present ' + '\rRun configure hook of "microk8s" snap if present ' + '\rRun configure hook of "microk8s" snap if present ' + '\rRun configure hook of "microk8s" snap if present \\' + '\rRun configure hook of "microk8s" snap if present ' + '\rRun configure hook of "microk8s" snap if present ' + '\rRun configure hook of "microk8s" snap if present ' + '\rRun configure hook of "microk8s" snap if present \\' + '\rRun configure hook of "microk8s" snap if present ' + '\rRun configure hook of "microk8s" snap if present ' + '\rRun configure hook of "microk8s" snap if present ' + '\rRun configure hook of "microk8s" snap if present \\' + '\rRun configure hook of "microk8s" snap if present ' + '\rRun configure hook of "microk8s" snap if present ' + '\rRun configure hook of "microk8s" snap if present ' + '\rRun configure hook of "microk8s" snap if present \\' + '\rRun configure hook of "microk8s" snap if present ' + '\rRun service command "restart" for services ["daemon-apiserver-proxy"] of snap "' "\r\u001b[0m\u001b[?25h\u001b[Kmicrok8s (1.27/stable) v1.27.2 from Canonical** installed\n" ) issue_6803_kubectl_out = ( - "\rEnsure prerequisites for \"kubectl\" are available /" - "\rDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" " - "\rDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" \\" - "\rDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" " - "\rDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" /\u001b[?25" - "\r\u001b[7m\u001b[0mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 0% 0B/s ages" - "\r\u001b[7m\u001b[0mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 0% 0B/s ages" - "\r\u001b[7m\u001b[0mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 0% 0B/s ages" - "\r\u001b[7m\u001b[0mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 0% 880kB/s 3m21" - "\r\u001b[7m\u001b[0mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 1% 2.82MB/s 1m02" - "\r\u001b[7mD\u001b[0mownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 2% 4.71MB/s 37.0" - "\r\u001b[7mDo\u001b[0mwnload snap \"kubectl\" (5372) from channel \"1.27/stable\" 4% 9.09MB/s 18.8" - "\r\u001b[7mDown\u001b[0mload snap \"kubectl\" (5372) from channel \"1.27/stable\" 6% 12.4MB/s 13.5" - "\r\u001b[7mDownl\u001b[0moad snap \"kubectl\" (5372) from channel \"1.27/stable\" 7% 14.5MB/s 11.3" - "\r\u001b[7mDownloa\u001b[0md snap \"kubectl\" (5372) from channel \"1.27/stable\" 9% 15.9MB/s 10.1" - "\r\u001b[7mDownload \u001b[0msnap \"kubectl\" (5372) from channel \"1.27/stable\" 11% 18.0MB/s 8.75" - "\r\u001b[7mDownload s\u001b[0mnap \"kubectl\" (5372) from channel \"1.27/stable\" 13% 19.4MB/s 7.91" - "\r\u001b[7mDownload sn\u001b[0map \"kubectl\" (5372) from channel \"1.27/stable\" 15% 20.1MB/s 7.50" - "\r\u001b[7mDownload snap\u001b[0m \"kubectl\" (5372) from channel \"1.27/stable\" 17% 20.9MB/s 7.05" - "\r\u001b[7mDownload snap \"\u001b[0mkubectl\" (5372) from channel \"1.27/stable\" 19% 22.1MB/s 6.50" - "\r\u001b[7mDownload snap \"m\u001b[0kubectl\" (5372) from channel \"1.27/stable\" 21% 22.9MB/s 6.11" - "\r\u001b[7mDownload snap \"mic\u001b[0mrok8s\" (5372) from channel \"1.27/stable\" 23% 23.2MB/s 5.90" - "\r\u001b[7mDownload snap \"micr\u001b[0mok8s\" (5372) from channel \"1.27/stable\" 
25% 23.9MB/s 5.58" - "\r\u001b[7mDownload snap \"microk\u001b[0m8s\" (5372) from channel \"1.27/stable\" 27% 24.5MB/s 5.30" - "\r\u001b[7mDownload snap \"microk8\u001b[0ms\" (5372) from channel \"1.27/stable\" 29% 24.9MB/s 5.09" - "\r\u001b[7mDownload snap \"kubectl\"\u001b[0m (5372) from channel \"1.27/stable\" 31% 25.4MB/s 4.85" - "\r\u001b[7mDownload snap \"kubectl\" (\u001b[0m5372) from channel \"1.27/stable\" 33% 25.8MB/s 4.63" - "\r\u001b[7mDownload snap \"kubectl\" (5\u001b[0m372) from channel \"1.27/stable\" 35% 26.2MB/s 4.42" - "\r\u001b[7mDownload snap \"kubectl\" (53\u001b[0m72) from channel \"1.27/stable\" 36% 26.3MB/s 4.30" - "\r\u001b[7mDownload snap \"kubectl\" (5372\u001b[0m) from channel \"1.27/stable\" 38% 26.7MB/s 4.10" - "\r\u001b[7mDownload snap \"kubectl\" (5372) \u001b[0mfrom channel \"1.27/stable\" 40% 26.9MB/s 3.95" - "\r\u001b[7mDownload snap \"kubectl\" (5372) f\u001b[0mrom channel \"1.27/stable\" 42% 27.2MB/s 3.77" - "\r\u001b[7mDownload snap \"kubectl\" (5372) fro\u001b[0mm channel \"1.27/stable\" 44% 27.4MB/s 3.63" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from\u001b[0m channel \"1.27/stable\" 46% 27.8MB/s 3.44" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from c\u001b[0mhannel \"1.27/stable\" 48% 27.9MB/s 3.31" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from cha\u001b[0mnnel \"1.27/stable\" 50% 28.1MB/s 3.15" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from chan\u001b[0mnel \"1.27/stable\" 52% 28.3MB/s 3.02" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channe\u001b[0ml \"1.27/stable\" 54% 28.5MB/s 2.87" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel\u001b[0m \"1.27/stable\" 56% 28.6MB/s 2.75" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \u001b[0m\"1.27/stable\" 57% 28.7MB/s 2.63" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1\u001b[0m.27/stable\" 60% 28.9MB/s 2.47" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.2\u001b[0m7/stable\" 62% 29.0MB/s 2.35" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27\u001b[0m/stable\" 63% 29.1MB/s 2.23" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/s\u001b[0mtable\" 65% 29.2MB/s 2.10" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/st\u001b[0mable\" 67% 29.4MB/s 1.97" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stab\u001b[0mle\" 69% 29.5MB/s 1.85" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stabl\u001b[0me\" 71% 29.5MB/s 1.74" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\"\u001b[0m 73% 29.7MB/s 1.59" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" \u001b[0m 75% 29.8MB/s 1.48" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" \u001b[0m 77% 29.8MB/s 1.37" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 7\u001b[0m9% 29.9MB/s 1.26" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 81\u001b[0m% 30.0MB/s 1.14" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 83% \u001b[0m30.1MB/s 1.01" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 84% 3\u001b[0m0.1MB/s 919m" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 86% 30.\u001b[0m1MB/s 810m" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 88% 30.2\u001b[0mMB/s 676m" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 91% 30.3MB\u001b[0m/s 
555m" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 93% 30.4MB/s\u001b[0m 436m" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 95% 30.5MB/s \u001b[0m317m" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 96% 30.5MB/s 21\u001b[0m1m" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 98% 30.5MB/s 117\u001b[0mm" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 100% 30.5MB/s 11m\u001b[0m" - "\r\u001b[7mDownload snap \"kubectl\" (5372) from channel \"1.27/stable\" 100% 30.0MB/s 0.0ns\u001b[0" - "\rFetch and check assertions for snap \"kubectl\" (5372) " - "\rMount snap \"kubectl\" (5372) \\" - "\rMount snap \"kubectl\" (5372) " - "\rMount snap \"kubectl\" (5372) " - "\rMount snap \"kubectl\" (5372) " - "\rSetup snap \"kubectl\" (5372) security profiles \\" - "\rSetup snap \"kubectl\" (5372) security profiles " - "\rSetup snap \"kubectl\" (5372) security profiles " - "\rSetup snap \"kubectl\" (5372) security profiles " - "\rSetup snap \"kubectl\" (5372) security profiles \\" - "\rSetup snap \"kubectl\" (5372) security profiles " - "\rSetup snap \"kubectl\" (5372) security profiles " - "\rSetup snap \"kubectl\" (5372) security profiles " - "\rSetup snap \"kubectl\" (5372) security profiles \\" - "\rSetup snap \"kubectl\" (5372) security profiles " - "\rSetup snap \"kubectl\" (5372) security profiles " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present \\" - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present \\" - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present \\" - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present \\" - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present \\" - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present \\" - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present \\" - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present \\" - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present \\" - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if 
present \\" - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present \\" - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present \\" - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present \\" - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present \\" - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rRun install hook of \"kubectl\" snap if present " - "\rStart snap \"kubectl\" (5372) services \\" - "\rStart snap \"kubectl\" (5372) services " - "\rStart snap \"kubectl\" (5372) services " - "\rStart snap \"kubectl\" (5372) services " - "\rStart snap \"kubectl\" (5372) services \\" - "\rStart snap \"kubectl\" (5372) services " - "\rStart snap \"kubectl\" (5372) services " - "\rStart snap \"kubectl\" (5372) services " - "\rStart snap \"kubectl\" (5372) services \\" - "\rStart snap \"kubectl\" (5372) services " - "\rStart snap \"kubectl\" (5372) services " - "\rStart snap \"kubectl\" (5372) services " - "\rStart snap \"kubectl\" (5372) services \\" - "\rStart snap \"kubectl\" (5372) services " - "\rStart snap \"kubectl\" (5372) services " - "\rStart snap \"kubectl\" (5372) services " - "\rStart snap \"kubectl\" (5372) services \\" - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun configure hook of \"kubectl\" snap if present \\" - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun configure hook of \"kubectl\" snap if present \\" - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun configure hook of \"kubectl\" snap if present \\" - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun configure hook of \"kubectl\" snap if present \\" - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun configure hook of \"kubectl\" snap if present \\" - "\rRun configure hook of \"kubectl\" snap if present " - "\rRun service command \"restart\" for services [\"daemon-apiserver-proxy\"] of snap \"" + '\rEnsure prerequisites for "kubectl" are available /' + '\rDownload snap "kubectl" (5372) from channel "1.27/stable" ' + '\rDownload snap "kubectl" (5372) from channel "1.27/stable" \\' + '\rDownload snap "kubectl" (5372) from channel "1.27/stable" ' + '\rDownload snap "kubectl" (5372) from channel "1.27/stable" /\u001b[?25' + '\r\u001b[7m\u001b[0mDownload snap "kubectl" (5372) from channel 
"1.27/stable" 0% 0B/s ages' + '\r\u001b[7m\u001b[0mDownload snap "kubectl" (5372) from channel "1.27/stable" 0% 0B/s ages' + '\r\u001b[7m\u001b[0mDownload snap "kubectl" (5372) from channel "1.27/stable" 0% 0B/s ages' + '\r\u001b[7m\u001b[0mDownload snap "kubectl" (5372) from channel "1.27/stable" 0% 880kB/s 3m21' + '\r\u001b[7m\u001b[0mDownload snap "kubectl" (5372) from channel "1.27/stable" 1% 2.82MB/s 1m02' + '\r\u001b[7mD\u001b[0mownload snap "kubectl" (5372) from channel "1.27/stable" 2% 4.71MB/s 37.0' + '\r\u001b[7mDo\u001b[0mwnload snap "kubectl" (5372) from channel "1.27/stable" 4% 9.09MB/s 18.8' + '\r\u001b[7mDown\u001b[0mload snap "kubectl" (5372) from channel "1.27/stable" 6% 12.4MB/s 13.5' + '\r\u001b[7mDownl\u001b[0moad snap "kubectl" (5372) from channel "1.27/stable" 7% 14.5MB/s 11.3' + '\r\u001b[7mDownloa\u001b[0md snap "kubectl" (5372) from channel "1.27/stable" 9% 15.9MB/s 10.1' + '\r\u001b[7mDownload \u001b[0msnap "kubectl" (5372) from channel "1.27/stable" 11% 18.0MB/s 8.75' + '\r\u001b[7mDownload s\u001b[0mnap "kubectl" (5372) from channel "1.27/stable" 13% 19.4MB/s 7.91' + '\r\u001b[7mDownload sn\u001b[0map "kubectl" (5372) from channel "1.27/stable" 15% 20.1MB/s 7.50' + '\r\u001b[7mDownload snap\u001b[0m "kubectl" (5372) from channel "1.27/stable" 17% 20.9MB/s 7.05' + '\r\u001b[7mDownload snap "\u001b[0mkubectl" (5372) from channel "1.27/stable" 19% 22.1MB/s 6.50' + '\r\u001b[7mDownload snap "m\u001b[0kubectl" (5372) from channel "1.27/stable" 21% 22.9MB/s 6.11' + '\r\u001b[7mDownload snap "mic\u001b[0mrok8s" (5372) from channel "1.27/stable" 23% 23.2MB/s 5.90' + '\r\u001b[7mDownload snap "micr\u001b[0mok8s" (5372) from channel "1.27/stable" 25% 23.9MB/s 5.58' + '\r\u001b[7mDownload snap "microk\u001b[0m8s" (5372) from channel "1.27/stable" 27% 24.5MB/s 5.30' + '\r\u001b[7mDownload snap "microk8\u001b[0ms" (5372) from channel "1.27/stable" 29% 24.9MB/s 5.09' + '\r\u001b[7mDownload snap "kubectl"\u001b[0m (5372) from channel "1.27/stable" 31% 25.4MB/s 4.85' + '\r\u001b[7mDownload snap "kubectl" (\u001b[0m5372) from channel "1.27/stable" 33% 25.8MB/s 4.63' + '\r\u001b[7mDownload snap "kubectl" (5\u001b[0m372) from channel "1.27/stable" 35% 26.2MB/s 4.42' + '\r\u001b[7mDownload snap "kubectl" (53\u001b[0m72) from channel "1.27/stable" 36% 26.3MB/s 4.30' + '\r\u001b[7mDownload snap "kubectl" (5372\u001b[0m) from channel "1.27/stable" 38% 26.7MB/s 4.10' + '\r\u001b[7mDownload snap "kubectl" (5372) \u001b[0mfrom channel "1.27/stable" 40% 26.9MB/s 3.95' + '\r\u001b[7mDownload snap "kubectl" (5372) f\u001b[0mrom channel "1.27/stable" 42% 27.2MB/s 3.77' + '\r\u001b[7mDownload snap "kubectl" (5372) fro\u001b[0mm channel "1.27/stable" 44% 27.4MB/s 3.63' + '\r\u001b[7mDownload snap "kubectl" (5372) from\u001b[0m channel "1.27/stable" 46% 27.8MB/s 3.44' + '\r\u001b[7mDownload snap "kubectl" (5372) from c\u001b[0mhannel "1.27/stable" 48% 27.9MB/s 3.31' + '\r\u001b[7mDownload snap "kubectl" (5372) from cha\u001b[0mnnel "1.27/stable" 50% 28.1MB/s 3.15' + '\r\u001b[7mDownload snap "kubectl" (5372) from chan\u001b[0mnel "1.27/stable" 52% 28.3MB/s 3.02' + '\r\u001b[7mDownload snap "kubectl" (5372) from channe\u001b[0ml "1.27/stable" 54% 28.5MB/s 2.87' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel\u001b[0m "1.27/stable" 56% 28.6MB/s 2.75' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel \u001b[0m"1.27/stable" 57% 28.7MB/s 2.63' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1\u001b[0m.27/stable" 60% 28.9MB/s 2.47' + '\r\u001b[7mDownload snap "kubectl" 
(5372) from channel "1.2\u001b[0m7/stable" 62% 29.0MB/s 2.35' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27\u001b[0m/stable" 63% 29.1MB/s 2.23' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/s\u001b[0mtable" 65% 29.2MB/s 2.10' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/st\u001b[0mable" 67% 29.4MB/s 1.97' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stab\u001b[0mle" 69% 29.5MB/s 1.85' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stabl\u001b[0me" 71% 29.5MB/s 1.74' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable"\u001b[0m 73% 29.7MB/s 1.59' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable" \u001b[0m 75% 29.8MB/s 1.48' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable" \u001b[0m 77% 29.8MB/s 1.37' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable" 7\u001b[0m9% 29.9MB/s 1.26' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable" 81\u001b[0m% 30.0MB/s 1.14' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable" 83% \u001b[0m30.1MB/s 1.01' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable" 84% 3\u001b[0m0.1MB/s 919m' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable" 86% 30.\u001b[0m1MB/s 810m' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable" 88% 30.2\u001b[0mMB/s 676m' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable" 91% 30.3MB\u001b[0m/s 555m' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable" 93% 30.4MB/s\u001b[0m 436m' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable" 95% 30.5MB/s \u001b[0m317m' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable" 96% 30.5MB/s 21\u001b[0m1m' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable" 98% 30.5MB/s 117\u001b[0mm' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable" 100% 30.5MB/s 11m\u001b[0m' + '\r\u001b[7mDownload snap "kubectl" (5372) from channel "1.27/stable" 100% 30.0MB/s 0.0ns\u001b[0' + '\rFetch and check assertions for snap "kubectl" (5372) ' + '\rMount snap "kubectl" (5372) \\' + '\rMount snap "kubectl" (5372) ' + '\rMount snap "kubectl" (5372) ' + '\rMount snap "kubectl" (5372) ' + '\rSetup snap "kubectl" (5372) security profiles \\' + '\rSetup snap "kubectl" (5372) security profiles ' + '\rSetup snap "kubectl" (5372) security profiles ' + '\rSetup snap "kubectl" (5372) security profiles ' + '\rSetup snap "kubectl" (5372) security profiles \\' + '\rSetup snap "kubectl" (5372) security profiles ' + '\rSetup snap "kubectl" (5372) security profiles ' + '\rSetup snap "kubectl" (5372) security profiles ' + '\rSetup snap "kubectl" (5372) security profiles \\' + '\rSetup snap "kubectl" (5372) security profiles ' + '\rSetup snap "kubectl" (5372) security profiles ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present \\' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present \\' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present \\' + '\rRun install hook of "kubectl" snap if present ' + '\rRun 
install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present \\' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present \\' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present \\' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present \\' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present \\' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present \\' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present \\' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present \\' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present \\' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present \\' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present \\' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rRun install hook of "kubectl" snap if present ' + '\rStart snap "kubectl" (5372) services \\' + '\rStart snap "kubectl" (5372) services ' + '\rStart snap "kubectl" (5372) services ' + '\rStart snap "kubectl" (5372) services ' + '\rStart snap "kubectl" (5372) services \\' + '\rStart snap "kubectl" (5372) services ' + '\rStart snap "kubectl" (5372) services ' + '\rStart snap "kubectl" (5372) services ' + '\rStart snap "kubectl" (5372) services \\' + '\rStart snap "kubectl" (5372) services ' + '\rStart snap "kubectl" (5372) services ' + '\rStart snap "kubectl" (5372) services ' + '\rStart snap "kubectl" (5372) services \\' + '\rStart snap "kubectl" (5372) services ' + '\rStart snap "kubectl" (5372) services ' + '\rStart snap "kubectl" (5372) services ' + '\rStart snap "kubectl" (5372) services \\' + '\rRun configure hook of "kubectl" snap if present ' + '\rRun configure hook of "kubectl" snap if present ' + '\rRun configure hook of "kubectl" snap if present ' + '\rRun configure hook of "kubectl" snap if present \\' + '\rRun configure hook of "kubectl" snap if present ' + '\rRun configure hook of "kubectl" snap if present ' + '\rRun 
configure hook of "kubectl" snap if present ' + '\rRun configure hook of "kubectl" snap if present \\' + '\rRun configure hook of "kubectl" snap if present ' + '\rRun configure hook of "kubectl" snap if present ' + '\rRun configure hook of "kubectl" snap if present ' + '\rRun configure hook of "kubectl" snap if present \\' + '\rRun configure hook of "kubectl" snap if present ' + '\rRun configure hook of "kubectl" snap if present ' + '\rRun configure hook of "kubectl" snap if present ' + '\rRun configure hook of "kubectl" snap if present \\' + '\rRun configure hook of "kubectl" snap if present ' + '\rRun configure hook of "kubectl" snap if present ' + '\rRun configure hook of "kubectl" snap if present ' + '\rRun configure hook of "kubectl" snap if present \\' + '\rRun configure hook of "kubectl" snap if present ' + '\rRun service command "restart" for services ["daemon-apiserver-proxy"] of snap "' "\r\u001b[0m\u001b[?25h\u001b[Kkubectl (1.27/stable) v1.27.2 from Canonical** installed\n" ) -default_env = {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False} +default_env = {"environ_update": {"LANGUAGE": "C", "LC_ALL": "C"}, "check_rc": False} default_version_out = """\ snap 2.66.1+24.04 snapd 2.66.1+24.04 @@ -394,35 +394,35 @@ mocks=dict( run_command=[ dict( - command=['/testbin/snap', 'version'], + command=["/testbin/snap", "version"], environ=default_env, rc=0, out=default_version_out, err="", ), dict( - command=['/testbin/snap', 'info', 'hello-world'], + command=["/testbin/snap", "info", "hello-world"], environ=default_env, rc=0, - out='name: hello-world\n', + out="name: hello-world\n", err="", ), dict( - command=['/testbin/snap', 'list'], + command=["/testbin/snap", "list"], environ=default_env, rc=0, out="", err="", ), dict( - command=['/testbin/snap', 'install', 'hello-world'], + command=["/testbin/snap", "install", "hello-world"], environ=default_env, rc=0, out="hello-world (12345/stable) v12345 from Canonical** installed\n", err="", ), dict( - command=['/testbin/snap', 'list'], + command=["/testbin/snap", "list"], environ=default_env, rc=0, out=( @@ -431,7 +431,8 @@ "lxd 5.6-794016a 23680 latest/stable/… canonical** -" "hello-world 5.6-794016a 23680 latest/stable/… canonical** -" "snapd 2.57.4 17336 latest/stable canonical** snapd" - ""), + "" + ), err="", ), ], @@ -445,42 +446,42 @@ mocks=dict( run_command=[ dict( - command=['/testbin/snap', 'version'], + command=["/testbin/snap", "version"], environ=default_env, rc=0, out=default_version_out, err="", ), dict( - command=['/testbin/snap', 'info', 'microk8s', 'kubectl'], + command=["/testbin/snap", "info", "microk8s", "kubectl"], environ=default_env, rc=0, - out='name: microk8s\n---\nname: kubectl\n', + out="name: microk8s\n---\nname: kubectl\n", err="", ), dict( - command=['/testbin/snap', 'list'], + command=["/testbin/snap", "list"], environ=default_env, rc=0, out=issue_6803_status_out, err="", ), dict( - command=['/testbin/snap', 'install', '--classic', 'microk8s'], + command=["/testbin/snap", "install", "--classic", "microk8s"], environ=default_env, rc=0, out=issue_6803_microk8s_out, err="", ), dict( - command=['/testbin/snap', 'install', '--classic', 'kubectl'], + command=["/testbin/snap", "install", "--classic", "kubectl"], environ=default_env, rc=0, out=issue_6803_kubectl_out, err="", ), dict( - command=['/testbin/snap', 'list'], + command=["/testbin/snap", "list"], environ=default_env, rc=0, out=( @@ -490,7 +491,8 @@ "microk8s 5.6-794016a 23680 latest/stable/… canonical** -" "kubectl 5.6-794016a 23680 
latest/stable/… canonical** -" "snapd 2.57.4 17336 latest/stable canonical** snapd" - ""), + "" + ), err="", ), ], diff --git a/tests/unit/plugins/modules/test_solaris_zone.py b/tests/unit/plugins/modules/test_solaris_zone.py index 5c4dcc70755..f03b8b675e7 100644 --- a/tests/unit/plugins/modules/test_solaris_zone.py +++ b/tests/unit/plugins/modules/test_solaris_zone.py @@ -9,9 +9,7 @@ import pytest from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.modules import ( - solaris_zone -) +from ansible_collections.community.general.plugins.modules import solaris_zone from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( set_module_args, ) @@ -98,7 +96,7 @@ def test_zone_create_invalid_names(mocked_zone_create, capfd): # 1. Invalid character ('!'). # 2. Zone name > 64 characters. # 3. Zone name beginning with non-alphanumeric character. - for invalid_name in ('foo!bar', 'z' * 65, '_zone'): + for invalid_name in ("foo!bar", "z" * 65, "_zone"): with set_module_args( { "name": invalid_name, diff --git a/tests/unit/plugins/modules/test_ss_3par_cpg.py b/tests/unit/plugins/modules/test_ss_3par_cpg.py index 64c0eab7e6c..f4658d287f7 100644 --- a/tests/unit/plugins/modules/test_ss_3par_cpg.py +++ b/tests/unit/plugins/modules/test_ss_3par_cpg.py @@ -6,40 +6,41 @@ import sys from unittest import mock -sys.modules['hpe3par_sdk'] = mock.Mock() -sys.modules['hpe3par_sdk.client'] = mock.Mock() -sys.modules['hpe3parclient'] = mock.Mock() -sys.modules['hpe3parclient.exceptions'] = mock.Mock() + +sys.modules["hpe3par_sdk"] = mock.Mock() +sys.modules["hpe3par_sdk.client"] = mock.Mock() +sys.modules["hpe3parclient"] = mock.Mock() +sys.modules["hpe3parclient.exceptions"] = mock.Mock() from ansible_collections.community.general.plugins.modules import ss_3par_cpg from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par -@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.client') -@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.AnsibleModule') -@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.create_cpg') +@mock.patch("ansible_collections.community.general.plugins.modules.ss_3par_cpg.client") +@mock.patch("ansible_collections.community.general.plugins.modules.ss_3par_cpg.AnsibleModule") +@mock.patch("ansible_collections.community.general.plugins.modules.ss_3par_cpg.create_cpg") def test_module_args(mock_create_cpg, mock_module, mock_client): """ hpe3par CPG - test module arguments """ PARAMS_FOR_PRESENT = { - 'storage_system_ip': '192.168.0.1', - 'storage_system_username': 'USER', - 'storage_system_password': 'PASS', - 'cpg_name': 'test_cpg', - 'domain': 'test_domain', - 'growth_increment': 32768, - 'growth_increment_unit': 'MiB', - 'growth_limit': 32768, - 'growth_limit_unit': 'MiB', - 'growth_warning': 32768, - 'growth_warning_unit': 'MiB', - 'raid_type': 'R6', - 'set_size': 8, - 'high_availability': 'MAG', - 'disk_type': 'FC', - 'state': 'present', - 'secure': False + "storage_system_ip": "192.168.0.1", + "storage_system_username": "USER", + "storage_system_password": "PASS", + "cpg_name": "test_cpg", + "domain": "test_domain", + "growth_increment": 32768, + "growth_increment_unit": "MiB", + "growth_limit": 32768, + "growth_limit_unit": "MiB", + "growth_warning": 32768, + "growth_warning_unit": "MiB", + "raid_type": "R6", + "set_size": 8, + "high_availability": "MAG", + "disk_type": "FC", + 
"state": "present", + "secure": False, } mock_module.params = PARAMS_FOR_PRESENT mock_module.return_value = mock_module @@ -47,112 +48,109 @@ def test_module_args(mock_create_cpg, mock_module, mock_client): mock_create_cpg.return_value = (True, True, "Created CPG successfully.") ss_3par_cpg.main() mock_module.assert_called_with( - argument_spec=hpe3par.cpg_argument_spec(), - required_together=[['raid_type', 'set_size']]) + argument_spec=hpe3par.cpg_argument_spec(), required_together=[["raid_type", "set_size"]] + ) -@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.client') -@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.AnsibleModule') -@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.create_cpg') +@mock.patch("ansible_collections.community.general.plugins.modules.ss_3par_cpg.client") +@mock.patch("ansible_collections.community.general.plugins.modules.ss_3par_cpg.AnsibleModule") +@mock.patch("ansible_collections.community.general.plugins.modules.ss_3par_cpg.create_cpg") def test_main_exit_functionality_present_success_without_issue_attr_dict(mock_create_cpg, mock_module, mock_client): """ hpe3par flash cache - success check """ PARAMS_FOR_PRESENT = { - 'storage_system_ip': '192.168.0.1', - 'storage_system_name': '3PAR', - 'storage_system_username': 'USER', - 'storage_system_password': 'PASS', - 'cpg_name': 'test_cpg', - 'domain': 'test_domain', - 'growth_increment': 32768, - 'growth_increment_unit': 'MiB', - 'growth_limit': 32768, - 'growth_limit_unit': 'MiB', - 'growth_warning': 32768, - 'growth_warning_unit': 'MiB', - 'raid_type': 'R6', - 'set_size': 8, - 'high_availability': 'MAG', - 'disk_type': 'FC', - 'state': 'present', - 'secure': False + "storage_system_ip": "192.168.0.1", + "storage_system_name": "3PAR", + "storage_system_username": "USER", + "storage_system_password": "PASS", + "cpg_name": "test_cpg", + "domain": "test_domain", + "growth_increment": 32768, + "growth_increment_unit": "MiB", + "growth_limit": 32768, + "growth_limit_unit": "MiB", + "growth_warning": 32768, + "growth_warning_unit": "MiB", + "raid_type": "R6", + "set_size": 8, + "high_availability": "MAG", + "disk_type": "FC", + "state": "present", + "secure": False, } # This creates a instance of the AnsibleModule mock. 
mock_module.params = PARAMS_FOR_PRESENT mock_module.return_value = mock_module instance = mock_module.return_value mock_client.HPE3ParClient.login.return_value = True - mock_create_cpg.return_value = ( - True, True, "Created CPG successfully.") + mock_create_cpg.return_value = (True, True, "Created CPG successfully.") ss_3par_cpg.main() # AnsibleModule.exit_json should be called - instance.exit_json.assert_called_with( - changed=True, msg="Created CPG successfully.") + instance.exit_json.assert_called_with(changed=True, msg="Created CPG successfully.") # AnsibleModule.fail_json should not be called assert instance.fail_json.call_count == 0 -@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.client') -@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.AnsibleModule') -@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.delete_cpg') +@mock.patch("ansible_collections.community.general.plugins.modules.ss_3par_cpg.client") +@mock.patch("ansible_collections.community.general.plugins.modules.ss_3par_cpg.AnsibleModule") +@mock.patch("ansible_collections.community.general.plugins.modules.ss_3par_cpg.delete_cpg") def test_main_exit_functionality_absent_success_without_issue_attr_dict(mock_delete_cpg, mock_module, mock_client): """ hpe3par flash cache - success check """ PARAMS_FOR_DELETE = { - 'storage_system_ip': '192.168.0.1', - 'storage_system_name': '3PAR', - 'storage_system_username': 'USER', - 'storage_system_password': 'PASS', - 'cpg_name': 'test_cpg', - 'domain': None, - 'growth_increment': None, - 'growth_increment_unit': None, - 'growth_limit': None, - 'growth_limit_unit': None, - 'growth_warning': None, - 'growth_warning_unit': None, - 'raid_type': None, - 'set_size': None, - 'high_availability': None, - 'disk_type': None, - 'state': 'absent', - 'secure': False + "storage_system_ip": "192.168.0.1", + "storage_system_name": "3PAR", + "storage_system_username": "USER", + "storage_system_password": "PASS", + "cpg_name": "test_cpg", + "domain": None, + "growth_increment": None, + "growth_increment_unit": None, + "growth_limit": None, + "growth_limit_unit": None, + "growth_warning": None, + "growth_warning_unit": None, + "raid_type": None, + "set_size": None, + "high_availability": None, + "disk_type": None, + "state": "absent", + "secure": False, } # This creates a instance of the AnsibleModule mock. 
mock_module.params = PARAMS_FOR_DELETE mock_module.return_value = mock_module instance = mock_module.return_value - mock_delete_cpg.return_value = ( - True, True, "Deleted CPG test_cpg successfully.") + mock_delete_cpg.return_value = (True, True, "Deleted CPG test_cpg successfully.") mock_client.HPE3ParClient.login.return_value = True ss_3par_cpg.main() # AnsibleModule.exit_json should be called - instance.exit_json.assert_called_with( - changed=True, msg="Deleted CPG test_cpg successfully.") + instance.exit_json.assert_called_with(changed=True, msg="Deleted CPG test_cpg successfully.") # AnsibleModule.fail_json should not be called assert instance.fail_json.call_count == 0 def test_convert_to_binary_multiple(): assert hpe3par.convert_to_binary_multiple(None) == -1 - assert hpe3par.convert_to_binary_multiple('-1.0 MiB') == -1 - assert hpe3par.convert_to_binary_multiple('-1.0GiB') == -1 - assert hpe3par.convert_to_binary_multiple('1.0 MiB') == 1 - assert hpe3par.convert_to_binary_multiple('1.5GiB') == 1.5 * 1024 - assert hpe3par.convert_to_binary_multiple('1.5 TiB') == 1.5 * 1024 * 1024 - assert hpe3par.convert_to_binary_multiple(' 1.5 TiB ') == 1.5 * 1024 * 1024 + assert hpe3par.convert_to_binary_multiple("-1.0 MiB") == -1 + assert hpe3par.convert_to_binary_multiple("-1.0GiB") == -1 + assert hpe3par.convert_to_binary_multiple("1.0 MiB") == 1 + assert hpe3par.convert_to_binary_multiple("1.5GiB") == 1.5 * 1024 + assert hpe3par.convert_to_binary_multiple("1.5 TiB") == 1.5 * 1024 * 1024 + assert hpe3par.convert_to_binary_multiple(" 1.5 TiB ") == 1.5 * 1024 * 1024 -@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.client') +@mock.patch("ansible_collections.community.general.plugins.modules.ss_3par_cpg.client") def test_validate_set_size(mock_client): - mock_client.HPE3ParClient.RAID_MAP = {'R0': {'raid_value': 1, 'set_sizes': [1]}, - 'R1': {'raid_value': 2, 'set_sizes': [2, 3, 4]}, - 'R5': {'raid_value': 3, 'set_sizes': [3, 4, 5, 6, 7, 8, 9]}, - 'R6': {'raid_value': 4, 'set_sizes': [6, 8, 10, 12, 16]} - } - raid_type = 'R0' + mock_client.HPE3ParClient.RAID_MAP = { + "R0": {"raid_value": 1, "set_sizes": [1]}, + "R1": {"raid_value": 2, "set_sizes": [2, 3, 4]}, + "R5": {"raid_value": 3, "set_sizes": [3, 4, 5, 6, 7, 8, 9]}, + "R6": {"raid_value": 4, "set_sizes": [6, 8, 10, 12, 16]}, + } + raid_type = "R0" set_size = 1 assert ss_3par_cpg.validate_set_size(raid_type, set_size) @@ -163,85 +161,85 @@ def test_validate_set_size(mock_client): assert not ss_3par_cpg.validate_set_size(raid_type, set_size) -@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.client') +@mock.patch("ansible_collections.community.general.plugins.modules.ss_3par_cpg.client") def test_cpg_ldlayout_map(mock_client): mock_client.HPE3ParClient.PORT = 1 - mock_client.HPE3ParClient.RAID_MAP = {'R0': {'raid_value': 1, 'set_sizes': [1]}, - 'R1': {'raid_value': 2, 'set_sizes': [2, 3, 4]}, - 'R5': {'raid_value': 3, 'set_sizes': [3, 4, 5, 6, 7, 8, 9]}, - 'R6': {'raid_value': 4, 'set_sizes': [6, 8, 10, 12, 16]} - } - ldlayout_dict = {'RAIDType': 'R6', 'HA': 'PORT'} - assert ss_3par_cpg.cpg_ldlayout_map(ldlayout_dict) == { - 'RAIDType': 4, 'HA': 1} + mock_client.HPE3ParClient.RAID_MAP = { + "R0": {"raid_value": 1, "set_sizes": [1]}, + "R1": {"raid_value": 2, "set_sizes": [2, 3, 4]}, + "R5": {"raid_value": 3, "set_sizes": [3, 4, 5, 6, 7, 8, 9]}, + "R6": {"raid_value": 4, "set_sizes": [6, 8, 10, 12, 16]}, + } + ldlayout_dict = {"RAIDType": "R6", "HA": "PORT"} + assert 
ss_3par_cpg.cpg_ldlayout_map(ldlayout_dict) == {"RAIDType": 4, "HA": 1} -@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.client') +@mock.patch("ansible_collections.community.general.plugins.modules.ss_3par_cpg.client") def test_create_cpg(mock_client): ss_3par_cpg.validate_set_size = mock.Mock(return_value=True) - ss_3par_cpg.cpg_ldlayout_map = mock.Mock( - return_value={'RAIDType': 4, 'HA': 1}) + ss_3par_cpg.cpg_ldlayout_map = mock.Mock(return_value={"RAIDType": 4, "HA": 1}) mock_client.HPE3ParClient.login.return_value = True mock_client.HPE3ParClient.cpgExists.return_value = False mock_client.HPE3ParClient.FC = 1 mock_client.HPE3ParClient.createCPG.return_value = True - assert ss_3par_cpg.create_cpg(mock_client.HPE3ParClient, - 'test_cpg', - 'test_domain', - '32768 MiB', - '32768 MiB', - '32768 MiB', - 'R6', - 8, - 'MAG', - 'FC' - ) == (True, True, "Created CPG test_cpg successfully.") + assert ss_3par_cpg.create_cpg( + mock_client.HPE3ParClient, + "test_cpg", + "test_domain", + "32768 MiB", + "32768 MiB", + "32768 MiB", + "R6", + 8, + "MAG", + "FC", + ) == (True, True, "Created CPG test_cpg successfully.") mock_client.HPE3ParClient.cpgExists.return_value = True - assert ss_3par_cpg.create_cpg(mock_client.HPE3ParClient, - 'test_cpg', - 'test_domain', - '32768.0 MiB', - '32768.0 MiB', - '32768.0 MiB', - 'R6', - 8, - 'MAG', - 'FC' - ) == (True, False, 'CPG already present') + assert ss_3par_cpg.create_cpg( + mock_client.HPE3ParClient, + "test_cpg", + "test_domain", + "32768.0 MiB", + "32768.0 MiB", + "32768.0 MiB", + "R6", + 8, + "MAG", + "FC", + ) == (True, False, "CPG already present") ss_3par_cpg.validate_set_size = mock.Mock(return_value=False) - assert ss_3par_cpg.create_cpg(mock_client.HPE3ParClient, - 'test_cpg', - 'test_domain', - '32768.0 MiB', - '32768 MiB', - '32768.0 MiB', - 'R6', - 3, - 'MAG', - 'FC' - ) == (False, False, 'Set size 3 not part of RAID set R6') - - -@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.client') + assert ss_3par_cpg.create_cpg( + mock_client.HPE3ParClient, + "test_cpg", + "test_domain", + "32768.0 MiB", + "32768 MiB", + "32768.0 MiB", + "R6", + 3, + "MAG", + "FC", + ) == (False, False, "Set size 3 not part of RAID set R6") + + +@mock.patch("ansible_collections.community.general.plugins.modules.ss_3par_cpg.client") def test_delete_cpg(mock_client): mock_client.HPE3ParClient.login.return_value = True mock_client.HPE3ParClient.cpgExists.return_value = True mock_client.HPE3ParClient.FC = 1 mock_client.HPE3ParClient.deleteCPG.return_value = True - assert ss_3par_cpg.delete_cpg(mock_client.HPE3ParClient, - 'test_cpg' - ) == (True, True, "Deleted CPG test_cpg successfully.") + assert ss_3par_cpg.delete_cpg(mock_client.HPE3ParClient, "test_cpg") == ( + True, + True, + "Deleted CPG test_cpg successfully.", + ) mock_client.HPE3ParClient.cpgExists.return_value = False - assert ss_3par_cpg.delete_cpg(mock_client.HPE3ParClient, - 'test_cpg' - ) == (True, False, "CPG does not exist") - assert ss_3par_cpg.delete_cpg(mock_client.HPE3ParClient, - None - ) == (True, False, "CPG does not exist") + assert ss_3par_cpg.delete_cpg(mock_client.HPE3ParClient, "test_cpg") == (True, False, "CPG does not exist") + assert ss_3par_cpg.delete_cpg(mock_client.HPE3ParClient, None) == (True, False, "CPG does not exist") diff --git a/tests/unit/plugins/modules/test_statsd.py b/tests/unit/plugins/modules/test_statsd.py index f077f4cfbaa..a02f07415ab 100644 --- a/tests/unit/plugins/modules/test_statsd.py +++ 
b/tests/unit/plugins/modules/test_statsd.py @@ -7,11 +7,15 @@ from unittest.mock import patch, MagicMock from ansible_collections.community.general.plugins.modules import statsd -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) class FakeStatsD(MagicMock): - def incr(self, *args, **kwargs): pass @@ -23,7 +27,6 @@ def close(self, *args, **kwargs): class TestStatsDModule(ModuleTestCase): - def setUp(self): super().setUp() statsd.HAS_STATSD = True @@ -33,10 +36,14 @@ def tearDown(self): super().tearDown() def patch_udp_statsd_client(self, **kwargs): - return patch('ansible_collections.community.general.plugins.modules.statsd.udp_statsd_client', autospec=True, **kwargs) + return patch( + "ansible_collections.community.general.plugins.modules.statsd.udp_statsd_client", autospec=True, **kwargs + ) def patch_tcp_statsd_client(self, **kwargs): - return patch('ansible_collections.community.general.plugins.modules.statsd.tcp_statsd_client', autospec=True, **kwargs) + return patch( + "ansible_collections.community.general.plugins.modules.statsd.tcp_statsd_client", autospec=True, **kwargs + ) def test_udp_without_parameters(self): """Test udp without parameters""" @@ -56,46 +63,54 @@ def test_udp_with_parameters(self): """Test udp with parameters""" with self.patch_udp_statsd_client(side_effect=FakeStatsD) as fake_statsd: with self.assertRaises(AnsibleExitJson) as result: - with set_module_args({ - 'metric': 'my_counter', - 'metric_type': 'counter', - 'value': 1, - }): + with set_module_args( + { + "metric": "my_counter", + "metric_type": "counter", + "value": 1, + } + ): self.module.main() - self.assertEqual(result.exception.args[0]['msg'], 'Sent counter my_counter -> 1 to StatsD') - self.assertEqual(result.exception.args[0]['changed'], True) + self.assertEqual(result.exception.args[0]["msg"], "Sent counter my_counter -> 1 to StatsD") + self.assertEqual(result.exception.args[0]["changed"], True) with self.patch_udp_statsd_client(side_effect=FakeStatsD) as fake_statsd: with self.assertRaises(AnsibleExitJson) as result: - with set_module_args({ - 'metric': 'my_gauge', - 'metric_type': 'gauge', - 'value': 3, - }): + with set_module_args( + { + "metric": "my_gauge", + "metric_type": "gauge", + "value": 3, + } + ): self.module.main() - self.assertEqual(result.exception.args[0]['msg'], 'Sent gauge my_gauge -> 3 (delta=False) to StatsD') - self.assertEqual(result.exception.args[0]['changed'], True) + self.assertEqual(result.exception.args[0]["msg"], "Sent gauge my_gauge -> 3 (delta=False) to StatsD") + self.assertEqual(result.exception.args[0]["changed"], True) def test_tcp_with_parameters(self): """Test tcp with parameters""" with self.patch_tcp_statsd_client(side_effect=FakeStatsD) as fake_statsd: with self.assertRaises(AnsibleExitJson) as result: - with set_module_args({ - 'protocol': 'tcp', - 'metric': 'my_counter', - 'metric_type': 'counter', - 'value': 1, - }): + with set_module_args( + { + "protocol": "tcp", + "metric": "my_counter", + "metric_type": "counter", + "value": 1, + } + ): self.module.main() - self.assertEqual(result.exception.args[0]['msg'], 'Sent counter my_counter -> 1 to StatsD') - self.assertEqual(result.exception.args[0]['changed'], True) + self.assertEqual(result.exception.args[0]["msg"], "Sent 
counter my_counter -> 1 to StatsD") + self.assertEqual(result.exception.args[0]["changed"], True) with self.patch_tcp_statsd_client(side_effect=FakeStatsD) as fake_statsd: with self.assertRaises(AnsibleExitJson) as result: - with set_module_args({ - 'protocol': 'tcp', - 'metric': 'my_gauge', - 'metric_type': 'gauge', - 'value': 3, - }): + with set_module_args( + { + "protocol": "tcp", + "metric": "my_gauge", + "metric_type": "gauge", + "value": 3, + } + ): self.module.main() - self.assertEqual(result.exception.args[0]['msg'], 'Sent gauge my_gauge -> 3 (delta=False) to StatsD') - self.assertEqual(result.exception.args[0]['changed'], True) + self.assertEqual(result.exception.args[0]["msg"], "Sent gauge my_gauge -> 3 (delta=False) to StatsD") + self.assertEqual(result.exception.args[0]["changed"], True) diff --git a/tests/unit/plugins/modules/test_sysupgrade.py b/tests/unit/plugins/modules/test_sysupgrade.py index 39cc6e0bf98..544037caade 100644 --- a/tests/unit/plugins/modules/test_sysupgrade.py +++ b/tests/unit/plugins/modules/test_sysupgrade.py @@ -6,16 +6,20 @@ from unittest.mock import patch from ansible.module_utils import basic -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args, AnsibleExitJson, AnsibleFailJson, ModuleTestCase +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + set_module_args, + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, +) from ansible_collections.community.general.plugins.modules import sysupgrade class TestSysupgradeModule(ModuleTestCase): - def setUp(self): super().setUp() self.module = sysupgrade - self.mock_get_bin_path = (patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')) + self.mock_get_bin_path = patch("ansible.module_utils.basic.AnsibleModule.get_bin_path") self.get_bin_path = self.mock_get_bin_path.start() def tearDown(self): @@ -23,7 +27,7 @@ def tearDown(self): self.mock_get_bin_path.stop() def test_upgrade_success(self): - """ Upgrade was successful """ + """Upgrade was successful""" rc = 0 stdout = """ @@ -52,10 +56,10 @@ def test_upgrade_success(self): run_command.return_value = (rc, stdout, stderr) with self.assertRaises(AnsibleExitJson) as result: self.module.main() - self.assertTrue(result.exception.args[0]['changed']) + self.assertTrue(result.exception.args[0]["changed"]) def test_upgrade_failed(self): - """ Upgrade failed """ + """Upgrade failed""" rc = 1 stdout = "" @@ -66,5 +70,5 @@ def test_upgrade_failed(self): run_command_mock.return_value = (rc, stdout, stderr) with self.assertRaises(AnsibleFailJson) as result: self.module.main() - self.assertTrue(result.exception.args[0]['failed']) - self.assertIn('need root', result.exception.args[0]['msg']) + self.assertTrue(result.exception.args[0]["failed"]) + self.assertIn("need root", result.exception.args[0]["msg"]) diff --git a/tests/unit/plugins/modules/test_terraform.py b/tests/unit/plugins/modules/test_terraform.py index deff4a7770e..d67902a8090 100644 --- a/tests/unit/plugins/modules/test_terraform.py +++ b/tests/unit/plugins/modules/test_terraform.py @@ -18,5 +18,5 @@ def test_terraform_without_argument(capfd): out, err = capfd.readouterr() assert not err - assert json.loads(out)['failed'] - assert 'project_path' in json.loads(out)['msg'] + assert json.loads(out)["failed"] + assert "project_path" in json.loads(out)["msg"] diff --git a/tests/unit/plugins/modules/test_ufw.py b/tests/unit/plugins/modules/test_ufw.py index 9131dc7f966..c3d40ec37bf 100644 --- 
a/tests/unit/plugins/modules/test_ufw.py +++ b/tests/unit/plugins/modules/test_ufw.py @@ -31,7 +31,9 @@ ufw_status_verbose_with_port_7000 = ( - f"{ufw_verbose_header}\n7000/tcp ALLOW IN Anywhere\n7000/tcp (v6) ALLOW IN Anywhere (v6)\n") + f"{ufw_verbose_header}\n7000/tcp ALLOW IN Anywhere\n" + "7000/tcp (v6) ALLOW IN Anywhere (v6)\n" +) user_rules_with_port_7000 = """### tuple ### allow tcp 7000 0.0.0.0/0 any 0.0.0.0/0 in ### tuple ### allow tcp 7000 ::/0 any ::/0 in @@ -42,13 +44,17 @@ """ ufw_status_verbose_with_ipv6 = ( - f"{ufw_verbose_header}\n5353/udp ALLOW IN 224.0.0.251\n5353/udp ALLOW IN ff02::fb\n") + f"{ufw_verbose_header}\n5353/udp ALLOW IN 224.0.0.251\n" + "5353/udp ALLOW IN ff02::fb\n" +) ufw_status_verbose_nothing = ufw_verbose_header skippg_adding_existing_rules = "Skipping adding existing rule\nSkipping adding existing rule (v6)\n" -grep_config_cli = "grep -h '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules " +grep_config_cli = ( + "grep -h '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules " +) grep_config_cli += "/var/lib/ufw/user.rules /var/lib/ufw/user6.rules" dry_mode_cmd_with_port_700 = { @@ -63,7 +69,7 @@ "ufw --dry-run allow in on foo from 1.1.1.1 port 7002 to 8.8.8.8 port 7003 proto tcp": "", "ufw --dry-run allow out on foo from any to any port 7004 proto tcp": "", "ufw --dry-run allow out on foo from 1.1.1.1 port 7003 to 8.8.8.8 port 7004 proto tcp": "", - grep_config_cli: user_rules_with_port_7000 + grep_config_cli: user_rules_with_port_7000, } # setup configuration : @@ -81,14 +87,14 @@ "ufw --dry-run allow from 224.0.0.252 to any port 5353 proto udp": """### tuple ### allow udp 5353 0.0.0.0/0 any 224.0.0.251 in ### tuple ### allow udp 5353 0.0.0.0/0 any 224.0.0.252 in """, - "ufw --dry-run allow from 10.0.0.0/24 to any port 1577 proto udp": "### tuple ### allow udp 1577 0.0.0.0/0 any 10.0.0.0/24 in" + "ufw --dry-run allow from 10.0.0.0/24 to any port 1577 proto udp": "### tuple ### allow udp 1577 0.0.0.0/0 any 10.0.0.0/24 in", } dry_mode_cmd_nothing = { "ufw status verbose": ufw_status_verbose_nothing, "ufw --version": ufw_version_35, grep_config_cli: "", - "ufw --dry-run allow from any to :: port 23": "### tuple ### allow any 23 :: any ::/0 in" + "ufw --dry-run allow from any to :: port 23": "### tuple ### allow any 23 :: any ::/0 in", } @@ -110,12 +116,10 @@ def get_bin_path(self, arg, required=False): class TestUFW(unittest.TestCase): - def setUp(self): - self.mock_module_helper = patch.multiple(basic.AnsibleModule, - exit_json=exit_json, - fail_json=fail_json, - get_bin_path=get_bin_path) + self.mock_module_helper = patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json, get_bin_path=get_bin_path + ) self.mock_module_helper.start() self.addCleanup(self.mock_module_helper.stop) @@ -142,295 +146,286 @@ def test_filter_line_that_contains_ipv6(self): self.assertTrue(reg.match("::") is not None) def test_check_mode_add_rules(self): - with set_module_args({ - 'rule': 'allow', - 'proto': 'tcp', - 'port': '7000', - '_ansible_check_mode': True - }): + with set_module_args({"rule": "allow", "proto": "tcp", "port": "7000", "_ansible_check_mode": True}): result = self.__getResult(do_nothing_func_port_7000) - self.assertFalse(result.exception.args[0]['changed']) + self.assertFalse(result.exception.args[0]["changed"]) def test_check_mode_add_insert_rules(self): - with set_module_args({ - 'insert': '1', - 'rule': 'allow', - 'proto': 'tcp', - 'port': '7000', 
- '_ansible_check_mode': True - }): + with set_module_args( + {"insert": "1", "rule": "allow", "proto": "tcp", "port": "7000", "_ansible_check_mode": True} + ): result = self.__getResult(do_nothing_func_port_7000) - self.assertFalse(result.exception.args[0]['changed']) + self.assertFalse(result.exception.args[0]["changed"]) def test_check_mode_add_detailed_route(self): - with set_module_args({ - 'rule': 'allow', - 'route': 'yes', - 'interface_in': 'foo', - 'interface_out': 'bar', - 'proto': 'tcp', - 'from_ip': '1.1.1.1', - 'to_ip': '8.8.8.8', - 'from_port': '7000', - 'to_port': '7001', - '_ansible_check_mode': True - }): + with set_module_args( + { + "rule": "allow", + "route": "yes", + "interface_in": "foo", + "interface_out": "bar", + "proto": "tcp", + "from_ip": "1.1.1.1", + "to_ip": "8.8.8.8", + "from_port": "7000", + "to_port": "7001", + "_ansible_check_mode": True, + } + ): result = self.__getResult(do_nothing_func_port_7000) - self.assertTrue(result.exception.args[0]['changed']) + self.assertTrue(result.exception.args[0]["changed"]) def test_check_mode_add_ambiguous_route(self): - with set_module_args({ - 'rule': 'allow', - 'route': 'yes', - 'interface_in': 'foo', - 'interface_out': 'bar', - 'direction': 'in', - 'interface': 'baz', - '_ansible_check_mode': True - }): + with set_module_args( + { + "rule": "allow", + "route": "yes", + "interface_in": "foo", + "interface_out": "bar", + "direction": "in", + "interface": "baz", + "_ansible_check_mode": True, + } + ): with self.assertRaises(AnsibleFailJson) as result: self.__getResult(do_nothing_func_port_7000) exc = result.exception.args[0] - self.assertTrue(exc['failed']) - self.assertIn('mutually exclusive', exc['msg']) + self.assertTrue(exc["failed"]) + self.assertIn("mutually exclusive", exc["msg"]) def test_check_mode_add_interface_in(self): - with set_module_args({ - 'rule': 'allow', - 'proto': 'tcp', - 'port': '7003', - 'interface_in': 'foo', - '_ansible_check_mode': True - }): + with set_module_args( + {"rule": "allow", "proto": "tcp", "port": "7003", "interface_in": "foo", "_ansible_check_mode": True} + ): result = self.__getResult(do_nothing_func_port_7000) - self.assertTrue(result.exception.args[0]['changed']) + self.assertTrue(result.exception.args[0]["changed"]) def test_check_mode_add_interface_out(self): - with set_module_args({ - 'rule': 'allow', - 'proto': 'tcp', - 'port': '7004', - 'interface_out': 'foo', - '_ansible_check_mode': True - }): + with set_module_args( + {"rule": "allow", "proto": "tcp", "port": "7004", "interface_out": "foo", "_ansible_check_mode": True} + ): result = self.__getResult(do_nothing_func_port_7000) - self.assertTrue(result.exception.args[0]['changed']) + self.assertTrue(result.exception.args[0]["changed"]) def test_check_mode_add_non_route_interface_both(self): - with set_module_args({ - 'rule': 'allow', - 'proto': 'tcp', - 'port': '7004', - 'interface_in': 'foo', - 'interface_out': 'bar', - '_ansible_check_mode': True - }): + with set_module_args( + { + "rule": "allow", + "proto": "tcp", + "port": "7004", + "interface_in": "foo", + "interface_out": "bar", + "_ansible_check_mode": True, + } + ): with self.assertRaises(AnsibleFailJson) as result: self.__getResult(do_nothing_func_port_7000) exc = result.exception.args[0] - self.assertTrue(exc['failed']) - self.assertIn('combine', exc['msg']) + self.assertTrue(exc["failed"]) + self.assertIn("combine", exc["msg"]) def test_check_mode_add_direction_in(self): - with set_module_args({ - 'rule': 'allow', - 'proto': 'tcp', - 'port': '7003', - 
'direction': 'in', - 'interface': 'foo', - '_ansible_check_mode': True - }): + with set_module_args( + { + "rule": "allow", + "proto": "tcp", + "port": "7003", + "direction": "in", + "interface": "foo", + "_ansible_check_mode": True, + } + ): result = self.__getResult(do_nothing_func_port_7000) - self.assertTrue(result.exception.args[0]['changed']) + self.assertTrue(result.exception.args[0]["changed"]) def test_check_mode_add_direction_in_with_ip(self): - with set_module_args({ - 'rule': 'allow', - 'proto': 'tcp', - 'from_ip': '1.1.1.1', - 'from_port': '7002', - 'to_ip': '8.8.8.8', - 'to_port': '7003', - 'direction': 'in', - 'interface': 'foo', - '_ansible_check_mode': True - }): + with set_module_args( + { + "rule": "allow", + "proto": "tcp", + "from_ip": "1.1.1.1", + "from_port": "7002", + "to_ip": "8.8.8.8", + "to_port": "7003", + "direction": "in", + "interface": "foo", + "_ansible_check_mode": True, + } + ): result = self.__getResult(do_nothing_func_port_7000) - self.assertTrue(result.exception.args[0]['changed']) + self.assertTrue(result.exception.args[0]["changed"]) def test_check_mode_add_direction_out(self): - with set_module_args({ - 'rule': 'allow', - 'proto': 'tcp', - 'port': '7004', - 'direction': 'out', - 'interface': 'foo', - '_ansible_check_mode': True - }): + with set_module_args( + { + "rule": "allow", + "proto": "tcp", + "port": "7004", + "direction": "out", + "interface": "foo", + "_ansible_check_mode": True, + } + ): result = self.__getResult(do_nothing_func_port_7000) - self.assertTrue(result.exception.args[0]['changed']) + self.assertTrue(result.exception.args[0]["changed"]) def test_check_mode_add_direction_out_with_ip(self): - with set_module_args({ - 'rule': 'allow', - 'proto': 'tcp', - 'from_ip': '1.1.1.1', - 'from_port': '7003', - 'to_ip': '8.8.8.8', - 'to_port': '7004', - 'direction': 'out', - 'interface': 'foo', - '_ansible_check_mode': True - }): + with set_module_args( + { + "rule": "allow", + "proto": "tcp", + "from_ip": "1.1.1.1", + "from_port": "7003", + "to_ip": "8.8.8.8", + "to_port": "7004", + "direction": "out", + "interface": "foo", + "_ansible_check_mode": True, + } + ): result = self.__getResult(do_nothing_func_port_7000) - self.assertTrue(result.exception.args[0]['changed']) + self.assertTrue(result.exception.args[0]["changed"]) def test_check_mode_delete_existing_rules(self): - - with set_module_args({ - 'rule': 'allow', - 'proto': 'tcp', - 'port': '7000', - 'delete': 'yes', - '_ansible_check_mode': True, - }): - self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + with set_module_args( + { + "rule": "allow", + "proto": "tcp", + "port": "7000", + "delete": "yes", + "_ansible_check_mode": True, + } + ): + self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]["changed"]) def test_check_mode_delete_existing_insert_rules(self): - - with set_module_args({ - 'insert': '1', - 'rule': 'allow', - 'proto': 'tcp', - 'port': '7000', - 'delete': 'yes', - '_ansible_check_mode': True, - }): - self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + with set_module_args( + { + "insert": "1", + "rule": "allow", + "proto": "tcp", + "port": "7000", + "delete": "yes", + "_ansible_check_mode": True, + } + ): + self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]["changed"]) def test_check_mode_delete_not_existing_rules(self): - - with set_module_args({ - 'rule': 'allow', - 'proto': 'tcp', - 'port': '7001', - 'delete': 'yes', - 
'_ansible_check_mode': True, - }): - self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + with set_module_args( + { + "rule": "allow", + "proto": "tcp", + "port": "7001", + "delete": "yes", + "_ansible_check_mode": True, + } + ): + self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]["changed"]) def test_check_mode_delete_not_existing_insert_rules(self): - - with set_module_args({ - 'insert': '1', - 'rule': 'allow', - 'proto': 'tcp', - 'port': '7001', - 'delete': 'yes', - '_ansible_check_mode': True, - }): - self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + with set_module_args( + { + "insert": "1", + "rule": "allow", + "proto": "tcp", + "port": "7001", + "delete": "yes", + "_ansible_check_mode": True, + } + ): + self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]["changed"]) def test_enable_mode(self): - with set_module_args({ - 'state': 'enabled', - '_ansible_check_mode': True - }): - self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + with set_module_args({"state": "enabled", "_ansible_check_mode": True}): + self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]["changed"]) def test_disable_mode(self): - with set_module_args({ - 'state': 'disabled', - '_ansible_check_mode': True - }): - self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + with set_module_args({"state": "disabled", "_ansible_check_mode": True}): + self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]["changed"]) def test_logging_off(self): - with set_module_args({ - 'logging': 'off', - '_ansible_check_mode': True - }): - self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + with set_module_args({"logging": "off", "_ansible_check_mode": True}): + self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]["changed"]) def test_logging_on(self): - with set_module_args({ - 'logging': 'on', - '_ansible_check_mode': True - }): - self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + with set_module_args({"logging": "on", "_ansible_check_mode": True}): + self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]["changed"]) def test_default_changed(self): - with set_module_args({ - 'default': 'allow', - "direction": "incoming", - '_ansible_check_mode': True - }): - self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + with set_module_args({"default": "allow", "direction": "incoming", "_ansible_check_mode": True}): + self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]["changed"]) def test_default_not_changed(self): - with set_module_args({ - 'default': 'deny', - "direction": "incoming", - '_ansible_check_mode': True - }): - self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed']) + with set_module_args({"default": "deny", "direction": "incoming", "_ansible_check_mode": True}): + self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]["changed"]) def test_ipv6_remove(self): - with set_module_args({ - 'rule': 'allow', - 'proto': 'udp', - 'port': '5353', - 'from': 'ff02::fb', - 'delete': 'yes', - '_ansible_check_mode': True, - }): - self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed']) + with set_module_args( + 
{ + "rule": "allow", + "proto": "udp", + "port": "5353", + "from": "ff02::fb", + "delete": "yes", + "_ansible_check_mode": True, + } + ): + self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]["changed"]) def test_ipv6_add_existing(self): - with set_module_args({ - 'rule': 'allow', - 'proto': 'udp', - 'port': '5353', - 'from': 'ff02::fb', - '_ansible_check_mode': True, - }): - self.assertFalse(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed']) + with set_module_args( + { + "rule": "allow", + "proto": "udp", + "port": "5353", + "from": "ff02::fb", + "_ansible_check_mode": True, + } + ): + self.assertFalse(self.__getResult(do_nothing_func_ipv6).exception.args[0]["changed"]) def test_add_not_existing_ipv4_submask(self): - with set_module_args({ - 'rule': 'allow', - 'proto': 'udp', - 'port': '1577', - 'from': '10.0.0.0/24', - '_ansible_check_mode': True, - }): - self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed']) + with set_module_args( + { + "rule": "allow", + "proto": "udp", + "port": "1577", + "from": "10.0.0.0/24", + "_ansible_check_mode": True, + } + ): + self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]["changed"]) def test_ipv4_add_with_existing_ipv6(self): - with set_module_args({ - 'rule': 'allow', - 'proto': 'udp', - 'port': '5353', - 'from': '224.0.0.252', - '_ansible_check_mode': True, - }): - self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed']) + with set_module_args( + { + "rule": "allow", + "proto": "udp", + "port": "5353", + "from": "224.0.0.252", + "_ansible_check_mode": True, + } + ): + self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]["changed"]) def test_ipv6_add_from_nothing(self): - with set_module_args({ - 'rule': 'allow', - 'port': '23', - 'to': '::', - '_ansible_check_mode': True, - }): + with set_module_args( + { + "rule": "allow", + "port": "23", + "to": "::", + "_ansible_check_mode": True, + } + ): result = self.__getResult(do_nothing_func_nothing).exception.args[0] print(result) - self.assertTrue(result['changed']) + self.assertTrue(result["changed"]) def __getResult(self, cmd_fun): - with patch.object(basic.AnsibleModule, 'run_command') as mock_run_command: + with patch.object(basic.AnsibleModule, "run_command") as mock_run_command: mock_run_command.side_effect = cmd_fun with self.assertRaises(AnsibleExitJson) as result: module.main() diff --git a/tests/unit/plugins/modules/test_usb_facts.py b/tests/unit/plugins/modules/test_usb_facts.py index f5cc91827ea..d551d8244b6 100644 --- a/tests/unit/plugins/modules/test_usb_facts.py +++ b/tests/unit/plugins/modules/test_usb_facts.py @@ -8,25 +8,28 @@ from unittest import mock from ansible.module_utils import basic from ansible_collections.community.general.plugins.modules import usb_facts -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, set_module_args, exit_json, fail_json +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + set_module_args, + exit_json, + fail_json, +) def get_bin_path(self, arg, required=False): """Mock AnsibleModule.get_bin_path""" - if arg == 'lsusb': - return '/usr/bin/lsusb' + if arg == "lsusb": + return "/usr/bin/lsusb" else: if required: - fail_json(msg=f'{arg!r} not found !') + fail_json(msg=f"{arg!r} not found !") class TestUsbFacts(unittest.TestCase): - def setUp(self): - self.mock_module_helper = 
mock.patch.multiple(basic.AnsibleModule, - exit_json=exit_json, - fail_json=fail_json, - get_bin_path=get_bin_path) + self.mock_module_helper = mock.patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json, get_bin_path=get_bin_path + ) self.mock_module_helper.start() self.addCleanup(self.mock_module_helper.stop) self.testing_data = [ @@ -35,39 +38,43 @@ def setUp(self): "bus": "001", "device": "001", "id": "1d6b:0002", - "name": "Linux Foundation 2.0 root hub" + "name": "Linux Foundation 2.0 root hub", }, { "input": "Bus 003 Device 002: ID 8087:8008 Intel Corp. Integrated Rate Matching Hub", "bus": "003", "device": "002", "id": "8087:8008", - "name": "Intel Corp. Integrated Rate Matching Hub" - } + "name": "Intel Corp. Integrated Rate Matching Hub", + }, ] self.output_fields = ["bus", "device", "id", "name"] def test_parsing_single_line(self): for data in self.testing_data: - with mock.patch.object(basic.AnsibleModule, 'run_command') as mock_run_command: + with mock.patch.object(basic.AnsibleModule, "run_command") as mock_run_command: command_output = data["input"] mock_run_command.return_value = 0, command_output, None with self.assertRaises(AnsibleExitJson) as result: with set_module_args({}): usb_facts.main() for output_field in self.output_fields: - self.assertEqual(result.exception.args[0]["ansible_facts"]["usb_devices"][0][output_field], data[output_field]) + self.assertEqual( + result.exception.args[0]["ansible_facts"]["usb_devices"][0][output_field], data[output_field] + ) def test_parsing_multiple_lines(self): input = "" for data in self.testing_data: input += f"{data['input']}\n" - with mock.patch.object(basic.AnsibleModule, 'run_command') as mock_run_command: + with mock.patch.object(basic.AnsibleModule, "run_command") as mock_run_command: mock_run_command.return_value = 0, input, None with self.assertRaises(AnsibleExitJson) as result: with set_module_args({}): usb_facts.main() for index in range(0, len(self.testing_data)): for output_field in self.output_fields: - self.assertEqual(result.exception.args[0]["ansible_facts"]["usb_devices"][index][output_field], - self.testing_data[index][output_field]) + self.assertEqual( + result.exception.args[0]["ansible_facts"]["usb_devices"][index][output_field], + self.testing_data[index][output_field], + ) diff --git a/tests/unit/plugins/modules/test_wdc_redfish_command.py b/tests/unit/plugins/modules/test_wdc_redfish_command.py index ab397cc0cb4..3f30e66f827 100644 --- a/tests/unit/plugins/modules/test_wdc_redfish_command.py +++ b/tests/unit/plugins/modules/test_wdc_redfish_command.py @@ -14,57 +14,30 @@ from ansible.module_utils import basic import ansible_collections.community.general.plugins.modules.wdc_redfish_command as module -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, +) +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + set_module_args, + exit_json, + fail_json, +) -MOCK_SUCCESSFUL_HTTP_EMPTY_RESPONSE = { - "ret": True, - "data": { - } -} +MOCK_SUCCESSFUL_HTTP_EMPTY_RESPONSE = {"ret": True, "data": {}} -MOCK_GET_ENCLOSURE_RESPONSE_SINGLE_TENANT = { - "ret": True, - "data": { - "SerialNumber": "12345" - } -} 
+MOCK_GET_ENCLOSURE_RESPONSE_SINGLE_TENANT = {"ret": True, "data": {"SerialNumber": "12345"}} -MOCK_GET_ENCLOSURE_RESPONSE_MULTI_TENANT = { - "ret": True, - "data": { - "SerialNumber": "12345-A" - } -} +MOCK_GET_ENCLOSURE_RESPONSE_MULTI_TENANT = {"ret": True, "data": {"SerialNumber": "12345-A"}} -MOCK_URL_ERROR = { - "ret": False, - "msg": "This is a mock URL error", - "status": 500 -} +MOCK_URL_ERROR = {"ret": False, "msg": "This is a mock URL error", "status": 500} MOCK_SUCCESSFUL_RESPONSE_WITH_UPDATE_SERVICE_RESOURCE = { "ret": True, - "data": { - "UpdateService": { - "@odata.id": "/UpdateService" - }, - "Chassis": { - "@odata.id": "/Chassis" - } - } + "data": {"UpdateService": {"@odata.id": "/UpdateService"}, "Chassis": {"@odata.id": "/Chassis"}}, } -MOCK_SUCCESSFUL_RESPONSE_CHASSIS = { - "ret": True, - "data": { - "Members": [ - { - "@odata.id": "/redfish/v1/Chassis/Enclosure" - } - ] - } -} +MOCK_SUCCESSFUL_RESPONSE_CHASSIS = {"ret": True, "data": {"Members": [{"@odata.id": "/redfish/v1/Chassis/Enclosure"}]}} MOCK_SUCCESSFUL_RESPONSE_CHASSIS_ENCLOSURE = { "ret": True, @@ -74,95 +47,51 @@ "Actions": { "Oem": { "WDC": { - "#Chassis.Locate": { - "target": "/Chassis.Locate" - }, + "#Chassis.Locate": {"target": "/Chassis.Locate"}, "#Chassis.PowerMode": { "target": "/redfish/v1/Chassis/Enclosure/Actions/Chassis.PowerMode", - } + }, } } }, - "Oem": { - "WDC": { - "PowerMode": "Normal" - } - } - } + "Oem": {"WDC": {"PowerMode": "Normal"}}, + }, } MOCK_SUCCESSFUL_RESPONSE_WITH_SIMPLE_UPDATE_AND_FW_ACTIVATE = { "ret": True, "data": { "Actions": { - "#UpdateService.SimpleUpdate": { - "target": "mocked value" - }, + "#UpdateService.SimpleUpdate": {"target": "mocked value"}, "Oem": { "WDC": { "#UpdateService.FWActivate": { "title": "Activate the downloaded firmware.", - "target": "/redfish/v1/UpdateService/Actions/UpdateService.FWActivate" + "target": "/redfish/v1/UpdateService/Actions/UpdateService.FWActivate", } } - } + }, } - } + }, } -MOCK_SUCCESSFUL_RESPONSE_WITH_ACTIONS = { - "ret": True, - "data": { - "Actions": {} - } -} +MOCK_SUCCESSFUL_RESPONSE_WITH_ACTIONS = {"ret": True, "data": {"Actions": {}}} -MOCK_GET_IOM_A_MULTI_TENANT = { - "ret": True, - "data": { - "Id": "IOModuleAFRU" - } -} +MOCK_GET_IOM_A_MULTI_TENANT = {"ret": True, "data": {"Id": "IOModuleAFRU"}} -MOCK_GET_IOM_B_MULTI_TENANAT = { - "ret": True, - "data": { - "error": { - "message": "IOM Module B cannot be read" - } - } -} +MOCK_GET_IOM_B_MULTI_TENANAT = {"ret": True, "data": {"error": {"message": "IOM Module B cannot be read"}}} -MOCK_READY_FOR_FW_UPDATE = { - "ret": True, - "entries": { - "Description": "Ready for FW update", - "StatusCode": 0 - } -} +MOCK_READY_FOR_FW_UPDATE = {"ret": True, "entries": {"Description": "Ready for FW update", "StatusCode": 0}} -MOCK_FW_UPDATE_IN_PROGRESS = { - "ret": True, - "entries": { - "Description": "FW update in progress", - "StatusCode": 1 - } -} +MOCK_FW_UPDATE_IN_PROGRESS = {"ret": True, "entries": {"Description": "FW update in progress", "StatusCode": 1}} MOCK_WAITING_FOR_ACTIVATION = { "ret": True, - "entries": { - "Description": "FW update completed. Waiting for activation.", - "StatusCode": 2 - } + "entries": {"Description": "FW update completed. 
Waiting for activation.", "StatusCode": 2}, } -MOCK_SIMPLE_UPDATE_STATUS_LIST = [ - MOCK_READY_FOR_FW_UPDATE, - MOCK_FW_UPDATE_IN_PROGRESS, - MOCK_WAITING_FOR_ACTIVATION -] +MOCK_SIMPLE_UPDATE_STATUS_LIST = [MOCK_READY_FOR_FW_UPDATE, MOCK_FW_UPDATE_IN_PROGRESS, MOCK_WAITING_FOR_ACTIVATION] def get_bin_path(self, arg, required=False): @@ -181,9 +110,7 @@ def is_changed(ansible_exit_json): def mock_simple_update(*args, **kwargs): - return { - "ret": True - } + return {"ret": True} def mocked_url_response(*args, **kwargs): @@ -264,40 +191,28 @@ def mock_post_request(*args, **kwargs): ] for endpoint in valid_endpoints: if args[1].endswith(endpoint): - return { - "ret": True, - "data": ACTION_WAS_SUCCESSFUL_MESSAGE - } + return {"ret": True, "data": ACTION_WAS_SUCCESSFUL_MESSAGE} raise RuntimeError(f"Illegal POST call to: {args[1]}") def mock_get_firmware_inventory_version_1_2_3(*args, **kwargs): return { "ret": True, - "entries": [ - { - "Id": "IOModuleA_OOBM", - "Version": "1.2.3" - }, - { - "Id": "IOModuleB_OOBM", - "Version": "1.2.3" - } - ] + "entries": [{"Id": "IOModuleA_OOBM", "Version": "1.2.3"}, {"Id": "IOModuleB_OOBM", "Version": "1.2.3"}], } -ERROR_MESSAGE_UNABLE_TO_EXTRACT_BUNDLE_VERSION = "Unable to extract bundle version or multi-tenant status or generation from update image file" +ERROR_MESSAGE_UNABLE_TO_EXTRACT_BUNDLE_VERSION = ( + "Unable to extract bundle version or multi-tenant status or generation from update image file" +) ACTION_WAS_SUCCESSFUL_MESSAGE = "Action was successful" class TestWdcRedfishCommand(unittest.TestCase): - def setUp(self): - self.mock_module_helper = patch.multiple(basic.AnsibleModule, - exit_json=exit_json, - fail_json=fail_json, - get_bin_path=get_bin_path) + self.mock_module_helper = patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json, get_bin_path=get_bin_path + ) self.mock_module_helper.start() self.addCleanup(self.mock_module_helper.stop) self.tempdir = tempfile.mkdtemp() @@ -312,155 +227,162 @@ def test_module_fail_when_required_args_missing(self): def test_module_fail_when_unknown_category(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'category': 'unknown', - 'command': 'FWActivate', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': [], - }): + with set_module_args( + { + "category": "unknown", + "command": "FWActivate", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": [], + } + ): module.main() def test_module_fail_when_unknown_command(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'category': 'Update', - 'command': 'unknown', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': [], - }): + with set_module_args( + { + "category": "Update", + "command": "unknown", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": [], + } + ): module.main() def test_module_chassis_power_mode_low(self): """Test setting chassis power mode to low (happy path).""" module_args = { - 'category': 'Chassis', - 'command': 'PowerModeLow', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'resource_id': 'Enclosure', - 'baseuri': 'example.com' + "category": "Chassis", + "command": "PowerModeLow", + "username": "USERID", + "password": "PASSW0RD=21", + "resource_id": "Enclosure", + "baseuri": "example.com", } with set_module_args(module_args): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - get_request=mock_get_request, - post_request=mock_post_request): + with 
patch.multiple( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + get_request=mock_get_request, + post_request=mock_post_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: module.main() - self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, - get_exception_message(ansible_exit_json)) + self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_module_chassis_power_mode_normal_when_already_normal(self): """Test setting chassis power mode to normal when it already is. Verify we get changed=False.""" module_args = { - 'category': 'Chassis', - 'command': 'PowerModeNormal', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'resource_id': 'Enclosure', - 'baseuri': 'example.com' + "category": "Chassis", + "command": "PowerModeNormal", + "username": "USERID", + "password": "PASSW0RD=21", + "resource_id": "Enclosure", + "baseuri": "example.com", } with set_module_args(module_args): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - get_request=mock_get_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + get_request=mock_get_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: module.main() - self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, - get_exception_message(ansible_exit_json)) + self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, get_exception_message(ansible_exit_json)) self.assertFalse(is_changed(ansible_exit_json)) def test_module_chassis_power_mode_invalid_command(self): """Test that we get an error when issuing an invalid PowerMode command.""" module_args = { - 'category': 'Chassis', - 'command': 'PowerModeExtraHigh', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'resource_id': 'Enclosure', - 'baseuri': 'example.com' + "category": "Chassis", + "command": "PowerModeExtraHigh", + "username": "USERID", + "password": "PASSW0RD=21", + "resource_id": "Enclosure", + "baseuri": "example.com", } with set_module_args(module_args): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - get_request=mock_get_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + get_request=mock_get_request, + ): with self.assertRaises(AnsibleFailJson) as ansible_fail_json: module.main() expected_error_message = "Invalid Command 'PowerModeExtraHigh'" - self.assertIn(expected_error_message, - get_exception_message(ansible_fail_json)) + self.assertIn(expected_error_message, get_exception_message(ansible_fail_json)) def test_module_enclosure_led_indicator_on(self): """Test turning on a valid LED indicator (in this case we use the Enclosure resource).""" module_args = { - 'category': 'Chassis', - 'command': 'IndicatorLedOn', - 'username': 'USERID', - 'password': 'PASSW0RD=21', + "category": "Chassis", + "command": "IndicatorLedOn", + "username": "USERID", + "password": "PASSW0RD=21", "resource_id": "Enclosure", - "baseuri": "example.com" + "baseuri": "example.com", } with set_module_args(module_args): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - get_request=mock_get_request, - post_request=mock_post_request): + with patch.multiple( + 
"ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + get_request=mock_get_request, + post_request=mock_post_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: module.main() - self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, - get_exception_message(ansible_exit_json)) + self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_module_invalid_resource_led_indicator_on(self): """Test turning LED on for an invalid resource id.""" module_args = { - 'category': 'Chassis', - 'command': 'IndicatorLedOn', - 'username': 'USERID', - 'password': 'PASSW0RD=21', + "category": "Chassis", + "command": "IndicatorLedOn", + "username": "USERID", + "password": "PASSW0RD=21", "resource_id": "Disk99", - "baseuri": "example.com" + "baseuri": "example.com", } with set_module_args(module_args): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - get_request=mock_get_request, - post_request=mock_post_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + get_request=mock_get_request, + post_request=mock_post_request, + ): with self.assertRaises(AnsibleFailJson) as ansible_fail_json: module.main() expected_error_message = "Chassis resource Disk99 not found" - self.assertEqual(expected_error_message, - get_exception_message(ansible_fail_json)) + self.assertEqual(expected_error_message, get_exception_message(ansible_fail_json)) def test_module_enclosure_led_off_already_off(self): """Test turning LED indicator off when it's already off. Confirm changed is False and no POST occurs.""" module_args = { - 'category': 'Chassis', - 'command': 'IndicatorLedOff', - 'username': 'USERID', - 'password': 'PASSW0RD=21', + "category": "Chassis", + "command": "IndicatorLedOff", + "username": "USERID", + "password": "PASSW0RD=21", "resource_id": "Enclosure", - "baseuri": "example.com" + "baseuri": "example.com", } with set_module_args(module_args): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - get_request=mock_get_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + get_request=mock_get_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: module.main() - self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, - get_exception_message(ansible_exit_json)) + self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, get_exception_message(ansible_exit_json)) self.assertFalse(is_changed(ansible_exit_json)) def test_module_fw_activate_first_iom_unavailable(self): """Test that if the first IOM is not available, the 2nd one is used.""" - ioms = [ - "bad.example.com", - "good.example.com" - ] + ioms = ["bad.example.com", "good.example.com"] module_args = { - 'category': 'Update', - 'command': 'FWActivate', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ioms + "category": "Update", + "command": "FWActivate", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": ioms, } with set_module_args(module_args): @@ -471,296 +393,303 @@ def mock_get_request(*args, **kwargs): else: return mock_get_request_enclosure_single_tenant(*args, **kwargs) - with patch.multiple(module.WdcRedfishUtils, - _firmware_activate_uri=mock_fw_activate_url, - _update_uri=mock_update_url, - 
_find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return, - get_request=mock_get_request, - post_request=mock_post_request): + with patch.multiple( + module.WdcRedfishUtils, + _firmware_activate_uri=mock_fw_activate_url, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + get_request=mock_get_request, + post_request=mock_post_request, + ): with self.assertRaises(AnsibleExitJson) as cm: module.main() - self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, - get_exception_message(cm)) + self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, get_exception_message(cm)) def test_module_fw_activate_pass(self): """Test the FW Activate command in a passing scenario.""" # Run the same test twice -- once specifying ioms, and once specifying baseuri. # Both should work the same way. - uri_specifiers = [ - { - "ioms": ["example1.example.com"] - }, - { - "baseuri": "example1.example.com" - } - ] + uri_specifiers = [{"ioms": ["example1.example.com"]}, {"baseuri": "example1.example.com"}] for uri_specifier in uri_specifiers: module_args = { - 'category': 'Update', - 'command': 'FWActivate', - 'username': 'USERID', - 'password': 'PASSW0RD=21', + "category": "Update", + "command": "FWActivate", + "username": "USERID", + "password": "PASSW0RD=21", } module_args.update(uri_specifier) with set_module_args(module_args): - with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - _firmware_activate_uri=mock_fw_activate_url, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return, - get_request=mock_get_request_enclosure_single_tenant, - post_request=mock_post_request): + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + _firmware_activate_uri=mock_fw_activate_url, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + get_request=mock_get_request_enclosure_single_tenant, + post_request=mock_post_request, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: module.main() - self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, - get_exception_message(ansible_exit_json)) + self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, get_exception_message(ansible_exit_json)) self.assertTrue(is_changed(ansible_exit_json)) def test_module_fw_activate_service_does_not_support_fw_activate(self): """Test FW Activate when it is not supported.""" expected_error_message = "Service does not support FWActivate" - with set_module_args({ - 'category': 'Update', - 'command': 'FWActivate', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ["example1.example.com"] - }): + with set_module_args( + { + "category": "Update", + "command": "FWActivate", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": ["example1.example.com"], + } + ): def mock_update_uri_response(*args, **kwargs): return { "ret": True, - "data": {} # No Actions + "data": {}, # No Actions } - with patch.multiple(module.WdcRedfishUtils, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return, - get_request=mock_update_uri_response): + with patch.multiple( + module.WdcRedfishUtils, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + 
_find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + get_request=mock_update_uri_response, + ): with self.assertRaises(AnsibleFailJson) as cm: module.main() - self.assertEqual(expected_error_message, - get_exception_message(cm)) + self.assertEqual(expected_error_message, get_exception_message(cm)) def test_module_update_and_activate_image_uri_not_http(self): """Test Update and Activate when URI is not http(s)""" expected_error_message = "Bundle URI must be HTTP or HTTPS" - with set_module_args({ - 'category': 'Update', - 'command': 'UpdateAndActivate', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ["example1.example.com"], - 'update_image_uri': "ftp://example.com/image" - }): - with patch.multiple(module.WdcRedfishUtils, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return): + with set_module_args( + { + "category": "Update", + "command": "UpdateAndActivate", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": ["example1.example.com"], + "update_image_uri": "ftp://example.com/image", + } + ): + with patch.multiple( + module.WdcRedfishUtils, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + ): with self.assertRaises(AnsibleFailJson) as cm: module.main() - self.assertEqual(expected_error_message, - get_exception_message(cm)) + self.assertEqual(expected_error_message, get_exception_message(cm)) def test_module_update_and_activate_target_not_ready_for_fw_update(self): """Test Update and Activate when target is not in the correct state.""" mock_status_code = 999 mock_status_description = "mock status description" - expected_error_message = f"Target is not ready for FW update. Current status: {mock_status_code} ({mock_status_description})" - with set_module_args({ - 'category': 'Update', - 'command': 'UpdateAndActivate', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ["example1.example.com"], - 'update_image_uri': "http://example.com/image" - }): + expected_error_message = ( + f"Target is not ready for FW update. 
Current status: {mock_status_code} ({mock_status_description})" + ) + with set_module_args( + { + "category": "Update", + "command": "UpdateAndActivate", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": ["example1.example.com"], + "update_image_uri": "http://example.com/image", + } + ): with patch.object(module.WdcRedfishUtils, "get_simple_update_status") as mock_get_simple_update_status: mock_get_simple_update_status.return_value = { "ret": True, - "entries": { - "StatusCode": mock_status_code, - "Description": mock_status_description - } + "entries": {"StatusCode": mock_status_code, "Description": mock_status_description}, } - with patch.multiple(module.WdcRedfishUtils, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return): + with patch.multiple( + module.WdcRedfishUtils, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + ): with self.assertRaises(AnsibleFailJson) as cm: module.main() - self.assertEqual(expected_error_message, - get_exception_message(cm)) + self.assertEqual(expected_error_message, get_exception_message(cm)) def test_module_update_and_activate_bundle_not_a_tarfile(self): """Test Update and Activate when bundle is not a tarfile""" mock_filename = os.path.abspath(__file__) expected_error_message = ERROR_MESSAGE_UNABLE_TO_EXTRACT_BUNDLE_VERSION - with set_module_args({ - 'category': 'Update', - 'command': 'UpdateAndActivate', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ["example1.example.com"], - 'update_image_uri': "http://example.com/image", - "update_creds": { - "username": "image_user", - "password": "image_password" + with set_module_args( + { + "category": "Update", + "command": "UpdateAndActivate", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": ["example1.example.com"], + "update_image_uri": "http://example.com/image", + "update_creds": {"username": "image_user", "password": "image_password"}, } - }): - with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: + ): + with patch( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file" + ) as mock_fetch_file: mock_fetch_file.return_value = mock_filename - with patch.multiple(module.WdcRedfishUtils, - get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return): + with patch.multiple( + module.WdcRedfishUtils, + get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + ): with self.assertRaises(AnsibleFailJson) as cm: module.main() - self.assertEqual(expected_error_message, - get_exception_message(cm)) + self.assertEqual(expected_error_message, get_exception_message(cm)) def test_module_update_and_activate_bundle_contains_no_firmware_version(self): """Test Update and Activate when bundle contains no firmware version""" expected_error_message = ERROR_MESSAGE_UNABLE_TO_EXTRACT_BUNDLE_VERSION - with set_module_args({ - 'category': 'Update', - 'command': 
'UpdateAndActivate', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ["example1.example.com"], - 'update_image_uri': "http://example.com/image", - "update_creds": { - "username": "image_user", - "password": "image_password" + with set_module_args( + { + "category": "Update", + "command": "UpdateAndActivate", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": ["example1.example.com"], + "update_image_uri": "http://example.com/image", + "update_creds": {"username": "image_user", "password": "image_password"}, } - }): - + ): tar_name = f"empty_tarfile{uuid.uuid4()}.tar" empty_tarfile = tarfile.open(os.path.join(self.tempdir, tar_name), "w") empty_tarfile.close() - with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: + with patch( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file" + ) as mock_fetch_file: mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) - with patch.multiple(module.WdcRedfishUtils, - get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return): + with patch.multiple( + module.WdcRedfishUtils, + get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + ): with self.assertRaises(AnsibleFailJson) as cm: module.main() - self.assertEqual(expected_error_message, - get_exception_message(cm)) + self.assertEqual(expected_error_message, get_exception_message(cm)) def test_module_update_and_activate_version_already_installed(self): """Test Update and Activate when the bundle version is already installed""" mock_firmware_version = "1.2.3" expected_error_message = ACTION_WAS_SUCCESSFUL_MESSAGE - with set_module_args({ - 'category': 'Update', - 'command': 'UpdateAndActivate', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ["example1.example.com"], - 'update_image_uri': "http://example.com/image", - "update_creds": { - "username": "image_user", - "password": "image_password" + with set_module_args( + { + "category": "Update", + "command": "UpdateAndActivate", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": ["example1.example.com"], + "update_image_uri": "http://example.com/image", + "update_creds": {"username": "image_user", "password": "image_password"}, } - }): - - tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, - is_multi_tenant=False) - with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: + ): + tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, is_multi_tenant=False) + with patch( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file" + ) as mock_fetch_file: mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) - with patch.multiple(module.WdcRedfishUtils, - get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, - get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - 
_find_updateservice_additional_uris=empty_return, - get_request=mock_get_request_enclosure_single_tenant): + with patch.multiple( + module.WdcRedfishUtils, + get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, + get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + get_request=mock_get_request_enclosure_single_tenant, + ): with self.assertRaises(AnsibleExitJson) as result: module.main() - self.assertEqual(expected_error_message, - get_exception_message(result)) + self.assertEqual(expected_error_message, get_exception_message(result)) self.assertFalse(is_changed(result)) def test_module_update_and_activate_version_already_installed_multi_tenant(self): """Test Update and Activate on multi-tenant when version is already installed""" mock_firmware_version = "1.2.3" expected_error_message = ACTION_WAS_SUCCESSFUL_MESSAGE - with set_module_args({ - 'category': 'Update', - 'command': 'UpdateAndActivate', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ["example1.example.com"], - 'update_image_uri': "http://example.com/image", - "update_creds": { - "username": "image_user", - "password": "image_password" + with set_module_args( + { + "category": "Update", + "command": "UpdateAndActivate", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": ["example1.example.com"], + "update_image_uri": "http://example.com/image", + "update_creds": {"username": "image_user", "password": "image_password"}, } - }): - - tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, - is_multi_tenant=True) - with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: + ): + tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, is_multi_tenant=True) + with patch( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file" + ) as mock_fetch_file: mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) - with patch.multiple(module.WdcRedfishUtils, - get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, - get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return, - get_request=mock_get_request_enclosure_multi_tenant): + with patch.multiple( + module.WdcRedfishUtils, + get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, + get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, + _firmware_activate_uri=mocked_url_response, + _update_uri=mock_update_url, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + get_request=mock_get_request_enclosure_multi_tenant, + ): with self.assertRaises(AnsibleExitJson) as result: module.main() - self.assertEqual(expected_error_message, - get_exception_message(result)) + self.assertEqual(expected_error_message, get_exception_message(result)) self.assertFalse(is_changed(result)) def test_module_update_and_activate_pass(self): """Test Update and Activate (happy path)""" mock_firmware_version = "1.2.2" - with set_module_args({ - 'category': 'Update', - 'command': 'UpdateAndActivate', - 'username': 'USERID', - 'password': 
'PASSW0RD=21', - 'ioms': ["example1.example.com"], - 'update_image_uri': "http://example.com/image", - "update_creds": { - "username": "image_user", - "password": "image_password" + with set_module_args( + { + "category": "Update", + "command": "UpdateAndActivate", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": ["example1.example.com"], + "update_image_uri": "http://example.com/image", + "update_creds": {"username": "image_user", "password": "image_password"}, } - }): - - tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, - is_multi_tenant=False) + ): + tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, is_multi_tenant=False) - with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: + with patch( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file" + ) as mock_fetch_file: mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) - with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", - get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, - simple_update=mock_simple_update, - _simple_update_status_uri=mocked_url_response, - # _find_updateservice_resource=empty_return, - # _find_updateservice_additional_uris=empty_return, - get_request=mock_get_request_enclosure_single_tenant, - post_request=mock_post_request): - - with patch("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils.get_simple_update_status" - ) as mock_get_simple_update_status: + with patch.multiple( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils", + get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, + simple_update=mock_simple_update, + _simple_update_status_uri=mocked_url_response, + # _find_updateservice_resource=empty_return, + # _find_updateservice_additional_uris=empty_return, + get_request=mock_get_request_enclosure_single_tenant, + post_request=mock_post_request, + ): + with patch( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils.get_simple_update_status" + ) as mock_get_simple_update_status: mock_get_simple_update_status.side_effect = MOCK_SIMPLE_UPDATE_STATUS_LIST with self.assertRaises(AnsibleExitJson) as ansible_exit_json: module.main() @@ -770,34 +699,36 @@ def test_module_update_and_activate_pass(self): def test_module_update_and_activate_pass_multi_tenant(self): """Test Update and Activate with multi-tenant (happy path)""" mock_firmware_version = "1.2.2" - with set_module_args({ - 'category': 'Update', - 'command': 'UpdateAndActivate', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ["example1.example.com"], - 'update_image_uri': "http://example.com/image", - "update_creds": { - "username": "image_user", - "password": "image_password" + with set_module_args( + { + "category": "Update", + "command": "UpdateAndActivate", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": ["example1.example.com"], + "update_image_uri": "http://example.com/image", + "update_creds": {"username": "image_user", "password": "image_password"}, } - }): + ): + tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, is_multi_tenant=True) - tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, - is_multi_tenant=True) - - with 
patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: + with patch( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file" + ) as mock_fetch_file: mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) - with patch.multiple(module.WdcRedfishUtils, - get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, - simple_update=mock_simple_update, - _simple_update_status_uri=mocked_url_response, - # _find_updateservice_resource=empty_return, - # _find_updateservice_additional_uris=empty_return, - get_request=mock_get_request_enclosure_multi_tenant, - post_request=mock_post_request): - with patch("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils.get_simple_update_status" - ) as mock_get_simple_update_status: + with patch.multiple( + module.WdcRedfishUtils, + get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3, + simple_update=mock_simple_update, + _simple_update_status_uri=mocked_url_response, + # _find_updateservice_resource=empty_return, + # _find_updateservice_additional_uris=empty_return, + get_request=mock_get_request_enclosure_multi_tenant, + post_request=mock_post_request, + ): + with patch( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils.get_simple_update_status" + ) as mock_get_simple_update_status: mock_get_simple_update_status.side_effect = MOCK_SIMPLE_UPDATE_STATUS_LIST with self.assertRaises(AnsibleExitJson) as ansible_exit_json: module.main() @@ -808,73 +739,71 @@ def test_module_fw_update_multi_tenant_firmware_single_tenant_enclosure(self): """Test Update and Activate using multi-tenant bundle on single-tenant enclosure""" mock_firmware_version = "1.1.1" expected_error_message = "Enclosure multi-tenant is False but bundle multi-tenant is True" - with set_module_args({ - 'category': 'Update', - 'command': 'UpdateAndActivate', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ["example1.example.com"], - 'update_image_uri': "http://example.com/image", - "update_creds": { - "username": "image_user", - "password": "image_password" + with set_module_args( + { + "category": "Update", + "command": "UpdateAndActivate", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": ["example1.example.com"], + "update_image_uri": "http://example.com/image", + "update_creds": {"username": "image_user", "password": "image_password"}, } - }): - - tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, - is_multi_tenant=True) - with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file: + ): + tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, is_multi_tenant=True) + with patch( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file" + ) as mock_fetch_file: mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name) - with patch.multiple(module.WdcRedfishUtils, - get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3(), - get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update, - _firmware_activate_uri=mocked_url_response, - _update_uri=mock_update_url, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return, - get_request=mock_get_request_enclosure_single_tenant): + with patch.multiple( + module.WdcRedfishUtils, + 
get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3(),
+                    get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update,
+                    _firmware_activate_uri=mocked_url_response,
+                    _update_uri=mock_update_url,
+                    _find_updateservice_resource=empty_return,
+                    _find_updateservice_additional_uris=empty_return,
+                    get_request=mock_get_request_enclosure_single_tenant,
+                ):
                     with self.assertRaises(AnsibleFailJson) as result:
                         module.main()
-                self.assertEqual(expected_error_message,
-                                 get_exception_message(result))
+                self.assertEqual(expected_error_message, get_exception_message(result))
 
     def test_module_fw_update_single_tentant_firmware_multi_tenant_enclosure(self):
         """Test Update and Activate using single-tenant bundle on multi-tenant enclosure"""
         mock_firmware_version = "1.1.1"
         expected_error_message = "Enclosure multi-tenant is True but bundle multi-tenant is False"
-        with set_module_args({
-            'category': 'Update',
-            'command': 'UpdateAndActivate',
-            'username': 'USERID',
-            'password': 'PASSW0RD=21',
-            'ioms': ["example1.example.com"],
-            'update_image_uri': "http://example.com/image",
-            "update_creds": {
-                "username": "image_user",
-                "password": "image_password"
+        with set_module_args(
+            {
+                "category": "Update",
+                "command": "UpdateAndActivate",
+                "username": "USERID",
+                "password": "PASSW0RD=21",
+                "ioms": ["example1.example.com"],
+                "update_image_uri": "http://example.com/image",
+                "update_creds": {"username": "image_user", "password": "image_password"},
             }
-        }):
-
-            tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version,
-                                                     is_multi_tenant=False)
-            with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file:
+        ):
+            tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version, is_multi_tenant=False)
+            with patch(
+                "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file"
+            ) as mock_fetch_file:
                 mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name)
-                with patch.multiple(module.WdcRedfishUtils,
-                                    get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3(),
-                                    get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update,
-                                    _firmware_activate_uri=mocked_url_response,
-                                    _update_uri=mock_update_url,
-                                    _find_updateservice_resource=empty_return,
-                                    _find_updateservice_additional_uris=empty_return,
-                                    get_request=mock_get_request_enclosure_multi_tenant):
+                with patch.multiple(
+                    module.WdcRedfishUtils,
+                    get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3(),
+                    get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update,
+                    _firmware_activate_uri=mocked_url_response,
+                    _update_uri=mock_update_url,
+                    _find_updateservice_resource=empty_return,
+                    _find_updateservice_additional_uris=empty_return,
+                    get_request=mock_get_request_enclosure_multi_tenant,
+                ):
                     with self.assertRaises(AnsibleFailJson) as result:
                         module.main()
-                self.assertEqual(expected_error_message,
-                                 get_exception_message(result))
+                self.assertEqual(expected_error_message, get_exception_message(result))
 
-    def generate_temp_bundlefile(self,
-                                 mock_firmware_version,
-                                 is_multi_tenant):
+    def generate_temp_bundlefile(self, mock_firmware_version, is_multi_tenant):
         """Generate a temporary fake bundle file.
 
         :param str mock_firmware_version: The simulated firmware version for the bundle.
@@ -893,7 +822,7 @@ def generate_temp_bundlefile(self, bin_filename = "firmware.bin" bin_filename_path = os.path.join(self.tempdir, bin_filename) with open(bin_filename_path, "wb") as bin_file: - byte_to_write = b'\x80' if is_multi_tenant else b'\xFF' + byte_to_write = b"\x80" if is_multi_tenant else b"\xff" bin_file.write(byte_to_write * 12) for filename in [package_filename, bin_filename]: bundle_tarfile.add(os.path.join(self.tempdir, filename), arcname=filename) diff --git a/tests/unit/plugins/modules/test_wdc_redfish_info.py b/tests/unit/plugins/modules/test_wdc_redfish_info.py index 7dcfa7ba435..0c94f9925d2 100644 --- a/tests/unit/plugins/modules/test_wdc_redfish_info.py +++ b/tests/unit/plugins/modules/test_wdc_redfish_info.py @@ -9,43 +9,35 @@ from ansible.module_utils import basic import ansible_collections.community.general.plugins.modules.wdc_redfish_info as module -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, +) +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + set_module_args, + exit_json, + fail_json, +) -MOCK_SUCCESSFUL_RESPONSE_WITH_ACTIONS = { - "ret": True, - "data": { - "Actions": {} - } -} +MOCK_SUCCESSFUL_RESPONSE_WITH_ACTIONS = {"ret": True, "data": {"Actions": {}}} -MOCK_SUCCESSFUL_HTTP_EMPTY_RESPONSE = { - "ret": True, - "data": { - } -} +MOCK_SUCCESSFUL_HTTP_EMPTY_RESPONSE = {"ret": True, "data": {}} MOCK_SUCCESSFUL_RESPONSE_WITH_UPDATE_SERVICE_RESOURCE = { "ret": True, - "data": { - "UpdateService": { - "@odata.id": "/UpdateService" - } - } + "data": {"UpdateService": {"@odata.id": "/UpdateService"}}, } MOCK_SUCCESSFUL_RESPONSE_WITH_SIMPLE_UPDATE_BUT_NO_FW_ACTIVATE = { "ret": True, "data": { "Actions": { - "#UpdateService.SimpleUpdate": { - "target": "mocked value" - }, + "#UpdateService.SimpleUpdate": {"target": "mocked value"}, "Oem": { "WDC": {} # No #UpdateService.FWActivate - } + }, } - } + }, } @@ -65,12 +57,10 @@ def get_exception_message(ansible_exit_json): class TestWdcRedfishInfo(unittest.TestCase): - def setUp(self): - self.mock_module_helper = patch.multiple(basic.AnsibleModule, - exit_json=exit_json, - fail_json=fail_json, - get_bin_path=get_bin_path) + self.mock_module_helper = patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json, get_bin_path=get_bin_path + ) self.mock_module_helper.start() self.addCleanup(self.mock_module_helper.stop) @@ -81,34 +71,40 @@ def test_module_fail_when_required_args_missing(self): def test_module_fail_when_unknown_category(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'category': 'unknown', - 'command': 'SimpleUpdateStatus', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': [], - }): + with set_module_args( + { + "category": "unknown", + "command": "SimpleUpdateStatus", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": [], + } + ): module.main() def test_module_fail_when_unknown_command(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'category': 'Update', - 'command': 'unknown', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': [], - }): + with set_module_args( + { + "category": "Update", + "command": 
"unknown", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": [], + } + ): module.main() def test_module_simple_update_status_pass(self): - with set_module_args({ - 'category': 'Update', - 'command': 'SimpleUpdateStatus', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ["example1.example.com"], - }): + with set_module_args( + { + "category": "Update", + "command": "SimpleUpdateStatus", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": ["example1.example.com"], + } + ): def mock_simple_update_status(*args, **kwargs): return { @@ -117,8 +113,8 @@ def mock_simple_update_status(*args, **kwargs): "Description": "Ready for FW update", "ErrorCode": 0, "EstimatedRemainingMinutes": 0, - "StatusCode": 0 - } + "StatusCode": 0, + }, } def mocked_string_response(*args, **kwargs): @@ -127,43 +123,47 @@ def mocked_string_response(*args, **kwargs): def empty_return(*args, **kwargs): return {"ret": True} - with patch.multiple(module.WdcRedfishUtils, - _simple_update_status_uri=mocked_string_response, - _find_updateservice_resource=empty_return, - _find_updateservice_additional_uris=empty_return, - get_request=mock_simple_update_status): + with patch.multiple( + module.WdcRedfishUtils, + _simple_update_status_uri=mocked_string_response, + _find_updateservice_resource=empty_return, + _find_updateservice_additional_uris=empty_return, + get_request=mock_simple_update_status, + ): with self.assertRaises(AnsibleExitJson) as ansible_exit_json: module.main() redfish_facts = get_redfish_facts(ansible_exit_json) - self.assertEqual(mock_simple_update_status()["data"], - redfish_facts["simple_update_status"]["entries"]) + self.assertEqual(mock_simple_update_status()["data"], redfish_facts["simple_update_status"]["entries"]) def test_module_simple_update_status_updateservice_resource_not_found(self): - with set_module_args({ - 'category': 'Update', - 'command': 'SimpleUpdateStatus', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ["example1.example.com"], - }): - with patch.object(module.WdcRedfishUtils, 'get_request') as mock_get_request: + with set_module_args( + { + "category": "Update", + "command": "SimpleUpdateStatus", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": ["example1.example.com"], + } + ): + with patch.object(module.WdcRedfishUtils, "get_request") as mock_get_request: mock_get_request.return_value = { "ret": True, - "data": {} # Missing UpdateService property + "data": {}, # Missing UpdateService property } with self.assertRaises(AnsibleFailJson) as ansible_exit_json: module.main() - self.assertEqual("UpdateService resource not found", - get_exception_message(ansible_exit_json)) + self.assertEqual("UpdateService resource not found", get_exception_message(ansible_exit_json)) def test_module_simple_update_status_service_does_not_support_simple_update(self): - with set_module_args({ - 'category': 'Update', - 'command': 'SimpleUpdateStatus', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ["example1.example.com"], - }): + with set_module_args( + { + "category": "Update", + "command": "SimpleUpdateStatus", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": ["example1.example.com"], + } + ): def mock_get_request_function(uri): mock_url_string = "mockURL" @@ -173,29 +173,27 @@ def mock_get_request_function(uri): "data": { "Actions": { # No #UpdateService.SimpleUpdate } - } + }, } else: - return { - "ret": True, - "data": mock_url_string - } + return {"ret": True, "data": mock_url_string} - with 
patch.object(module.WdcRedfishUtils, 'get_request') as mock_get_request: + with patch.object(module.WdcRedfishUtils, "get_request") as mock_get_request: mock_get_request.side_effect = mock_get_request_function with self.assertRaises(AnsibleFailJson) as ansible_exit_json: module.main() - self.assertEqual("UpdateService resource not found", - get_exception_message(ansible_exit_json)) + self.assertEqual("UpdateService resource not found", get_exception_message(ansible_exit_json)) def test_module_simple_update_status_service_does_not_support_fw_activate(self): - with set_module_args({ - 'category': 'Update', - 'command': 'SimpleUpdateStatus', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'ioms': ["example1.example.com"], - }): + with set_module_args( + { + "category": "Update", + "command": "SimpleUpdateStatus", + "username": "USERID", + "password": "PASSW0RD=21", + "ioms": ["example1.example.com"], + } + ): def mock_get_request_function(uri): if uri.endswith("/redfish/v1") or uri.endswith("/redfish/v1/"): @@ -207,9 +205,10 @@ def mock_get_request_function(uri): else: raise RuntimeError(f"Illegal call to get_request in test: {uri}") - with patch("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils.get_request") as mock_get_request: + with patch( + "ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils.get_request" + ) as mock_get_request: mock_get_request.side_effect = mock_get_request_function with self.assertRaises(AnsibleFailJson) as ansible_exit_json: module.main() - self.assertEqual("Service does not support FWActivate", - get_exception_message(ansible_exit_json)) + self.assertEqual("Service does not support FWActivate", get_exception_message(ansible_exit_json)) diff --git a/tests/unit/plugins/modules/test_xcc_redfish_command.py b/tests/unit/plugins/modules/test_xcc_redfish_command.py index 14c1011346c..84d77b82cbc 100644 --- a/tests/unit/plugins/modules/test_xcc_redfish_command.py +++ b/tests/unit/plugins/modules/test_xcc_redfish_command.py @@ -9,8 +9,15 @@ from ansible.module_utils import basic import ansible_collections.community.general.plugins.modules.xcc_redfish_command as module -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson -from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, +) +from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import ( + set_module_args, + exit_json, + fail_json, +) def get_bin_path(self, arg, required=False): @@ -19,12 +26,10 @@ def get_bin_path(self, arg, required=False): class TestXCCRedfishCommand(unittest.TestCase): - def setUp(self): - self.mock_module_helper = patch.multiple(basic.AnsibleModule, - exit_json=exit_json, - fail_json=fail_json, - get_bin_path=get_bin_path) + self.mock_module_helper = patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json, get_bin_path=get_bin_path + ) self.mock_module_helper.start() self.addCleanup(self.mock_module_helper.stop) @@ -35,574 +40,626 @@ def test_module_fail_when_required_args_missing(self): def test_module_fail_when_unknown_category(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'category': 'unknown', - 'command': 'VirtualMediaEject', - 
'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - }): + with set_module_args( + { + "category": "unknown", + "command": "VirtualMediaEject", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + } + ): module.main() def test_module_fail_when_unknown_command(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'category': 'Manager', - 'command': 'unknown', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - }): + with set_module_args( + { + "category": "Manager", + "command": "unknown", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + } + ): module.main() def test_module_command_VirtualMediaInsert_pass(self): - with set_module_args({ - 'category': 'Manager', - 'command': 'VirtualMediaInsert', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'timeout': 30, - 'virtual_media': { - 'image_url': "nfs://10.245.52.18:/home/nfs/bootable-sr635-20210111-autorun.iso", - 'media_types': ['CD'], - 'inserted': True, - 'write_protected': True, - 'transfer_protocol_type': 'NFS' + with set_module_args( + { + "category": "Manager", + "command": "VirtualMediaInsert", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + "timeout": 30, + "virtual_media": { + "image_url": "nfs://10.245.52.18:/home/nfs/bootable-sr635-20210111-autorun.iso", + "media_types": ["CD"], + "inserted": True, + "write_protected": True, + "transfer_protocol_type": "NFS", + }, } - }): - with patch.object(module.XCCRedfishUtils, '_find_systems_resource') as mock__find_systems_resource: - mock__find_systems_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'} - with patch.object(module.XCCRedfishUtils, '_find_managers_resource') as mock__find_managers_resource: - mock__find_managers_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'} + ): + with patch.object(module.XCCRedfishUtils, "_find_systems_resource") as mock__find_systems_resource: + mock__find_systems_resource.return_value = {"ret": True, "changed": True, "msg": "success"} + with patch.object(module.XCCRedfishUtils, "_find_managers_resource") as mock__find_managers_resource: + mock__find_managers_resource.return_value = {"ret": True, "changed": True, "msg": "success"} - with patch.object(module.XCCRedfishUtils, 'virtual_media_insert') as mock_virtual_media_insert: - mock_virtual_media_insert.return_value = {'ret': True, 'changed': True, 'msg': 'success'} + with patch.object(module.XCCRedfishUtils, "virtual_media_insert") as mock_virtual_media_insert: + mock_virtual_media_insert.return_value = {"ret": True, "changed": True, "msg": "success"} with self.assertRaises(AnsibleExitJson) as result: module.main() def test_module_command_VirtualMediaEject_pass(self): - with set_module_args({ - 'category': 'Manager', - 'command': 'VirtualMediaEject', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'timeout': 30, - 'virtual_media': { - 'image_url': "nfs://10.245.52.18:/home/nfs/bootable-sr635-20210111-autorun.iso", + with set_module_args( + { + "category": "Manager", + "command": "VirtualMediaEject", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + "timeout": 30, + "virtual_media": { + "image_url": "nfs://10.245.52.18:/home/nfs/bootable-sr635-20210111-autorun.iso", + }, } - }): - with patch.object(module.XCCRedfishUtils, '_find_systems_resource') as 
mock__find_systems_resource: - mock__find_systems_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'} - with patch.object(module.XCCRedfishUtils, '_find_managers_resource') as mock__find_managers_resource: - mock__find_managers_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'} + ): + with patch.object(module.XCCRedfishUtils, "_find_systems_resource") as mock__find_systems_resource: + mock__find_systems_resource.return_value = {"ret": True, "changed": True, "msg": "success"} + with patch.object(module.XCCRedfishUtils, "_find_managers_resource") as mock__find_managers_resource: + mock__find_managers_resource.return_value = {"ret": True, "changed": True, "msg": "success"} - with patch.object(module.XCCRedfishUtils, 'virtual_media_eject') as mock_virtual_media_eject: - mock_virtual_media_eject.return_value = {'ret': True, 'changed': True, 'msg': 'success'} + with patch.object(module.XCCRedfishUtils, "virtual_media_eject") as mock_virtual_media_eject: + mock_virtual_media_eject.return_value = {"ret": True, "changed": True, "msg": "success"} with self.assertRaises(AnsibleExitJson) as result: module.main() def test_module_command_VirtualMediaEject_fail_when_required_args_missing(self): with self.assertRaises(AnsibleFailJson): - with set_module_args({ - 'category': 'Manager', - 'command': 'VirtualMediaEject', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - }): + with set_module_args( + { + "category": "Manager", + "command": "VirtualMediaEject", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + } + ): module.main() def test_module_command_GetResource_fail_when_required_args_missing(self): - with set_module_args({ - 'category': 'Raw', - 'command': 'GetResource', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - }): - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} + with set_module_args( + { + "category": "Raw", + "command": "GetResource", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + } + ): + with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request: + mock_get_request.return_value = {"ret": True, "data": {"teststr": "xxxx"}} with self.assertRaises(AnsibleFailJson) as result: module.main() def test_module_command_GetResource_fail_when_get_return_false(self): - with set_module_args({ - 'category': 'Raw', - 'command': 'GetResource', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'resource_uri': '/redfish/v1/testuri', - }): - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': False, 'msg': '404 error'} + with set_module_args( + { + "category": "Raw", + "command": "GetResource", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + "resource_uri": "/redfish/v1/testuri", + } + ): + with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request: + mock_get_request.return_value = {"ret": False, "msg": "404 error"} with self.assertRaises(AnsibleFailJson) as result: module.main() def test_module_command_GetResource_pass(self): - with set_module_args({ - 'category': 'Raw', - 'command': 'GetResource', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'resource_uri': '/redfish/v1/testuri', - }): - with 
patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} + with set_module_args( + { + "category": "Raw", + "command": "GetResource", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + "resource_uri": "/redfish/v1/testuri", + } + ): + with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request: + mock_get_request.return_value = {"ret": True, "data": {"teststr": "xxxx"}} with self.assertRaises(AnsibleExitJson) as result: module.main() def test_module_command_GetCollectionResource_fail_when_required_args_missing(self): - with set_module_args({ - 'category': 'Raw', - 'command': 'GetCollectionResource', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - }): - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} + with set_module_args( + { + "category": "Raw", + "command": "GetCollectionResource", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + } + ): + with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request: + mock_get_request.return_value = {"ret": True, "data": {"teststr": "xxxx"}} with self.assertRaises(AnsibleFailJson) as result: module.main() def test_module_command_GetCollectionResource_fail_when_get_return_false(self): - with set_module_args({ - 'category': 'Raw', - 'command': 'GetCollectionResource', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'resource_uri': '/redfish/v1/testuri', - }): - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': False, 'msg': '404 error'} + with set_module_args( + { + "category": "Raw", + "command": "GetCollectionResource", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + "resource_uri": "/redfish/v1/testuri", + } + ): + with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request: + mock_get_request.return_value = {"ret": False, "msg": "404 error"} with self.assertRaises(AnsibleFailJson) as result: module.main() def test_module_command_GetCollectionResource_fail_when_get_not_colection(self): - with set_module_args({ - 'category': 'Raw', - 'command': 'GetCollectionResource', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'resource_uri': '/redfish/v1/testuri', - }): - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} + with set_module_args( + { + "category": "Raw", + "command": "GetCollectionResource", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + "resource_uri": "/redfish/v1/testuri", + } + ): + with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request: + mock_get_request.return_value = {"ret": True, "data": {"teststr": "xxxx"}} with self.assertRaises(AnsibleFailJson) as result: module.main() def test_module_command_GetCollectionResource_pass_when_get_empty_collection(self): - with set_module_args({ - 'category': 'Raw', - 'command': 'GetCollectionResource', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'resource_uri': '/redfish/v1/testuri', - }): - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - 
mock_get_request.return_value = {'ret': True, 'data': {'Members': [], 'Members@odata.count': 0}} + with set_module_args( + { + "category": "Raw", + "command": "GetCollectionResource", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + "resource_uri": "/redfish/v1/testuri", + } + ): + with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request: + mock_get_request.return_value = {"ret": True, "data": {"Members": [], "Members@odata.count": 0}} with self.assertRaises(AnsibleExitJson) as result: module.main() def test_module_command_GetCollectionResource_pass_when_get_collection(self): - with set_module_args({ - 'category': 'Raw', - 'command': 'GetCollectionResource', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'resource_uri': '/redfish/v1/testuri', - }): - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'Members': [{'@odata.id': '/redfish/v1/testuri/1'}], 'Members@odata.count': 1}} + with set_module_args( + { + "category": "Raw", + "command": "GetCollectionResource", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + "resource_uri": "/redfish/v1/testuri", + } + ): + with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request: + mock_get_request.return_value = { + "ret": True, + "data": {"Members": [{"@odata.id": "/redfish/v1/testuri/1"}], "Members@odata.count": 1}, + } with self.assertRaises(AnsibleExitJson) as result: module.main() def test_module_command_PatchResource_fail_when_required_args_missing(self): - with set_module_args({ - 'category': 'Raw', - 'command': 'PatchResource', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - }): - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} - - with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: - mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} + with set_module_args( + { + "category": "Raw", + "command": "PatchResource", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + } + ): + with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request: + mock_get_request.return_value = { + "ret": True, + "data": {"teststr": "xxxx", "@odata.etag": "27f6eb13fa1c28a2711"}, + } + + with patch.object(module.XCCRedfishUtils, "patch_request") as mock_patch_request: + mock_patch_request.return_value = {"ret": True, "data": {"teststr": "xxxx"}} with self.assertRaises(AnsibleFailJson) as result: module.main() def test_module_command_PatchResource_fail_when_required_args_missing_no_requestbody(self): - with set_module_args({ - 'category': 'Raw', - 'command': 'PatchResource', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'resource_uri': '/redfish/v1/testuri', - }): - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} - - with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: - mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} + with set_module_args( + { + "category": "Raw", + "command": "PatchResource", + "baseuri": "10.245.39.251", 
+ "username": "USERID", + "password": "PASSW0RD=21", + "resource_uri": "/redfish/v1/testuri", + } + ): + with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request: + mock_get_request.return_value = { + "ret": True, + "data": {"teststr": "xxxx", "@odata.etag": "27f6eb13fa1c28a2711"}, + } + + with patch.object(module.XCCRedfishUtils, "patch_request") as mock_patch_request: + mock_patch_request.return_value = {"ret": True, "data": {"teststr": "xxxx"}} with self.assertRaises(AnsibleFailJson) as result: module.main() def test_module_command_PatchResource_fail_when_noexisting_property_in_requestbody(self): - with set_module_args({ - 'category': 'Raw', - 'command': 'PatchResource', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'resource_uri': '/redfish/v1/testuri', - 'request_body': {'teststr': 'yyyy', 'otherkey': 'unknownkey'} - }): - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} - - with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: - mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} + with set_module_args( + { + "category": "Raw", + "command": "PatchResource", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + "resource_uri": "/redfish/v1/testuri", + "request_body": {"teststr": "yyyy", "otherkey": "unknownkey"}, + } + ): + with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request: + mock_get_request.return_value = { + "ret": True, + "data": {"teststr": "xxxx", "@odata.etag": "27f6eb13fa1c28a2711"}, + } + + with patch.object(module.XCCRedfishUtils, "patch_request") as mock_patch_request: + mock_patch_request.return_value = {"ret": True, "data": {"teststr": "xxxx"}} with self.assertRaises(AnsibleFailJson) as result: module.main() def test_module_command_PatchResource_fail_when_get_return_false(self): - with set_module_args({ - 'category': 'Raw', - 'command': 'PatchResource', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 'resource_uri': '/redfish/v1/testuri', - 'request_body': {'teststr': 'yyyy'} - }): - with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: - mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} - - with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: - mock_patch_request.return_value = {'ret': False, 'msg': '500 internal error'} + with set_module_args( + { + "category": "Raw", + "command": "PatchResource", + "baseuri": "10.245.39.251", + "username": "USERID", + "password": "PASSW0RD=21", + "resource_uri": "/redfish/v1/testuri", + "request_body": {"teststr": "yyyy"}, + } + ): + with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request: + mock_get_request.return_value = { + "ret": True, + "data": {"teststr": "xxxx", "@odata.etag": "27f6eb13fa1c28a2711"}, + } + + with patch.object(module.XCCRedfishUtils, "patch_request") as mock_patch_request: + mock_patch_request.return_value = {"ret": False, "msg": "500 internal error"} with self.assertRaises(AnsibleFailJson) as result: module.main() def test_module_command_PatchResource_pass(self): - with set_module_args({ - 'category': 'Raw', - 'command': 'PatchResource', - 'baseuri': '10.245.39.251', - 'username': 'USERID', - 'password': 'PASSW0RD=21', - 
'resource_uri': '/redfish/v1/testuri',
-            'request_body': {'teststr': 'yyyy'}
-        }):
-            with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
-                mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}}
-
-                with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request:
-                    mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'yyyy', '@odata.etag': '322e0d45d9572723c98'}}
+        with set_module_args(
+            {
+                "category": "Raw",
+                "command": "PatchResource",
+                "baseuri": "10.245.39.251",
+                "username": "USERID",
+                "password": "PASSW0RD=21",
+                "resource_uri": "/redfish/v1/testuri",
+                "request_body": {"teststr": "yyyy"},
+            }
+        ):
+            with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request:
+                mock_get_request.return_value = {
+                    "ret": True,
+                    "data": {"teststr": "xxxx", "@odata.etag": "27f6eb13fa1c28a2711"},
+                }
+
+                with patch.object(module.XCCRedfishUtils, "patch_request") as mock_patch_request:
+                    mock_patch_request.return_value = {
+                        "ret": True,
+                        "data": {"teststr": "yyyy", "@odata.etag": "322e0d45d9572723c98"},
+                    }
 
                     with self.assertRaises(AnsibleExitJson) as result:
                         module.main()
 
     def test_module_command_PostResource_fail_when_required_args_missing(self):
-        with set_module_args({
-            'category': 'Raw',
-            'command': 'PostResource',
-            'baseuri': '10.245.39.251',
-            'username': 'USERID',
-            'password': 'PASSW0RD=21',
-        }):
-            with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+        with set_module_args(
+            {
+                "category": "Raw",
+                "command": "PostResource",
+                "baseuri": "10.245.39.251",
+                "username": "USERID",
+                "password": "PASSW0RD=21",
+            }
+        ):
+            with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request:
                 mock_get_request.return_value = {
-                    'ret': True,
-                    'data': {
-                        'Actions': {
-                            '#Bios.ChangePassword': {
-                                '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
-                                'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
-                                'title': "ChangePassword",
-                                'PasswordName@Redfish.AllowableValues': [
-                                    "UefiAdminPassword",
-                                    "UefiPowerOnPassword"
-                                ]
+                    "ret": True,
+                    "data": {
+                        "Actions": {
+                            "#Bios.ChangePassword": {
+                                "@Redfish.ActionInfo": "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
+                                "target": "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+                                "title": "ChangePassword",
+                                "PasswordName@Redfish.AllowableValues": ["UefiAdminPassword", "UefiPowerOnPassword"],
+                            },
+                            "#Bios.ResetBios": {
+                                "title": "ResetBios",
+                                "target": "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios",
                             },
-                            '#Bios.ResetBios': {
-                                'title': "ResetBios",
-                                'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios"
-                            }
                         },
-                    }
+                    },
                 }
-                with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request:
-                    mock_post_request.return_value = {'ret': True}
+                with patch.object(module.XCCRedfishUtils, "post_request") as mock_post_request:
+                    mock_post_request.return_value = {"ret": True}
 
                     with self.assertRaises(AnsibleFailJson) as result:
                         module.main()
 
     def test_module_command_PostResource_fail_when_invalid_resourceuri(self):
-        with set_module_args({
-            'category': 'Raw',
-            'command': 'PostResource',
-            'baseuri': '10.245.39.251',
-            'username': 'USERID',
-            'password': 'PASSW0RD=21',
-            'resource_uri': '/redfish/v1/testuri',
-        }):
-            with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+        with set_module_args(
+            {
+                "category": "Raw",
+                "command": "PostResource",
+                "baseuri": "10.245.39.251",
+                "username": "USERID",
+                "password": "PASSW0RD=21",
+                "resource_uri": "/redfish/v1/testuri",
+            }
+        ):
+            with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request:
                 mock_get_request.return_value = {
-                    'ret': True,
-                    'data': {
-                        'Actions': {
-                            '#Bios.ChangePassword': {
-                                '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
-                                'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
-                                'title': "ChangePassword",
-                                'PasswordName@Redfish.AllowableValues': [
-                                    "UefiAdminPassword",
-                                    "UefiPowerOnPassword"
-                                ]
+                    "ret": True,
+                    "data": {
+                        "Actions": {
+                            "#Bios.ChangePassword": {
+                                "@Redfish.ActionInfo": "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
+                                "target": "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+                                "title": "ChangePassword",
+                                "PasswordName@Redfish.AllowableValues": ["UefiAdminPassword", "UefiPowerOnPassword"],
+                            },
+                            "#Bios.ResetBios": {
+                                "title": "ResetBios",
+                                "target": "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios",
                             },
-                            '#Bios.ResetBios': {
-                                'title': "ResetBios",
-                                'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios"
-                            }
                         },
-                    }
+                    },
                 }
-                with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request:
-                    mock_post_request.return_value = {'ret': True}
+                with patch.object(module.XCCRedfishUtils, "post_request") as mock_post_request:
+                    mock_post_request.return_value = {"ret": True}
 
                     with self.assertRaises(AnsibleFailJson) as result:
                         module.main()
 
     def test_module_command_PostResource_fail_when_no_requestbody(self):
-        with set_module_args({
-            'category': 'Raw',
-            'command': 'PostResource',
-            'baseuri': '10.245.39.251',
-            'username': 'USERID',
-            'password': 'PASSW0RD=21',
-            'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword',
-        }):
-            with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+        with set_module_args(
+            {
+                "category": "Raw",
+                "command": "PostResource",
+                "baseuri": "10.245.39.251",
+                "username": "USERID",
+                "password": "PASSW0RD=21",
+                "resource_uri": "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+            }
+        ):
+            with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request:
                 mock_get_request.return_value = {
-                    'ret': True,
-                    'data': {
-                        'Actions': {
-                            '#Bios.ChangePassword': {
-                                '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
-                                'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
-                                'title': "ChangePassword",
-                                'PasswordName@Redfish.AllowableValues': [
-                                    "UefiAdminPassword",
-                                    "UefiPowerOnPassword"
-                                ]
+                    "ret": True,
+                    "data": {
+                        "Actions": {
+                            "#Bios.ChangePassword": {
+                                "@Redfish.ActionInfo": "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
+                                "target": "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+                                "title": "ChangePassword",
+                                "PasswordName@Redfish.AllowableValues": ["UefiAdminPassword", "UefiPowerOnPassword"],
+                            },
+                            "#Bios.ResetBios": {
+                                "title": "ResetBios",
+                                "target": "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios",
                             },
-                            '#Bios.ResetBios': {
-                                'title': "ResetBios",
-                                'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios"
-                            }
                         },
-                    }
+                    },
                 }
-                with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request:
-                    mock_post_request.return_value = {'ret': True}
+                with patch.object(module.XCCRedfishUtils, "post_request") as mock_post_request:
+                    mock_post_request.return_value = {"ret": True}
 
                     with self.assertRaises(AnsibleFailJson) as result:
                         module.main()
 
     def test_module_command_PostResource_fail_when_no_requestbody_2(self):
-        with set_module_args({
-            'category': 'Raw',
-            'command': 'PostResource',
-            'baseuri': '10.245.39.251',
-            'username': 'USERID',
-            'password': 'PASSW0RD=21',
-            'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword',
-        }):
-            with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+        with set_module_args(
+            {
+                "category": "Raw",
+                "command": "PostResource",
+                "baseuri": "10.245.39.251",
+                "username": "USERID",
+                "password": "PASSW0RD=21",
+                "resource_uri": "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+            }
+        ):
+            with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request:
                 mock_get_request.return_value = {
-                    'ret': True,
-                    'data': {
-                        'Actions': {
-                            '#Bios.ChangePassword': {
-                                '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
-                                'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
-                                'title': "ChangePassword",
-                                'PasswordName@Redfish.AllowableValues': [
-                                    "UefiAdminPassword",
-                                    "UefiPowerOnPassword"
-                                ]
+                    "ret": True,
+                    "data": {
+                        "Actions": {
+                            "#Bios.ChangePassword": {
+                                "@Redfish.ActionInfo": "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
+                                "target": "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+                                "title": "ChangePassword",
+                                "PasswordName@Redfish.AllowableValues": ["UefiAdminPassword", "UefiPowerOnPassword"],
+                            },
+                            "#Bios.ResetBios": {
+                                "title": "ResetBios",
+                                "target": "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios",
                             },
-                            '#Bios.ResetBios': {
-                                'title': "ResetBios",
-                                'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios"
-                            }
                         },
-                    }
+                    },
                 }
-                with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request:
-                    mock_post_request.return_value = {'ret': True}
+                with patch.object(module.XCCRedfishUtils, "post_request") as mock_post_request:
+                    mock_post_request.return_value = {"ret": True}
 
                     with self.assertRaises(AnsibleFailJson) as result:
                         module.main()
 
     def test_module_command_PostResource_fail_when_requestbody_mismatch_with_data_from_actioninfo_uri(self):
-        with set_module_args({
-            'category': 'Raw',
-            'command': 'PostResource',
-            'baseuri': '10.245.39.251',
-            'username': 'USERID',
-            'password': 'PASSW0RD=21',
-            'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword',
-            'request_body': {'PasswordName': 'UefiAdminPassword', 'NewPassword': 'PASSW0RD=='}
-        }):
-            with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+        with set_module_args(
+            {
+                "category": "Raw",
+                "command": "PostResource",
+                "baseuri": "10.245.39.251",
+                "username": "USERID",
+                "password": "PASSW0RD=21",
+                "resource_uri": "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+                "request_body": {"PasswordName": "UefiAdminPassword", "NewPassword": "PASSW0RD=="},
+            }
+        ):
+            with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request:
                 mock_get_request.return_value = {
-                    'ret': True,
-                    'data': {
-                        'Parameters': [],
-                        'Actions': {
-                            '#Bios.ChangePassword': {
-                                '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
-                                'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
-                                'title': "ChangePassword",
-                                'PasswordName@Redfish.AllowableValues': [
-                                    "UefiAdminPassword",
-                                    "UefiPowerOnPassword"
-                                ]
+                    "ret": True,
+                    "data": {
+                        "Parameters": [],
+                        "Actions": {
+                            "#Bios.ChangePassword": {
+                                "@Redfish.ActionInfo": "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
+                                "target": "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+                                "title": "ChangePassword",
+                                "PasswordName@Redfish.AllowableValues": ["UefiAdminPassword", "UefiPowerOnPassword"],
+                            },
+                            "#Bios.ResetBios": {
+                                "title": "ResetBios",
+                                "target": "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios",
                             },
-                            '#Bios.ResetBios': {
-                                'title': "ResetBios",
-                                'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios"
-                            }
                         },
-                    }
+                    },
                 }
-                with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request:
-                    mock_post_request.return_value = {'ret': True}
+                with patch.object(module.XCCRedfishUtils, "post_request") as mock_post_request:
+                    mock_post_request.return_value = {"ret": True}
 
                     with self.assertRaises(AnsibleFailJson) as result:
                         module.main()
 
     def test_module_command_PostResource_fail_when_get_return_false(self):
-        with set_module_args({
-            'category': 'Raw',
-            'command': 'PostResource',
-            'baseuri': '10.245.39.251',
-            'username': 'USERID',
-            'password': 'PASSW0RD=21',
-            'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword',
-            'request_body': {'PasswordName': 'UefiAdminPassword', 'NewPassword': 'PASSW0RD=='}
-        }):
-            with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
-                mock_get_request.return_value = {'ret': False, 'msg': '404 error'}
-
-                with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request:
-                    mock_post_request.return_value = {'ret': True}
+        with set_module_args(
+            {
+                "category": "Raw",
+                "command": "PostResource",
+                "baseuri": "10.245.39.251",
+                "username": "USERID",
+                "password": "PASSW0RD=21",
+                "resource_uri": "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+                "request_body": {"PasswordName": "UefiAdminPassword", "NewPassword": "PASSW0RD=="},
+            }
+        ):
+            with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request:
+                mock_get_request.return_value = {"ret": False, "msg": "404 error"}
+
+                with patch.object(module.XCCRedfishUtils, "post_request") as mock_post_request:
+                    mock_post_request.return_value = {"ret": True}
 
                     with self.assertRaises(AnsibleFailJson) as result:
                         module.main()
 
     def test_module_command_PostResource_fail_when_post_return_false(self):
-        with set_module_args({
-            'category': 'Raw',
-            'command': 'PostResource',
-            'baseuri': '10.245.39.251',
-            'username': 'USERID',
-            'password': 'PASSW0RD=21',
-            'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios',
-            'request_body': {}
-        }):
-            with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+        with set_module_args(
+            {
+                "category": "Raw",
+                "command": "PostResource",
+                "baseuri": "10.245.39.251",
+                "username": "USERID",
+                "password": "PASSW0RD=21",
+                "resource_uri": "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios",
+                "request_body": {},
+            }
+        ):
+            with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request:
                 mock_get_request.return_value = {
-                    'ret': True,
-                    'data': {
-                        'Actions': {
-                            '#Bios.ChangePassword': {
-                                '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
-                                'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
-                                'title': "ChangePassword",
-                                'PasswordName@Redfish.AllowableValues': [
-                                    "UefiAdminPassword",
-                                    "UefiPowerOnPassword"
-                                ]
+                    "ret": True,
+                    "data": {
+                        "Actions": {
+                            "#Bios.ChangePassword": {
+                                "@Redfish.ActionInfo": "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
+                                "target": "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+                                "title": "ChangePassword",
+                                "PasswordName@Redfish.AllowableValues": ["UefiAdminPassword", "UefiPowerOnPassword"],
+                            },
+                            "#Bios.ResetBios": {
+                                "title": "ResetBios",
+                                "target": "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios",
                             },
-                            '#Bios.ResetBios': {
-                                'title': "ResetBios",
-                                'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios"
-                            }
                         },
-                    }
+                    },
                 }
-                with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request:
-                    mock_post_request.return_value = {'ret': False, 'msg': '500 internal error'}
+                with patch.object(module.XCCRedfishUtils, "post_request") as mock_post_request:
+                    mock_post_request.return_value = {"ret": False, "msg": "500 internal error"}
 
                     with self.assertRaises(AnsibleFailJson) as result:
                         module.main()
 
     def test_module_command_PostResource_pass(self):
-        with set_module_args({
-            'category': 'Raw',
-            'command': 'PostResource',
-            'baseuri': '10.245.39.251',
-            'username': 'USERID',
-            'password': 'PASSW0RD=21',
-            'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios',
-            'request_body': {}
-        }):
-            with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+        with set_module_args(
+            {
+                "category": "Raw",
+                "command": "PostResource",
+                "baseuri": "10.245.39.251",
+                "username": "USERID",
+                "password": "PASSW0RD=21",
+                "resource_uri": "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios",
+                "request_body": {},
+            }
+        ):
+            with patch.object(module.XCCRedfishUtils, "get_request") as mock_get_request:
                 mock_get_request.return_value = {
-                    'ret': True,
-                    'data': {
-                        'Actions': {
-                            '#Bios.ChangePassword': {
-                                '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
-                                'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
-                                'title': "ChangePassword",
-                                'PasswordName@Redfish.AllowableValues': [
-                                    "UefiAdminPassword",
-                                    "UefiPowerOnPassword"
-                                ]
+                    "ret": True,
+                    "data": {
+                        "Actions": {
+                            "#Bios.ChangePassword": {
+                                "@Redfish.ActionInfo": "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
+                                "target": "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+                                "title": "ChangePassword",
+                                "PasswordName@Redfish.AllowableValues": ["UefiAdminPassword", "UefiPowerOnPassword"],
+                            },
+                            "#Bios.ResetBios": {
+                                "title": "ResetBios",
+                                "target": "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios",
                             },
-                            '#Bios.ResetBios': {
-                                'title': "ResetBios",
-                                'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios"
-                            }
                         },
-                    }
+                    },
                 }
-                with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request:
-                    mock_post_request.return_value = {'ret': True, 'msg': 'post success'}
+                with patch.object(module.XCCRedfishUtils, "post_request") as mock_post_request:
+                    mock_post_request.return_value = {"ret": True, "msg": "post success"}
 
                     with self.assertRaises(AnsibleExitJson) as result:
                         module.main()
diff --git a/tests/unit/plugins/modules/test_xenserver_guest_info.py b/tests/unit/plugins/modules/test_xenserver_guest_info.py
index 30c73812abb..bb278e83c3b 100644
--- a/tests/unit/plugins/modules/test_xenserver_guest_info.py
+++ b/tests/unit/plugins/modules/test_xenserver_guest_info.py
@@ -12,7 +12,7 @@
 from .xenserver_common import fake_xenapi_ref
 from .xenserver_conftest import XenAPI, xenserver_guest_info  # noqa: F401, pylint: disable=unused-import
 
-pytestmark = pytest.mark.usefixtures('patch_ansible_module')
+pytestmark = pytest.mark.usefixtures("patch_ansible_module")
 
 
 testcase_module_params = {
@@ -45,7 +45,12 @@
 }
 
 
-@pytest.mark.parametrize('patch_ansible_module', testcase_module_params['params'], ids=testcase_module_params['ids'], indirect=True)  # type: ignore
+@pytest.mark.parametrize(
+    "patch_ansible_module",
+    testcase_module_params["params"],  # type: ignore
+    ids=testcase_module_params["ids"],  # type: ignore
+    indirect=True,
+)
 def test_xenserver_guest_info(mocker, capfd, XenAPI, xenserver_guest_info):
     """
     Tests regular module invocation including parsing and propagation of
@@ -53,20 +58,30 @@ def test_xenserver_guest_info(mocker, capfd, XenAPI, xenserver_guest_info):
     """
     fake_vm_facts = {"fake-vm-fact": True}
 
-    mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_info.get_object_ref', return_value=None)
-    mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_info.gather_vm_params', return_value=None)
-    mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_info.gather_vm_facts', return_value=fake_vm_facts)
+    mocker.patch(
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_info.get_object_ref", return_value=None
+    )
+    mocker.patch(
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_info.gather_vm_params", return_value=None
+    )
+    mocker.patch(
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_info.gather_vm_facts",
+        return_value=fake_vm_facts,
+    )
 
-    mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+    mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True)
 
     mocked_returns = {
-        "pool.get_all.return_value": [fake_xenapi_ref('pool')],
-        "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+        "pool.get_all.return_value": [fake_xenapi_ref("pool")],
+        "pool.get_default_SR.return_value": fake_xenapi_ref("SR"),
     }
 
     mocked_xenapi.configure_mock(**mocked_returns)
 
-    mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+    mocker.patch(
+        "ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version",
+        return_value=[7, 2, 0],
+    )
 
     with pytest.raises(SystemExit):
         xenserver_guest_info.main()
@@ -74,4 +89,4 @@ def test_xenserver_guest_info(mocker, capfd, XenAPI, xenserver_guest_info):
     out, err = capfd.readouterr()
     result = json.loads(out)
 
-    assert result['instance'] == fake_vm_facts
+    assert result["instance"] == fake_vm_facts
diff --git a/tests/unit/plugins/modules/test_xenserver_guest_powerstate.py b/tests/unit/plugins/modules/test_xenserver_guest_powerstate.py
index 811d44700d3..640fe80b033 100644
--- a/tests/unit/plugins/modules/test_xenserver_guest_powerstate.py
+++ b/tests/unit/plugins/modules/test_xenserver_guest_powerstate.py
@@ -126,46 +126,60 @@
 }
 
 
-@pytest.mark.parametrize('power_state', testcase_set_powerstate['params'], ids=testcase_set_powerstate['ids'])  # type: ignore
-def test_xenserver_guest_powerstate_set_power_state(mocker, fake_ansible_module, XenAPI, xenserver_guest_powerstate, power_state):
+@pytest.mark.parametrize("power_state", testcase_set_powerstate["params"], ids=testcase_set_powerstate["ids"])  # type: ignore
+def test_xenserver_guest_powerstate_set_power_state(
+    mocker, fake_ansible_module, XenAPI, xenserver_guest_powerstate, power_state
+):
     """Tests power state change handling."""
-    mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.get_object_ref',
-                 return_value=fake_xenapi_ref('VM'))
-    mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_params',
-                 return_value={"power_state": "Someoldstate"})
+    mocker.patch(
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.get_object_ref",
+        return_value=fake_xenapi_ref("VM"),
+    )
+    mocker.patch(
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_params",
+        return_value={"power_state": "Someoldstate"},
+    )
     mocked_set_vm_power_state = mocker.patch(
-        'ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.set_vm_power_state',
-        return_value=power_state)
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.set_vm_power_state",
+        return_value=power_state,
+    )
 
-    mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+    mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True)
 
     mocked_returns = {
-        "pool.get_all.return_value": [fake_xenapi_ref('pool')],
-        "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+        "pool.get_all.return_value": [fake_xenapi_ref("pool")],
+        "pool.get_default_SR.return_value": fake_xenapi_ref("SR"),
     }
 
     mocked_xenapi.configure_mock(**mocked_returns)
 
-    mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+    mocker.patch(
+        "ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version",
+        return_value=[7, 2, 0],
+    )
 
-    fake_ansible_module.params.update({
-        "name": "somename",
-        "uuid": "someuuid",
-        "state_change_timeout": 1,
-    })
+    fake_ansible_module.params.update(
+        {
+            "name": "somename",
+            "uuid": "someuuid",
+            "state_change_timeout": 1,
+        }
+    )
 
     vm = xenserver_guest_powerstate.XenServerVM(fake_ansible_module)
     state_changed = vm.set_power_state(None)
 
-    mocked_set_vm_power_state.assert_called_once_with(fake_ansible_module, fake_xenapi_ref('VM'), None, 1)
+    mocked_set_vm_power_state.assert_called_once_with(fake_ansible_module, fake_xenapi_ref("VM"), None, 1)
 
     assert state_changed == power_state[0]
-    assert vm.vm_params['power_state'] == power_state[1].capitalize()
+    assert vm.vm_params["power_state"] == power_state[1].capitalize()
 
 
-@pytest.mark.parametrize('patch_ansible_module',
-                         testcase_module_params_state_present['params'],  # type: ignore
-                         ids=testcase_module_params_state_present['ids'],  # type: ignore
-                         indirect=True)
+@pytest.mark.parametrize(
+    "patch_ansible_module",
+    testcase_module_params_state_present["params"],  # type: ignore
+    ids=testcase_module_params_state_present["ids"],  # type: ignore
+    indirect=True,
+)
 def test_xenserver_guest_powerstate_present(mocker, patch_ansible_module, capfd, XenAPI, xenserver_guest_powerstate):
     """
     Tests regular module invocation including parsing and propagation of
@@ -173,28 +187,40 @@ def test_xenserver_guest_powerstate_present(mocker, patch_ansible_module, capfd,
     """
     fake_vm_facts = {"fake-vm-fact": True}
 
-    mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.get_object_ref',
-                 return_value=fake_xenapi_ref('VM'))
-    mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_params', return_value={})
-    mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_facts',
-                 return_value=fake_vm_facts)
+    mocker.patch(
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.get_object_ref",
+        return_value=fake_xenapi_ref("VM"),
+    )
+    mocker.patch(
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_params",
+        return_value={},
+    )
+    mocker.patch(
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_facts",
+        return_value=fake_vm_facts,
+    )
     mocked_set_vm_power_state = mocker.patch(
-        'ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.set_vm_power_state',
-        return_value=(True, "somenewstate"))
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.set_vm_power_state",
+        return_value=(True, "somenewstate"),
+    )
     mocked_wait_for_vm_ip_address = mocker.patch(
-        'ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.wait_for_vm_ip_address',
-        return_value={})
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.wait_for_vm_ip_address",
+        return_value={},
+    )
 
-    mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+    mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True)
 
     mocked_returns = {
-        "pool.get_all.return_value": [fake_xenapi_ref('pool')],
-        "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+        "pool.get_all.return_value": [fake_xenapi_ref("pool")],
+        "pool.get_default_SR.return_value": fake_xenapi_ref("SR"),
     }
 
     mocked_xenapi.configure_mock(**mocked_returns)
 
-    mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+    mocker.patch(
+        "ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version",
+        return_value=[7, 2, 0],
+    )
 
     with pytest.raises(SystemExit):
         xenserver_guest_powerstate.main()
@@ -204,14 +230,16 @@ def test_xenserver_guest_powerstate_present(mocker, patch_ansible_module, capfd,
     mocked_set_vm_power_state.assert_not_called()
     mocked_wait_for_vm_ip_address.assert_not_called()
 
-    assert result['changed'] is False
-    assert result['instance'] == fake_vm_facts
+    assert result["changed"] is False
+    assert result["instance"] == fake_vm_facts
 
 
-@pytest.mark.parametrize('patch_ansible_module',
-                         testcase_module_params_state_other['params'],  # type: ignore
-                         ids=testcase_module_params_state_other['ids'],  # type: ignore
-                         indirect=True)
+@pytest.mark.parametrize(
+    "patch_ansible_module",
+    testcase_module_params_state_other["params"],  # type: ignore
+    ids=testcase_module_params_state_other["ids"],  # type: ignore
+    indirect=True,
+)
 def test_xenserver_guest_powerstate_other(mocker, patch_ansible_module, capfd, XenAPI, xenserver_guest_powerstate):
     """
     Tests regular module invocation including parsing and propagation of
@@ -220,27 +248,40 @@ def test_xenserver_guest_powerstate_other(mocker, patch_ansible_module, capfd, X
     """
     fake_vm_facts = {"fake-vm-fact": True}
 
-    mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.get_object_ref',
-                 return_value=fake_xenapi_ref('VM'))
-    mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_params', return_value={})
-    mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_facts', return_value=fake_vm_facts)
+    mocker.patch(
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.get_object_ref",
+        return_value=fake_xenapi_ref("VM"),
+    )
+    mocker.patch(
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_params",
+        return_value={},
+    )
+    mocker.patch(
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_facts",
+        return_value=fake_vm_facts,
+    )
     mocked_set_vm_power_state = mocker.patch(
-        'ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.set_vm_power_state',
-        return_value=(True, "somenewstate"))
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.set_vm_power_state",
+        return_value=(True, "somenewstate"),
+    )
     mocked_wait_for_vm_ip_address = mocker.patch(
-        'ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.wait_for_vm_ip_address',
-        return_value={})
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.wait_for_vm_ip_address",
+        return_value={},
+    )
 
-    mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+    mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True)
 
     mocked_returns = {
-        "pool.get_all.return_value": [fake_xenapi_ref('pool')],
-        "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+        "pool.get_all.return_value": [fake_xenapi_ref("pool")],
+        "pool.get_default_SR.return_value": fake_xenapi_ref("SR"),
    }
 
     mocked_xenapi.configure_mock(**mocked_returns)
 
-    mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+    mocker.patch(
+        "ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version",
+        return_value=[7, 2, 0],
+    )
 
     with pytest.raises(SystemExit):
         xenserver_guest_powerstate.main()
@@ -250,14 +291,16 @@ def test_xenserver_guest_powerstate_other(mocker, patch_ansible_module, capfd, X
     mocked_set_vm_power_state.assert_called_once()
     mocked_wait_for_vm_ip_address.assert_not_called()
 
-    assert result['changed'] is True
-    assert result['instance'] == fake_vm_facts
+    assert result["changed"] is True
+    assert result["instance"] == fake_vm_facts
 
 
-@pytest.mark.parametrize('patch_ansible_module',
-                         testcase_module_params_wait['params'],  # type: ignore
-                         ids=testcase_module_params_wait['ids'],  # type: ignore
-                         indirect=True)
+@pytest.mark.parametrize(
+    "patch_ansible_module",
+    testcase_module_params_wait["params"],  # type: ignore
+    ids=testcase_module_params_wait["ids"],  # type: ignore
+    indirect=True,
+)
 def test_xenserver_guest_powerstate_wait(mocker, patch_ansible_module, capfd, XenAPI, xenserver_guest_powerstate):
     """
     Tests regular module invocation including parsing and propagation of
@@ -265,27 +308,40 @@ def test_xenserver_guest_powerstate_wait(mocker, patch_ansible_module, capfd, Xe
     """
     fake_vm_facts = {"fake-vm-fact": True}
 
-    mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.get_object_ref',
-                 return_value=fake_xenapi_ref('VM'))
-    mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_params', return_value={})
-    mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_facts', return_value=fake_vm_facts)
+    mocker.patch(
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.get_object_ref",
+        return_value=fake_xenapi_ref("VM"),
+    )
+    mocker.patch(
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_params",
+        return_value={},
+    )
+    mocker.patch(
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_facts",
+        return_value=fake_vm_facts,
+    )
     mocked_set_vm_power_state = mocker.patch(
-        'ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.set_vm_power_state',
-        return_value=(True, "somenewstate"))
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.set_vm_power_state",
+        return_value=(True, "somenewstate"),
+    )
     mocked_wait_for_vm_ip_address = mocker.patch(
-        'ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.wait_for_vm_ip_address',
-        return_value={})
+        "ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.wait_for_vm_ip_address",
+        return_value={},
+    )
 
-    mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+    mocked_xenapi = mocker.patch.object(XenAPI.Session, "xenapi", create=True)
 
     mocked_returns = {
-        "pool.get_all.return_value": [fake_xenapi_ref('pool')],
-        "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+        "pool.get_all.return_value": [fake_xenapi_ref("pool")],
+        "pool.get_default_SR.return_value": fake_xenapi_ref("SR"),
     }
 
     mocked_xenapi.configure_mock(**mocked_returns)
 
-    mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+    mocker.patch(
+        "ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version",
+        return_value=[7, 2, 0],
+    )
 
     with pytest.raises(SystemExit):
         xenserver_guest_powerstate.main()
@@ -294,4 +350,4 @@ def test_xenserver_guest_powerstate_wait(mocker, patch_ansible_module, capfd, Xe
     result = json.loads(out)
 
     mocked_wait_for_vm_ip_address.assert_called_once()
-    assert result['instance'] == fake_vm_facts
+    assert result["instance"] == fake_vm_facts
diff --git a/tests/unit/plugins/modules/uthelper.py b/tests/unit/plugins/modules/uthelper.py
index eb5dba2f0db..2f0435da22f 100644
--- a/tests/unit/plugins/modules/uthelper.py
+++ b/tests/unit/plugins/modules/uthelper.py
@@ -30,14 +30,16 @@ def from_file(ansible_module, test_module, test_spec_filehandle, mocks=None):
     @staticmethod
     def from_module(ansible_module, test_module_name, mocks=None):
         test_module = sys.modules[test_module_name]
-        extensions = ['.yaml', '.yml']
+        extensions = [".yaml", ".yml"]
         for ext in extensions:
-            test_spec_filename = test_module.__file__.replace('.py', ext)
+            test_spec_filename = test_module.__file__.replace(".py", ext)
             if os.path.exists(test_spec_filename):
                 with open(test_spec_filename, "r") as test_spec_filehandle:
                     return UTHelper.from_file(ansible_module, test_module, test_spec_filehandle, mocks=mocks)
 
-        raise Exception(f"Cannot find test case file for {test_module.__file__} with one of the extensions: {extensions}")
+        raise Exception(
+            f"Cannot find test case file for {test_module.__file__} with one of the extensions: {extensions}"
+        )
 
     def add_func_to_test_module(self, name, func):
         setattr(self.test_module, name, func)
@@ -54,7 +56,7 @@ def __init__(self, ansible_module, test_module, test_spec, mocks=None):
 
         self.mocks_map = {m.name: m for m in mocks} if mocks else {}
 
-        for spec_test_case in test_spec['test_cases']:
+        for spec_test_case in test_spec["test_cases"]:
             tc = ModuleTestCase.make_test_case(spec_test_case, test_module, self.mocks_map)
             self.test_cases.append(tc)
             self.fixtures.update(tc.fixtures)
@@ -66,7 +68,7 @@ def runner(self):
         return Runner(self.ansible_module.main)
 
     def set_test_func(self):
-        @pytest.mark.parametrize('test_case', self.test_cases, ids=[tc.id for tc in self.test_cases])
+        @pytest.mark.parametrize("test_case", self.test_cases, ids=[tc.id for tc in self.test_cases])
         @pytest.mark.usefixtures(*self.fixtures)
         def _test_module(mocker, capfd, patch_ansible_module_uthelper, test_case):
             """
@@ -125,8 +127,10 @@ def __init__(self, id, input, output, mocks, flags):
         self._fixtures = {}
 
     def __str__(self):
-        return (f"")
+        return (
+            f""
+        )
 
     def __repr__(self):
         return f"ModuleTestCase(id={self.id}, input={self.input}, output={self.output}, mocks={self.mocks!r}, flags={self.flags})"
@@ -138,7 +142,7 @@ def make_test_case(test_case_spec, test_module, mocks_map):
             input=test_case_spec.get("input", {}),
             output=test_case_spec.get("output", {}),
             mocks=test_case_spec.get("mocks", {}),
-            flags=test_case_spec.get("flags", {})
+            flags=test_case_spec.get("flags", {}),
         )
         tc.build_mocks(mocks_map)
         return tc
@@ -175,12 +179,13 @@ def setup_mocks(self, mocker):
     def check_testcase(self, results):
         print(f"testcase =\n{self!r}")
         print(f"results =\n{results}")
-        if 'exception' in results:
+        if "exception" in results:
             print(f"exception = \n{results['exception']}")
 
         for test_result in self.output:
-            assert results[test_result] == self.output[test_result], \
+            assert results[test_result] == self.output[test_result], (
                 f"'{test_result}': '{results[test_result]}' != '{self.output[test_result]}'"
+            )
 
     def check_mocks(self, test_case, results):
         for mock in self.mocks.values():
@@ -219,13 +224,14 @@ def fixtures(self):
         def patch_bin(mocker):
             def mockie(self_, path, *args, **kwargs):
                 return f"/testbin/{path}"
-            mocker.patch('ansible.module_utils.basic.AnsibleModule.get_bin_path', mockie)
+
+            mocker.patch("ansible.module_utils.basic.AnsibleModule.get_bin_path", mockie)
 
         return {"patch_bin": patch_bin}
 
     def setup(self, mocker):
        def _results():
-            for result in [(x['rc'], x['out'], x['err']) for x in self.mock_specs]:
+            for result in [(x["rc"], x["out"], x["err"]) for x in self.mock_specs]:
                 yield result
             raise Exception("testcase has not enough run_command calls")
@@ -237,14 +243,18 @@ def side_effect(self_, **kwargs):
                 raise Exception(f"rc = {result[0]}")
             return result
 
-        self.mock_run_cmd = mocker.patch('ansible.module_utils.basic.AnsibleModule.run_command', side_effect=side_effect)
+        self.mock_run_cmd = mocker.patch(
+            "ansible.module_utils.basic.AnsibleModule.run_command", side_effect=side_effect
+        )
 
     def check(self, test_case, results):
         call_args_list = [(item[0][0], item[1]) for item in self.mock_run_cmd.call_args_list]
-        expected_call_args_list = [(item['command'], item.get('environ', {})) for item in self.mock_specs]
+        expected_call_args_list = [(item["command"], item.get("environ", {})) for item in self.mock_specs]
         print(f"call args list =\n{call_args_list}")
         print(f"expected args list =\n{expected_call_args_list}")
 
-        assert self.mock_run_cmd.call_count == len(self.mock_specs), f"{self.mock_run_cmd.call_count} != {len(self.mock_specs)}"
+        assert self.mock_run_cmd.call_count == len(self.mock_specs), (
+            f"{self.mock_run_cmd.call_count} != {len(self.mock_specs)}"
+        )
 
         if self.mock_run_cmd.call_count:
             assert call_args_list == expected_call_args_list
diff --git a/tests/unit/plugins/modules/utils.py b/tests/unit/plugins/modules/utils.py
index d0c2a1d2455..4f86d1083d7 100644
--- a/tests/unit/plugins/modules/utils.py
+++ b/tests/unit/plugins/modules/utils.py
@@ -15,13 +15,13 @@
 
 @_contextlib.contextmanager
 def set_module_args(args):
-    if '_ansible_remote_tmp' not in args:
-        args['_ansible_remote_tmp'] = '/tmp'
-    if '_ansible_keep_remote_files' not in args:
-        args['_ansible_keep_remote_files'] = False
+    if "_ansible_remote_tmp" not in args:
+        args["_ansible_remote_tmp"] = "/tmp"
+    if "_ansible_keep_remote_files" not in args:
+        args["_ansible_keep_remote_files"] = False
 
-    serialized_args = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': args}))
-    with patch.object(basic, '_ANSIBLE_ARGS', serialized_args):
+    serialized_args = to_bytes(json.dumps({"ANSIBLE_MODULE_ARGS": args}))
+    with patch.object(basic, "_ANSIBLE_ARGS", serialized_args):
         yield
 
 
@@ -34,22 +34,21 @@ class AnsibleFailJson(Exception):
 
 
 def exit_json(*args, **kwargs):
-    if 'changed' not in kwargs:
-        kwargs['changed'] = False
+    if "changed" not in kwargs:
+        kwargs["changed"] = False
     raise AnsibleExitJson(kwargs)
 
 
 def fail_json(*args, **kwargs):
-    kwargs['failed'] = True
+    kwargs["failed"] = True
     raise AnsibleFailJson(kwargs)
 
 
 class ModuleTestCase(unittest.TestCase):
-
     def setUp(self):
         self.mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
         self.mock_module.start()
-        self.mock_sleep = patch('time.sleep')
+        self.mock_sleep = patch("time.sleep")
         self.mock_sleep.start()
         set_module_args({})
         self.addCleanup(self.mock_module.stop)
diff --git a/tests/unit/plugins/modules/xenserver_conftest.py b/tests/unit/plugins/modules/xenserver_conftest.py
index 3f74571face..6a9cc561e6f 100644
--- a/tests/unit/plugins/modules/xenserver_conftest.py
+++ b/tests/unit/plugins/modules/xenserver_conftest.py
@@ -16,7 +16,7 @@
 @pytest.fixture
 def fake_ansible_module(request):
     """Returns fake AnsibleModule with fake module params."""
-    if hasattr(request, 'param'):
+    if hasattr(request, "param"):
         return FakeAnsibleModule(request.param)
     else:
         params = {
@@ -38,12 +38,12 @@ def XenAPI():
 
     # First we use importlib.import_module() to import the module and assign
     # it to a local symbol.
-    fake_xenapi = importlib.import_module('ansible_collections.community.general.tests.unit.plugins.modules.FakeXenAPI')
+    fake_xenapi = importlib.import_module("ansible_collections.community.general.tests.unit.plugins.modules.FakeXenAPI")
 
     # Now we populate Python module cache with imported fake module using the
     # original module name (XenAPI). That way, any 'import XenAPI' statement
     # will just load already imported fake module from the cache.
-    sys.modules['XenAPI'] = fake_xenapi
+    sys.modules["XenAPI"] = fake_xenapi
 
     return fake_xenapi
diff --git a/tests/unit/plugins/plugin_utils/test_unsafe.py b/tests/unit/plugins/plugin_utils/test_unsafe.py
index 8d2bb854290..e4f62da0e32 100644
--- a/tests/unit/plugins/plugin_utils/test_unsafe.py
+++ b/tests/unit/plugins/plugin_utils/test_unsafe.py
@@ -21,14 +21,14 @@
 
 TEST_MAKE_UNSAFE = [
     (
-        _make_trusted('text'),
+        _make_trusted("text"),
         [],
         [
             (),
         ],
     ),
     (
-        _make_trusted('{{text}}'),
+        _make_trusted("{{text}}"),
         [
             (),
         ],
@@ -36,56 +36,58 @@
     ),
     (
         {
-            _make_trusted('skey'): _make_trusted('value'),
-            _make_trusted('ukey'): _make_trusted('{{value}}'),
+            _make_trusted("skey"): _make_trusted("value"),
+            _make_trusted("ukey"): _make_trusted("{{value}}"),
             1: [
-                _make_trusted('value'),
-                _make_trusted('{{value}}'),
+                _make_trusted("value"),
+                _make_trusted("{{value}}"),
                 {
-                    1.0: _make_trusted('{{value}}'),
-                    2.0: _make_trusted('value'),
+                    1.0: _make_trusted("{{value}}"),
+                    2.0: _make_trusted("value"),
                 },
             ],
         },
         [
-            ('ukey', ),
+            ("ukey",),
             (1, 1),
             (1, 2, 1.0),
         ],
         [
-            ('skey', ),
+            ("skey",),
             (1, 0),
             (1, 2, 2.0),
         ],
     ),
     (
-        [_make_trusted('value'), _make_trusted('{{value}}')],
+        [_make_trusted("value"), _make_trusted("{{value}}")],
         [
-            (1, ),
+            (1,),
         ],
         [
-            (0, ),
+            (0,),
         ],
     ),
 ]
 
 if not SUPPORTS_DATA_TAGGING:
-    TEST_MAKE_UNSAFE.extend([
-        (
-            _make_trusted(b"text"),
-            [],
-            [
-                (),
-            ],
-        ),
-        (
-            _make_trusted(b"{{text}}"),
-            [
-                (),
-            ],
-            [],
-        ),
-    ])
+    TEST_MAKE_UNSAFE.extend(
+        [
+            (
+                _make_trusted(b"text"),
+                [],
+                [
+                    (),
+                ],
+            ),
+            (
+                _make_trusted(b"{{text}}"),
+                [
+                    (),
+                ],
+                [],
+            ),
+        ]
+    )
 
 
 @pytest.mark.parametrize("value, check_unsafe_paths, check_safe_paths", TEST_MAKE_UNSAFE)
@@ -107,16 +109,16 @@ def test_make_unsafe(value, check_unsafe_paths, check_safe_paths):
 def test_make_unsafe_idempotence():
     assert make_unsafe(None) is None
 
-    unsafe_str = _make_untrusted('{{test}}')
+    unsafe_str = _make_untrusted("{{test}}")
     assert id(make_unsafe(unsafe_str)) == id(unsafe_str)
 
-    safe_str = _make_trusted('{{test}}')
+    safe_str = _make_trusted("{{test}}")
     assert id(make_unsafe(safe_str)) != id(safe_str)
 
 
 def test_make_unsafe_dict_key():
     value = {
-        _make_trusted('test'): 2,
+        _make_trusted("test"): 2,
     }
     if not SUPPORTS_DATA_TAGGING:
         value[_make_trusted(b"test")] = 1
@@ -126,7 +128,7 @@ def test_make_unsafe_dict_key():
         assert _is_trusted(obj)
 
     value = {
-        _make_trusted('{{test}}'): 2,
+        _make_trusted("{{test}}"): 2,
     }
     if not SUPPORTS_DATA_TAGGING:
         value[_make_trusted(b"{{test}}")] = 1
@@ -137,7 +139,7 @@ def test_make_unsafe_dict_key():
 
 
 def test_make_unsafe_set():
-    value = set([_make_trusted('test')])
+    value = set([_make_trusted("test")])
     if not SUPPORTS_DATA_TAGGING:
         value.add(_make_trusted(b"test"))
     unsafe_value = make_unsafe(value)
@@ -145,7 +147,7 @@ def test_make_unsafe_set():
     for obj in unsafe_value:
         assert _is_trusted(obj)
 
-    value = set([_make_trusted('{{test}}')])
+    value = set([_make_trusted("{{test}}")])
     if not SUPPORTS_DATA_TAGGING:
         value.add(_make_trusted(b"{{test}}"))
     unsafe_value = make_unsafe(value)