diff --git a/plugins/modules/pvesh_get.py b/plugins/modules/pvesh_get.py
index 84f8895..a547980 100755
--- a/plugins/modules/pvesh_get.py
+++ b/plugins/modules/pvesh_get.py
@@ -1,26 +1,74 @@
 #!/usr/bin/python
+#
+# Copyright (c) 2016 Musee Ullah
+# Author: Musee Ullah (@lae)
+# Forked from https://github.com/lae/ansible-role-proxmox
+#
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.0',
+    'status': ['stableinterface'],
+    'supported_by': 'lae'
+}
+
+DOCUMENTATION = '''
+---
+module: pvesh_get
+
+short_description: Uses pvesh to query Proxmox API
+
+options:
+  query:
+    required: true
+    aliases: [ "name" ]
+    description:
+      - Specifies what resource to query
+
+author:
+  - Musee Ullah (@lae)
+'''
+
+EXAMPLES = '''
+- name: Query cluster status
+  pvesh_get:
+    query: cluster/status
+- name: Collect a list of running LXC containers for some hosts
+  pvesh_get:
+    query: "nodes/{{ item }}/lxc"
+  with_items:
+    - node01
+    - node02
+    - node03
+'''
+
+RETURN = '''
+response:
+  description: JSON response from pvesh provided by a query
+  type: json
+'''
+
 from ansible.module_utils.basic import AnsibleModule
-# import ansible_collections.aybarsm.linux.plugins.module_utils.pvecm as pvecm
+from ansible_collections.aybarsm.linux.plugins.module_utils.pvesh import ProxmoxShellError
+import ansible_collections.aybarsm.linux.plugins.module_utils.pvesh as pvesh
 
 def main():
     module = AnsibleModule(
         argument_spec = dict(
-            path=dict(type='path', required=True),
-            options=dict(type='dict', required=False),
-            noproxy=dict(type='bool', required=False, default=False),
-            human_readable=dict(type='bool', required=False, default=False),
-            noborder=dict(type='bool', required=False, default=False),
-            noheader=dict(type='bool', required=False, default=False),
-            output_format=dict(type='str', required=False, default='json', choices=['json', 'json-pretty', 'text', 'yaml']),
+            query=dict(type='str', required=True, aliases=['name']),
         ),
         supports_check_mode=True
     )
 
-    # rc, out, err = pvecm.status(module)
-
     result = {"changed": False}
-    result['response'] = module.params
+
+    try:
+        result['response'] = pvesh.get(module.params['query'])
+    except ProxmoxShellError as e:
+        if e.data:
+            result["response"] = e.data
+
+        module.fail_json(msg=e.message, status_code=e.status_code, **result)
 
     module.exit_json(**result)
diff --git a/roles/auth/tasks/ssh_renew_host_keys.yml b/roles/auth/tasks/ssh_renew_host_keys.yml
index 997aeb1..d989aae 100644
--- a/roles/auth/tasks/ssh_renew_host_keys.yml
+++ b/roles/auth/tasks/ssh_renew_host_keys.yml
@@ -6,7 +6,7 @@
         - ansible__role_enabled | default(false) | bool
         - ansible__manage_local_facts | default(false) | bool
         - ansible__local_fact_name is defined
-      fail_msg: "aybarsm.linux.ansible role, local fact management must be enabled and local fact name needs to be defined. \
+      fail_msg: "aybarsm.helper.ansible role, local fact management must be enabled and local fact name needs to be defined. \
        (ansible__role_enabled: true & ansible__manage_local_facts: true & ansible__local_fact_name is defined)"
      success_msg: "Ansible requirements are met for one time ssh host key renewal."
     register: auth__ssh_host_keys_assert_ansible_role
@@ -52,7 +52,7 @@
 
 - name: Include update local facts tasks when ssh host keys renewed
   ansible.builtin.include_role:
-    name: aybarsm.linux.ansible
+    name: aybarsm.helper.ansible
     tasks_from: update_local_facts.yml
   vars:
     ansible__local_fact_updates:
diff --git a/roles/network/defaults/main.yml b/roles/network/defaults/main.yml
index 0983b56..4c4ba4f 100644
--- a/roles/network/defaults/main.yml
+++ b/roles/network/defaults/main.yml
@@ -7,6 +7,7 @@ network__manage_hostname: false
 network__manage_hosts: false
 network__manage_iptables: false
 network__manage_udev_rules: false
+network__manage_gai: false
 
 network__default: []
 network__group: []
@@ -98,9 +99,25 @@ network__hosts_auto_discovery: false
 network__hosts_auto_discovery_inventories: 'webservers:&atlanta'
 ##### END: network hosts vars
+##### BEGIN: network iptables vars
+network__iptables_comment_prefix: ''
+network__iptables_comment_suffix: ''
+network__iptables_cleanup_v4: false
+network__iptables_cleanup_v4_cleanup_regex: []
+network__iptables_cleanup_v4_state_file: /tmp/iptables_state.v4
+network__iptables_cleanup_v6: false
+network__iptables_cleanup_v6_cleanup_regex: []
+network__iptables_cleanup_v6_state_file: /tmp/iptables_state.v6
+##### END: network iptables vars
+
 
 ##### BEGIN: network udev rules vars
 network__udev_rules_file: /etc/udev/rules.d/70-persistent-net.rules
 network__udev_rules_template: etc/udev/rules.d/70-persistent-net.rules.j2
 network__udev_rules_backup: true
 network__udev_rules_remove: false
+##### END: network udev rules vars
+
+##### BEGIN: network gai vars
+# Prefer IPv4 over IPv6 > precedence ::ffff:0:0/96 100
+network__gai_prefer_ipv4: false
 ##### END: network udev rules vars
\ No newline at end of file
diff --git a/roles/network/tasks/iptables.yml b/roles/network/tasks/iptables.yml
index d607d41..ef5d9db 100644
--- a/roles/network/tasks/iptables.yml
+++ b/roles/network/tasks/iptables.yml
@@ -1,11 +1,27 @@
 ---
+- name: Include iptables IPv4 cleanup tasks
+  ansible.builtin.include_tasks: iptables_cleanup.yml
+  vars:
+    iptables_cleanup_ip_ver: ipv4
+    iptables_cleanup_state_file: "{{ network__iptables_cleanup_v4_state_file }}"
+    iptables_cleanup_regex: "{{ network__iptables_cleanup_v4_cleanup_regex }}"
+  when: network__iptables_cleanup_v4 | bool and network__iptables_cleanup_v4_cleanup_regex | length > 0
+
+- name: Include iptables IPv6 cleanup tasks
+  ansible.builtin.include_tasks: iptables_cleanup.yml
+  vars:
+    iptables_cleanup_ip_ver: ipv6
+    iptables_cleanup_state_file: "{{ network__iptables_cleanup_v6_state_file }}"
+    iptables_cleanup_regex: "{{ network__iptables_cleanup_v6_cleanup_regex }}"
+  when: network__iptables_cleanup_v6 | bool and network__iptables_cleanup_v6_cleanup_regex | length > 0
+
 - name: Deploy iptables configuration
   become: true
   ansible.builtin.iptables:
     action: "{{ item.action | default(omit) }}"
     chain: "{{ item.chain | default(omit) }}"
     chain_management: "{{ item.chain_management | default(omit) }}"
-    comment: "{{ item.comment | default(omit) }}"
+    comment: "{{ rule_comment | default(omit, true) }}"
     ctstate: "{{ item.ctstate | default(omit) }}"
     destination: "{{ item.destination | default(omit) }}"
     destination_port: "{{ item.destination_port | default(omit) }}"
@@ -48,6 +64,11 @@
     to_source: "{{ item.to_source | default(omit) }}"
     uid_owner: "{{ item.uid_owner | default(omit) }}"
     wait: "{{ item.wait | default(omit) }}"
+  vars:
+    rule_comment: "{{ [(network__iptables_comment_prefix | default('')), (item.comment | default('')), (network__iptables_comment_suffix | default(''))] |
+      join(' ') | trim }}"
   loop: "{{ network__iptables_all }}"
   register: network__iptables_deploy
   when: network__iptables_all | default([]) | length > 0
+
+#TODO: Iptables cleanup regex
diff --git a/roles/network/tasks/iptables_cleanup.yml b/roles/network/tasks/iptables_cleanup.yml
new file mode 100644
index 0000000..fbb0972
--- /dev/null
+++ b/roles/network/tasks/iptables_cleanup.yml
@@ -0,0 +1,26 @@
+---
+- name: "Cleanup iptables {{ iptables_cleanup_ip_ver }} rules"
+  block:
+    - name: "Save iptables {{ iptables_cleanup_ip_ver }} rules"
+      community.general.iptables_state:
+        ip_version: "{{ iptables_cleanup_ip_ver }}"
+        path: "{{ iptables_cleanup_state_file }}"
+        state: saved
+    - name: "Cleanup iptables {{ iptables_cleanup_ip_ver }} rules"
+      ansible.builtin.lineinfile:
+        path: "{{ iptables_cleanup_state_file }}"
+        regexp: "{{ item }}"
+        state: absent
+      loop: "{{ iptables_cleanup_regex }}"
+    - name: "Assert check mode for restoring iptables {{ iptables_cleanup_ip_ver }} rules"
+      ansible.builtin.assert:
+        that: true
+        success_msg: "{{ 'Check mode is %s. Iptables rule restore errors will %s.' | format(*format_args) }}"
+      vars:
+        format_args: "{{ ['on', 'be ignored'] if ansible_check_mode else ['off', 'not be ignored'] }}"
+    - name: "Restore iptables {{ iptables_cleanup_ip_ver }} rules"
+      community.general.iptables_state:
+        ip_version: "{{ iptables_cleanup_ip_ver }}"
+        path: "{{ iptables_cleanup_state_file }}"
+        state: restored
+      ignore_errors: "{{ ansible_check_mode }}"
diff --git a/roles/network/templates/etc/network/interfaces.j2 b/roles/network/templates/etc/network/interfaces.j2
index 4bb1608..92be53b 100644
--- a/roles/network/templates/etc/network/interfaces.j2
+++ b/roles/network/templates/etc/network/interfaces.j2
@@ -39,6 +39,9 @@ iface {{ iface.name }} {{ addr_family }} {{ ifaceMethod }}
 {{ ifaceParams | aybarsm.helper.to_querystring('name', 'value', ' ', '\n') | indent(4, true) }}
 {% endif %}
 {% endif %}
+{% if iface.comment is defined %}
+{{ iface.comment | comment }}
+{% endif %}
 {% endif %}
 {% endfor %}
 
diff --git a/roles/network/vars/main.yml b/roles/network/vars/main.yml
index 1436814..2a4ecc2 100644
--- a/roles/network/vars/main.yml
+++ b/roles/network/vars/main.yml
@@ -1,29 +1,26 @@
-network__all: "{{ (network__host + network__group + network__default) | selectattr('type', 'defined') | rejectattr('entry__skip', 'defined') }}"
+network__all: "{{ (network__host + network__group + network__default) |
+  map('aybarsm.helper.combine_reverse', {'keep__entry': true, 'skip__entry': false}) |
+  rejectattr('keep__entry', 'eq', false) | rejectattr('skip__entry', 'eq', true) | rejectattr('type', 'undefined') |
+  aybarsm.helper.all_except(['keep__entry', 'skip__entry']) }}"
 
 ##### BEGIN: network systemd vars
 __network_systemd_available_change_modules: ['systemd_service', 'command']
 __network__systemd_link_name: '{"type":"systemd","name":"__SYSTEMD_LINK_NAME__","children":[{"name":"Match","children":[{"name":"MACAddress","value":"__MAC_ADDRESS__"},{"name":"Type","value":"ether"}]},{"name":"Link","children":[{"name":"NamePolicy","value":""},{"name":"Name","value":"__LINK_NAME__"}]}]}'
 
-# REVIEW: There is room for optimisation
-__network__systemd_configs: >-
-  {%- set __cfgs = [] -%}
-  {%- for cfg in network__all | reverse if cfg -%}
-    {%- set __cfgs_grp = (cfg | selectattr('type', 'eq', 'systemd') |
-      selectattr('name', 'defined') | selectattr('name', 'search', '\\.(network|link|netdev)$') | selectattr('children', 'defined')) -%}
-    {%- set __cfgs_rnm = (cfg | selectattr('type', 'eq', 'systemd_link_rename') |
-      selectattr('name', 'defined') | selectattr('name', 'search', '^(?:[0-9]|[1-9][0-9])-[a-zA-Z]+\\d+\\.link$') |
-      selectattr('macaddress', 'defined') | selectattr('macaddress', 'ansible.utils.mac') | default([])) -%}
-    {%- if __cfgs_rnm | length > 0 -%}
-      {%- for cfgrnm in __cfgs_rnm if cfgrnm -%}
-        {%- set __cfgs_grp = __cfgs_grp.append(__network__systemd_link_name |
-          replace('__SYSTEMD_LINK_NAME__', cfgrnm.name) | replace('__MAC_ADDRESS__', cfgrnm.macaddress) |
-          replace('__LINK_NAME__', (cfgrnm.name | regex_replace('^(?:[0-9]|[1-9][0-9])-(.*).link$', '\\1'))) | from_json) -%}
-      {%- endfor -%}
-    {%- endif -%}
-    {%- set __cfgs = __cfgs.append(__cfgs_grp) -%}
+__network__systemd_rename: >-
+  {%- set __rtr = [] -%}
+  {%- set __cfgs = (network__all | selectattr('type', 'eq', 'systemd_link_rename') |
+    selectattr('name', 'defined') | selectattr('name', 'match', '^(?:[0-9]|[1-9][0-9])-[a-zA-Z]+\\d+\\.link$') |
+    selectattr('macaddress', 'defined') | selectattr('macaddress', 'match', '^([0-9A-Fa-f]{2}([:])?){5}([0-9A-Fa-f]{2})$') | default([])) -%}
+  {%- for cfg in __cfgs if cfg -%}
+    {%- set __rtr = __rtr.append(__network__systemd_link_name |
+      replace('__SYSTEMD_LINK_NAME__', cfg.name) | replace('__MAC_ADDRESS__', cfg.macaddress) |
+      replace('__LINK_NAME__', (cfg.name | regex_replace('^(?:[0-9]|[1-9][0-9])-(.*).link$', '\\1'))) | from_json) -%}
   {%- endfor -%}
-  {{ __cfgs }}
+  {{ __rtr }}
 
-network__systemd_all: "{{ __network__systemd_configs |
+network__systemd_all: "{{ ((network__all | selectattr('type', 'eq', 'systemd') |
+  selectattr('name', 'defined') | selectattr('name', 'search', '\\.(network|link|netdev)$') |
+  selectattr('children', 'defined') | default([])) + __network__systemd_rename) |
   reverse | community.general.lists_mergeby('name', recursive=true, list_merge='prepend') |
   aybarsm.helper.unique_recursive(attributes='name', recurse='children') }}"
@@ -44,14 +41,6 @@ network__systemd_cleanup_patterns: ["(?!{{ __network__systemd_cleanup_regex | jo
 
 ##### BEGIN: network interfaces vars
 __network_interfaces_available_change_modules: ['service', 'systemd_service', 'command']
-# REVIEW: There is room for optimisation
-__network__interfaces_configs: >-
-  {%- set __configs_prepared = [] -%}
-  {%- for config in [network__default, network__group, network__host] if config -%}
-    {%- set __configs_prepared = __configs_prepared.append(config | selectattr('type', 'defined') | selectattr('type', 'eq', 'interface') | selectattr('name', 'defined')) -%}
-  {%- endfor -%}
-  {{ __configs_prepared }}
-
 # Sort interfaces by name to avoid unneccessary changes
 network__interfaces_all: "{{ network__all | reverse |
   selectattr('type', 'eq', 'interface') | selectattr('name', 'defined') |
@@ -61,7 +50,7 @@ network__interfaces_all: "{{ network__all | reverse |
   sort(attribute='name') }}"
 
 ##### BEGIN: network sysctl vars
-network__sysctl_all: "{{ (network__host + network__group + network__default) |
+network__sysctl_all: "{{ network__all |
   selectattr('type', 'defined') | selectattr('type', 'equalto', 'sysctl') |
   aybarsm.helper.replace_aliases(__ansible.modules.ansible_posix_sysctl.aliases) |
   selectattr('name', 'defined') | selectattr('value', 'defined') | unique(attribute='name') }}"
@@ -84,11 +73,9 @@ __network__hosts_auto_discovered: >-
   {%- endif -%}
   {{ auto_discovered }}
 
-network__hosts_all: "{{ (network__host + network__group + network__default + __network__hosts_auto_discovered) |
-  selectattr('type', 'defined') | selectattr('type', 'equalto', 'host') |
-  selectattr('ip', 'defined') | selectattr('hostname', 'defined') |
-  aybarsm.helper.replace_aliases({'fqdn': ['hostname']}) |
-  aybarsm.helper.unique_recursive(attributes=['ip', 'hostname', 'fqdn']) }}"
+network__hosts_all: "{{ (network__all + __network__hosts_auto_discovered) |
+  selectattr('type', 'eq', 'host') | selectattr('ip', 'defined') | selectattr('hostname', 'defined') |
+  aybarsm.helper.replace_aliases({'fqdn': ['hostname']}) | unique(attribute='ip') }}"
 
 # Sort hosts by hostname to avoid unneccessary changes
 network__hosts_all_ipv4: "{{ network__hosts_all | selectattr('ip', 'ansible.utils.ipv4') | sort(attribute='hostname') }}"
@@ -96,12 +83,9 @@ network__hosts_all_ipv6: "{{ network__hosts_all | selectattr('ip', 'ansible.util
 ##### END: network hosts vars
 
 ##### BEGIN: network iptables vars
-network__iptables_all: "{{ (network__host + network__group + network__default) |
-  selectattr('type', 'defined') | selectattr('type', 'equalto', 'iptables') }}"
+network__iptables_all: "{{ network__all | selectattr('type', 'eq', 'iptables') }}"
 ##### END: network iptables vars
 
 ##### BEGIN: network udev rules vars
-network__udev_rules_all: "{{ (network__host + network__group + network__default) |
-  selectattr('type', 'defined') | selectattr('type', 'equalto', 'udev_rule') |
-  selectattr('entries', 'defined') }}"
+network__udev_rules_all: "{{ network__all | selectattr('type', 'eq', 'udev_rule') | selectattr('entries', 'defined') }}"
 ##### END: network udev rules vars
\ No newline at end of file
diff --git a/roles/package_manager/defaults/main.yml b/roles/package_manager/defaults/main.yml
index 27a2259..35775ee 100644
--- a/roles/package_manager/defaults/main.yml
+++ b/roles/package_manager/defaults/main.yml
@@ -36,6 +36,4 @@ package_manager__repo_list_use_regex: true
 
 package_manager__default: []
 package_manager__group: []
-package_manager__host: []
-
-package_manager__all: "{{ package_manager__host + package_manager__group + package_manager__default }}"
\ No newline at end of file
+package_manager__host: []
\ No newline at end of file
diff --git a/roles/package_manager/handlers/main.yml b/roles/package_manager/handlers/main.yml
index 3dd5e87..eae46d5 100644
--- a/roles/package_manager/handlers/main.yml
+++ b/roles/package_manager/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 # - name: Update local facts for the host upgrade
 #   ansible.builtin.include_role:
-#     name: aybarsm.linux.ansible
+#     name: aybarsm.helper.ansible
 #     tasks_from: update_local_facts.yml
 #     vars:
 #       ansible__local_fact_updates:
@@ -31,3 +31,14 @@
   when:
     - ansible_os_family | lower == 'redhat'
 
+- name: DEB APT Full Clean (autoclean, clean, autoremove and purge)
+  become: true
+  ansible.builtin.apt:
+    clean: true
+    autoclean: true
+    autoremove: true
+    purge: true
+  register: package_manager__deb_full_clean
+  listen: "package_manager__manager_full_clean"
+  when:
+    - ansible_os_family | lower == 'debian'
diff --git a/roles/package_manager/tasks/upgrade.yml b/roles/package_manager/tasks/upgrade.yml
index fc1933f..8a2fd5e 100644
--- a/roles/package_manager/tasks/upgrade.yml
+++ b/roles/package_manager/tasks/upgrade.yml
@@ -85,7 +85,7 @@
 
 - name: Include update local facts tasks when upgrade is successful
   ansible.builtin.include_role:
-    name: aybarsm.linux.ansible
+    name: aybarsm.helper.ansible
     tasks_from: update_local_facts.yml
   vars:
     ansible__local_fact_updates:
diff --git a/roles/package_manager/vars/common-pre.yml b/roles/package_manager/vars/common-pre.yml
index 1defb99..62d6005 100644
--- a/roles/package_manager/vars/common-pre.yml
+++ b/roles/package_manager/vars/common-pre.yml
@@ -39,11 +39,11 @@ __package_manager__upgrade_execute: >-
     {%- set exec_upgrade = {'decision': false, 'reason': 'Upgrade strategy is set to clean, but no default repo list configuration found.'} -%}
   {%- elif __package_manager__upgrade_once -%}
     {%- if ansible__local_fact_name is undefined -%}
-      {%- set exec_upgrade = {'decision': false, 'reason': 'Upgrade strategy once requires aybarsm.linux.ansible role imported.'} -%}
+      {%- set exec_upgrade = {'decision': false, 'reason': 'Upgrade strategy once requires aybarsm.helper.ansible role imported.'} -%}
     {%- elif ansible__role_enabled is undefined or not ansible__role_enabled -%}
-      {%- set exec_upgrade = {'decision': false, 'reason': 'Upgrade strategy once requires aybarsm.linux.ansible role enabled. (ansible__role_enabled: true)'} -%}
+      {%- set exec_upgrade = {'decision': false, 'reason': 'Upgrade strategy once requires aybarsm.helper.ansible role enabled. (ansible__role_enabled: true)'} -%}
     {%- elif not ansible__manage_local_facts -%}
-      {%- set exec_upgrade = {'decision': false, 'reason': 'Upgrade strategy once requires aybarsm.linux.ansible role local fact management enabled. (ansible__manage_local_facts: true)'} -%}
+      {%- set exec_upgrade = {'decision': false, 'reason': 'Upgrade strategy once requires aybarsm.helper.ansible role local fact management enabled. (ansible__manage_local_facts: true)'} -%}
     {%- elif ansible_local[ansible__local_fact_name].package_manager.upgrade is defined -%}
       {%- set exec_upgrade = {'decision': false, 'reason': 'Host already upgraded.'} -%}
     {%- endif -%}
diff --git a/roles/package_manager/vars/common.yml b/roles/package_manager/vars/common.yml
index 8e7347f..efaf461 100644
--- a/roles/package_manager/vars/common.yml
+++ b/roles/package_manager/vars/common.yml
@@ -1,3 +1,8 @@
+package_manager__all: "{{ (package_manager__host + package_manager__group + package_manager__default) |
+  map('aybarsm.helper.combine_reverse', {'keep__entry': true, 'skip__entry': false}) |
+  rejectattr('keep__entry', 'eq', false) | rejectattr('skip__entry', 'eq', true) | rejectattr('type', 'undefined') |
+  aybarsm.helper.all_except(['keep__entry', 'skip__entry']) }}"
+
 package_manager__packages_all: "{{ package_manager__all |
   aybarsm.helper.selectattr(__package_manager__config.packages.selectattr) |
   aybarsm.helper.replace_aliases(__package_manager__config.packages.replace_aliases) |
@@ -44,11 +49,11 @@ __package_manager__upgrade_execute: >-
     {%- set exec_upgrade = {'decision': false, 'reason': 'Upgrade strategy is set to clean, but no default repo list configuration found.'} -%}
   {%- elif __package_manager__upgrade_once -%}
     {%- if ansible__local_fact_name is undefined -%}
-      {%- set exec_upgrade = {'decision': false, 'reason': 'Upgrade strategy once requires aybarsm.linux.ansible role imported.'} -%}
+      {%- set exec_upgrade = {'decision': false, 'reason': 'Upgrade strategy once requires aybarsm.helper.ansible role imported.'} -%}
     {%- elif ansible__role_enabled is undefined or not ansible__role_enabled -%}
-      {%- set exec_upgrade = {'decision': false, 'reason': 'Upgrade strategy once requires aybarsm.linux.ansible role enabled. (ansible__role_enabled: true)'} -%}
+      {%- set exec_upgrade = {'decision': false, 'reason': 'Upgrade strategy once requires aybarsm.helper.ansible role enabled. (ansible__role_enabled: true)'} -%}
     {%- elif not ansible__manage_local_facts -%}
-      {%- set exec_upgrade = {'decision': false, 'reason': 'Upgrade strategy once requires aybarsm.linux.ansible role local fact management enabled. (ansible__manage_local_facts: true)'} -%}
+      {%- set exec_upgrade = {'decision': false, 'reason': 'Upgrade strategy once requires aybarsm.helper.ansible role local fact management enabled. (ansible__manage_local_facts: true)'} -%}
     {%- elif ansible_local[ansible__local_fact_name].package_manager.upgrade is defined -%}
       {%- set exec_upgrade = {'decision': false, 'reason': 'Host already upgraded.'} -%}
     {%- endif -%}
diff --git a/roles/proxmox/defaults/main.yml b/roles/proxmox/defaults/main.yml
index 6efb32d..e58e8db 100644
--- a/roles/proxmox/defaults/main.yml
+++ b/roles/proxmox/defaults/main.yml
@@ -12,7 +12,9 @@ proxmox__manage_iptables: false
 proxmox__manage_root_access: false
 proxmox__manage_sshd_config: false
 proxmox__manage_ssh_config: false
+proxmox__manage_files: false
 proxmox__manage_cluster: false
+
 proxmox__hostname: "{{ inventory_hostname_short }}"
 proxmox__domain: local
 
diff --git a/roles/proxmox/meta/main.yml b/roles/proxmox/meta/main.yml
index bd2b691..0c14010 100644
--- a/roles/proxmox/meta/main.yml
+++ b/roles/proxmox/meta/main.yml
@@ -1,2 +1,3 @@
 dependencies:
-  - role: aybarsm.helper.ansible
\ No newline at end of file
+  - role: aybarsm.helper.ansible
+  - role: geerlingguy.ntp
\ No newline at end of file
diff --git a/roles/proxmox/tasks/asserts.yml b/roles/proxmox/tasks/asserts.yml
new file mode 100644
index 0000000..2b4d92a
--- /dev/null
+++ b/roles/proxmox/tasks/asserts.yml
@@ -0,0 +1,6 @@
+---
+- name: Check Proxmox links are valid IP addresses
+  ansible.builtin.assert:
+    that: proxmox__cluster_links | select('ansible.utils.ip') | length == proxmox__cluster_links | length
+    fail_msg: "proxmox__cluster_links must be a list of valid IP addresses"
+    success_msg: "proxmox__cluster_links are valid IP addresses"
\ No newline at end of file
diff --git a/roles/proxmox/tasks/auth.yml b/roles/proxmox/tasks/auth.yml
index 6b6ec82..678cc7f 100644
--- a/roles/proxmox/tasks/auth.yml
+++ b/roles/proxmox/tasks/auth.yml
@@ -1,5 +1,5 @@
 ---
-- name: Import aybarsm auth role
+- name: Import aybarsm.linux.auth role
   ansible.builtin.import_role:
     name: aybarsm.linux.auth
   when: proxmox__manage_root_access or proxmox__manage_sshd_config or proxmox__manage_ssh_config
diff --git a/roles/proxmox/tasks/main.yml b/roles/proxmox/tasks/main.yml
index 84bd1d2..b42a054 100644
--- a/roles/proxmox/tasks/main.yml
+++ b/roles/proxmox/tasks/main.yml
@@ -15,27 +15,34 @@
 #     when: __proxmox__cluster_eligible
 #     delegate_to: localhost
 
-- name: Import aybarsm package_manager role
+- name: Import aybarsm.linux.package_manager role
   ansible.builtin.import_role:
     name: aybarsm.linux.package_manager
   when:
     - proxmox__role_enabled
     - proxmox__manage_repo_keys or proxmox__manage_repos or proxmox__manage_packages
 
-- name: Import aybarsm grub role
+- name: Import aybarsm.linux.grub role
   ansible.builtin.import_role:
     name: aybarsm.linux.grub
   when:
     - proxmox__role_enabled
     - proxmox__manage_grub
 
-- name: Import aybarsm network role
+- name: Import aybarsm.linux.network role
   ansible.builtin.import_role:
     name: aybarsm.linux.network
   when:
     - proxmox__role_enabled
     - proxmox__manage_hostname or proxmox__manage_hosts or proxmox__manage_iptables
 
+- name: Import aybarsm.helper.file_mgr role
+  ansible.builtin.import_role:
+    name: aybarsm.helper.file_mgr
+  when:
+    - proxmox__role_enabled
+    - proxmox__manage_files
+
 - name: Update grub and initramfs when systemd network and/or grub changed
   become: true
   ansible.builtin.command:
@@ -47,7 +54,7 @@
   when: (grub__deploy is defined and grub__deploy is changed) or
     (network__systemd_deploy is defined and network__systemd_deploy is changed)
 
-- name: Reboot node and wait if upgraded, grub, network-hostname,systemd, or interfaces changed
+- name: Reboot node and wait if upgraded, grub, network-hostname, systemd, or interfaces changed
   become: true
   ansible.builtin.reboot:
     test_command: "uptime"
@@ -59,6 +66,7 @@
     chk_interfaces: "{{ network__interfaces_deploy is defined and network__interfaces_deploy is changed }}"
   register: proxmox__primary_reboot
   when: chk_upgrade or chk_grub or chk_hostname or chk_systemd or chk_interfaces
+  throttle: 1
 
 - name: Include auth tasks
   ansible.builtin.include_tasks:
@@ -73,8 +81,8 @@
     path: /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js
     line: '        orig_cmd(); return;'
     insertafter: '^\s+checked_command: function\(orig_cmd\) {$'
-    firstmatch: yes
-    backup: yes
+    firstmatch: true
+    backup: true
   when:
     - proxmox__role_enabled
    - proxmox__remove_subscription_warning
diff --git a/roles/proxmox/tasks/set_facts.yml b/roles/proxmox/tasks/set_facts.yml
index a0ee1ce..16e1a1b 100644
--- a/roles/proxmox/tasks/set_facts.yml
+++ b/roles/proxmox/tasks/set_facts.yml
@@ -16,6 +16,8 @@
     - proxmox__clusters | length > 0
    - __proxmox__cluster_query is defined
 
+  #TODO: Assertions for cluster configuration (when proxmox__manage_cluster is true)
+
 - name: Set facts for proxmox cluster configuration
   ansible.builtin.set_fact:
     __proxmox__cluster: "{{ {'members': cluster_members} | combine(cluster_play) if cluster_eligible else None }}"
@@ -44,6 +46,11 @@
     auth__manage_sshd_config: "{{ proxmox__manage_sshd_config if cluster_eligible and proxmox__manage_sshd_config else omit }}"
     auth__manage_ssh_config: "{{ proxmox__manage_ssh_config if cluster_eligible and proxmox__manage_ssh_config else omit }}"
     auth__host: "{{ node_auth + (auth__host | default([])) if cluster_eligible and use_auth else omit }}"
+    file_mgr__role_enabled: "{{ proxmox__manage_files if proxmox__manage_files else omit }}"
+
+    # #TODO: Remove below, testing only
+    # __proxmox__cluster_hosts: "{{ cluster_hosts }}"
+    # __proxmox__node_hosts: "{{ node_hosts }}"
   vars:
     cluster_play: "{{ __proxmox__cluster_configs | selectattr('host', 'eq', inventory_hostname) | first | default(None) }}"
     cluster_members: "{{ __proxmox__cluster_configs |
@@ -92,3 +99,14 @@
       ([node_sshd_config] if cluster_eligible and proxmox__manage_sshd_config else []) }}"
   when:
     - __proxmox__cluster_configs | length > 0
+
+# - name: Debug
+#   ansible.builtin.debug:
+#     msg:
+#       __proxmox__cluster_hosts: "{{ __proxmox__cluster_hosts }}"
+#       __proxmox__node_hosts: "{{ __proxmox__node_hosts }}"
+#   delegate_to: localhost
+
+# - name: Fail for debugging
+#   ansible.builtin.fail:
+#     msg: "Debugging"
\ No newline at end of file
diff --git a/roles/proxmox/vars/main.yml b/roles/proxmox/vars/main.yml
index f424aea..bb53ed7 100644
--- a/roles/proxmox/vars/main.yml
+++ b/roles/proxmox/vars/main.yml
@@ -9,6 +9,7 @@ __proxmox__purpose_packages: >-
   {%- set purpose_packages_done = [] -%}
   {%- if proxmox__all.purposes is defined -%}
     {%- for purpose in proxmox__purpose_names -%}
+      {%- set proxmox_purpose_loop = loop -%}
       {%- for purpose_type in proxmox__purpose_types -%}
         {%- set host_config = proxmox__all.purposes[purpose] if purpose in proxmox__all.purposes else none -%}
         {%- set is_host_purpose = true if host_config else false -%}
@@ -22,7 +23,7 @@ __proxmox__purpose_packages: >-
             'repo': 'deb ' + purpose_repo + '/' + purpose + ' ' + ansible_distribution_release + ' ' + purpose + '-' + purpose_type,
             'filename': purpose + '-' + (purpose_type | replace('_', '-')),
             'state': purpose_repo_state,
-            'update_cache': true
+            'update_cache': proxmox_purpose_loop.last
           }) -%}
         {%- elif purpose.startswith('ceph-') or purpose.startswith('ceph_') -%}
           {%- set proxmox_repos = proxmox_repos.append({
@@ -30,7 +31,7 @@ __proxmox__purpose_packages: >-
             'repo': 'deb ' + purpose_repo + '/' + (purpose | replace('_', '-')) + ' ' + ansible_distribution_release + ' ' + purpose_type,
             'filename': 'ceph',
             'state': purpose_repo_state,
-            'update_cache': true
+            'update_cache': proxmox_purpose_loop.last
           }) -%}
         {%- endif -%}
         {%- if is_host_purpose and proxmox__purpose_packages[purpose] is defined and not purpose in purpose_packages_done -%}