diff --git a/plugins/modules/proxmox_acl.py b/plugins/modules/proxmox_acl.py index 1fa7d7c..160186c 100755 --- a/plugins/modules/proxmox_acl.py +++ b/plugins/modules/proxmox_acl.py @@ -72,8 +72,8 @@ ''' from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.pvesh import ProxmoxShellError -import ansible.module_utils.pvesh as pvesh +from ansible_collections.aybarsm.linux.plugins.module_utils.pvesh import ProxmoxShellError +import ansible_collections.aybarsm.linux.plugins.module_utils.pvesh as pvesh class ProxmoxACL(object): def __init__(self, module): diff --git a/plugins/modules/proxmox_group.py b/plugins/modules/proxmox_group.py index a0d3bfc..66ce647 100755 --- a/plugins/modules/proxmox_group.py +++ b/plugins/modules/proxmox_group.py @@ -61,8 +61,8 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text -from ansible.module_utils.pvesh import ProxmoxShellError -import ansible.module_utils.pvesh as pvesh +from ansible_collections.aybarsm.linux.plugins.module_utils.pvesh import ProxmoxShellError +import ansible_collections.aybarsm.linux.plugins.module_utils.pvesh as pvesh class ProxmoxGroup(object): def __init__(self, module): diff --git a/plugins/modules/proxmox_pool.py b/plugins/modules/proxmox_pool.py index 8e9a5b4..52accff 100644 --- a/plugins/modules/proxmox_pool.py +++ b/plugins/modules/proxmox_pool.py @@ -61,8 +61,8 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text -from ansible.module_utils.pvesh import ProxmoxShellError -import ansible.module_utils.pvesh as pvesh +from ansible_collections.aybarsm.linux.plugins.module_utils.pvesh import ProxmoxShellError +import ansible_collections.aybarsm.linux.plugins.module_utils.pvesh as pvesh class ProxmoxPool(object): def __init__(self, module): diff --git a/plugins/modules/proxmox_role.py b/plugins/modules/proxmox_role.py index f2b3ed9..6f70d92 100644 --- a/plugins/modules/proxmox_role.py +++ 
b/plugins/modules/proxmox_role.py @@ -49,8 +49,8 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text -from ansible.module_utils.pvesh import ProxmoxShellError -import ansible.module_utils.pvesh as pvesh +from ansible_collections.aybarsm.linux.plugins.module_utils.pvesh import ProxmoxShellError +import ansible_collections.aybarsm.linux.plugins.module_utils.pvesh as pvesh class ProxmoxRole(object): def __init__(self, module): diff --git a/plugins/modules/proxmox_user.py b/plugins/modules/proxmox_user.py index 5c345dc..ead0fa4 100755 --- a/plugins/modules/proxmox_user.py +++ b/plugins/modules/proxmox_user.py @@ -121,8 +121,8 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text -from ansible.module_utils.pvesh import ProxmoxShellError -import ansible.module_utils.pvesh as pvesh +from ansible_collections.aybarsm.linux.plugins.module_utils.pvesh import ProxmoxShellError +import ansible_collections.aybarsm.linux.plugins.module_utils.pvesh as pvesh class ProxmoxUser(object): def __init__(self, module): diff --git a/roles/ansible/defaults/main.yml b/roles/ansible/defaults/main.yml deleted file mode 100644 index 4fbfac9..0000000 --- a/roles/ansible/defaults/main.yml +++ /dev/null @@ -1,11 +0,0 @@ -ansible__role_enabled: true -ansible__manage_local_facts: true - -ansible__local_fact_backup: true -ansible__local_facts_dir: "{{ (not lookup('config', 'DEFAULT_FACT_PATH')) | ternary('/etc/ansible/facts.d', lookup('config', 'DEFAULT_FACT_PATH')) }}" -ansible__local_fact_name: aybarsm_linux -ansible__local_fact_file: "{{ ansible__local_facts_dir }}/{{ ansible__local_fact_name }}.fact" -ansible__local_fact_template: "{{ ansible__local_fact_name }}.json.fact.j2" -ansible__local_fact_combine_recursive: true -ansible__local_fact_combine_list_merge: prepend - diff --git a/roles/ansible/handlers/main.yml b/roles/ansible/handlers/main.yml deleted file mode 100644 index 73b314f..0000000 --- 
a/roles/ansible/handlers/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- \ No newline at end of file diff --git a/roles/ansible/meta/main.yml b/roles/ansible/meta/main.yml deleted file mode 100644 index 4831b03..0000000 --- a/roles/ansible/meta/main.yml +++ /dev/null @@ -1 +0,0 @@ -allow_duplicates: true \ No newline at end of file diff --git a/roles/ansible/tasks/main.yml b/roles/ansible/tasks/main.yml deleted file mode 100644 index ed97d53..0000000 --- a/roles/ansible/tasks/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/ansible/tasks/update_local_facts.yml b/roles/ansible/tasks/update_local_facts.yml deleted file mode 100644 index 334a79b..0000000 --- a/roles/ansible/tasks/update_local_facts.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -- name: Ensure ansible local facts directory exists on host - become: true - ansible.builtin.file: - state: directory - recurse: true - path: "{{ ansible__local_facts_dir }}" - register: ansible__ensure_local_facts_dir - when: - - ansible__role_enabled | bool - - ansible__manage_local_facts | bool - - ansible__local_fact_updates is defined - - ansible__local_fact_updates | type_debug == 'dict' - -- name: Settle updated local facts on the host - become: true - ansible.builtin.template: - src: "{{ ansible__local_fact_template }}" - dest: "{{ ansible__local_fact_file }}" - backup: "{{ ansible__local_fact_backup | default(omit) | bool }}" - vars: - recurse_strategy: "{{ ansible__local_fact_combine_recursive | default(true) | bool }}" - list_merge_strategy: "{{ ansible__local_fact_combine_list_merge | default('prepend') }}" - host_local_facts: "{{ ansible_local[ansible__local_fact_name] | default({'created_at': now().utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')}) | - combine(ansible__local_fact_updates, {'updated_at': now().utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')}, recursive=recurse_strategy, list_merge=list_merge_strategy) }}" - register: ansible__settle_local_facts - when: - - ansible__role_enabled | bool - - 
ansible__manage_local_facts | bool - - ansible__local_fact_updates is defined - - ansible__local_fact_updates | type_debug == 'dict' - -- name: Re-read updated local facts on the host - become: true - ansible.builtin.setup: - filter: ansible_local - register: ansible__reread_local_facts - when: - - ansible__role_enabled | bool - - ansible__manage_local_facts | bool - - ansible__settle_local_facts.changed is defined - - ansible__settle_local_facts.changed | bool \ No newline at end of file diff --git a/roles/ansible/templates/aybarsm_linux.json.fact.j2 b/roles/ansible/templates/aybarsm_linux.json.fact.j2 deleted file mode 100644 index 24d187e..0000000 --- a/roles/ansible/templates/aybarsm_linux.json.fact.j2 +++ /dev/null @@ -1 +0,0 @@ -{{ host_local_facts | to_nice_json }} \ No newline at end of file diff --git a/roles/ansible/vars/main.yml b/roles/ansible/vars/main.yml deleted file mode 100644 index e884c5c..0000000 --- a/roles/ansible/vars/main.yml +++ /dev/null @@ -1,105 +0,0 @@ -__ansible__config: - modules: - ansible: - builtin: - user: - required: ['name'] - uniques: ['name'] - aliases: - create_home: ['createhome'] - name: ['user'] - systemd_service: - required: ['name'] - uniques: ['name'] - aliases: - daemon_reexec: ['daemon-reexec'] - daemon_reload: ['daemon-reload'] - name: ['service', 'unit'] - service: - required: ['name'] - uniques: ['name'] - aliases: - arguments: ['args'] - apt: - uniques: ['name'] - aliases: - allow_downgrade: ['allow-downgrade', 'allow_downgrades', 'allow-downgrades'] - allow_unauthenticated: ['allow-unauthenticated'] - default_release: ['default-release'] - install_recommends: ['install-recommends'] - name: ['package', 'pkg'] - update_cache: ['update-cache'] - apt_repository: - uniques: ['repo'] - aliases: - update_cache: ['update-cache'] - apt_key: - uniques: ['keyserver', 'url', 'id', 'file' ,'data'] - aliases: {} - find: - required: ['paths'] - aliases: - excludes: ['exclude'] - paths: ['name', 'path'] - patterns: ['pattern'] - 
posix: - sysctl: - required: ['name', 'value'] - aliases: - name: ['key'] - value: ['val'] - community: - general: - sudoers: - required: ['name'] - uniques: ['name'] - ansible_builtin_apt: - uniques: ['name'] - aliases: - allow_downgrade: ['allow-downgrade', 'allow_downgrades', 'allow-downgrades'] - allow_unauthenticated: ['allow-unauthenticated'] - default_release: ['default-release'] - install_recommends: ['install-recommends'] - name: ['package', 'pkg'] - update_cache: ['update-cache'] - ansible_builtin_apt_repository: - uniques: ['repo'] - aliases: - update_cache: ['update-cache'] - ansible_builtin_apt_key: - uniques: ['keyserver', 'url', 'id', 'file' ,'data'] - ansible_builtin_dnf: - uniques: ['name'] - aliases: - name: ['package', 'pkg'] - update_cache: ['expire-cache'] - ansible_builtin_yum_repository: - uniques: ['name'] - aliases: - attributes: ['attr'] - sslcacert: ['ca_cert'] - sslclientcert: ['client_cert'] - sslclientkey: ['client_key'] - sslverify: ['validate_certs'] - ansible_builtin_copy: - uniques: ['dest'] - aliases: - attributes: ['attr'] - ansible_builtin_template: - uniques: ['dest'] - aliases: - attributes: ['attr'] - ansible_builtin_add_host: - uniques: ['name'] - aliases: - groups: ['group', 'groupname'] - name: ['host','hostname'] - ansible_builtin_user: - uniques: ['name'] - aliases: - create_home: ['createhome'] - name: ['user'] - ansible_builtin_group: - uniques: ['name'] - ansible_posix_authorized_key: - required: ['user', 'key'] diff --git a/roles/auth/defaults/main.yml b/roles/auth/defaults/main.yml index bc9f160..dd9accd 100644 --- a/roles/auth/defaults/main.yml +++ b/roles/auth/defaults/main.yml @@ -18,7 +18,8 @@ auth__ssh_dir: /etc/ssh # If smart enabled and auth__sshd_validate provided then a dummy command task will run to trigger ssh service handlers as provided. # This is a workaround to avoid using changed_when in the template module to prevent misleading output. 
auth__ssh_changes_strategy: - module: service + # module: service + module: None immediate: true smart: true name: ssh.service @@ -30,16 +31,22 @@ auth__sshd_config_dir: "{{ auth__ssh_dir }}/sshd_config.d" auth__sshd_config_file: "{{ auth__sshd_config_dir }}/50-ansible.conf" auth__sshd_config_template: etc/ssh/sshd_config.d/50-ansible.conf.j2 auth__sshd_config_backup: true +auth__sshd_config_cleanup: false auth__ssh_config_dir: "{{ auth__ssh_dir }}/ssh_config.d" auth__ssh_config_file: "{{ auth__ssh_config_dir }}/50-ansible.conf" auth__ssh_config_template: etc/ssh/ssh_config.d/50-ansible.conf.j2 auth__ssh_config_backup: true +auth__ssh_config_cleanup: false +auth__renew_ssh_host_keys_once: false +auth__ssh_host_keys_use_regex: true +auth__ssh_host_keys_patterns: '^ssh_host_.*_key' ##### END: SSH Config ##### ##### BEGIN: SUDOERS Config ##### auth__sudoers_dir: /etc/sudoers.d auth__sudoers_file: "{{ auth__sudoers_dir }}/50-ansible" auth__sudoers_template: etc/sudoers.d/50-ansible.j2 +auth__sudoers_cleanup: false # Uncomment the following line to validate the sudoers file # auth__sudoers_validate: "/usr/sbin/visudo -c -f %s" ##### END: SUDOERS Config ##### diff --git a/roles/auth/handlers/main.yml b/roles/auth/handlers/main.yml index ea6a757..0cfa6bc 100644 --- a/roles/auth/handlers/main.yml +++ b/roles/auth/handlers/main.yml @@ -2,13 +2,13 @@ - name: Effect systemd service for ssh changes become: true ansible.builtin.systemd_service: - daemon_reexec: "{{ service.daemon_reexec | default(omit) | bool }}" - daemon_reload: "{{ service.daemon_reload | default(omit) | bool }}" - enabled: "{{ service.enabled | default(omit) | bool }}" - force: "{{ service.force | default(omit) | bool }}" - masked: "{{ service.masked | default(omit) | bool }}" + daemon_reexec: "{{ service.daemon_reexec | default(omit) }}" + daemon_reload: "{{ service.daemon_reload | default(omit) }}" + enabled: "{{ service.enabled | default(omit) }}" + force: "{{ service.force | default(omit) }}" + masked: 
"{{ service.masked | default(omit) }}" name: "{{ service.name | default('ssh.service') }}" - no_block : "{{ service.no_block | default(omit) | bool }}" + no_block : "{{ service.no_block | default(omit) }}" scope: "{{ service.scope | default(omit) }}" state: "{{ service.state | default(omit) }}" vars: diff --git a/roles/auth/meta/main.yml b/roles/auth/meta/main.yml index e69de29..bd2b691 100644 --- a/roles/auth/meta/main.yml +++ b/roles/auth/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - role: aybarsm.helper.ansible \ No newline at end of file diff --git a/roles/auth/tasks/authorized_keys.yml b/roles/auth/tasks/authorized_keys.yml index 291a93b..1634c2f 100644 --- a/roles/auth/tasks/authorized_keys.yml +++ b/roles/auth/tasks/authorized_keys.yml @@ -3,12 +3,14 @@ set_fact: auth__authorized_keys_all: "{{ (auth__authorized_keys_all | default([])) + ssh_keys_distribute }}" vars: - distribute_query: "*.auth__users_apply.results[*] | [] | [?contains(not_null(item.distribute_ssh_key,``),`{{ inventory_hostname }}`)].{user: name, key: ssh_public_key}" ssh_keys_distribute: "{{ dict(hostvars) | - aybarsm.helper.only_with(ansible_play_batch | difference([inventory_hostname])) | - community.general.json_query(distribute_query) | default([]) }}" + aybarsm.helper.only_with(ansible_play_batch) | + community.general.json_query(__auth__key_distribute_query) | default([]) }}" register: auth__users__ssh_key_distribution - when: ssh_keys_distribute | length > 0 + when: + - auth__users_apply.results is defined + - auth__users_apply.results | length > 0 + - ssh_keys_distribute | length > 0 - name: Retrieve passwd database in check mode for authorized keys become: true @@ -23,15 +25,15 @@ become: true ansible.posix.authorized_key: comment: "{{ item.comment | default(omit) }}" - exclusive: "{{ item.exclusive | default(omit) | bool }}" - follow: "{{ item.follow | default(omit) | bool }}" + exclusive: "{{ item.exclusive | default(omit) }}" + follow: "{{ item.follow | default(omit) }}" key: 
"{{ item.key }}" key_options: "{{ item.key_options | default(omit) }}" - manage_dir: "{{ item.manage_dir | default(omit) | bool }}" + manage_dir: "{{ item.manage_dir | default(omit) }}" path: "{{ item.path | default(omit) }}" state: "{{ item.state | default(omit) }}" user: "{{ item.user }}" - validate_certs: "{{ item.validate_certs | default(omit) | bool }}" + validate_certs: "{{ item.validate_certs | default(omit) }}" loop: "{{ auth__authorized_keys_all }}" register: auth__authorized_keys_apply when: diff --git a/roles/auth/tasks/groups.yml b/roles/auth/tasks/groups.yml index 27929b5..1e268de 100644 --- a/roles/auth/tasks/groups.yml +++ b/roles/auth/tasks/groups.yml @@ -2,13 +2,13 @@ - name: Apply groups configuration become: true ansible.builtin.group: - force: "{{ item.force | default(omit) | bool }}" + force: "{{ item.force | default(omit) }}" gid: "{{ item.gid | default(omit) }}" - local: "{{ item.local | default(omit) | bool }}" + local: "{{ item.local | default(omit) }}" name: "{{ item.name }}" - non_unique: "{{ item.non_unique | default(omit) | bool }}" + non_unique: "{{ item.non_unique | default(omit) }}" state: "{{ item.state | default(omit) }}" - system: "{{ item.system | default(omit) | bool }}" + system: "{{ item.system | default(omit) }}" loop: "{{ auth__groups_all }}" register: auth__groups_apply when: diff --git a/roles/auth/tasks/main.yml b/roles/auth/tasks/main.yml index a3f8371..e3da929 100644 --- a/roles/auth/tasks/main.yml +++ b/roles/auth/tasks/main.yml @@ -24,6 +24,14 @@ - auth__role_enabled | default(false) | bool - auth__manage_authorized_keys | default(false) | bool +# TODO: Renew SSH host keys only once +- name: Include ssh renew host keys tasks + ansible.builtin.include_tasks: + file: ssh_renew_host_keys.yml + when: + - auth__role_enabled | default(false) | bool + - auth__renew_ssh_host_keys_once | default(false) | bool + - name: Include ssh config tasks ansible.builtin.include_tasks: file: ssh.yml diff --git a/roles/auth/tasks/ssh.yml 
b/roles/auth/tasks/ssh.yml index c8fdb78..0e79767 100644 --- a/roles/auth/tasks/ssh.yml +++ b/roles/auth/tasks/ssh.yml @@ -1,6 +1,29 @@ --- -# TODO: Renew SSH host keys only once -- name: Apply ssh daemon configuration +- name: Cleanup unexpected ssh DAEMON config files + block: + - name: Find unexpected ssh DAEMON config files + become: true + ansible.builtin.find: + paths: "{{ auth__sshd_config_dir }}" + patterns: "(?!{{ auth__sshd_config_file | basename }})" + use_regex: true + hidden: true + recurse: true + file_type: any + register: auth__sshd_config_find_cleanup_files + + - name: Remove unexpected ssh DAEMON config files + become: true + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: "{{ auth__sshd_config_find_cleanup_files.files | map(attribute='path') }}" + register: auth__sshd_config_cleanup_files + notify: "auth__ssh_apply_changes" + when: auth__sshd_config_find_cleanup_files.files | default([]) | length > 0 + when: auth__sshd_config_cleanup | default(false) | bool + +- name: Apply ssh DAEMON configuration become: true ansible.builtin.template: src: "{{ auth__sshd_config_template }}" @@ -9,12 +32,33 @@ validate: "{{ auth__sshd_validate | default(omit) }}" register: auth__sshd_config_apply notify: "auth__ssh_apply_changes" - when: - - auth__manage_sshd_config | default(false) | bool - - auth__sshd_config_all | type_debug == 'list' - - auth__sshd_config_all | length > 0 + when: auth__sshd_config_all | default([]) | length > 0 + +- name: Cleanup unexpected ssh CLIENT config files + block: + - name: Find unexpected ssh CLIENT config files + become: true + ansible.builtin.find: + paths: "{{ auth__ssh_config_dir }}" + patterns: "(?!{{ auth__ssh_config_file | basename }})" + use_regex: true + hidden: true + recurse: true + file_type: any + register: auth__ssh_config_find_cleanup_files -- name: Apply ssh client configuration + - name: Remove unexpected ssh CLIENT config files + become: true + ansible.builtin.file: + path: "{{ item }}" + state: 
absent + loop: "{{ auth__ssh_config_find_cleanup_files.files | map(attribute='path') }}" + register: auth__ssh_config_cleanup_files + notify: "auth__ssh_apply_changes" + when: auth__ssh_config_find_cleanup_files.files | default([]) | length > 0 + when: auth__ssh_config_cleanup | default(false) | bool + +- name: Apply ssh CLIENT configuration become: true ansible.builtin.template: src: "{{ auth__ssh_config_template }}" @@ -22,10 +66,7 @@ backup: "{{ auth__ssh_config_backup | default(omit) | bool }}" register: auth__ssh_config_apply notify: "auth__ssh_apply_changes" - when: - - auth__manage_ssh_config | default(false) | bool - - auth__ssh_config_all | type_debug == 'list' - - auth__ssh_config_all | length > 0 + when: auth__ssh_config_all | default([]) | length > 0 - name: Apply ssh client configuration via community.general.ssh_config module become: true @@ -53,10 +94,7 @@ loop: "{{ auth__ssh_config_module_all }}" register: auth__ssh_config_module_apply notify: "auth__ssh_apply_changes" - when: - - auth__manage_ssh_config | default(false) | bool - - auth__ssh_config_module_all | type_debug == 'list' - - auth__ssh_config_module_all | length > 0 + when: auth__ssh_config_module_all | default([]) | length > 0 - name: Run smart sshd configuration changes ansible.builtin.command: diff --git a/roles/auth/tasks/ssh_renew_host_keys.yml b/roles/auth/tasks/ssh_renew_host_keys.yml new file mode 100644 index 0000000..997aeb1 --- /dev/null +++ b/roles/auth/tasks/ssh_renew_host_keys.yml @@ -0,0 +1,62 @@ +--- +#TODO: Assert ansible__role_enabled and ansible__manage_local_facts +- name: Ensure ansible role enabled for one time ssh host key renewal + ansible.builtin.assert: + that: + - ansible__role_enabled | default(false) | bool + - ansible__manage_local_facts | default(false) | bool + - ansible__local_fact_name is defined + fail_msg: "aybarsm.linux.ansible role, local fact management must be enabled and local fact name needs to be defined. 
\ + (ansible__role_enabled: true & ansible__manage_local_facts: true & ansible__local_fact_name is defined)" + success_msg: "Ansible requirements are met for one time ssh host key renewal." + register: auth__ssh_host_keys_assert_ansible_role + when: auth__renew_ssh_host_keys_once + +- name: Renew host keys once + block: + - name: Find SSH host key files + become: true + ansible.builtin.find: + paths: "{{ auth__ssh_dir }}" + patterns: "{{ auth__ssh_host_keys_patterns | default(omit) }}" + use_regex: "{{ auth__ssh_host_keys_use_regex | default(omit) }}" + register: auth__find_ssh_host_key_files + + - name: Remove found SSH host key files + become: true + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: "{{ auth__find_ssh_host_key_files.files | map(attribute='path') }}" + register: auth__remove_ssh_host_key_files + when: auth__find_ssh_host_key_files.files | default([]) | length > 0 + + - name: Regenerate SSH host keys according to found files + become: true + ansible.builtin.command: + cmd: "ssh-keygen -q -N \"\" -t {{ key_type }} -f {{ item }}" + vars: + key_type: "{{ item | basename | regex_replace('^ssh_host_', '') | regex_replace('_key$', '') | trim }}" + loop: "{{ auth__find_ssh_host_key_files.files | map(attribute='path') | select('match', '.*/ssh_host_.*_key$') }}" + register: auth__regen_found_ssh_host_keys + notify: "auth__ssh_apply_changes" + when: auth__find_ssh_host_key_files.files | default([]) | length > 0 + + - name: Regenerate SSH host keys generically when no files found + become: true + ansible.builtin.command: + cmd: "ssh-keygen -A" + register: auth__regen_generic_ssh_host_keys + notify: "auth__ssh_apply_changes" + when: auth__find_ssh_host_key_files.files | default([]) | length == 0 + + - name: Include update local facts tasks when ssh host keys renewed + ansible.builtin.include_role: + name: aybarsm.linux.ansible + tasks_from: update_local_facts.yml + vars: + ansible__local_fact_updates: + auth: + ssh_host_keys_renewed: ["{{ 
now().utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ') }}"] + + when: not auth__ssh_host_keys_assert_ansible_role.failed diff --git a/roles/auth/tasks/sudoers.yml b/roles/auth/tasks/sudoers.yml index cd23fa2..330d750 100644 --- a/roles/auth/tasks/sudoers.yml +++ b/roles/auth/tasks/sudoers.yml @@ -1,4 +1,29 @@ --- +- name: Cleanup unexpected sudoers files + block: + - name: Find unexpected sudoers files + become: true + ansible.builtin.find: + paths: "{{ auth__sudoers_dir }}" + patterns: "(?!{{ ([auth__sudoers_file | basename] + sudoers_module_files) | join('|') }})" + use_regex: true + hidden: true + recurse: true + file_type: any + vars: + sudoers_module_files: "{{ auth__sudoers_module_all | map(attribute='name') | list if auth__sudoers_module_all | default([]) | length > 0 else [] }}" + register: auth__sudoers_find_cleanup_files + + - name: Remove unexpected sudoers files + become: true + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: "{{ auth__sudoers_find_cleanup_files.files | map(attribute='path') }}" + register: auth__sudoers_cleanup_files + when: auth__sudoers_find_cleanup_files.files | default([]) | length > 0 + when: auth__sudoers_cleanup | default(false) | bool + - name: Apply sudoers configuration file become: true ansible.builtin.template: @@ -17,17 +42,14 @@ groups: "{{ item.groups | default(omit) }}" host: "{{ item.host | default(omit) }}" name: "{{ item.name }}" - noexec: "{{ item.noexec | default(omit) | bool }}" - nopassword: "{{ item.nopassword | default(omit) | bool }}" + noexec: "{{ item.noexec | default(omit) }}" + nopassword: "{{ item.nopassword | default(omit) }}" runas: "{{ item.runas | default(omit) }}" - setenv: "{{ item.setenv | default(omit) | bool }}" + setenv: "{{ item.setenv | default(omit) }}" state: "{{ item.state | default(omit) }}" sudoers_path: "{{ item.sudoers_path | default(omit) }}" user: "{{ item.user | default(omit) }}" validation: "{{ item.validation | default(omit) }}" loop: "{{ auth__sudoers_module_all }}" 
register: auth__sudoers_module_apply - when: - - auth__sudoers_module_all | type_debug == 'list' - - auth__sudoers_module_all | length > 0 - \ No newline at end of file + when: auth__sudoers_module_all | default([]) | length > 0 diff --git a/roles/auth/tasks/users.yml b/roles/auth/tasks/users.yml index dd578b1..b4cfe5c 100644 --- a/roles/auth/tasks/users.yml +++ b/roles/auth/tasks/users.yml @@ -2,29 +2,29 @@ - name: Apply users configuration become: true ansible.builtin.user: - append: "{{ item.append | default(omit) | bool }}" + append: "{{ item.append | default(omit) }}" authorization: "{{ item.authorization | default(omit) }}" comment: "{{ item.comment | default(omit) }}" - create_home: "{{ item.create_home | default(omit) | bool }}" + create_home: "{{ item.create_home | default(omit) }}" expires: "{{ item.expires | default(omit) }}" - force: "{{ item.force | default(omit) | bool }}" - generate_ssh_key: "{{ item.generate_ssh_key | default(omit) | bool }}" + force: "{{ item.force | default(omit) }}" + generate_ssh_key: "{{ item.generate_ssh_key | default(omit) }}" group: "{{ item.group | default(omit) }}" groups: "{{ item.groups | default(omit) }}" - hidden: "{{ item.hidden | default(omit) | bool }}" + hidden: "{{ item.hidden | default(omit) }}" home: "{{ item.home | default(omit) }}" - local: "{{ item.local | default(omit) | bool }}" + local: "{{ item.local | default(omit) }}" login_class: "{{ item.login_class | default(omit) }}" - move_home: "{{ item.move_home | default(omit) | bool }}" + move_home: "{{ item.move_home | default(omit) }}" name: "{{ item.name }}" - non_unique: "{{ item.non_unique | default(omit) | bool }}" + non_unique: "{{ item.non_unique | default(omit) }}" password: "{{ item.password | default(omit) }}" password_expire_max: "{{ item.password_expire_max | default(omit) }}" password_expire_min: "{{ item.password_expire_min | default(omit) }}" password_expire_warn: "{{ item.password_expire_warn | default(omit) }}" - password_lock: "{{ 
item.password_lock | default(omit) | bool }}" + password_lock: "{{ item.password_lock | default(omit) }}" profile: "{{ item.profile | default(omit) }}" - remove: "{{ item.remove | default(omit) | bool }}" + remove: "{{ item.remove | default(omit) }}" role: "{{ item.role | default(omit) }}" seuser: "{{ item.seuser | default(omit) }}" shell: "{{ item.shell | default(omit) }}" @@ -35,12 +35,10 @@ ssh_key_passphrase: "{{ item.ssh_key_passphrase | default(omit) }}" ssh_key_type: "{{ item.ssh_key_type | default(omit) }}" state: "{{ item.state | default(omit) }}" - system: "{{ item.system | default(omit) | bool }}" + system: "{{ item.system | default(omit) }}" uid: "{{ item.uid | default(omit) }}" umask: "{{ item.umask | default(omit) }}" update_password: "{{ item.update_password | default(omit) }}" loop: "{{ auth__users_all }}" register: auth__users_apply - when: - - auth__users_all | type_debug == 'list' - - auth__users_all | length > 0 + when: auth__users_all | default([]) | length > 0 diff --git a/roles/auth/vars/main.yml b/roles/auth/vars/main.yml index 802f043..1d0dbc0 100644 --- a/roles/auth/vars/main.yml +++ b/roles/auth/vars/main.yml @@ -101,4 +101,7 @@ auth__sudoers_file_all: "{{ auth__all | auth__sudoers_module_all: "{{ auth__all | aybarsm.helper.selectattr(__auth__config.sudoers_module.selectattr) | aybarsm.helper.unique_recursive('name') | - default([]) }}" \ No newline at end of file + default([]) }}" + +__auth__key_distribute_query: "*.auth__users_apply.results[*] | [] | + [?contains(not_null(item.distribute_ssh_key,``),`{{ inventory_hostname }}`)].{user: name, key: ssh_public_key}" \ No newline at end of file diff --git a/roles/grub/defaults/main.yml b/roles/grub/defaults/main.yml index 5068c4a..dc8c5dc 100644 --- a/roles/grub/defaults/main.yml +++ b/roles/grub/defaults/main.yml @@ -10,7 +10,8 @@ grub__inherit_cmdlines: true grub__change_strategy: # Available modules: command # Set module other than available options to disable applying changes - module: 
command + # module: command + module: None # immediate can be true or false (Flushes the handlers immediately) immediate: false cmd: 'update-grub' diff --git a/roles/grub/handlers/main.yml b/roles/grub/handlers/main.yml index 7cd5861..45a911a 100644 --- a/roles/grub/handlers/main.yml +++ b/roles/grub/handlers/main.yml @@ -2,13 +2,14 @@ - name: Apply grub changes via command module become: true ansible.builtin.command: - chdir: "{{ grub__change_strategy.chdir | default(omit) }}" - cmd: "{{ grub__change_strategy.cmd }}" - creates: "{{ grub__change_strategy.creates | default(omit) }}" - removes: "{{ grub__change_strategy.removes | default(omit) }}" - stdin: "{{ grub__change_strategy.stdin | default(omit) }}" - stdin_add_newline: "{{ grub__change_strategy.stdin_add_newline | default(omit) }}" - strip_empty_ends: "{{ grub__change_strategy.strip_empty_ends | default(omit) }}" + chdir: "{{ item.chdir | default(omit) }}" + cmd: "{{ item.cmd }}" + creates: "{{ item.creates | default(omit) }}" + removes: "{{ item.removes | default(omit) }}" + stdin: "{{ item.stdin | default(omit) }}" + stdin_add_newline: "{{ item.stdin_add_newline | default(omit) }}" + strip_empty_ends: "{{ item.strip_empty_ends | default(omit) }}" + loop: "{{ grub__change_strategy.cmds | default([grub__change_strategy]) }}" changed_when: true register: grub__apply_changes_command listen: "grub__apply_changes" diff --git a/roles/grub/meta/main.yml b/roles/grub/meta/main.yml new file mode 100644 index 0000000..bd2b691 --- /dev/null +++ b/roles/grub/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - role: aybarsm.helper.ansible \ No newline at end of file diff --git a/roles/grub/tasks/grub.yml b/roles/grub/tasks/grub.yml index 944f544..acad5ba 100644 --- a/roles/grub/tasks/grub.yml +++ b/roles/grub/tasks/grub.yml @@ -4,7 +4,7 @@ ansible.builtin.template: src: "{{ grub__template }}" dest: "{{ grub__file }}" - backup: "{{ grub__backup | default(omit) | bool }}" + backup: "{{ grub__backup | default(omit) }}" mode: 
"0644" when: - grub__all | default([]) | type_debug == 'list' diff --git a/roles/network/defaults/main.yml b/roles/network/defaults/main.yml index 10e40e8..0983b56 100644 --- a/roles/network/defaults/main.yml +++ b/roles/network/defaults/main.yml @@ -5,6 +5,8 @@ network__manage_interfaces: false network__manage_sysctl: false network__manage_hostname: false network__manage_hosts: false +network__manage_iptables: false +network__manage_udev_rules: false network__default: [] network__group: [] @@ -26,7 +28,8 @@ network__systemd_template: etc/systemd/network/unit.j2 network__systemd_change_strategy: # Available modules: systemd_service # Set module other than available options to disable applying changes - module: systemd_service + # module: systemd_service + module: None # immediate can be true or false (Flushes the handlers immediately) immediate: false # Use name for service or systemd_service module @@ -36,8 +39,13 @@ network__systemd_change_strategy: ##### BEGIN: network interfaces vars network__interfaces_file: /etc/network/interfaces +network__interfacesd_dir: /etc/network/interfaces.d # Keep the loopback interface in the file network__interfaces_keep_lo: true +# Keep the source in the file +network__interfaces_keep_source: true +# Cleanup the interfaces.d directory +network__interfacesd_cleanup: false # The location of the source line in the file (controversial topic) network__interfaces_source_position: bottom network__interfaces_source_line: "source /etc/network/interfaces.d/*" @@ -56,7 +64,8 @@ network__interfaces_dhcp_excludes: [ network__interfaces_change_strategy: # Available modules: service, systemd_service, or command # Set module other than available options to disable applying changes - module: service + # module: service + module: None # immediate can be true or false (Flushes the handlers immediately) immediate: false # Use name for service or systemd_service module @@ -87,4 +96,11 @@ network__hosts_backup: true network__hosts_auto_discovery: false 
# Consult https://docs.ansible.com/ansible/latest/inventory_guide/intro_patterns.html for more information network__hosts_auto_discovery_inventories: 'webservers:&atlanta' -##### END: network hosts vars \ No newline at end of file +##### END: network hosts vars + +##### BEGIN: network udev rules vars +network__udev_rules_file: /etc/udev/rules.d/70-persistent-net.rules +network__udev_rules_template: etc/udev/rules.d/70-persistent-net.rules.j2 +network__udev_rules_backup: true +network__udev_rules_remove: false +##### END: network udev rules vars \ No newline at end of file diff --git a/roles/network/handlers/main.yml b/roles/network/handlers/main.yml index bba5575..f5f278a 100644 --- a/roles/network/handlers/main.yml +++ b/roles/network/handlers/main.yml @@ -2,13 +2,13 @@ - name: Apply network interfaces changes via systemd service module become: true ansible.builtin.systemd_service: - daemon_reexec: "{{ service.daemon_reexec | default(omit) | bool }}" - daemon_reload: "{{ service.daemon_reload | default(omit) | bool }}" - enabled: "{{ service.enabled | default(omit) | bool }}" - force: "{{ service.force | default(omit) | bool }}" - masked: "{{ service.masked | default(omit) | bool }}" + daemon_reexec: "{{ service.daemon_reexec | default(omit) }}" + daemon_reload: "{{ service.daemon_reload | default(omit) }}" + enabled: "{{ service.enabled | default(omit) }}" + force: "{{ service.force | default(omit) }}" + masked: "{{ service.masked | default(omit) }}" name: "{{ service.name }}" - no_block : "{{ service.no_block | default(omit) | bool }}" + no_block : "{{ service.no_block | default(omit) }}" scope: "{{ service.scope | default(omit) }}" state: "{{ service.state | default(omit) }}" vars: @@ -41,13 +41,14 @@ - name: Apply network interfaces changes via command module become: true ansible.builtin.command: - chdir: "{{ network__interfaces_change_strategy.chdir | default(omit) }}" - cmd: "{{ network__interfaces_change_strategy.cmd }}" - creates: "{{ 
network__interfaces_change_strategy.creates | default(omit) }}" - removes: "{{ network__interfaces_change_strategy.removes | default(omit) }}" - stdin: "{{ network__interfaces_change_strategy.stdin | default(omit) }}" - stdin_add_newline: "{{ network__interfaces_change_strategy.stdin_add_newline | default(omit) }}" - strip_empty_ends: "{{ network__interfaces_change_strategy.strip_empty_ends | default(omit) }}" + chdir: "{{ item.chdir | default(omit) }}" + cmd: "{{ item.cmd }}" + creates: "{{ item.creates | default(omit) }}" + removes: "{{ item.removes | default(omit) }}" + stdin: "{{ item.stdin | default(omit) }}" + stdin_add_newline: "{{ item.stdin_add_newline | default(omit) }}" + strip_empty_ends: "{{ item.strip_empty_ends | default(omit) }}" + loop: "{{ network__interfaces_change_strategy.cmds | default([network__interfaces_change_strategy]) }}" changed_when: true register: network__interfaces_apply_changes_command listen: "network__interfaces_apply_changes" @@ -58,13 +59,13 @@ - name: Apply systemd network changes via systemd service module become: true ansible.builtin.systemd_service: - daemon_reexec: "{{ service.daemon_reexec | default(omit) | bool }}" - daemon_reload: "{{ service.daemon_reload | default(omit) | bool }}" - enabled: "{{ service.enabled | default(omit) | bool }}" - force: "{{ service.force | default(omit) | bool }}" - masked: "{{ service.masked | default(omit) | bool }}" + daemon_reexec: "{{ service.daemon_reexec | default(omit) }}" + daemon_reload: "{{ service.daemon_reload | default(omit) }}" + enabled: "{{ service.enabled | default(omit) }}" + force: "{{ service.force | default(omit) }}" + masked: "{{ service.masked | default(omit) }}" name: "{{ service.name }}" - no_block : "{{ service.no_block | default(omit) | bool }}" + no_block : "{{ service.no_block | default(omit) }}" scope: "{{ service.scope | default(omit) }}" state: "{{ service.state | default(omit) }}" vars: @@ -73,4 +74,21 @@ listen: "network__systemd_apply_changes" when: - 
network__systemd_change_strategy.module is defined - - network__systemd_change_strategy.module == 'systemd_service' \ No newline at end of file + - network__systemd_change_strategy.module == 'systemd_service' + +- name: Apply systemd network changes via command module + become: true + ansible.builtin.command: + chdir: "{{ item.chdir | default(omit) }}" + cmd: "{{ item.cmd }}" + creates: "{{ item.creates | default(omit) }}" + removes: "{{ item.removes | default(omit) }}" + stdin: "{{ item.stdin | default(omit) }}" + stdin_add_newline: "{{ item.stdin_add_newline | default(omit) }}" + strip_empty_ends: "{{ item.strip_empty_ends | default(omit) }}" + loop: "{{ network__systemd_change_strategy.cmds | default([network__systemd_change_strategy]) }}" + register: network__systemd_apply_changes_command + listen: "network__systemd_apply_changes" + when: + - network__systemd_change_strategy.module is defined + - network__systemd_change_strategy.module == 'command' \ No newline at end of file diff --git a/roles/network/meta/main.yml b/roles/network/meta/main.yml index e69de29..bd2b691 100644 --- a/roles/network/meta/main.yml +++ b/roles/network/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - role: aybarsm.helper.ansible \ No newline at end of file diff --git a/roles/network/tasks/hosts.yml b/roles/network/tasks/hosts.yml index 7d67f65..f754ce8 100644 --- a/roles/network/tasks/hosts.yml +++ b/roles/network/tasks/hosts.yml @@ -4,6 +4,6 @@ ansible.builtin.template: src: "{{ network__hosts_template }}" dest: "{{ network__hosts_file }}" - backup: "{{ network__hosts_backup | default(omit) | bool }}" + backup: "{{ network__hosts_backup | default(omit) }}" when: (network__hosts_all_ipv4 | default([]) | length > 0) or (network__hosts_all_ipv6 | default([]) | length > 0) register: network__hosts_deploy diff --git a/roles/network/tasks/interfaces.yml b/roles/network/tasks/interfaces.yml index b98ec2e..87edd18 100644 --- a/roles/network/tasks/interfaces.yml +++ 
b/roles/network/tasks/interfaces.yml @@ -1,14 +1,34 @@ --- +- name: Cleanup network interfaces.d directory + block: + - name: Find network interfaces.d cleanup files + become: true + ansible.builtin.find: + paths: "{{ network__interfacesd_dir }}" + hidden: true + recurse: true + file_type: any + register: network__interfacesd_find_cleanup_files + + - name: Remove network interfaces.d cleanup files + become: true + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: "{{ network__interfacesd_find_cleanup_files.files | map(attribute='path') }}" + register: network__interfacesd_cleanup_files + notify: "network__interfaces_apply_changes" + when: network__interfacesd_find_cleanup_files.files | default([]) | length > 0 + when: network__interfacesd_cleanup | default(false) | bool + - name: Deploy network interfaces file configuration become: true ansible.builtin.template: src: "{{ network__interfaces_template }}" dest: "{{ network__interfaces_file }}" - backup: "{{ network__interfaces_backup | default(omit) | bool }}" + backup: "{{ network__interfaces_backup | default(omit) }}" mode: "0644" - when: - - network__interfaces_all | type_debug == 'list' - - network__interfaces_all | length > 0 + when: network__interfaces_all | default([]) | length > 0 register: network__interfaces_deploy notify: "network__interfaces_apply_changes" diff --git a/roles/network/tasks/iptables.yml b/roles/network/tasks/iptables.yml new file mode 100644 index 0000000..d607d41 --- /dev/null +++ b/roles/network/tasks/iptables.yml @@ -0,0 +1,53 @@ +--- +- name: Deploy iptables configuration + become: true + ansible.builtin.iptables: + action: "{{ item.action | default(omit) }}" + chain: "{{ item.chain | default(omit) }}" + chain_management: "{{ item.chain_management | default(omit) }}" + comment: "{{ item.comment | default(omit) }}" + ctstate: "{{ item.ctstate | default(omit) }}" + destination: "{{ item.destination | default(omit) }}" + destination_port: "{{ item.destination_port | 
default(omit) }}" + destination_ports: "{{ item.destination_ports | default(omit) }}" + dst_range: "{{ item.dst_range | default(omit) }}" + flush: "{{ item.flush | default(omit) }}" + fragment: "{{ item.fragment | default(omit) }}" + gateway: "{{ item.gateway | default(omit) }}" + gid_owner: "{{ item.gid_owner | default(omit) }}" + goto: "{{ item.goto | default(omit) }}" + icmp_type: "{{ item.icmp_type | default(omit) }}" + in_interface: "{{ item.in_interface | default(omit) }}" + ip_version: "{{ item.ip_version | default(omit) }}" + jump: "{{ item.jump | default(omit) }}" + limit: "{{ item.limit | default(omit) }}" + limit_burst: "{{ item.limit_burst | default(omit) }}" + log_level: "{{ item.log_level | default(omit) }}" + log_prefix: "{{ item.log_prefix | default(omit) }}" + match: "{{ item.match | default(omit) }}" + match_set: "{{ item.match_set | default(omit) }}" + match_set_flags: "{{ item.match_set_flags | default(omit) }}" + numeric: "{{ item.numeric | default(omit) }}" + out_interface: "{{ item.out_interface | default(omit) }}" + policy: "{{ item.policy | default(omit) }}" + protocol: "{{ item.protocol | default(omit) }}" + reject_with: "{{ item.reject_with | default(omit) }}" + rule_num: "{{ item.rule_num | default(omit) }}" + set_counters: "{{ item.set_counters | default(omit) }}" + set_dscp_mark: "{{ item.set_dscp_mark | default(omit) }}" + set_dscp_mark_class: "{{ item.set_dscp_mark_class | default(omit) }}" + source: "{{ item.source | default(omit) }}" + source_port: "{{ item.source_port | default(omit) }}" + src_range: "{{ item.src_range | default(omit) }}" + state: "{{ item.state | default(omit) }}" + syn: "{{ item.syn | default(omit) }}" + table: "{{ item.table | default(omit) }}" + tcp_flags: "{{ item.tcp_flags | default(omit) }}" + to_destination: "{{ item.to_destination | default(omit) }}" + to_ports: "{{ item.to_ports | default(omit) }}" + to_source: "{{ item.to_source | default(omit) }}" + uid_owner: "{{ item.uid_owner | default(omit) }}" + 
wait: "{{ item.wait | default(omit) }}" + loop: "{{ network__iptables_all }}" + register: network__iptables_deploy + when: network__iptables_all | default([]) | length > 0 diff --git a/roles/network/tasks/main.yml b/roles/network/tasks/main.yml index 4b34415..ed30c4c 100644 --- a/roles/network/tasks/main.yml +++ b/roles/network/tasks/main.yml @@ -11,6 +11,13 @@ - network__role_enabled | default(false) | bool - network__manage_systemd | default(false) | bool +- name: Include udev rules tasks + ansible.builtin.include_tasks: + file: udev_rules.yml + when: + - network__role_enabled | default(false) | bool + - network__manage_udev_rules | default(false) | bool + - name: Include interfaces tasks ansible.builtin.include_tasks: file: interfaces.yml @@ -37,4 +44,11 @@ file: hosts.yml when: - network__role_enabled | default(false) | bool - - network__manage_hosts | default(false) | bool \ No newline at end of file + - network__manage_hosts | default(false) | bool + +- name: Include iptables tasks + ansible.builtin.include_tasks: + file: iptables.yml + when: + - network__role_enabled | default(false) | bool + - network__manage_iptables | default(false) | bool \ No newline at end of file diff --git a/roles/network/tasks/sysctl.yml b/roles/network/tasks/sysctl.yml index 2c5dbf5..27d95a3 100644 --- a/roles/network/tasks/sysctl.yml +++ b/roles/network/tasks/sysctl.yml @@ -4,11 +4,11 @@ ansible.posix.sysctl: name: "{{ item.name }}" value: "{{ item.value }}" - ignoreerrors: "{{ item.ignoreerrors | default(omit) | bool }}" - reload: "{{ item.reload | default(omit) | bool }}" + ignoreerrors: "{{ item.ignoreerrors | default(omit) }}" + reload: "{{ item.reload | default(omit) }}" state: "{{ item.state | default(omit) }}" sysctl_file: "{{ item.sysctl_file | default(omit) }}" - sysctl_set: "{{ item.sysctl_set | default(omit) | bool }}" + sysctl_set: "{{ item.sysctl_set | default(omit) }}" loop: "{{ network__sysctl_all }}" register: network__sysctl_deploy when: diff --git 
a/roles/network/tasks/systemd.yml b/roles/network/tasks/systemd.yml index 1698768..98a5104 100644 --- a/roles/network/tasks/systemd.yml +++ b/roles/network/tasks/systemd.yml @@ -1,38 +1,37 @@ --- -- name: Find unexpected systemd-network unit files - become: true - ansible.builtin.find: - paths: "{{ network__systemd_dir }}" - patterns: "{{ network__systemd_cleanup_patterns | default(omit) }}" - use_regex: "{{ network__systemd_cleanup_use_regex | default(omit) | bool }}" - register: network__systemd_find_cleanup_files - when: network__systemd_cleanup | default(false) | bool +#TODO: udev persistent rules interfering with link files, need to be fixed +- name: Cleanup unexpected systemd-network unit files + block: + - name: Find unexpected systemd-network unit files + become: true + ansible.builtin.find: + paths: "{{ network__systemd_dir }}" + patterns: "{{ network__systemd_cleanup_patterns | default(omit) }}" + use_regex: "{{ network__systemd_cleanup_use_regex | default(omit) }}" + register: network__systemd_find_cleanup_files -- name: Remove unexpected systemd-network unit files - become: true - ansible.builtin.file: - path: "{{ item }}" - state: absent - loop: "{{ network__systemd_find_cleanup_files.files | map(attribute='path') }}" - register: network__systemd_cleanup_files - notify: network__systemd_apply_changes - when: - - network__systemd_cleanup | default(false) | bool - - network__systemd_find_cleanup_files.files | length > 0 + - name: Remove unexpected systemd-network unit files + become: true + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: "{{ network__systemd_find_cleanup_files.files | map(attribute='path') }}" + register: network__systemd_cleanup_files + notify: "network__systemd_apply_changes" + when: network__systemd_find_cleanup_files.files | default([]) | length > 0 + when: network__systemd_cleanup | default(false) | bool - name: Deploy systemd-network unit files become: true ansible.builtin.template: src: "{{ 
network__systemd_template }}" dest: "{{ network__systemd_dir }}/{{ item.name }}" - backup: "{{ network__systemd_backup | default(omit) | bool }}" + backup: "{{ network__systemd_backup | default(omit) }}" mode: "0644" loop: "{{ network__systemd_all }}" register: network__systemd_deploy - notify: network__systemd_apply_changes - when: - - network__systemd_all | type_debug == 'list' - - network__systemd_all | length > 0 + notify: "network__systemd_apply_changes" + when: network__systemd_all | default([]) | length > 0 - name: Apply network systemd changes ansible.builtin.meta: 'flush_handlers' diff --git a/roles/network/tasks/udev_rules.yml b/roles/network/tasks/udev_rules.yml new file mode 100644 index 0000000..01d3f0b --- /dev/null +++ b/roles/network/tasks/udev_rules.yml @@ -0,0 +1,22 @@ +--- +#TODO: Implement handler for udev rules +- name: Ensure udev rules network configuration is removed + become: true + ansible.builtin.file: + path: "{{ network__udev_rules_file }}" + state: absent + register: network__udev_rules_remove_result + when: network__udev_rules_remove | default(false) | bool + +- name: Deploy udev rules configuration + become: true + ansible.builtin.template: + src: "{{ network__udev_rules_template }}" + dest: "{{ network__udev_rules_file }}" + backup: "{{ network__udev_rules_backup | default(omit) }}" + mode: "0644" + register: network__udev_rules_deploy + when: + - not network__udev_rules_remove | default(false) | bool + - network__udev_rules_all | default([]) | length > 0 + # notify: "network__interfaces_apply_changes" diff --git a/roles/network/templates/etc/network/interfaces.j2 b/roles/network/templates/etc/network/interfaces.j2 index ded3602..4bb1608 100644 --- a/roles/network/templates/etc/network/interfaces.j2 +++ b/roles/network/templates/etc/network/interfaces.j2 @@ -1,6 +1,6 @@ {{ ansible_managed | comment }} -{% if network__interfaces_source_position == 'top' %} +{% if network__interfaces_keep_source and network__interfaces_source_position == 
'top' %} {{ network__interfaces_source_line }} {% endif %} @@ -43,6 +43,6 @@ iface {{ iface.name }} {{ addr_family }} {{ ifaceMethod }} {% endif %} {% endfor %} {% endfor %} -{% if network__interfaces_source_position != 'top' %} +{% if network__interfaces_keep_source and network__interfaces_source_position != 'top' %} {{ network__interfaces_source_line }} {% endif %} \ No newline at end of file diff --git a/roles/network/templates/etc/udev/rules.d/70-persistent-net.rules.j2 b/roles/network/templates/etc/udev/rules.d/70-persistent-net.rules.j2 new file mode 100644 index 0000000..f5002ec --- /dev/null +++ b/roles/network/templates/etc/udev/rules.d/70-persistent-net.rules.j2 @@ -0,0 +1,5 @@ +{{ ansible_managed | comment }} + +{% for rule in network__udev_rules_all %} +{{ rule.entries | aybarsm.helper.to_querystring('name', 'value', '=="', '", ') }}" +{% endfor %} \ No newline at end of file diff --git a/roles/network/vars/main.yml b/roles/network/vars/main.yml index 96cc4f8..37cd74d 100644 --- a/roles/network/vars/main.yml +++ b/roles/network/vars/main.yml @@ -1,14 +1,25 @@ ##### BEGIN: network systemd vars -__network_systemd_available_change_modules: ['systemd_service'] - +__network_systemd_available_change_modules: ['systemd_service', 'command'] +__network__systemd_link_name: '{"type":"systemd","name":"__SYSTEMD_LINK_NAME__","children":[{"name":"Match","children":[{"name":"MACAddress","value":"__MAC_ADDRESS__"},{"name":"Type","value":"ether"}]},{"name":"Link","children":[{"name":"NamePolicy","value":""},{"name":"Name","value":"__LINK_NAME__"}]}]}' # REVIEW: There is room for optimisation __network__systemd_configs: >- - {%- set __configs_prepared = [] -%} - {%- for config in [network__default, network__group, network__host] if config -%} - {%- set __configs_prepared = __configs_prepared.append(config | selectattr('type', 'defined') | selectattr('type', 'equalto', 'systemd') | + {%- set __cfgs = [] -%} + {%- for cfg in [network__default, network__group, 
network__host] if cfg -%} + {%- set __cfgs_grp = (cfg | selectattr('type', 'defined') | selectattr('type', 'eq', 'systemd') | selectattr('name', 'defined') | selectattr('name', 'search', '\\.(network|link|netdev)$') | selectattr('children', 'defined')) -%} + {%- set __cfgs_rnm = (cfg | selectattr('type', 'defined') | selectattr('type', 'eq', 'systemd_link_rename') | + selectattr('name', 'defined') | selectattr('name', 'search', '^(?:[0-9]|[1-9][0-9])-[a-zA-Z]+\\d+\\.link$') | + selectattr('macaddress', 'defined') | selectattr('macaddress', 'ansible.utils.mac') | default([])) -%} + {%- if __cfgs_rnm | length > 0 -%} + {%- for cfgrnm in __cfgs_rnm if cfgrnm -%} + {%- set __cfgs_grp = __cfgs_grp.append(__network__systemd_link_name | + replace('__SYSTEMD_LINK_NAME__', cfgrnm.name) | replace('__MAC_ADDRESS__', cfgrnm.macaddress) | + replace('__LINK_NAME__', (cfgrnm.name | regex_replace('^(?:[0-9]|[1-9][0-9])-(.*).link$', '\\1'))) | from_json) -%} + {%- endfor -%} + {%- endif -%} + {%- set __cfgs = __cfgs.append(__cfgs_grp) -%} {%- endfor -%} - {{ __configs_prepared }} + {{ __cfgs }} network__systemd_all: "{{ __network__systemd_configs | community.general.lists_mergeby('name', recursive=true, list_merge='prepend') | @@ -35,7 +46,7 @@ __network_interfaces_available_change_modules: ['service', 'systemd_service', 'c __network__interfaces_configs: >- {%- set __configs_prepared = [] -%} {%- for config in [network__default, network__group, network__host] if config -%} - {%- set __configs_prepared = __configs_prepared.append(config | selectattr('type', 'defined') | selectattr('type', 'equalto', 'interface') | selectattr('name', 'defined')) -%} + {%- set __configs_prepared = __configs_prepared.append(config | selectattr('type', 'defined') | selectattr('type', 'eq', 'interface') | selectattr('name', 'defined')) -%} {%- endfor -%} {{ __configs_prepared }} @@ -79,4 +90,15 @@ network__hosts_all: "{{ (network__host + network__group + network__default + __n # Sort hosts by hostname to 
avoid unneccessary changes network__hosts_all_ipv4: "{{ network__hosts_all | selectattr('ip', 'ansible.utils.ipv4') | sort(attribute='hostname') }}" network__hosts_all_ipv6: "{{ network__hosts_all | selectattr('ip', 'ansible.utils.ipv6') | sort(attribute='hostname') }}" -##### END: network hosts vars \ No newline at end of file +##### END: network hosts vars + +##### BEGIN: network iptables vars +network__iptables_all: "{{ (network__host + network__group + network__default) | + selectattr('type', 'defined') | selectattr('type', 'equalto', 'iptables') }}" +##### END: network iptables vars + +##### BEGIN: network udev rules vars +network__udev_rules_all: "{{ (network__host + network__group + network__default) | + selectattr('type', 'defined') | selectattr('type', 'equalto', 'udev_rule') | + selectattr('entries', 'defined') }}" +##### END: network udev rules vars \ No newline at end of file diff --git a/roles/package_manager/defaults/main.yml b/roles/package_manager/defaults/main.yml index 70fae3e..27a2259 100644 --- a/roles/package_manager/defaults/main.yml +++ b/roles/package_manager/defaults/main.yml @@ -14,7 +14,8 @@ package_manager__package_strategy: specific # Upgrade the packages on the system either always, never or once # Once option stores the state of the upgrade in host with ansible facts # Once option requires ansible__role_enabled and ansible__manage_local_facts to be enabled -package_manager__upgrade_strategy: once +# package_manager__upgrade_strategy: once +package_manager__upgrade_strategy: never # Upgrade Mode: # Available options for APT: full, clean_full, safe, clean_safe, yes, clean_yes, dist, clean_dist diff --git a/roles/package_manager/meta/main.yml b/roles/package_manager/meta/main.yml index 1bccfcc..bd2b691 100644 --- a/roles/package_manager/meta/main.yml +++ b/roles/package_manager/meta/main.yml @@ -1,3 +1,2 @@ -allow_duplicates: true dependencies: - - role: aybarsm.linux.ansible \ No newline at end of file + - role: aybarsm.helper.ansible \ 
No newline at end of file diff --git a/roles/package_manager/tasks/deb_packages.yml b/roles/package_manager/tasks/deb_packages.yml index 8276e75..285dce9 100644 --- a/roles/package_manager/tasks/deb_packages.yml +++ b/roles/package_manager/tasks/deb_packages.yml @@ -3,32 +3,29 @@ become: true ansible.builtin.apt: name: "{{ item.name }}" - allow_change_held_packages: "{{ item.allow_change_held_packages | default(omit) | bool }}" - allow_downgrade: "{{ item.allow_downgrade | default(omit) | bool }}" - allow_unauthenticated: "{{ item.allow_unauthenticated | default(omit) | bool }}" - autoclean: "{{ item.autoclean | default(omit) | bool }}" - autoremove: "{{ item.autoremove | default(omit) | bool }}" + allow_change_held_packages: "{{ item.allow_change_held_packages | default(omit) }}" + allow_downgrade: "{{ item.allow_downgrade | default(omit) }}" + allow_unauthenticated: "{{ item.allow_unauthenticated | default(omit) }}" + autoclean: "{{ item.autoclean | default(omit) }}" + autoremove: "{{ item.autoremove | default(omit) }}" cache_valid_time: "{{ item.cache_valid_time | default(omit) }}" - clean: "{{ item.clean | default(omit) | bool }}" + clean: "{{ item.clean | default(omit) }}" deb: "{{ item.deb | default(omit) }}" default_release: "{{ item.default_release | default(omit) }}" dpkg_options: "{{ item.dpkg_options | default(omit) }}" - fail_on_autoremove: "{{ item.fail_on_autoremove | default(omit) | bool }}" - force: "{{ item.force | default(omit) | bool }}" - force_apt_get: "{{ item.force_apt_get | default(omit) | bool }}" - install_recommends: "{{ item.install_recommends | default(omit) | bool }}" + fail_on_autoremove: "{{ item.fail_on_autoremove | default(omit) }}" + force: "{{ item.force | default(omit) }}" + force_apt_get: "{{ item.force_apt_get | default(omit) }}" + install_recommends: "{{ item.install_recommends | default(omit) }}" lock_timeout: "{{ item.lock_timeout | default(omit) }}" - only_upgrade: "{{ item.only_upgrade | default(omit) | bool }}" + 
only_upgrade: "{{ item.only_upgrade | default(omit) }}" policy_rc_d: "{{ item.policy_rc_d | default(omit) }}" - purge: "{{ item.purge | default(omit) | bool }}" + purge: "{{ item.purge | default(omit) }}" state: "{{ item.state | default(omit) }}" - update_cache: "{{ item.update_cache | default(omit) | bool }}" + update_cache: "{{ item.update_cache | default(omit) }}" update_cache_retries: "{{ item.update_cache_retries | default(omit) }}" update_cache_retry_max_delay: "{{ item.update_cache_retry_max_delay | default(omit) }}" upgrade: "{{ item.upgrade | default(omit) }}" loop: "{{ package_manager__packages_all }}" - register: package_manager__packages_deb_apply - when: - - package_manager__manage_packages | default(false) | bool - - package_manager__packages_all | type_debug == 'list' - - package_manager__packages_all | length > 0 + register: package_manager__packages_deb_deploy + when: package_manager__packages_all | default([]) | length > 0 diff --git a/roles/package_manager/tasks/deb_repo_keys.yml b/roles/package_manager/tasks/deb_repo_keys.yml index 94934b5..c0bba25 100644 --- a/roles/package_manager/tasks/deb_repo_keys.yml +++ b/roles/package_manager/tasks/deb_repo_keys.yml @@ -7,7 +7,7 @@ file: "{{ item.file | default(omit) }}" keyring: "{{ item.keyring | default(omit) }}" url: "{{ item.url | default(omit) }}" - validate_certs: "{{ item.validate_certs | default(omit) | bool }}" + validate_certs: "{{ item.validate_certs | default(omit) }}" loop: "{{ package_manager__repo_keys_all }}" register: package_manager__repo_keys_deb_apply when: diff --git a/roles/package_manager/tasks/deb_repos.yml b/roles/package_manager/tasks/deb_repos.yml index 3c4e737..5fc6405 100644 --- a/roles/package_manager/tasks/deb_repos.yml +++ b/roles/package_manager/tasks/deb_repos.yml @@ -6,12 +6,12 @@ state: "{{ item.state | default(omit) }}" codename: "{{ item.codename | default(omit) }}" filename: "{{ item.filename | default(omit) }}" - install_python_apt: "{{ item.install_python_apt | 
default(omit) | bool }}" + install_python_apt: "{{ item.install_python_apt | default(omit) }}" mode: "{{ item.mode | default(omit) }}" - update_cache: "{{ item.update_cache | default(omit) | bool }}" + update_cache: "{{ item.update_cache | default(omit) }}" update_cache_retries: "{{ item.update_cache_retries | default(omit) }}" update_cache_retry_max_delay: "{{ item.update_cache_retry_max_delay | default(omit) }}" - validate_certs: "{{ item.validate_certs | default(omit) | bool }}" + validate_certs: "{{ item.validate_certs | default(omit) }}" loop: "{{ package_manager__repos_all }}" register: package_manager__repos_deb_apply when: diff --git a/roles/package_manager/tasks/rpm_packages.yml b/roles/package_manager/tasks/rpm_packages.yml index 27ccb08..2e4792e 100644 --- a/roles/package_manager/tasks/rpm_packages.yml +++ b/roles/package_manager/tasks/rpm_packages.yml @@ -3,37 +3,37 @@ become: true ansible.builtin.dnf: name: "{{ item.name }}" - allow_downgrade: "{{ item.allow_downgrade | default(omit) | bool }}" - allowerasing: "{{ item.allowerasing | default(omit) | bool }}" - autoremove: "{{ item.autoremove | default(omit) | bool }}" - best: "{{ item.best | default(omit) | bool }}" - bugfix: "{{ item.bugfix | default(omit) | bool }}" - cacheonly: "{{ item.cacheonly | default(omit) | bool }}" + allow_downgrade: "{{ item.allow_downgrade | default(omit) }}" + allowerasing: "{{ item.allowerasing | default(omit) }}" + autoremove: "{{ item.autoremove | default(omit) }}" + best: "{{ item.best | default(omit) }}" + bugfix: "{{ item.bugfix | default(omit) }}" + cacheonly: "{{ item.cacheonly | default(omit) }}" conf_file: "{{ item.conf_file | default(omit) }}" disable_excludes: "{{ item.disable_excludes | default(omit) }}" - disable_gpg_check: "{{ item.disable_gpg_check | default(omit) | bool }}" + disable_gpg_check: "{{ item.disable_gpg_check | default(omit) }}" disable_plugin: "{{ item.disable_plugin | default(omit) }}" disablerepo: "{{ item.disablerepo | default(omit) }}" 
download_dir: "{{ item.download_dir | default(omit) }}" - download_only: "{{ item.download_only | default(omit) | bool }}" + download_only: "{{ item.download_only | default(omit) }}" enable_plugin: "{{ item.enable_plugin | default(omit) }}" enablerepo: "{{ item.enablerepo | default(omit) }}" exclude: "{{ item.exclude | default(omit) }}" - install_repoquery: "{{ item.install_repoquery | default(omit) | bool }}" - install_weak_deps: "{{ item.install_weak_deps | default(omit) | bool }}" + install_repoquery: "{{ item.install_repoquery | default(omit) }}" + install_weak_deps: "{{ item.install_weak_deps | default(omit) }}" installroot: "{{ item.installroot | default(omit) }}" list: "{{ item.list | default(omit) }}" lock_timeout: "{{ item.lock_timeout | default(omit) }}" - nobest: "{{ item.nobest | default(omit) | bool }}" + nobest: "{{ item.nobest | default(omit) }}" releasever: "{{ item.releasever | default(omit) }}" - security: "{{ item.security | default(omit) | bool }}" - skip_broken: "{{ item.skip_broken | default(omit) | bool }}" - sslverify: "{{ item.sslverify | default(omit) | bool }}" + security: "{{ item.security | default(omit) }}" + skip_broken: "{{ item.skip_broken | default(omit) }}" + sslverify: "{{ item.sslverify | default(omit) }}" state: "{{ item.state | default(omit) }}" - update_cache: "{{ item.update_cache | default(omit) | bool }}" - update_only: "{{ item.update_only | default(omit) | bool }}" + update_cache: "{{ item.update_cache | default(omit) }}" + update_only: "{{ item.update_only | default(omit) }}" use_backend: "{{ item.use_backend | default(omit) }}" - validate_certs: "{{ item.validate_certs | default(omit) | bool }}" + validate_certs: "{{ item.validate_certs | default(omit) }}" loop: "{{ package_manager__packages_all }}" register: package_manager__packages_rpm_apply when: diff --git a/roles/package_manager/tasks/upgrade.yml b/roles/package_manager/tasks/upgrade.yml index 21fd495..fc1933f 100644 --- a/roles/package_manager/tasks/upgrade.yml 
+++ b/roles/package_manager/tasks/upgrade.yml @@ -20,7 +20,7 @@ loop: "{{ __package_manager__repo_list_defaults_copy }}" register: package_manager__upgrade_clean_default_repo_list_copies notify: "package_manager__update_repo_cache" - when: + when: - __package_manager__upgrade_clean | default(false) | bool - __package_manager__repo_list_defaults_copy | length > 0 @@ -49,7 +49,7 @@ - name: Update package manager cache after repo list changes (Clean Upgrade) ansible.builtin.meta: 'flush_handlers' when: package_manager__upgrade_clean_default_repo_list_copies.changed or package_manager__upgrade_clean_default_repo_list_templates.changed - + - name: Perform DEB package upgrade via APT become: true ansible.builtin.apt: @@ -82,7 +82,7 @@ state: latest register: package_manager__rpm_upgrade when: ansible_os_family | lower == 'redhat' - + - name: Include update local facts tasks when upgrade is successful ansible.builtin.include_role: name: aybarsm.linux.ansible diff --git a/roles/proxmox/defaults/main.yml b/roles/proxmox/defaults/main.yml index 4f03e9c..6efb32d 100644 --- a/roles/proxmox/defaults/main.yml +++ b/roles/proxmox/defaults/main.yml @@ -1,3 +1,4 @@ +##### BEGIN: proxmox per host/node configuration proxmox__role_enabled: false proxmox__manage_repo_keys: false @@ -6,16 +7,26 @@ proxmox__manage_packages: false proxmox__manage_grub: false proxmox__manage_hostname: false proxmox__manage_hosts: false +proxmox__manage_iptables: false # This option force root user to generate ssh key and distribute to all hosts within the cluster proxmox__manage_root_access: false proxmox__manage_sshd_config: false proxmox__manage_ssh_config: false proxmox__manage_cluster: false - -# Hostname and FQDN configuration proxmox__hostname: "{{ inventory_hostname_short }}" proxmox__domain: local +# Ip addresses for the cluster links for the host +# i.e. 
proxmox__cluster_links: ['10.0.0.2', 'fd00::2'] +# If more than one ip provided, the first link ip will be used as the cluster ip for the hosts file +proxmox__cluster_links: [] +proxmox__ssh_port: 22 +proxmox__ssh_ciphers: "aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,chacha20-poly1305@openssh.com" +proxmox__remove_subscription_warning: false +#TODO: pveproxy configuration -> /etc/default/pveproxy +##### END: proxmox per host/node configuration + +##### BEGIN: proxmox shared configuration # Cluster configuration # For target inventory specs, consult https://docs.ansible.com/ansible/latest/inventory_guide/intro_patterns.html for more information # If init node not provided, the init node will be designated as the first node regarding ascending sorted of inventory_hostname @@ -23,18 +34,20 @@ proxmox__domain: local # proxmox__clusters: # - name: 'pve-london01' # target: 'proxmox:&london' -# - name: 'pve-atlanta01' -# target: 'proxmox:&atlanta' -# init: 'pve01-atlanta01' +# manage_pools: true +# manage_roles: true +# manage_groups: false +# manage_users: false +# manage_acls: false +# - name: 'pve-atlanta01'# (required) +# target: 'proxmox:&atlanta'# (required) +# init: 'pve01-atlanta01'# (optional | default: first of asc sorted inventory_hostname) +# manage_pools: true# (optional | default: false) +# manage_roles: true# (optional | default: false) +# manage_groups: false# (optional | default: false) +# manage_users: false# (optional | default: false) +# manage_acls: false# (optional | default: false) proxmox__clusters: [] -# Ip addresses for the cluster links for the host -# i.e. 
proxmox__cluster_links: ['10.0.0.2', 'fd00::2'] -# If more than one ip provided, the first link ip will be used as the cluster ip for the hosts file -proxmox__cluster_links: [] - -proxmox__ssh_port: 22 -proxmox__ssh_ciphers: "aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,chacha20-poly1305@openssh.com" -proxmox__remove_subscription_warning: false proxmox__repo_url_enterprise: https://enterprise.proxmox.com/debian proxmox__repo_url_no_subscription: http://download.proxmox.com/debian @@ -89,4 +102,5 @@ proxmox__host: {} # version: 2.2.4-pve1 # params: 'zfs_arc_max=2147483648' # zed_email: 'noc@example.com' -# post_install: "{{ playbook_dir }}/tasks/zfs_post_install.yml" \ No newline at end of file +# post_install: "{{ playbook_dir }}/tasks/zfs_post_install.yml" +##### END: proxmox shared configuration \ No newline at end of file diff --git a/roles/proxmox/meta/main.yml b/roles/proxmox/meta/main.yml new file mode 100644 index 0000000..bd2b691 --- /dev/null +++ b/roles/proxmox/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - role: aybarsm.helper.ansible \ No newline at end of file diff --git a/roles/proxmox/tasks/cluster_acls.yml b/roles/proxmox/tasks/cluster_acls.yml new file mode 100644 index 0000000..09e232d --- /dev/null +++ b/roles/proxmox/tasks/cluster_acls.yml @@ -0,0 +1,11 @@ +--- +- name: Deploy Proxmox cluster ACLs + aybarsm.linux.proxmox_acl: + path: "{{ item.path }}" + roles: "{{ item.roles }}" + state: "{{ item.state | default('present') }}" + groups: "{{ item.groups | default([]) }}" + users: "{{ item.users | default([]) }}" + with_items: "{{ __proxmox__cluster.acls }}" + when: __proxmox__cluster.acls | default([]) | length > 0 + register: proxmox__cluster_acls_deploy \ No newline at end of file diff --git a/roles/proxmox/tasks/cluster_config.yml b/roles/proxmox/tasks/cluster_config.yml new file mode 100644 index 0000000..c8e6618 --- /dev/null +++ b/roles/proxmox/tasks/cluster_config.yml @@ -0,0 +1,112 @@ +--- +- name: 
Lookup cluster information + proxmox_query: + query: cluster/status + register: proxmox__gather_cluster_info + +- name: Identify if the host is already part of a cluster + set_fact: + __proxmox__active_cluster: '{{ proxmox__gather_cluster_info | json_query(query) }}' + when: "proxmox__gather_cluster_info | json_query(query) | ternary(true, false)" + vars: + query: "response[?type=='cluster'].name | [0]" + +- name: Identify all clusters that the hosts in the specified group may be in + set_fact: + __proxmox__found_clusters: "{{ __proxmox__found_clusters | default([]) | intersect([hostvars[item]['__proxmox__active_cluster']]) }}" + # TODO: Replace group items with proxmox cluster members + with_items: "{{ groups[pve_group] }}" + when: "'__proxmox__active_cluster' in hostvars[item]" + +- name: Ensure that hosts found are not in multiple existing clusters + assert: + that: + - "(__proxmox__found_clusters | default([]) | length) <= 1" + msg: "Some or all of the hosts in {{ pve_group }} appear to already be part of two or more different clusters, \ + please ensure groups only have hosts meant to be in one single cluster." + +- name: Ensure that, if we find an existing cluster, that it matches the specified cluster name + assert: + that: + - "__proxmox__found_clusters[0] == pve_cluster_clustername" + msg: "Some or all of the hosts in group '{{ pve_group }}' appear to be in a cluster named \ + '{{ __proxmox__found_clusters[0] }}', which differs from the specified clustername of \ + '{{ pve_cluster_clustername }}'. Please ensure the clustername is correct. An existing \ + cluster's name cannot be modified." 
+ when: "(__proxmox__found_clusters | default([]) | length) == 1" + +- name: Default initialization node is the first node of pve_group + ansible.builtin.set_fact: + _init_node: "{{ groups[pve_group][0] }}" + +- name: Find any active node in an already initialized Proxmox cluster + ansible.builtin.set_fact: + _init_node: "{{ item }}" + with_items: "{{ groups[pve_group] }}" + when: + - "'__proxmox__active_cluster' in hostvars[item]" + - "hostvars[item]['__proxmox__active_cluster'] == pve_cluster_clustername" + +- name: Initialize a Proxmox cluster + ansible.builtin.command: >- + pvecm create {{ pve_cluster_clustername }} + -link0 {{ pve_cluster_addr0 }} + {% if pve_cluster_addr1 is defined %} + -link1 {{ pve_cluster_addr1 }} + {% endif %} + args: + creates: "{{ pve_cluster_conf }}" + when: + - "__proxmox__found_clusters is not defined" + - "inventory_hostname == _init_node" + +- name: Wait for quorum on initialization node + proxmox_query: + query: cluster/status + register: proxmox__gather_cluster_info_init + until: "(proxmox__gather_cluster_info_init | json_query(query)) == 1" + retries: 5 + delay: 5 + when: + - "inventory_hostname == _init_node" + vars: + query: "response[?type=='cluster'].quorate | [0]" + +- include_tasks: pve_add_node.yml + when: + - "__proxmox__active_cluster is not defined" + - "inventory_hostname != _init_node" + +- name: Check for PVE cluster HA groups + proxmox_query: + query: "/cluster/ha/groups" + register: _ha_group_list + when: "inventory_hostname == _init_node" + +- name: Create PVE cluster HA groups + command: >- + ha-manager groupadd {{ item.name }} + -comment "{{ item.comment | default('') }}" + -nodes "{{ item.nodes }}" + {% if 'nofailback' in item %} + -nofailback {{ item.nofailback }} + {% endif %} + {% if 'restricted' in item %} + -restricted {{ item.restricted }} + {% endif %} + when: + - "inventory_hostname == _init_node" + - item.name not in _ha_group_list.response | json_query("[*].group") + with_items: "{{ 
pve_cluster_ha_groups }}" + +- name: Update PVE cluster HA groups + command: >- + ha-manager groupset {{ item.0.name }} -{{ item.1 }} "{{ item.0[item.1] }}" + when: + - "inventory_hostname == _init_node" + - item.0.name in _ha_group_list.response | json_query("[*].group") + - item.1 in item.0 + - item.0[item.1] != _ha_group_list.response + | json_query("[?group=='" + item.0.name + "']." + item.1) | first + loop: "{{ pve_cluster_ha_groups + | product(['comment', 'nodes', 'nofailback', 'restricted']) | list }}" diff --git a/roles/proxmox/tasks/cluster_groups.yml b/roles/proxmox/tasks/cluster_groups.yml new file mode 100644 index 0000000..45074d7 --- /dev/null +++ b/roles/proxmox/tasks/cluster_groups.yml @@ -0,0 +1,9 @@ +--- +- name: Deploy Proxmox cluster groups + aybarsm.linux.proxmox_group: + name: "{{ item.name }}" + state: "{{ item.state | default('present') }}" + comment: "{{ item.comment | default(omit) }}" + with_items: "{{ __proxmox__cluster.groups }}" + when: __proxmox__cluster.groups | default([]) | length > 0 + register: proxmox__cluster_groups_deploy \ No newline at end of file diff --git a/roles/proxmox/tasks/cluster_pools.yml b/roles/proxmox/tasks/cluster_pools.yml new file mode 100644 index 0000000..b2a68b3 --- /dev/null +++ b/roles/proxmox/tasks/cluster_pools.yml @@ -0,0 +1,9 @@ +--- +- name: Deploy Proxmox cluster pools + aybarsm.linux.proxmox_pool: + name: "{{ item.name }}" + state: "{{ item.state | default('present') }}" + comment: "{{ item.comment | default(omit) }}" + with_items: "{{ __proxmox__cluster.pools }}" + when: __proxmox__cluster.pools | default([]) | length > 0 + register: proxmox__cluster_pools_deploy \ No newline at end of file diff --git a/roles/proxmox/tasks/cluster_roles.yml b/roles/proxmox/tasks/cluster_roles.yml new file mode 100644 index 0000000..67fd77e --- /dev/null +++ b/roles/proxmox/tasks/cluster_roles.yml @@ -0,0 +1,9 @@ +--- +- name: Deploy Proxmox cluster roles + aybarsm.linux.proxmox_role: + name: "{{ item.name }}" + 
privileges: "{{ item.privileges }}" + state: "{{ item.state | default('present') }}" + with_items: "{{ __proxmox__cluster.roles }}" + when: __proxmox__cluster.roles | default([]) | length > 0 + register: proxmox__cluster_roles_deploy \ No newline at end of file diff --git a/roles/proxmox/tasks/cluster_users.yml b/roles/proxmox/tasks/cluster_users.yml new file mode 100644 index 0000000..a555eef --- /dev/null +++ b/roles/proxmox/tasks/cluster_users.yml @@ -0,0 +1,16 @@ +--- +- name: Deploy Proxmox cluster users + aybarsm.linux.proxmox_user: + name: "{{ item.name }}" + state: "{{ item.state | default('present') }}" + enable: "{{ item.enable | default(omit) }}" + groups: "{{ item.groups | default([]) }}" + comment: "{{ item.comment | default(omit) }}" + email: "{{ item.email | default(omit) }}" + firstname: "{{ item.firstname | default(omit) }}" + lastname: "{{ item.lastname | default(omit) }}" + password: "{{ item.password | default(omit) }}" + expire: "{{ item.expire | default(omit) }}" + with_items: "{{ __proxmox__cluster.users }}" + when: __proxmox__cluster.users | default([]) | length > 0 + register: proxmox__cluster_users_deploy \ No newline at end of file diff --git a/roles/proxmox/tasks/main.yml b/roles/proxmox/tasks/main.yml index f927448..e0f6a52 100644 --- a/roles/proxmox/tasks/main.yml +++ b/roles/proxmox/tasks/main.yml @@ -7,72 +7,139 @@ ansible.builtin.include_tasks: file: set_facts.yml -# - name: Inform when host not cluster eligible +- name: Inform when host not cluster eligible + ansible.builtin.debug: + msg: 'Host is not cluster eligible.' + when: not __proxmox__cluster_eligible + delegate_to: localhost + +# - name: Inform cluster configuration # ansible.builtin.debug: -# msg: 'Host is not cluster eligible.' 
-# when: not __proxmox__cluster_eligible +# msg: "{{ __proxmox__cluster }}" +# when: __proxmox__cluster_eligible # delegate_to: localhost -# - name: Import aybarsm package_manager role -# ansible.builtin.import_role: -# name: aybarsm.linux.package_manager -# when: -# - proxmox__role_enabled -# - proxmox__manage_repo_keys or proxmox__manage_repos or proxmox__manage_packages - -# - name: Import aybarsm grub role -# ansible.builtin.import_role: -# name: aybarsm.linux.grub -# when: -# - proxmox__role_enabled -# - proxmox__manage_grub - -# - name: Import aybarsm network role -# ansible.builtin.import_role: -# name: aybarsm.linux.network -# when: -# - proxmox__role_enabled -# - proxmox__manage_hostname or proxmox__manage_hosts - -# - name: Reboot node and wait if upgraded, grub or network-hostname,systemd,interfaces changed -# ansible.builtin.reboot: -# test_command: "uptime" -# vars: -# chk_upgrade: "{{ package_manager__upgrade_deb is defined and package_manager__upgrade_deb is changed }}" -# chk_grub: "{{ grub__deploy is defined and grub__deploy is changed }}" -# chk_hostname: "{{ network__hostname_deploy is defined and network__hostname_deploy is changed }}" -# chk_systemd: "{{ network__systemd_deploy is defined and network__systemd_deploy is changed }}" -# chk_interfaces: "{{ network__interfaces_deploy is defined and network__interfaces_deploy is changed }}" -# register: proxmox__primary_reboot -# when: chk_upgrade or chk_grub or chk_hostname or chk_systemd or chk_interfaces - -# - name: Include auth tasks -# ansible.builtin.include_tasks: -# file: auth.yml -# when: -# - proxmox__role_enabled -# - __proxmox__cluster_eligible - -# - name: Remove subscription check wrapper function in web UI -# ansible.builtin.lineinfile: -# path: /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js -# line: ' orig_cmd(); return;' -# insertafter: '^\s+checked_command: function\(orig_cmd\) {$' -# firstmatch: yes -# backup: yes -# when: -# - proxmox__role_enabled -# - 
proxmox__remove_subscription_warning +- name: Import aybarsm package_manager role + ansible.builtin.import_role: + name: aybarsm.linux.package_manager + when: + - proxmox__role_enabled + - proxmox__manage_repo_keys or proxmox__manage_repos or proxmox__manage_packages + +- name: Import aybarsm grub role + ansible.builtin.import_role: + name: aybarsm.linux.grub + when: + - proxmox__role_enabled + - proxmox__manage_grub + +- name: Import aybarsm network role + ansible.builtin.import_role: + name: aybarsm.linux.network + when: + - proxmox__role_enabled + - proxmox__manage_hostname or proxmox__manage_hosts or proxmox__manage_iptables + +- name: Update grub and initramfs when systemd network and/or grub changed + become: true + ansible.builtin.command: + cmd: "{{ item }}" + with_items: + - update-grub + - proxmox-boot-tool refresh + - update-initramfs -u -k all + when: (grub__deploy is defined and grub__deploy is changed) or + (network__systemd_deploy is defined and network__systemd_deploy is changed) + +- name: Reboot node and wait if upgraded, grub, network-hostname, systemd, or interfaces changed + become: true + ansible.builtin.reboot: + test_command: "uptime" + vars: + chk_upgrade: "{{ package_manager__upgrade_deb is defined and package_manager__upgrade_deb is changed }}" + chk_grub: "{{ grub__deploy is defined and grub__deploy is changed }}" + chk_hostname: "{{ network__hostname_deploy is defined and network__hostname_deploy is changed }}" + chk_systemd: "{{ network__systemd_deploy is defined and network__systemd_deploy is changed }}" + chk_interfaces: "{{ network__interfaces_deploy is defined and network__interfaces_deploy is changed }}" + register: proxmox__primary_reboot + when: chk_upgrade or chk_grub or chk_hostname or chk_systemd or chk_interfaces + +- name: Include auth tasks + ansible.builtin.include_tasks: + file: auth.yml + when: + - proxmox__role_enabled + - __proxmox__cluster_eligible + +- name: Remove subscription check wrapper function in web UI + 
become: true + ansible.builtin.lineinfile: + path: /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js + line: ' orig_cmd(); return;' + insertafter: '^\s+checked_command: function\(orig_cmd\) {$' + firstmatch: yes + backup: yes + when: + - proxmox__role_enabled + - proxmox__remove_subscription_warning + +- name: Include Proxmox cluster pool tasks + ansible.builtin.include_tasks: + file: cluster_pools.yml + when: + - proxmox__role_enabled + - __proxmox__cluster_eligible + - __proxmox__cluster.manage_pools | default(false) | bool + - inventory_hostname == __proxmox__cluster.init + +- name: Include Proxmox cluster roles tasks + ansible.builtin.include_tasks: + file: cluster_roles.yml + when: + - proxmox__role_enabled + - __proxmox__cluster_eligible + - __proxmox__cluster.manage_roles | default(false) | bool + - inventory_hostname == __proxmox__cluster.init + +- name: Include Proxmox cluster groups tasks + ansible.builtin.include_tasks: + file: cluster_groups.yml + when: + - proxmox__role_enabled + - __proxmox__cluster_eligible + - __proxmox__cluster.manage_groups | default(false) | bool + - inventory_hostname == __proxmox__cluster.init + +- name: Include Proxmox cluster users tasks + ansible.builtin.include_tasks: + file: cluster_users.yml + when: + - proxmox__role_enabled + - __proxmox__cluster_eligible + - __proxmox__cluster.manage_users | default(false) | bool + - inventory_hostname == __proxmox__cluster.init + +- name: Include Proxmox cluster ACLs tasks + ansible.builtin.include_tasks: + file: cluster_acls.yml + when: + - proxmox__role_enabled + - __proxmox__cluster_eligible + - __proxmox__cluster.manage_acls | default(false) | bool + - inventory_hostname == __proxmox__cluster.init + #TODO: Remove old kernel packages #TODO: IPMI Watchdog -- name: Gather package facts - ansible.builtin.package_facts: - manager: auto - register: proxmox__package_facts +# - name: Gather package facts +# ansible.builtin.package_facts: +# manager: auto +# register: 
proxmox__package_facts -- name: Include ZFS tasks - ansible.builtin.include_tasks: - file: zfs.yml - when: proxmox__role_enabled \ No newline at end of file +# - name: Include ZFS tasks +# ansible.builtin.include_tasks: +# file: zfs.yml +# when: proxmox__role_enabled + +# #TODO: Kernel module cleanup diff --git a/roles/proxmox/tasks/set_facts.yml b/roles/proxmox/tasks/set_facts.yml index cc540a1..a0ee1ce 100644 --- a/roles/proxmox/tasks/set_facts.yml +++ b/roles/proxmox/tasks/set_facts.yml @@ -7,7 +7,7 @@ cluster_query: "{{ __proxmox__cluster_query | replace('__MEMBERS__', ('[`' + (cluster_inventory | replace(',', '`,`')) + '`]')) }}" cluster_node_config: "{{ dict(hostvars) | community.general.json_query(cluster_query) | sort(attribute='host') }}" cluster_default_init: "{{ cluster_node_config | map(attribute='host') | first }}" - cluster_config: "{{ cluster_node_config | map('combine', ({'init': cluster_default_init} | combine(item))) }}" + cluster_config: "{{ cluster_node_config | map('combine', ({'init': cluster_default_init} | combine(__proxmox__default_cluster_config, item))) }}" loop: "{{ proxmox__clusters }}" delegate_to: localhost run_once: true @@ -33,6 +33,7 @@ network__role_enabled: "{{ use_network if use_network else omit }}" network__manage_hostname: "{{ proxmox__manage_hostname if proxmox__manage_hostname else omit }}" network__manage_hosts: "{{ proxmox__manage_hosts if proxmox__manage_hosts else omit }}" + network__manage_iptables: "{{ proxmox__manage_iptables if proxmox__manage_iptables else omit }}" network__hosts_auto_discovery: "{{ False if proxmox__manage_hosts else omit }}" network__hostname: "{{ proxmox__hostname + '.' 
+ proxmox__domain if proxmox__manage_hostname else omit }}" network__host: "{{ node_hosts + (network__host | default([])) if cluster_eligible and proxmox__manage_hosts else omit }}" @@ -53,7 +54,7 @@ cluster_eligible: "{{ (ansible_play_batch | intersect(cluster_required) | length) == (cluster_required | length) if cluster_play else False }}" use_package_manager: "{{ proxmox__manage_repo_keys or proxmox__manage_repos or proxmox__manage_packages }}" - use_network: "{{ proxmox__manage_hostname or proxmox__manage_hosts }}" + use_network: "{{ proxmox__manage_hostname or proxmox__manage_hosts or proxmox__manage_iptables }}" use_auth: "{{ proxmox__manage_root_access or proxmox__manage_sshd_config or proxmox__manage_ssh_config }}" node_repo_keys: "{{ __proxmox__purpose_packages | selectattr('type', 'eq', 'repo_key') if proxmox__manage_repo_keys else [] }}" @@ -77,7 +78,7 @@ node_root_default: "{{ node_auth_all | aybarsm.helper.selectattr(__proxmox__root_user_selector) | first | default({}) if cluster_eligible else {} }}" node_root_modification: ssh_key_comment: "{{ node_root_default.ssh_key_comment | default('root@' + (cluster_play.hostname if cluster_eligible else inventory_hostname_short)) }}" - distribute_ssh_key: "{{ cluster_required | difference([inventory_hostname]) if cluster_eligible else [] }}" + distribute_ssh_key: "{{ cluster_required if cluster_eligible else [] }}" node_root_user: "{{ node_root_default | combine(__proxmox__root_user, node_root_modification) }}" node_ssh_hosts: "{{ cluster_members | map(attribute='links') | map('join', ' ') | join(' ') if cluster_eligible else '' }}" diff --git a/roles/proxmox/tasks/zfs.yml b/roles/proxmox/tasks/zfs.yml index 689eb3f..b3fa07b 100644 --- a/roles/proxmox/tasks/zfs.yml +++ b/roles/proxmox/tasks/zfs.yml @@ -1,11 +1,22 @@ --- -- name: Load/Unload persistent ZFS module +- name: Load persistent ZFS module community.general.modprobe: name: zfs - params: "{{ proxmox__all.purposes.zfs.params | default(omit) }}" - 
persistent: "{{ 'present' if proxmox__all.purposes.zfs is defined else 'absent' }}" - state: "{{ 'present' if proxmox__all.purposes.zfs is defined else 'absent' }}" - register: proxmox__zfs_modprobe + params: "{{ proxmox__all.purposes.zfs.params | default('') }}" + persistent: present + state: present + register: proxmox__zfs_load_modprobe + when: proxmox__all.purposes.zfs is defined + +- name: Unload persistent ZFS module and remove ZFS modprobe configuration + ansible.builtin.file: + dest: "{{ item }}" + state: absent + loop: + - /etc/modprobe.d/zfs.conf + - /etc/modules-load.d/zfs.conf + register: proxmox__zfs_unload_modprobe + when: proxmox__all.purposes.zfs is undefined - name: Enable/Disable email address for ZFS event daemon notifications ansible.builtin.lineinfile: @@ -15,7 +26,7 @@ vars: is_enabled: "{{ proxmox__all.purposes.zfs.zed_email is defined and proxmox__all.purposes.zfs.zed_email | length > 0 }}" line_prefix: "{{ is_enabled | ternary('', '#') }}ZED_EMAIL_ADDR=" - zed_email: "{{ is_enabled | ternary(proxmox__all.purposes.zfs.zed_email, '') }}" + zed_email: "{{ is_enabled | ternary(proxmox__all.purposes.zfs.zed_email, 'root') }}" register: proxmox__zfs_zed_email notify: "proxmox__zfs_zed_apply_changes" diff --git a/roles/proxmox/vars/main.yml b/roles/proxmox/vars/main.yml index f1f0aeb..f424aea 100644 --- a/roles/proxmox/vars/main.yml +++ b/roles/proxmox/vars/main.yml @@ -91,4 +91,11 @@ __proxmox__sshd_config: value: '' children: - name: PermitRootLogin - value: prohibit-password \ No newline at end of file + value: prohibit-password + +__proxmox__default_cluster_config: + manage_pools: false + manage_roles: false + manage_groups: false + manage_users: false + manage_acls: false \ No newline at end of file