Skip to content

Commit

Permalink
Role: proxmox-qemu-vm debug
Browse files Browse the repository at this point in the history
  • Loading branch information
aybarsm committed Oct 19, 2024
1 parent 69c1262 commit 9c15ca5
Show file tree
Hide file tree
Showing 11 changed files with 347 additions and 440 deletions.
1 change: 1 addition & 0 deletions roles/network/defaults/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ network__manage_iptables: false
network__manage_udev_rules: false
network__manage_gai: false

network__use_only: ['host', 'group', 'default']
network__default: []
network__group: []
network__host: []
Expand Down
95 changes: 6 additions & 89 deletions roles/network/vars/main.yml
Original file line number Diff line number Diff line change
@@ -1,91 +1,8 @@
network__all: "{{ (network__host + network__group + network__default) |
map('aybarsm.helper.combine_reverse', {'keep__entry': true, 'skip__entry': false}) |
rejectattr('keep__entry', 'eq', false) | rejectattr('skip__entry', 'eq', true) | rejectattr('type', 'undefined') |
aybarsm.helper.all_except(['keep__entry', 'skip__entry']) }}"

##### BEGIN: network systemd vars
__network_systemd_available_change_modules: ['systemd_service', 'command']
__network__systemd_link_name: '{"type":"systemd","name":"__SYSTEMD_LINK_NAME__","children":[{"name":"Match","children":[{"name":"MACAddress","value":"__MAC_ADDRESS__"},{"name":"Type","value":"ether"}]},{"name":"Link","children":[{"name":"NamePolicy","value":""},{"name":"Name","value":"__LINK_NAME__"}]}]}'
__network__systemd_rename: >-
{%- set __rtr = [] -%}
{%- set __cfgs = (network__all | selectattr('type', 'eq', 'systemd_link_rename') |
selectattr('name', 'defined') | selectattr('name', 'match', '^(?:[0-9]|[1-9][0-9])-[a-zA-Z]+\\d+\\.link$') |
selectattr('macaddress', 'defined') | selectattr('macaddress', 'match', '^([0-9A-Fa-f]{2}([:])?){5}([0-9A-Fa-f]{2})$') | default([])) -%}
{%- for cfg in __cfgs if cfg -%}
{%- set __rtr = __rtr.append(__network__systemd_link_name |
replace('__SYSTEMD_LINK_NAME__', cfg.name) | replace('__MAC_ADDRESS__', cfg.macaddress) |
replace('__LINK_NAME__', (cfg.name | regex_replace('^(?:[0-9]|[1-9][0-9])-(.*).link$', '\\1'))) | from_json) -%}
{%- endfor -%}
{{ __rtr }}
network__systemd_all: "{{ ((network__all | selectattr('type', 'eq', 'systemd') |
selectattr('name', 'defined') | selectattr('name', 'search', '\\.(network|link|netdev)$') |
selectattr('children', 'defined') | default([])) + __network__systemd_rename) | reverse |
community.general.lists_mergeby('name', recursive=true, list_merge='prepend') |
aybarsm.helper.unique_recursive(attributes='name', recurse='children') }}"

__network__systemd_names: "{{ network__systemd_all | map(attribute='name') }}"
# REVIEW: There is room for optimisation
# Escape file names defined in network__systemd_all
__network__systemd_cleanup_regex:
- "^({{ __network__systemd_names | select('match', '.*\\.link$') | map('replace', '.link', '') | join('|') }})\\.link$"
- "^({{ __network__systemd_names | select('match', '.*\\.netdev$') | map('replace', '.netdev', '') | join('|') }})\\.netdev$"
- "^({{ __network__systemd_names | select('match', '.*\\.network$') | map('replace', '.network', '') | join('|') }})\\.network$"
# Escape backup files:
- "^.*\\.\\d+\\.\\d{4}-\\d{2}-\\d{2}@\\d{2}:\\d{2}:\\d{2}~$"

# Join regex expressions
network__systemd_cleanup_patterns: ["(?!{{ __network__systemd_cleanup_regex | join('|') }})"]
##### END: network systemd vars

##### BEGIN: network interfaces vars
__network_interfaces_available_change_modules: ['service', 'systemd_service', 'command']

# Sort interfaces by name to avoid unnecessary changes
network__interfaces_all: "{{ network__all | reverse |
selectattr('type', 'eq', 'interface') | selectattr('name', 'defined') |
community.general.lists_mergeby('name', recursive=true, list_merge='prepend') |
aybarsm.helper.unique_recursive(attributes='name', recurse='inet') |
aybarsm.helper.unique_recursive(attributes='name', recurse='inet6') |
sort(attribute='name') }}"
network__all: "{{ {'host': network__host, 'group': network__group, 'default': network__default} | aybarsm.helper.role_vars(only=network__use_only) }}"

##### BEGIN: network sysctl vars
network__sysctl_all: "{{ network__all |
selectattr('type', 'defined') | selectattr('type', 'equalto', 'sysctl') |
aybarsm.helper.replace_aliases(__ansible.modules.ansible_posix_sysctl.aliases) |
selectattr('name', 'defined') | selectattr('value', 'defined') | unique(attribute='name') }}"
##### END: network sysctl vars

##### BEGIN: network hosts vars
__network__hosts_auto_discovered: >-
{%- if (network__hosts_auto_discovery | default(false) | bool) and network__hosts_auto_discovery_inventories is defined and network__hosts_auto_discovery_inventories | length > 0 -%}
{%- set inventory_lookup = lookup('ansible.builtin.inventory_hostnames', network__hosts_auto_discovery_inventories) -%}
{%- if inventory_lookup | length > 0 -%}
{%- set inventory_hosts = inventory_lookup | split(',') -%}
{%- set discovered_hosts = dict(hostvars) | aybarsm.helper.only_with(inventory_hosts) | dict2items | selectattr('value.ansible_host', 'defined') -%}
{%- set ips = discovered_hosts | map(attribute='value.ansible_host') -%}
{%- set hostnames = discovered_hosts | map(attribute='value.inventory_hostname_short') -%}
{%- set fqdns = discovered_hosts | map(attribute='value.inventory_hostname') -%}
{%- set auto_discovered = {'ip': ips, 'hostname': hostnames, 'fqdn': fqdns} | aybarsm.helper.to_list_of_dicts({'type': 'host'}) -%}
{%- endif -%}
{%- else -%}
{%- set auto_discovered = [] -%}
{%- endif -%}
{{ auto_discovered }}
network__hosts_all: "{{ (network__all + __network__hosts_auto_discovered) |
selectattr('type', 'eq', 'host') | selectattr('ip', 'defined') | selectattr('hostname', 'defined') |
aybarsm.helper.replace_aliases({'fqdn': ['hostname']}) | unique(attribute='ip') }}"

# Sort hosts by hostname to avoid unnecessary changes
network__hosts_all_ipv4: "{{ network__hosts_all | selectattr('ip', 'ansible.utils.ipv4') | sort(attribute='hostname') }}"
network__hosts_all_ipv6: "{{ network__hosts_all | selectattr('ip', 'ansible.utils.ipv6') | sort(attribute='hostname') }}"
##### END: network hosts vars

##### BEGIN: network iptables vars
network__iptables_all: "{{ network__all | selectattr('type', 'eq', 'iptables') }}"
##### END: network iptables vars

##### BEGIN: network udev rules vars
network__udev_rules_all: "{{ network__all | selectattr('type', 'eq', 'udev_rule') | selectattr('entries', 'defined') }}"
##### END: network udev rules vars
network__sysctl_all: "{{ network__all | selectattr('entry__type', 'eq', 'sysctl') |
aybarsm.helper.replace_aliases(__ansible.modules.ansible_posix_sysctl.aliases, removeAliases=true) |
community.general.json_query('[?not_null(name) && not_null(value)]') |
unique(attribute='name') }}"
##### END: network sysctl vars
109 changes: 5 additions & 104 deletions roles/proxmox/defaults/main.yml
Original file line number Diff line number Diff line change
@@ -1,108 +1,9 @@
##### BEGIN: proxmox per host/node configuration
proxmox__role_enabled: false

proxmox__manage_repo_keys: false
proxmox__manage_repos: false
proxmox__manage_packages: false
proxmox__manage_grub: false
proxmox__manage_hostname: false
proxmox__manage_hosts: false
proxmox__manage_iptables: false
# This option forces the root user to generate an SSH key and distribute it to all hosts within the cluster
proxmox__manage_root_access: false
proxmox__manage_sshd_config: false
proxmox__manage_ssh_config: false
proxmox__manage_files: false
proxmox__manage_cluster: false
proxmox__clusters: {}

proxmox__hostname: "{{ inventory_hostname_short }}"
proxmox__domain: local
proxmox__use_only: ['host', 'group', 'default']

# Ip addresses for the cluster links for the host
# i.e. proxmox__cluster_links: ['10.0.0.2', 'fd00::2']
# If more than one IP is provided, the first link IP will be used as the cluster IP for the hosts file
proxmox__cluster_links: []
proxmox__ssh_port: 22
proxmox__ssh_ciphers: "aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected],[email protected]"
proxmox__remove_subscription_warning: false
#TODO: pveproxy configuration -> /etc/default/pveproxy
##### END: proxmox per host/node configuration

##### BEGIN: proxmox shared configuration
# Cluster configuration
# For target inventory specs, consult https://docs.ansible.com/ansible/latest/inventory_guide/intro_patterns.html for more information
# If the init node is not provided, it will be designated as the first node in ascending sorted order of inventory_hostname
# The proxmox__clusters variable has been designed to be managed from a single source of truth, i.e. group_vars/all.yml. However, it can be overridden from the host vars.
# proxmox__clusters:
# - name: 'pve-london01'
# target: 'proxmox:&london'
# manage_pools: true
# manage_roles: true
# manage_groups: false
# manage_users: false
# manage_acls: false
# - name: 'pve-atlanta01'# (required)
# target: 'proxmox:&atlanta'# (required)
# init: 'pve01-atlanta01'# (optional | default: first of asc sorted inventory_hostname)
# manage_pools: true# (optional | default: false)
# manage_roles: true# (optional | default: false)
# manage_groups: false# (optional | default: false)
# manage_users: false# (optional | default: false)
# manage_acls: false# (optional | default: false)
proxmox__clusters: []

proxmox__repo_url_enterprise: https://enterprise.proxmox.com/debian
proxmox__repo_url_no_subscription: http://download.proxmox.com/debian
proxmox__repo_keys:
bookworm:
url: https://enterprise.proxmox.com/debian/proxmox-release-bookworm.gpg
keyring: /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
bullseye:
url: https://enterprise.proxmox.com/debian/proxmox-release-bullseye.gpg
keyring: /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg

proxmox__purpose_names: ['pve', 'pbs', 'pmg', 'ceph_pacific', 'ceph_quincy', 'ceph_reef', 'zfs']
proxmox__purpose_types: ['enterprise', 'no-subscription']
proxmox__purpose_packages:
pve: ['pve-manager']
pbs: ['proxmox-backup-server']
pmg: ['proxmox-mailgateway']
ceph_pacific: ['ceph', 'ceph-common', 'ceph-mds', 'ceph-fuse']
ceph_quincy: ['ceph', 'ceph-common', 'ceph-mds', 'ceph-fuse']
ceph_reef: ['ceph', 'ceph-common', 'ceph-mds', 'ceph-fuse']
zfs: ['zfsutils-linux', 'zfs-initramfs', 'zfs-zed']

proxmox__default: {}
proxmox__group: {}
proxmox__host: {}

# Example configuration:
# Purposes will be overwritten by the host configuration.
# The type can be set other than the provided list to rule out.
## ZFS Consult with: https://pve.proxmox.com/wiki/ZFS_on_Linux#sysadmin_zfs_limit_memory_usage
## ZFS Consult with: https://pve.proxmox.com/wiki/ZFS_on_Linux#_configure_e_mail_notification
# proxmox__default:
# purposes:
# pve:
# type: no-subscription
# version: latest
# # ------------------------------------------------
# proxmox__host:
# purposes:
# pve:
# type: no-subscription
# version: 8.2.4
# ceph_reef:
# type: no-subscription
# pbs:
# type: no-subscription
# version: 3.2.4-1
# pmg:
# type: no-subscription
# version: 8.1.0
# zfs:
# version: 2.2.4-pve1
# params: 'zfs_arc_max=2147483648'
# zed_email: '[email protected]'
# post_install: "{{ playbook_dir }}/tasks/zfs_post_install.yml"
##### END: proxmox shared configuration
proxmox__default: []
proxmox__group: []
proxmox__host: []
3 changes: 2 additions & 1 deletion roles/proxmox/meta/main.yml
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
#TODO: re-enable geerlingguy.ntp role
dependencies:
- role: aybarsm.helper.ansible
- role: geerlingguy.ntp
# - role: geerlingguy.ntp
1 change: 1 addition & 0 deletions roles/proxmox/tasks/lxc/crud.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
---
154 changes: 8 additions & 146 deletions roles/proxmox/tasks/main.yml
Original file line number Diff line number Diff line change
@@ -1,149 +1,11 @@
---
- name: Include set fact tasks
- name: Include Proxmox QEMU tasks
ansible.builtin.include_tasks:
file: set_facts.yml

- name: Inform when host not cluster eligible
ansible.builtin.debug:
msg: 'Host is not cluster eligible.'
when: not __proxmox__cluster_eligible
delegate_to: localhost

# - name: Inform cluster configuration
# ansible.builtin.debug:
# msg: "{{ __proxmox__cluster }}"
# when: __proxmox__cluster_eligible
# delegate_to: localhost

- name: Import aybarsm.linux.package_manager role
ansible.builtin.import_role:
name: aybarsm.linux.package_manager
file: qemu.yml
loop: "{{ proxmox__all | selectattr('entry__type', 'match', '^qemu_(vm|disk|nic)$') }}"
loop_control:
loop_var: proxmox__qemu_item
index_var: proxmox__qemu_item_index
when:
- proxmox__role_enabled
- proxmox__manage_repo_keys or proxmox__manage_repos or proxmox__manage_packages

- name: Import aybarsm.linux.grub role
ansible.builtin.import_role:
name: aybarsm.linux.grub
when:
- proxmox__role_enabled
- proxmox__manage_grub

- name: Import aybarsm.linux.network role
ansible.builtin.import_role:
name: aybarsm.linux.network
when:
- proxmox__role_enabled
- proxmox__manage_hostname or proxmox__manage_hosts or proxmox__manage_iptables

- name: Import aybarsm.helper.file_mgr role
ansible.builtin.import_role:
name: aybarsm.helper.file_mgr
when:
- proxmox__role_enabled
- proxmox__manage_files

- name: Update grub and initramfs when systemd network and/or grub changed
become: true
ansible.builtin.command:
cmd: "{{ item }}"
with_items:
- update-grub
- proxmox-boot-tool refresh
- update-initramfs -u -k all
when: (grub__deploy is defined and grub__deploy is changed) or
(network__systemd_deploy is defined and network__systemd_deploy is changed)

- name: Reboot node and wait if upgraded, grub, network-hostname, systemd, or interfaces changed
become: true
ansible.builtin.reboot:
test_command: "uptime"
vars:
chk_upgrade: "{{ package_manager__upgrade_deb is defined and package_manager__upgrade_deb is changed }}"
chk_grub: "{{ grub__deploy is defined and grub__deploy is changed }}"
chk_hostname: "{{ network__hostname_deploy is defined and network__hostname_deploy is changed }}"
chk_systemd: "{{ network__systemd_deploy is defined and network__systemd_deploy is changed }}"
chk_interfaces: "{{ network__interfaces_deploy is defined and network__interfaces_deploy is changed }}"
register: proxmox__primary_reboot
when: chk_upgrade or chk_grub or chk_hostname or chk_systemd or chk_interfaces
throttle: 1

- name: Include auth tasks
ansible.builtin.include_tasks:
file: auth.yml
when:
- proxmox__role_enabled
- __proxmox__cluster_eligible

- name: Remove subscription check wrapper function in web UI
become: true
ansible.builtin.lineinfile:
path: /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js
line: ' orig_cmd(); return;'
insertafter: '^\s+checked_command: function\(orig_cmd\) {$'
firstmatch: true
backup: true
when:
- proxmox__role_enabled
- proxmox__remove_subscription_warning

- name: Include Proxmox cluster pool tasks
ansible.builtin.include_tasks:
file: cluster_pools.yml
when:
- proxmox__role_enabled
- __proxmox__cluster_eligible
- __proxmox__cluster.manage_pools | default(false) | bool
- inventory_hostname == __proxmox__cluster.init

- name: Include Proxmox cluster roles tasks
ansible.builtin.include_tasks:
file: cluster_roles.yml
when:
- proxmox__role_enabled
- __proxmox__cluster_eligible
- __proxmox__cluster.manage_roles | default(false) | bool
- inventory_hostname == __proxmox__cluster.init

- name: Include Proxmox cluster groups tasks
ansible.builtin.include_tasks:
file: cluster_groups.yml
when:
- proxmox__role_enabled
- __proxmox__cluster_eligible
- __proxmox__cluster.manage_groups | default(false) | bool
- inventory_hostname == __proxmox__cluster.init

- name: Include Proxmox cluster users tasks
ansible.builtin.include_tasks:
file: cluster_users.yml
when:
- proxmox__role_enabled
- __proxmox__cluster_eligible
- __proxmox__cluster.manage_users | default(false) | bool
- inventory_hostname == __proxmox__cluster.init

- name: Include Proxmox cluster ACLs tasks
ansible.builtin.include_tasks:
file: cluster_acls.yml
when:
- proxmox__role_enabled
- __proxmox__cluster_eligible
- __proxmox__cluster.manage_acls | default(false) | bool
- inventory_hostname == __proxmox__cluster.init


#TODO: Remove old kernel packages
#TODO: IPMI Watchdog

# - name: Gather package facts
# ansible.builtin.package_facts:
# manager: auto
# register: proxmox__package_facts

# - name: Include ZFS tasks
# ansible.builtin.include_tasks:
# file: zfs.yml
# when: proxmox__role_enabled

# #TODO: Kernel module cleanup
- proxmox__role_enabled | default(false) | bool
- proxmox__all | selectattr('entry__type', 'match', '^qemu_(vm|disk|nic)$') | default([]) | length > 0
Loading

0 comments on commit 9c15ca5

Please sign in to comment.