From 07f44ca7f1899b0770be4f695aabf7c3427b5c45 Mon Sep 17 00:00:00 2001
From: Seena Fallah <seenafallah@gmail.com>
Date: Wed, 14 Feb 2024 15:29:03 +0100
Subject: [PATCH 1/4] ceph-config: introduce dedicated cluster config flow
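
Instead of hard-coding the global options in site.yml.sample / site-container.yml.sample,
the cluster-wide settings now live in a single ceph_cluster_conf dict (defaulted in
ceph-defaults and documented in group_vars/all.yml.sample) and are applied by one looped
ceph_config task. The new plugins/filter/dict2dict.py filter flattens that nested
section -> option mapping into pairs the loop can consume. A minimal standalone sketch of
the filter's output (the sample values below are made up):

    # Standalone copy of the dict2dict filter from this patch, plus a tiny
    # illustration of the pairs it yields; the conf values are hypothetical.
    def dict2dict(nested_dict):
        items = []
        for key, value in nested_dict.items():
            for k, v in value.items():
                items.append(
                    (
                        {'key': key, 'value': value},
                        {'key': k, 'value': v},
                    ),
                )
        return items


    conf = {
        'global': {
            'public_network': '192.168.42.0/24',
            'ms_bind_ipv6': 'False',
        },
    }

    for section, option in dict2dict(conf):
        # in the playbook loop: item.0.key / item.1.key / item.1.value
        print(section['key'], option['key'], option['value'])
    # global public_network 192.168.42.0/24
    # global ms_bind_ipv6 False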

Signed-off-by: Seena Fallah <seenafallah@gmail.com>
---
 group_vars/all.yml.sample                | 10 +++++
 plugins/filter/dict2dict.py              | 23 ++++++++++
 roles/ceph-config/tasks/main.yml         | 27 ++++++------
 roles/ceph-config/templates/ceph.conf.j2 | 10 ++---
 roles/ceph-defaults/defaults/main.yml    | 10 +++++
 roles/ceph-rgw/tasks/pre_requisite.yml   |  9 ++--
 site-container.yml.sample                | 53 ++++++------------------
 site.yml.sample                          | 47 ++++++---------------
 8 files changed, 92 insertions(+), 97 deletions(-)
 create mode 100644 plugins/filter/dict2dict.py

diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample
index b13d243dff..88002cbbb7 100644
--- a/group_vars/all.yml.sample
+++ b/group_vars/all.yml.sample
@@ -258,6 +258,16 @@ dummy:
 
 #cephx: true
 
+# Cluster configuration
+#ceph_cluster_conf:
+#  global:
+#    public_network: "{{ public_network | default(omit) }}"
+#    cluster_network: "{{ cluster_network | default(omit) }}"
+#    osd_pool_default_crush_rule: "{{ osd_pool_default_crush_rule }}"
+#    ms_bind_ipv6: "{{ (ip_version == 'ipv6') | string }}"
+#    ms_bind_ipv4: "{{ (ip_version == 'ipv4') | string }}"
+#    osd_crush_chooseleaf_type: "{{ '0' if common_single_host_mode | default(false) else omit }}"
+
 ## Client options
 #
 #rbd_cache: "true"
diff --git a/plugins/filter/dict2dict.py b/plugins/filter/dict2dict.py
new file mode 100644
index 0000000000..5cf842f9e6
--- /dev/null
+++ b/plugins/filter/dict2dict.py
@@ -0,0 +1,23 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class FilterModule(object):
+    ''' Loop over nested dictionaries '''
+
+    def dict2dict(self, nested_dict):
+        items = []
+        for key, value in nested_dict.items():
+            for k, v in value.items():
+                items.append(
+                    (
+                        {'key': key, 'value': value},
+                        {'key': k, 'value': v},
+                    ),
+                )
+        return items
+
+    def filters(self):
+        return {
+            'dict2dict': self.dict2dict
+        }
diff --git a/roles/ceph-config/tasks/main.yml b/roles/ceph-config/tasks/main.yml
index ea8e170b0e..54528cfc55 100644
--- a/roles/ceph-config/tasks/main.yml
+++ b/roles/ceph-config/tasks/main.yml
@@ -97,24 +97,26 @@
 - name: Set osd related config facts
   when: inventory_hostname in groups.get(osd_group_name, [])
   block:
-    - name: Set_fact _osd_memory_target, override from ceph_conf_overrides
-      ansible.builtin.set_fact:
-        _osd_memory_target: "{{ item }}"
-      loop:
-        - "{{ ceph_conf_overrides.get('osd', {}).get('osd memory target', '') }}"
-        - "{{ ceph_conf_overrides.get('osd', {}).get('osd_memory_target', '') }}"
-      when: item
-
-    - name: Set_fact _osd_memory_target
-      ansible.builtin.set_fact:
+    - name: set_fact _osd_memory_target
+      set_fact:
         _osd_memory_target: "{{ ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) | int }}"
       when:
         - _osd_memory_target is undefined
         - num_osds | default(0) | int > 0
         - ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) > (osd_memory_target | float)
 
-- name: Create ceph conf directory
-  ansible.builtin.file:
+    - name: Set osd_memory_target to cluster host config
+      ceph_config:
+        action: set
+        who: "osd.*/{{ ansible_hostname }}:host"
+        option: "osd_memory_target"
+        value: "{{ _osd_memory_target }}"
+      when:
+        - _osd_memory_target is defined
+        - ceph_conf_overrides.get('osd', {}).get('osd_memory_target', '') == ''
+
+- name: create ceph conf directory
+  file:
     path: "/etc/ceph"
     state: directory
     owner: "ceph"
@@ -135,6 +137,7 @@
     owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     mode: "0644"
+    config_overrides: "{{ ceph_conf_overrides }}"
     config_type: ini
   notify:
     - Restart ceph mons
diff --git a/roles/ceph-config/templates/ceph.conf.j2 b/roles/ceph-config/templates/ceph.conf.j2
index 69850ed08e..af5d1cb5de 100644
--- a/roles/ceph-config/templates/ceph.conf.j2
+++ b/roles/ceph-config/templates/ceph.conf.j2
@@ -2,11 +2,11 @@
 # {{ ansible_managed }}
 
 [global]
-#{% if not cephx | bool %}
-#auth cluster required = none
-#auth service required = none
-#auth client required = none
-#{% endif %}
+{% if not cephx | bool %}
+auth cluster required = none
+auth service required = none
+auth client required = none
+{% endif %}
 {# NOTE (leseb): the blank lines in-between are needed otherwise we won't get any line break #}
 
 {% set nb_mon = groups.get(mon_group_name, []) | length | int %}
diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml
index 8f483b9893..6f97e7e992 100644
--- a/roles/ceph-defaults/defaults/main.yml
+++ b/roles/ceph-defaults/defaults/main.yml
@@ -250,6 +250,16 @@ ceph_keyring_permissions: '0600'
 
 cephx: true
 
+# Cluster configuration
+ceph_cluster_conf:
+  global:
+    public_network: "{{ public_network | default(omit) }}"
+    cluster_network: "{{ cluster_network | default(omit) }}"
+    osd_pool_default_crush_rule: "{{ osd_pool_default_crush_rule }}"
+    ms_bind_ipv6: "{{ (ip_version == 'ipv6') | string }}"
+    ms_bind_ipv4: "{{ (ip_version == 'ipv4') | string }}"
+    osd_crush_chooseleaf_type: "{{ '0' if common_single_host_mode | default(false) else omit }}"
+
 ## Client options
 #
 rbd_cache: "true"
diff --git a/roles/ceph-rgw/tasks/pre_requisite.yml b/roles/ceph-rgw/tasks/pre_requisite.yml
index 8af02e15d5..9d30748499 100644
--- a/roles/ceph-rgw/tasks/pre_requisite.yml
+++ b/roles/ceph-rgw/tasks/pre_requisite.yml
@@ -16,11 +16,15 @@
   loop: "{{ hostvars[inventory_hostname]['rgw_instances'] }}"
 
 - name: Set rgw parameter (rgw_frontends)
+  vars:
+    _rgw_binding_socket: "{{ item.radosgw_address | default(_radosgw_address) | string + ':' + item.radosgw_frontend_port | default(radosgw_frontend_port) | string }}"
+    _rgw_beast_endpoint: "{{ 'ssl_' if radosgw_frontend_ssl_certificate else '' }}endpoint={{ _rgw_binding_socket }}"
+    _rgw_beast_ssl_option: "{{ ' ssl_certificate='+radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}"
   ceph_config:
     action: set
     who: "client.rgw.{{ _rgw_hostname + '.' + item.instance_name }}"
     option: "rgw_frontends"
-    value: "beast port={{ item.radosgw_frontend_port | string }}"
+    value: "beast {{ _rgw_beast_endpoint }}{{ _rgw_beast_ssl_option }}"
   environment:
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
     CEPH_CONTAINER_BINARY: "{{ container_binary }}"
@@ -28,9 +32,6 @@
   loop: "{{ hostvars[inventory_hostname]['rgw_instances'] }}"
   notify: Restart ceph rgws
 
-# rgw_frontends
-# {{ 'ssl_' if radosgw_frontend_ssl_certificate else '' }}endpoint={{ _rgw_binding_socket }}{{ ' ssl_certificate='+radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}
-
 - name: Create rados gateway directories
   ansible.builtin.file:
     path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
diff --git a/site-container.yml.sample b/site-container.yml.sample
index a50c079a21..caf45645ec 100644
--- a/site-container.yml.sample
+++ b/site-container.yml.sample
@@ -174,58 +174,29 @@
             end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
 - hosts: mons[0]
-  become: True
+  become: true
   gather_facts: false
   any_errors_fatal: true
   tasks:
-    - import_role:
+    - name: Import default role
+      ansible.builtin.import_role:
         name: ceph-defaults
 
-    - name: set global config
-      ceph_config:
-        action: set
-        who: "global"
-        option: "{{ item.key }}"
-        value: "{{ item.value }}"
-      environment:
-        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-      with_dict:
-        "{{ { 
-           'public_network': public_network | default(False),
-           'cluster_network': cluster_network | default(False),
-           'osd pool default crush rule': osd_pool_default_crush_rule,
-           'ms bind ipv6': 'true' if ip_version == 'ipv6' else 'false',
-           'ms bind ipv4': 'false' if ip_version == 'ipv6' else 'true',
-           'osd crush chooseleaf type': '0' if common_single_host_mode | default(False) | bool else False,
-        } }}"
-      when:
-        - inventory_hostname == ansible_play_hosts_all | last
-        - item.value
-
-    - name: set global config overrides
-      ceph_config:
-        action: set
-        who: "global"
-        option: "{{ item.key }}"
-        value: "{{ item.value }}"
-      environment:
-        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-      when: inventory_hostname == ansible_play_hosts_all | last
-      with_dict: "{{ ceph_conf_overrides['global'] }}"
+    - name: Import config role
+      ansible.builtin.import_role:
+        name: ceph-config
 
-    - name: set osd_memory_target
+    - name: Set clsuter configs
       ceph_config:
         action: set
-        who: "osd.*/{{ item }}:host"
-        option: "osd_memory_target"
-        value: "{{ _osd_memory_target | default(osd_memory_target) }}"
+        who: "{{ item.0.key }}"
+        option: "{{ item.1.key }}"
+        value: "{{ item.1.value }}"
+      when: item.1.value != omit
+      loop: "{{ ceph_cluster_conf | dict2dict }}"
       environment:
         CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
         CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-      when: inventory_hostname == ansible_play_hosts_all | last
-      loop: "{{ groups[osd_group_name] | default([]) }}"
 
 - hosts: osds
   become: True
diff --git a/site.yml.sample b/site.yml.sample
index 8dd7d76e51..2110726001 100644
--- a/site.yml.sample
+++ b/site.yml.sample
@@ -166,49 +166,26 @@
             end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
 - hosts: mons[0]
-  become: True
+  become: true
   gather_facts: false
   any_errors_fatal: true
   tasks:
-    - import_role:
+    - name: Import default role
+      ansible.builtin.import_role:
         name: ceph-defaults
 
-    - name: set global config
-      ceph_config:
-        action: set
-        who: "global"
-        option: "{{ item.key }}"
-        value: "{{ item.value }}"
-      with_dict:
-        "{{ {
-           'public_network': public_network | default(False),
-           'cluster_network': cluster_network | default(False),
-           'osd pool default crush rule': osd_pool_default_crush_rule,
-           'ms bind ipv6': 'true' if ip_version == 'ipv6' else 'false',
-           'ms bind ipv4': 'false' if ip_version == 'ipv6' else 'true',
-           'osd crush chooseleaf type': '0' if common_single_host_mode | default(False) | bool else False,
-        } }}"
-      when:
-        - inventory_hostname == ansible_play_hosts_all | last
-        - item.value
-
-    - name: set global config overrides
-      ceph_config:
-        action: set
-        who: "global"
-        option: "{{ item.key }}"
-        value: "{{ item.value }}"
-      when: inventory_hostname == ansible_play_hosts_all | last
-      with_dict: "{{ ceph_conf_overrides['global'] }}"
+    - name: Import config role
+      ansible.builtin.import_role:
+        name: ceph-config
 
-    - name: set osd_memory_target
+    - name: Set clsuter configs
       ceph_config:
         action: set
-        who: "osd.*/{{ item }}:host"
-        option: "osd_memory_target"
-        value: "{{ _osd_memory_target | default(osd_memory_target) }}"
-      when: inventory_hostname == ansible_play_hosts_all | last
-      loop: "{{ groups[osd_group_name] | default([]) }}"
+        who: "{{ item.0.key }}"
+        option: "{{ item.1.key }}"
+        value: "{{ item.1.value }}"
+      when: item.1.value != omit
+      loop: "{{ ceph_cluster_conf | dict2dict }}"
 
 - hosts: osds
   gather_facts: false

From df424d8aeb56be8c9656f68b156880a927208d5b Mon Sep 17 00:00:00 2001
From: Seena Fallah <seenafallah@gmail.com>
Date: Wed, 14 Feb 2024 21:47:35 +0100
Subject: [PATCH 2/4] ceph-config: allow rgw config to be kept in the config file
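
The per-instance rgw options (log_file and rgw_frontends) are now rendered into a nested
_ceph_ansible_rgw_conf dict inside ceph-config: when rgw_conf_to_cluster (default true)
is set they are pushed to the cluster with ceph_config via the running mon, otherwise
they are merged into ceph_conf_overrides and end up in ceph.conf. A rough Python sketch
of that composition and merge (render_rgw_conf is a made-up helper, the instance values
are examples, and the ssl_ handling is omitted for brevity):

    # Rough equivalent of the Jinja2 set_fact / combine(..., recursive=true) logic.
    def render_rgw_conf(hostname, cluster, instances):
        conf = {}
        for inst in instances:
            section = 'client.rgw.{}.{}'.format(hostname, inst['instance_name'])
            conf[section] = {
                'log_file': '/var/log/ceph/{}-rgw-{}.{}.log'.format(
                    cluster, hostname, inst['instance_name']),
                'rgw_frontends': 'beast endpoint={}:{}'.format(
                    inst['radosgw_address'], inst['radosgw_frontend_port']),
            }
        return conf


    rgw_conf = render_rgw_conf(
        'node1', 'ceph',
        [{'instance_name': 'rgw0',
          'radosgw_address': '192.168.42.10',
          'radosgw_frontend_port': 8080}],
    )

    rgw_conf_to_cluster = False
    ceph_conf_overrides = {'global': {'osd_pool_default_size': '3'}}
    if not rgw_conf_to_cluster:
        # ceph_conf_overrides | combine(_ceph_ansible_rgw_conf, recursive=true)
        for section, options in rgw_conf.items():
            ceph_conf_overrides.setdefault(section, {}).update(options)
    print(ceph_conf_overrides)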

Signed-off-by: Seena Fallah <seenafallah@gmail.com>
---
 roles/ceph-config/tasks/main.yml       | 55 +++++++++++++++++++++++---
 roles/ceph-rgw/tasks/pre_requisite.yml | 33 ----------------
 site-container.yml.sample              |  6 +--
 site.yml.sample                        |  6 +--
 4 files changed, 51 insertions(+), 49 deletions(-)

diff --git a/roles/ceph-config/tasks/main.yml b/roles/ceph-config/tasks/main.yml
index 54528cfc55..e7fbf8e0b0 100644
--- a/roles/ceph-config/tasks/main.yml
+++ b/roles/ceph-config/tasks/main.yml
@@ -97,26 +97,69 @@
 - name: Set osd related config facts
   when: inventory_hostname in groups.get(osd_group_name, [])
   block:
-    - name: set_fact _osd_memory_target
-      set_fact:
+    - name: Set_fact _osd_memory_target
+      ansible.builtin.set_fact:
         _osd_memory_target: "{{ ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) | int }}"
       when:
         - _osd_memory_target is undefined
         - num_osds | default(0) | int > 0
         - ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) > (osd_memory_target | float)
+        - ceph_conf_overrides.get('osd', {}).get('osd_memory_target', '') == ''
 
     - name: Set osd_memory_target to cluster host config
       ceph_config:
         action: set
-        who: "osd.*/{{ ansible_hostname }}:host"
+        who: "osd.*/{{ ansible_facts['hostname'] }}:host"
         option: "osd_memory_target"
         value: "{{ _osd_memory_target }}"
       when:
         - _osd_memory_target is defined
-        - ceph_conf_overrides.get('osd', {}).get('osd_memory_target', '') == ''
+        - running_mon is defined
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+      delegate_to: "{{ running_mon }}"
+
+- name: Set rgw configs
+  when: inventory_hostname in groups.get(rgw_group_name, [])
+  block:
+    - name: Render rgw configs
+      vars:
+        _rgw_binding_socket: "{{ item.radosgw_address | default(_radosgw_address) | string + ':' + item.radosgw_frontend_port | default(radosgw_frontend_port) | string }}"
+        _rgw_beast_endpoint: "{{ 'ssl_' if radosgw_frontend_ssl_certificate else '' }}endpoint={{ _rgw_binding_socket }}"
+        _rgw_beast_ssl_option: "{{ ' ssl_certificate=' + radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}"
+      ansible.builtin.set_fact:
+        _ceph_ansible_rgw_conf: >-
+          {{ _ceph_ansible_rgw_conf | default({}) | combine({
+              'client.rgw.' + ansible_facts['hostname'] + '.' + item.instance_name: {
+                'log_file': '/var/log/ceph/' + cluster + '-rgw-' + ansible_facts['hostname'] + '.' + item.instance_name + '.log',
+                'rgw_frontends': 'beast ' + _rgw_beast_endpoint + _rgw_beast_ssl_option,
+              }
+          }, recursive=true) }}
+      loop: "{{ rgw_instances }}"
+
+    - name: Set config to cluster
+      ceph_config:
+        action: set
+        who: "{{ item.0.key }}"
+        option: "{{ item.1.key }}"
+        value: "{{ item.1.value }}"
+      loop: "{{ _ceph_ansible_rgw_conf | dict2dict }}"
+      when:
+        - rgw_conf_to_cluster | default(true) | bool
+        - running_mon is defined
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+      delegate_to: "{{ running_mon }}"
+
+    - name: Set rgw configs to file
+      ansible.builtin.set_fact:
+        ceph_conf_overrides: "{{ ceph_conf_overrides | default({}) | combine(_ceph_ansible_rgw_conf, recursive=true) }}"
+      when: not rgw_conf_to_cluster | default(true) | bool
 
-- name: create ceph conf directory
-  file:
+- name: Create ceph conf directory
+  ansible.builtin.file:
     path: "/etc/ceph"
     state: directory
     owner: "ceph"
diff --git a/roles/ceph-rgw/tasks/pre_requisite.yml b/roles/ceph-rgw/tasks/pre_requisite.yml
index 9d30748499..a31b88dfb2 100644
--- a/roles/ceph-rgw/tasks/pre_requisite.yml
+++ b/roles/ceph-rgw/tasks/pre_requisite.yml
@@ -1,37 +1,4 @@
 ---
-- name: Set_fact _rgw_hostname
-  ansible.builtin.set_fact:
-    _rgw_hostname: "{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}"
-
-- name: Set rgw parameter (log file)
-  ceph_config:
-    action: set
-    who: "client.rgw.{{ _rgw_hostname + '.' + item.instance_name }}"
-    option: "log file"
-    value: "/var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] + '.' + item.instance_name }}.log"
-  environment:
-    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-  delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
-  loop: "{{ hostvars[inventory_hostname]['rgw_instances'] }}"
-
-- name: Set rgw parameter (rgw_frontends)
-  vars:
-    _rgw_binding_socket: "{{ item.radosgw_address | default(_radosgw_address) | string + ':' + item.radosgw_frontend_port | default(radosgw_frontend_port) | string }}"
-    _rgw_beast_endpoint: "{{ 'ssl_' if radosgw_frontend_ssl_certificate else '' }}endpoint={{ _rgw_binding_socket }}"
-    _rgw_beast_ssl_option: "{{ ' ssl_certificate='+radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}"
-  ceph_config:
-    action: set
-    who: "client.rgw.{{ _rgw_hostname + '.' + item.instance_name }}"
-    option: "rgw_frontends"
-    value: "beast {{ _rgw_beast_endpoint }}{{ _rgw_beast_ssl_option }}"
-  environment:
-    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-  delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
-  loop: "{{ hostvars[inventory_hostname]['rgw_instances'] }}"
-  notify: Restart ceph rgws
-
 - name: Create rados gateway directories
   ansible.builtin.file:
     path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
diff --git a/site-container.yml.sample b/site-container.yml.sample
index caf45645ec..3935f9b20d 100644
--- a/site-container.yml.sample
+++ b/site-container.yml.sample
@@ -182,11 +182,7 @@
       ansible.builtin.import_role:
         name: ceph-defaults
 
-    - name: Import config role
-      ansible.builtin.import_role:
-        name: ceph-config
-
-    - name: Set clsuter configs
+    - name: Set cluster configs
       ceph_config:
         action: set
         who: "{{ item.0.key }}"
diff --git a/site.yml.sample b/site.yml.sample
index 2110726001..e5bd9de616 100644
--- a/site.yml.sample
+++ b/site.yml.sample
@@ -174,11 +174,7 @@
       ansible.builtin.import_role:
         name: ceph-defaults
 
-    - name: Import config role
-      ansible.builtin.import_role:
-        name: ceph-config
-
-    - name: Set clsuter configs
+    - name: Set cluster configs
       ceph_config:
         action: set
         who: "{{ item.0.key }}"

From fae88ef12922f6e403604ed1e3b15400bf24c201 Mon Sep 17 00:00:00 2001
From: Seena Fallah <seenafallah@gmail.com>
Date: Fri, 16 Feb 2024 11:30:07 +0100
Subject: [PATCH 3/4] ceph-handler: remove tempdir when all handlers are done
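
The cleanup handler now removes the temporary directory only once no restart handler is
still pending, i.e. none of the *_handler_called flags is still set. The equivalent
check, as a tiny Python sketch (flag names taken from the diff below):

    # The tmpdir is only removed when every handler flag is back to false;
    # a flag still set to true means that daemon's restart handler has not run yet.
    handler_flags = {
        '_crash_handler_called': False,
        '_mds_handler_called': False,
        '_mgr_handler_called': False,
        '_mon_handler_called': False,
        '_nfs_handler_called': False,
        '_osd_handler_called': False,
        '_rbdmirror_handler_called': False,
        '_rgw_handler_called': False,
    }
    if not any(handler_flags.values()):
        print('all handlers done, safe to remove the tmpdir')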

Signed-off-by: Seena Fallah <seenafallah@gmail.com>
---
 roles/ceph-handler/handlers/main.yml | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/roles/ceph-handler/handlers/main.yml b/roles/ceph-handler/handlers/main.yml
index a806235eba..d634b9dd87 100644
--- a/roles/ceph-handler/handlers/main.yml
+++ b/roles/ceph-handler/handlers/main.yml
@@ -78,4 +78,13 @@
         - "Restart ceph rbdmirrors"
         - "Restart ceph mgrs"
       register: tmpdirpath
-      when: tmpdirpath.path is defined
+      when:
+        - tmpdirpath.path is defined
+        - not _crash_handler_called | default(false) | bool
+        - not _mds_handler_called | default(false) | bool
+        - not _mgr_handler_called | default(false) | bool
+        - not _mon_handler_called | default(false) | bool
+        - not _nfs_handler_called | default(false) | bool
+        - not _osd_handler_called | default(false) | bool
+        - not _rbdmirror_handler_called | default(false) | bool
+        - not _rgw_handler_called | default(false) | bool

From 70e7ae6304339358e3cd0719317c0b9a93c7f53d Mon Sep 17 00:00:00 2001
From: Seena Fallah <seenafallah@gmail.com>
Date: Fri, 16 Feb 2024 14:58:10 +0100
Subject: [PATCH 4/4] ceph-handler: restart only existing rgw daemons

This is needed for new instances, as the restart handler might be triggered before the
daemon has been deployed.
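
As a standalone illustration of the guard added to restart_rgw_daemon.sh.j2, expressed
roughly in Python (the unit name is hypothetical):

    # Only restart the radosgw unit if systemd already knows about it,
    # mirroring the "systemctl list-units | grep" check in the template.
    import subprocess

    def restart_if_unit_exists(unit):
        units = subprocess.run(
            ['systemctl', 'list-units', '--full', '--all'],
            capture_output=True, text=True, check=True,
        ).stdout
        if unit in units:
            subprocess.run(['systemctl', 'restart', unit], check=True)
            return True
        print('Systemd unit {} does not exist.'.format(unit))
        return False

    restart_if_unit_exists('ceph-radosgw@rgw.node1.rgw0.service')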

Signed-off-by: Seena Fallah <seenafallah@gmail.com>
---
 roles/ceph-handler/templates/restart_rgw_daemon.sh.j2 | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/roles/ceph-handler/templates/restart_rgw_daemon.sh.j2 b/roles/ceph-handler/templates/restart_rgw_daemon.sh.j2
index 93eda64b8b..416213782c 100644
--- a/roles/ceph-handler/templates/restart_rgw_daemon.sh.j2
+++ b/roles/ceph-handler/templates/restart_rgw_daemon.sh.j2
@@ -74,7 +74,16 @@ check_rest() {
 
 for ((i=0; i<${RGW_NUMS}; i++)); do
   # First, restart the daemon
-  systemctl restart ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]}
+
+  # Check if systemd unit exists
+  # This is needed for new instances, as the restart might be triggered before the deployment
+  if systemctl list-units --full --all | grep -q "ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]}"; then
+    systemctl restart ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]}
+  else
+    echo "Systemd unit ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]} does not exist."
+    continue
+  fi
+
   # Check socket files
   check_socket ${i}
   # Check rest