|
# Extend the existing volume group with the SSD (assuming SSD is used for
# caching)

# Add the cache disk (SSD) as a PV of the existing volume group so cache
# LVs can be carved out of it. 256K data alignment matches the alignment
# used elsewhere in gluster_infra for RAID/SSD devices.
- name: Extend volume group
  lvg:
    state: present
    vg: "{{ item.vgname }}"
    pvs: "{{ item.cachedisk }}"
    pv_options: "--dataalignment 256K"
  with_items: "{{ gluster_infra_cache_vars }}"

|
# Disable zeroing on the thinpool so lvconvert to a cached LV does not
# get blocked by the zeroing pass (--zero n).
- name: Change attributes of LV
  lvol:
    state: present
    vg: "{{ item.vgname }}"
    thinpool: "{{ item.cachethinpoolname }}"
    opts: " --zero n "
  with_items: "{{ gluster_infra_cache_vars }}"

|
# Create the (fast-device) LV that will later be converted into the
# cache pool's data LV.
- name: Create LV for cache
  lvol:
    state: present
    vg: "{{ item.vgname }}"
    lv: "{{ item.cachelvname }}"
    size: "{{ item.cachelvsize }}"
  with_items: "{{ gluster_infra_cache_vars }}"

|
# Optional separate metadata LV for the cache pool; only created when the
# user supplied cachemetalvname (otherwise LVM auto-allocates metadata).
- name: Create metadata LV for cache
  lvol:
    state: present
    vg: "{{ item.vgname }}"
    lv: "{{ item.cachemetalvname }}"
    size: "{{ item.cachemetalvsize }}"
  with_items: "{{ gluster_infra_cache_vars }}"
  when: item.cachemetalvname is defined

|
# Combine the cache data LV and (optional) metadata LV into a cache-pool.
# --poolmetadataspare n: skip the spare metadata LV to save space;
# cachemode defaults to the safe 'writethrough' unless overridden.
- name: Convert logical volume to a cache pool LV
  command: >
    lvconvert -y --type cache-pool --poolmetadata {{ item.cachemetalvname }}
    --poolmetadataspare n
    --cachemode {{ item.cachemode | default('writethrough') }}
    "/dev/{{ item.vgname }}/{{ item.cachelvname }}"
  with_items: "{{ gluster_infra_cache_vars }}"
  when: item.cachemetalvname is defined

# It is valid not to have cachemetalvname! Writing a separate task not to
# complicate things.
- name: Convert logical volume to a cache pool LV without cachemetalvname
  command: >
    lvconvert -y --type cache-pool
    --poolmetadataspare n
    --cachemode {{ item.cachemode | default('writethrough') }}
    "/dev/{{ item.vgname }}/{{ item.cachelvname }}"
  with_items: "{{ gluster_infra_cache_vars }}"
  when: item.cachemetalvname is not defined

|
# Run lvs -a -o +devices to see the cache settings
# Attach the cache pool to the thinpool LV, turning it into a cached LV.
- name: Convert an existing logical volume to a cache LV
  command: >
    lvconvert -y --type cache --cachepool "/dev/{{ item.vgname }}/{{ item.cachelvname }}"
    "/dev/{{ item.vgname }}/{{ item.cachethinpoolname }}"
  with_items: "{{ gluster_infra_cache_vars }}"