Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

ceph_osd_flag: support setting noout flag at osd or bucket level #6147

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/pytest.yml
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,6 @@ jobs:
python-version: ${{ matrix.python-version }}
architecture: x64
- run: pip install -r tests/requirements.txt
- run: pytest --cov=library/ --cov=module_utils/ --cov=plugins/filter/ -vvvv tests/library/ tests/module_utils/ tests/plugins/filter/
- run: pytest --cov=library/ --cov=module_utils/ --cov=plugins/filter/ --cov-report term-missing -vvvv tests/library/ tests/module_utils/ tests/plugins/filter/
env:
PYTHONPATH: "$PYTHONPATH:/home/runner/work/ceph-ansible/ceph-ansible/library:/home/runner/work/ceph-ansible/ceph-ansible/module_utils:/home/runner/work/ceph-ansible/ceph-ansible/plugins/filter:/home/runner/work/ceph-ansible/ceph-ansible"
31 changes: 20 additions & 11 deletions infrastructure-playbooks/cephadm-adopt.yml
Original file line number Diff line number Diff line change
Expand Up @@ -694,8 +694,8 @@
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'


- name: set osd flags
hosts: "{{ osd_group_name|default('osds') }}"
- name: set osd flag nodeep-scrub
hosts: "{{ mon_group_name|default('mons') }}[0]"
become: true
gather_facts: false
any_errors_fatal: True
Expand All @@ -707,15 +707,13 @@
command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
register: pool_list
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
check_mode: false

- name: get balancer module status
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
register: balancer_status_adopt
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
check_mode: false

Expand All @@ -738,7 +736,6 @@
cluster: "{{ cluster }}"
pg_autoscale_mode: false
with_items: "{{ pools_pgautoscaler_mode }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when:
- pools_pgautoscaler_mode is defined
Expand All @@ -755,7 +752,6 @@
with_items:
- noout
- nodeep-scrub
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
Expand All @@ -776,6 +772,12 @@
tasks_from: container_binary.yml
when: containerized_deployment | bool

- name: set osd flag noout
command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} osd set-group noout {{ inventory_hostname }}"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

- name: get osd list
ceph_volume:
cluster: "{{ cluster }}"
Expand Down Expand Up @@ -850,8 +852,14 @@
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

- name: unset osd flags
hosts: "{{ osd_group_name|default('osds') }}"
- name: unset osd flag noout
command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} osd unset-group noout {{ inventory_hostname }}"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

- name: unset osd flag nodeep-scrub
hosts: "{{ mon_group_name|default('mons') }}[0]"
become: true
gather_facts: false
any_errors_fatal: True
Expand All @@ -865,7 +873,6 @@
cluster: "{{ cluster }}"
pg_autoscale_mode: true
with_items: "{{ pools_pgautoscaler_mode }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when:
- pools_pgautoscaler_mode is defined
Expand All @@ -882,16 +889,18 @@
with_items:
- noout
- nodeep-scrub
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true

- name: unset osd flag nodeep-scrub
command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} osd unset nodeep-scrub"
changed_when: false
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"

- name: re-enable balancer
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: (balancer_status_adopt.stdout | from_json)['active'] | bool

Expand Down
47 changes: 43 additions & 4 deletions infrastructure-playbooks/rolling_update.yml
Original file line number Diff line number Diff line change
Expand Up @@ -412,9 +412,8 @@
name: ceph-mgr


- name: set osd flags
hosts: "{{ osd_group_name | default('osds') }}"
tags: osds
- name: set osd flag nodeep-scrub
hosts: "{{ mon_group_name | default('mons') }}[0]"
become: True
gather_facts: false
tasks:
Expand All @@ -426,7 +425,6 @@

- name: set osd flags, disable autoscaler and balancer
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
block:
- name: get pool list
command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
Expand Down Expand Up @@ -475,6 +473,14 @@
- noout
- nodeep-scrub

- name: set osd flag nodeep-scrub
ceph_osd_flag:
name: "nodeep-scrub"
cluster: "{{ cluster }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"

- name: upgrade ceph osds cluster
vars:
health_osd_check_retries: 600
Expand All @@ -491,6 +497,17 @@
- import_role:
name: ceph-facts

- name: set osd flag noout
ceph_osd_flag:
name: noout
level: bucket
bucket: "{{ inventory_hostname }}"
cluster: "{{ cluster }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"

- name: get osd numbers - non container
shell: if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi # noqa 306
register: osd_ids
Expand Down Expand Up @@ -554,6 +571,18 @@
retries: "{{ health_osd_check_retries }}"
delay: "{{ health_osd_check_delay }}"

- name: unset osd flag noout
ceph_osd_flag:
name: noout
state: absent
level: bucket
bucket: "{{ inventory_hostname }}"
cluster: "{{ cluster }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"


- name: complete osd upgrade
hosts: "{{ osd_group_name | default('osds') }}"
Expand Down Expand Up @@ -601,6 +630,16 @@
changed_when: false
when: (balancer_status_update.stdout | from_json)['active'] | bool

- name: unset osd flag nodeep-scrub
ceph_osd_flag:
name: "nodeep-scrub"
cluster: "{{ cluster }}"
state: absent
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"


- name: upgrade ceph mdss cluster, deactivate all rank > 0
hosts: "{{ mon_group_name | default('mons') }}[0]"
tags: mdss
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -213,7 +213,7 @@
name: ceph-mgr


- name: set osd flags
- name: set osd flag nodeep-scrub
hosts: "{{ mon_group_name | default('mons') }}[0]"
become: True
tasks:
Expand Down Expand Up @@ -258,16 +258,13 @@
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"

- name: set osd flags
- name: set osd flag nodeep-scrub
ceph_osd_flag:
name: "{{ item }}"
name: "nodeep-scrub"
cluster: "{{ cluster }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items:
- noout
- nodeep-scrub


- name: switching from non-containerized to containerized ceph osd
Expand All @@ -286,6 +283,21 @@
- import_role:
name: ceph-defaults

- import_role:
name: ceph-facts
tasks_from: container_binary.yml

- name: set osd flag noout
ceph_osd_flag:
name: noout
level: bucket
bucket: "{{ inventory_hostname }}"
cluster: "{{ cluster }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"

- name: collect running osds
shell: |
set -o pipefail;
Expand Down Expand Up @@ -398,8 +410,19 @@
delay: "{{ health_osd_check_delay }}"
changed_when: false

- name: unset osd flag noout
ceph_osd_flag:
name: noout
state: absent
level: bucket
bucket: "{{ inventory_hostname }}"
cluster: "{{ cluster }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"

- name: unset osd flags
- name: unset osd flag nodeep-scrub
hosts: "{{ mon_group_name | default('mons') }}[0]"
become: True
tasks:
Expand All @@ -422,17 +445,14 @@
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"

- name: unset osd flags
- name: unset osd flag nodeep-scrub
ceph_osd_flag:
name: "{{ item }}"
name: "nodeep-scrub"
cluster: "{{ cluster }}"
state: absent
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items:
- noout
- nodeep-scrub

- name: re-enable balancer
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
Expand Down
60 changes: 55 additions & 5 deletions library/ceph_osd_flag.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,8 +45,25 @@
description:
- name of the ceph OSD flag.
required: true
choices: ['noup', 'nodown', 'noout', 'nobackfill', 'norebalance',
'norecover', 'noscrub', 'nodeep-scrub']
choices: ['noup', 'nodown', 'noout', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub']
level:
description:
- This is applicable only when 'name' is 'noout'.
This flag can be applied at several levels:
1/ at the whole cluster level
2/ at the bucket level
3/ at the osd.X level
required: false
choices: ['osd', 'bucket', 'cluster']
default: 'cluster'
osd:
description:
- pass the osd when 'level' is 'osd'
required: false
bucket:
description:
- pass the bucket name when 'level' is 'bucket'
required: false
cluster:
description:
- The ceph cluster name.
Expand Down Expand Up @@ -75,6 +92,19 @@
loop:
- 'noup'
- 'norebalance'

- name: set noout flag on osd.123
  ceph_osd_flag:
    name: noout
    level: osd
    osd: osd.123

- name: unset noout flag on bucket 'host-456'
  ceph_osd_flag:
    state: absent
    name: noout
    level: bucket
    bucket: host-456
'''

RETURN = '''# '''
Expand All @@ -84,24 +114,44 @@ def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True, choices=['noup', 'nodown', 'noout', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub']), # noqa: E501
level=dict(type='str', required=False, default='cluster', choices=['cluster', 'bucket', 'osd']),
osd=dict(type='str', required=False),
bucket=dict(type='str', required=False),
cluster=dict(type='str', required=False, default='ceph'),
state=dict(type='str', required=False, default='present', choices=['present', 'absent']), # noqa: E501
),
supports_check_mode=True,
required_if=[
['level', 'osd', ['osd']],
['level', 'bucket', ['bucket']]
]
)

name = module.params.get('name')
level = module.params.get('level')
osd = module.params.get('osd')
bucket = module.params.get('bucket')
cluster = module.params.get('cluster')
state = module.params.get('state')

startd = datetime.datetime.now()

container_image = is_containerized()

if state == 'present':
cmd = generate_ceph_cmd(['osd', 'set'], [name], cluster=cluster, container_image=container_image) # noqa: E501
osd_sub_cmd = ['osd']
if name == 'noout' and level in ['osd', 'bucket']:
if level == 'osd':
action = ['add-noout'] if state == 'present' else ['rm-noout']
name = osd
if level == 'bucket':
action = ['set-group', 'noout'] if state == 'present' else ['unset-group', 'noout']
name = bucket
osd_sub_cmd.extend(action)

else:
cmd = generate_ceph_cmd(['osd', 'unset'], [name], cluster=cluster, container_image=container_image) # noqa: E501
osd_sub_cmd.extend(['set']) if state == 'present' else osd_sub_cmd.extend(['unset'])

cmd = generate_ceph_cmd(osd_sub_cmd, [name], cluster=cluster, container_image=container_image)

if module.check_mode:
exit_module(
Expand Down
Loading