-
Notifications
You must be signed in to change notification settings - Fork 270
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
ansible lint fixes #658
ansible lint fixes #658
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,27 +1,12 @@ | ||
#!/bin/sh -x | ||
|
||
# Search for playbooks within specified directories (one level only) | ||
PLABOOKS_DIR="packaging/ansible-runner-service-project/project" | ||
|
||
# Directory with roles | ||
ROLES_DIR="packaging/ansible-runner-service-project/project/roles" | ||
|
||
SRCDIR="$(dirname "$0")/.." | ||
|
||
ANSIBLE_LINT=/usr/bin/ansible-lint | ||
ANSIBLE_LINT_CONF="$(dirname "$0")/ansible-lint.conf" | ||
|
||
if ! which "${ANSIBLE_LINT}" > /dev/null 2>&1; then | ||
echo "WARNING: tool '${ANSIBLE_LINT}' is missing" >&2 | ||
# Check if the ansible-lint binary exists | ||
if ! command -v ansible-lint > /dev/null 2>&1; then | ||
echo "WARNING: tool 'ansible-lint' is missing" >&2 | ||
exit 0 | ||
fi | ||
|
||
cd "${SRCDIR}" | ||
|
||
# Find playbooks | ||
PARAMS=$(find ${PLABOOKS_DIR} -type f -name '*.yml' -maxdepth 1) | ||
|
||
# Find roles | ||
PARAMS="$PARAMS $(find ${ROLES_DIR} -type d -maxdepth 1)" | ||
|
||
${ANSIBLE_LINT} -c ${ANSIBLE_LINT_CONF} ${PARAMS} | ||
# Run ansible-lint | ||
ansible-lint -c ${ANSIBLE_LINT_CONF} packaging/ansible-runner-service-project/project/roles/* |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,5 +1,10 @@ | ||
skip_list: | ||
# [E701]: "meta/main.yml should contain relevant info" | ||
# meta-no-info: "meta/main.yml should contain relevant info" | ||
# Roles in ovirt-engine are not intended to be used/imported by Ansible Galaxy | ||
- '701' | ||
|
||
- 'meta-no-info' | ||
# role-name: Role name does not match ``^[a-z][a-z0-9_]*$`` pattern. | ||
- 'role-name' | ||
# name: All tasks should be named. (name[missing])' | ||
- 'name' | ||
# no-changed-when: Commands should not change things if nothing needs doing. | ||
- 'no-changed-when' |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,7 +1,7 @@ | ||
--- | ||
# rc 1 = Device or resource busy | ||
- name: Clean up filesystem signature | ||
command: wipefs -a {{ item }} | ||
ansible.builtin.command: wipefs -a {{ item }} | ||
with_items: "{{ disks | default([]) }}" | ||
when: wipefs == 'yes' and item is defined | ||
register: resp | ||
|
@@ -11,25 +11,25 @@ | |
# needed if we can always assume 256K for JBOD, however we provide this extra | ||
# variable to override it. | ||
- name: Set PV data alignment for JBOD | ||
set_fact: | ||
ansible.builtin.set_fact: | ||
pv_dataalign: "{{ gluster_infra_dalign | default('256K') }}" | ||
when: disktype|upper in ['NONE', 'RAID0'] | ||
when: disktype | upper in ['NONE', 'RAID0'] | ||
|
||
# Set data alignment for RAID | ||
# We need KiB: ensure to keep the trailing `K' in the pv_dataalign calculation. | ||
- name: Set PV data alignment for RAID | ||
set_fact: | ||
pv_dataalign: "{{ diskcount|int * stripesize|int }}K" | ||
when: disktype|upper in ['RAID6', 'RAID10'] | ||
ansible.builtin.set_fact: | ||
pv_dataalign: "{{ diskcount | int * stripesize | int }}K" | ||
when: disktype | upper in ['RAID6', 'RAID10'] | ||
|
||
- name: Set VG physical extent size for RAID | ||
set_fact: | ||
vg_pesize: "{{ diskcount|int * stripesize|int }}K" | ||
when: disktype|upper in ['RAID6', 'RAID10'] | ||
ansible.builtin.set_fact: | ||
vg_pesize: "{{ diskcount | int * stripesize | int }}K" | ||
when: disktype | upper in ['RAID6', 'RAID10'] | ||
|
||
# rc 3 = already exists in filesystem | ||
- name: Create volume groups | ||
command: "vgcreate --dataalignment {{ pv_dataalign }} -s {{ vg_pesize | default(4) }} {{ vgname }} {{ disks | join(' ') }}" | ||
ansible.builtin.command: "vgcreate --dataalignment {{ pv_dataalign }} -s {{ vg_pesize | default(4) }} {{ vgname }} {{ disks | join(' ') }}" | ||
register: resp | ||
failed_when: resp.rc not in [0, 3] | ||
changed_when: resp.rc == 0 | ||
|
@@ -42,26 +42,28 @@ | |
# to full_stripe_size | ||
# | ||
- name: Calculate chunksize for RAID6/RAID10 | ||
set_fact: | ||
lv_chunksize: "{{ stripesize|int * diskcount|int }}K" | ||
when: disktype|upper in ['RAID6', 'RAID10'] | ||
ansible.builtin.set_fact: | ||
lv_chunksize: "{{ stripesize | int * diskcount | int }}K" | ||
when: disktype | upper in ['RAID6', 'RAID10'] | ||
|
||
# For JBOD the thin pool chunk size is set to 256 KiB. | ||
- name: Set chunksize for JBOD | ||
set_fact: | ||
ansible.builtin.set_fact: | ||
lv_chunksize: '256K' | ||
when: disktype|upper in ['NONE', 'RAID0'] | ||
when: disktype | upper in ['NONE', 'RAID0'] | ||
|
||
# rc 5 = Logical Volume 'name' already exists in volume group. | ||
- name: Create a LV thinpool | ||
command: "lvcreate -l 100%FREE --chunksize {{ lv_chunksize }} --poolmetadatasize {{ pool_metadatasize }} --zero n --type thin-pool --thinpool {{ lvname }}_pool {{ vgname }}" | ||
ansible.builtin.command: > | ||
"lvcreate -l 100%FREE --chunksize {{ lv_chunksize }} --poolmetadatasize {{ pool_metadatasize }} --zero n | ||
--type thin-pool --thinpool {{ lvname }}_pool {{ vgname }}" | ||
register: resp | ||
failed_when: resp.rc not in [0, 5] | ||
changed_when: resp.rc == 0 | ||
|
||
# rc 5 = Logical Volume 'name' already exists in volume group. | ||
- name: Create thin logical volume | ||
command: "lvcreate -T {{ vgname }}/{{ lvname }}_pool -V {{ size }} -n {{ lvname }}" | ||
ansible.builtin.command: "lvcreate -T {{ vgname }}/{{ lvname }}_pool -V {{ size }} -n {{ lvname }}" | ||
register: resp | ||
failed_when: resp.rc not in [0, 5] | ||
changed_when: resp.rc == 0 | ||
|
@@ -71,15 +73,18 @@ | |
|
||
# rc 1 = Filesystem already exists | ||
- name: Create an xfs filesystem | ||
command: "mkfs.xfs -f -K -i size=512 -n size=8192 {% if 'raid' in disktype %} -d sw={{ diskcount }},su={{ stripesize }}k {% endif %} /dev/{{ vgname }}/{{ lvname }}" | ||
ansible.builtin.command: > | ||
"mkfs.xfs -f -K -i size=512 -n size=8192 {% if 'raid' in disktype %} -d sw={{ diskcount }},su={{ stripesize }}k | ||
{% endif %} /dev/{{ vgname }}/{{ lvname }}" | ||
register: resp | ||
failed_when: resp.rc not in [0, 1] | ||
changed_when: resp.rc == 0 | ||
|
||
- name: Create the backend directory, skips if present | ||
file: | ||
ansible.builtin.file: | ||
path: "{{ mntpath }}" | ||
state: directory | ||
mode: '755' | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Better to put possibly functional changes into a separate commit or PR. |
||
|
||
- name: Mount the brick | ||
mount: | ||
|
@@ -90,6 +95,6 @@ | |
state: mounted | ||
|
||
- name: Set SELinux labels on the bricks | ||
command: "chcon -t glusterd_brick_t {{ mntpath }}" | ||
ansible.builtin.command: "chcon -t glusterd_brick_t {{ mntpath }}" | ||
register: resp | ||
changed_when: resp.rc == 0 |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,2 +1,2 @@ | ||
--- | ||
# defaults file for replace_node | ||
# defaults file for replace_node |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -2,16 +2,16 @@ | |
# Peer restoration | ||
# Create tmp dir for storing peer data | ||
- name: Create temporary storage directory | ||
tempfile: | ||
ansible.builtin.tempfile: | ||
state: directory | ||
suffix: _peer | ||
register: tmpdir | ||
delegate_to: localhost | ||
run_once: True | ||
run_once: true | ||
|
||
# Set the glusterd location | ||
- name: Set the path of glusterd.info file | ||
set_fact: | ||
ansible.builtin.set_fact: | ||
glusterd_libdir: "/var/lib/glusterd" | ||
peer_tmp_dir: "{{ tmpdir['path'] }}" | ||
|
||
|
@@ -36,28 +36,27 @@ | |
|
||
# Detach the old host, to replace host with different FQDN usecase | ||
- name: Detach the peer, in the case of different host replacement | ||
command: "gluster peer detach {{ gluster_maintenance_old_node }} force" | ||
ansible.builtin.command: "gluster peer detach {{ gluster_maintenance_old_node }} force" | ||
when: gluster_maintenance_old_node != gluster_maintenance_new_node | ||
|
||
- name: Force removal of old node peer in new node | ||
file: | ||
ansible.builtin.file: | ||
path: "{{ glusterd_libdir }}/peers/{{ old_node_uuid.stdout | trim }}" | ||
state: absent | ||
when: gluster_maintenance_old_node != gluster_maintenance_new_node | ||
delegate_to : "{{ gluster_maintenance_new_node }}" | ||
delegate_to: "{{ gluster_maintenance_new_node }}" | ||
connection: ssh | ||
|
||
- name: Restart glusterd on the new node | ||
connection: ssh | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Why was it here (and in the other places below), and why do we remove it now? Better to do this in a commit separate from the purely formal changes. |
||
service: | ||
ansible.builtin.service: | ||
name: glusterd | ||
state: restarted | ||
delegate_to: "{{ gluster_new_node }}" | ||
|
||
# Ensure to delete the temporary directory | ||
- name: Delete the temporary directory | ||
file: | ||
ansible.builtin.file: | ||
state: absent | ||
path: "{{ peer_tmp_dir }}" | ||
delegate_to: localhost | ||
run_once: True | ||
run_once: true |
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
Are we not checking playbooks anymore?